diff --git a/.gitattributes b/.gitattributes index 593d5399ad9722d11bb64314d9ec1430ad04843d..8a7cd01d7414d7702f628f51497c60dd33543b12 100644 --- a/.gitattributes +++ b/.gitattributes @@ -3134,3 +3134,4 @@ platform/dbops/binaries/build/bin/m4 filter=lfs diff=lfs merge=lfs -text platform/dbops/binaries/build/bin/bison filter=lfs diff=lfs merge=lfs -text platform/dbops/binaries/build/bin/flex filter=lfs diff=lfs merge=lfs -text platform/dbops/binaries/build/bin/flex++ filter=lfs diff=lfs merge=lfs -text +platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/compression_tests/fixtures/restart-from-zero-segments/1234567 filter=lfs diff=lfs merge=lfs -text diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/stopwords/detector.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/stopwords/detector.go new file mode 100644 index 0000000000000000000000000000000000000000..25c0d49af2ff4dd9597df30f4add929a598785e0 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/stopwords/detector.go @@ -0,0 +1,89 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package stopwords + +import ( + "sync" + + "github.com/weaviate/weaviate/entities/models" + + "github.com/pkg/errors" +) + +type StopwordDetector interface { + IsStopword(string) bool +} + +type Detector struct { + sync.Mutex + stopwords map[string]struct{} +} + +func NewDetectorFromConfig(config models.StopwordConfig) (*Detector, error) { + d, err := NewDetectorFromPreset(config.Preset) + if err != nil { + return nil, errors.Wrap(err, "failed to create new detector from config") + } + + d.SetAdditions(config.Additions) + d.SetRemovals(config.Removals) + + return d, nil +} + +func NewDetectorFromPreset(preset string) (*Detector, error) { + var list []string + var ok bool + + if preset != "" { + list, ok = Presets[preset] + if !ok { + return nil, errors.Errorf("preset %q not known to stopword detector", preset) + } + } + + d := &Detector{ + stopwords: map[string]struct{}{}, + } + + for _, word := range list { + d.stopwords[word] = struct{}{} + } + + return d, nil +} + +func (d *Detector) SetAdditions(additions []string) { + d.Lock() + defer d.Unlock() + + for _, add := range additions { + d.stopwords[add] = struct{}{} + } +} + +func (d *Detector) SetRemovals(removals []string) { + d.Lock() + defer d.Unlock() + + for _, rem := range removals { + delete(d.stopwords, rem) + } +} + +func (d *Detector) IsStopword(word string) bool { + d.Lock() + defer d.Unlock() + + _, ok := d.stopwords[word] + return ok +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/stopwords/detector_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/stopwords/detector_test.go new file mode 100644 index 0000000000000000000000000000000000000000..92ba09f4ed3131fb9cf1589470cf5c79d038b8f4 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/stopwords/detector_test.go @@ -0,0 +1,159 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// 
\ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package stopwords + +import ( + "testing" + + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/models" +) + +func TestStopwordDetector(t *testing.T) { + type testcase struct { + cfg models.StopwordConfig + input []string + expectedCountable int + } + + runTest := func(t *testing.T, tests []testcase) { + for _, test := range tests { + sd, err := NewDetectorFromConfig(test.cfg) + require.Nil(t, err) + + var result []string + for _, word := range test.input { + if !sd.IsStopword(word) { + result = append(result, word) + } + } + require.Equal(t, test.expectedCountable, len(result)) + } + } + + t.Run("with en preset, additions", func(t *testing.T) { + tests := []testcase{ + { + cfg: models.StopwordConfig{ + Preset: "en", + Additions: []string{"dog"}, + }, + input: []string{"dog", "dog", "dog", "dog"}, + expectedCountable: 0, + }, + { + cfg: models.StopwordConfig{ + Preset: "en", + Additions: []string{"dog"}, + }, + input: []string{"dog", "dog", "dog", "cat"}, + expectedCountable: 1, + }, + { + cfg: models.StopwordConfig{ + Preset: "en", + Additions: []string{"dog"}, + }, + input: []string{"a", "dog", "is", "the", "best"}, + expectedCountable: 1, + }, + } + + runTest(t, tests) + }) + + t.Run("with no preset, additions", func(t *testing.T) { + tests := []testcase{ + { + cfg: models.StopwordConfig{ + Preset: "none", + Additions: []string{"dog"}, + }, + input: []string{"a", "dog", "is", "the", "best"}, + expectedCountable: 4, + }, + } + + runTest(t, tests) + }) + + t.Run("with en preset, removals", func(t *testing.T) { + tests := []testcase{ + { + cfg: models.StopwordConfig{ + Preset: "en", + Removals: []string{"a"}, + }, + input: []string{"a", "dog", "is", "the", "best"}, + expectedCountable: 3, + }, + { + cfg: models.StopwordConfig{ + Preset: "en", + 
Removals: []string{"a", "is", "the"}, + }, + input: []string{"a", "dog", "is", "the", "best"}, + expectedCountable: 5, + }, + } + + runTest(t, tests) + }) + + t.Run("with en preset, removals", func(t *testing.T) { + tests := []testcase{ + { + cfg: models.StopwordConfig{ + Preset: "en", + Removals: []string{"a"}, + }, + input: []string{"a", "dog", "is", "the", "best"}, + expectedCountable: 3, + }, + { + cfg: models.StopwordConfig{ + Preset: "en", + Removals: []string{"a", "is", "the"}, + }, + input: []string{"a", "dog", "is", "the", "best"}, + expectedCountable: 5, + }, + } + + runTest(t, tests) + }) + + t.Run("with en preset, additions, removals", func(t *testing.T) { + tests := []testcase{ + { + cfg: models.StopwordConfig{ + Preset: "en", + Additions: []string{"dog"}, + Removals: []string{"a"}, + }, + input: []string{"a", "dog", "is", "the", "best"}, + expectedCountable: 2, + }, + { + cfg: models.StopwordConfig{ + Preset: "en", + Additions: []string{"dog", "best"}, + Removals: []string{"a", "the", "is"}, + }, + input: []string{"a", "dog", "is", "the", "best"}, + expectedCountable: 3, + }, + } + + runTest(t, tests) + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/stopwords/presets.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/stopwords/presets.go new file mode 100644 index 0000000000000000000000000000000000000000..12cb594eef3c89ad0a39522c47923c78cd7d9049 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/stopwords/presets.go @@ -0,0 +1,27 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package stopwords + +const ( + EnglishPreset = "en" + NoPreset = "none" +) + +var Presets = map[string][]string{ + EnglishPreset: { + "a", "an", "and", "are", "as", "at", "be", "but", "by", "for", + "if", "in", "into", "is", "it", "no", "not", "of", "on", "or", "such", "that", + "the", "their", "then", "there", "these", "they", "this", "to", "was", "will", + "with", + }, + NoPreset: {}, +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/terms/terms.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/terms/terms.go new file mode 100644 index 0000000000000000000000000000000000000000..eaf7aef3d2aaf6536e382dffd117f6eb0d2bb104 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/terms/terms.go @@ -0,0 +1,500 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package terms + +import ( + "context" + "encoding/binary" + "math" + "sort" + + "github.com/pkg/errors" + "github.com/weaviate/weaviate/entities/schema" +) + +type DocPointerWithScore struct { + Id uint64 + // A Frequency of 0 indicates a tombstone + Frequency float32 + PropLength float32 +} + +func (d *DocPointerWithScore) FromBytes(in []byte, isTombstone bool, boost float32) error { + if len(in) < 12 { + return errors.Errorf("DocPointerWithScore: FromBytes: input too short, expected at least 12 bytes, got %d", len(in)) + } + // This class is only to be used with a MapList that has fixed key and value lengths (8 and 8) for posting lists + // Thus, we can proceed with fixed offsets, and ignore reading the key and value lengths, at offset 0 and 10 + // key will be at offset 2, value at offset 12 + return d.FromKeyVal(in[2:10], in[12:], isTombstone, boost) +} + +func (d *DocPointerWithScore) FromBytesInverted(in []byte, boost float32, propLen float32) error { + isTombstone := len(in) == 8 + d.FromKeyVal(in[0:8], in[8:], isTombstone, boost) + d.PropLength = propLen + return nil +} + +func (d *DocPointerWithScore) FromKeyVal(key []byte, value []byte, isTombstone bool, boost float32) error { + if len(key) != 8 { + return errors.Errorf("DocPointerWithScore: FromKeyVal: key length must be 8, got %d", len(key)) + } + + d.Id = binary.BigEndian.Uint64(key) + if isTombstone || len(value) < 8 { // tombstone, value length is also checked due to #4125 + // Id and Freq are automatically set to 0 + return nil + } + d.Frequency = math.Float32frombits(binary.LittleEndian.Uint32(value[:4])) * boost + d.PropLength = math.Float32frombits(binary.LittleEndian.Uint32(value[4:])) + return nil +} + +type SortedDocPointerWithScoreMerger struct { + input [][]DocPointerWithScore + output []DocPointerWithScore + offsets []int +} + +func NewSortedDocPointerWithScoreMerger() *SortedDocPointerWithScoreMerger { + return &SortedDocPointerWithScoreMerger{} +} 
+ +func (s *SortedDocPointerWithScoreMerger) init(segments [][]DocPointerWithScore) error { + s.input = segments + + // all offset pointers initialized at 0 which is where we want to start + s.offsets = make([]int, len(segments)) + + // The maximum output is the sum of all the input segments if there are only + // unique keys and zero tombstones. If there are duplicate keys (i.e. + // updates) or tombstones, we will slice off some elements of the output + // later, but this way we can be sure each index will always be initialized + // correctly + maxOutput := 0 + for _, seg := range segments { + maxOutput += len(seg) + } + s.output = make([]DocPointerWithScore, maxOutput) + + return nil +} + +func (s *SortedDocPointerWithScoreMerger) findSegmentWithLowestKey() (DocPointerWithScore, bool) { + bestSeg := -1 + bestKey := uint64(0) + + for segmentID := 0; segmentID < len(s.input); segmentID++ { + // check if a segment is already exhausted, then skip + if s.offsets[segmentID] >= len(s.input[segmentID]) { + continue + } + + currKey := s.input[segmentID][s.offsets[segmentID]].Id + if bestSeg == -1 { + // first time we're running, no need to compare, just set to current + bestSeg = segmentID + bestKey = currKey + continue + } + + if currKey > bestKey { + // the segment we are currently looking at has a higher key than our + // current best so we can completely ignore it + continue + } + + if currKey < bestKey { + // the segment we are currently looking at is a better match than the + // previous, this means, we have found a new favorite, but the previous + // best will still be valid in a future round + bestSeg = segmentID + bestKey = currKey + continue + } + + if currKey == bestKey { + // this the most interesting case: we are looking at a duplicate key. In + // this case the rightmost ("latest") segment takes precedence, however, + // we must make sure that the previous match gets discarded, otherwise we + // will find it again in the next round. 
+ // + // We can simply increase the offset before updating the bestSeg pointer, + // which means we will never look at this element again + s.offsets[bestSeg]++ + + // now that the old element is discarded, we can update our pointers + bestSeg = segmentID + bestKey = currKey + } + } + + if bestSeg == -1 { + // we didn't find anything, looks like we have exhausted all segments + return DocPointerWithScore{}, false + } + + // we can now be sure that bestSeg,bestKey is the latest version of the + // lowest key, there is only one job left to do: increase the offset, so we + // never find this segment again + bestMatch := s.input[bestSeg][s.offsets[bestSeg]] + s.offsets[bestSeg]++ + + return bestMatch, true +} + +func (s *SortedDocPointerWithScoreMerger) Do(ctx context.Context, segments [][]DocPointerWithScore) ([]DocPointerWithScore, error) { + if err := s.init(segments); err != nil { + return nil, errors.Wrap(err, "init sorted map decoder") + } + + i := 0 + for { + if i%100 == 0 && ctx.Err() != nil { + return nil, ctx.Err() + } + + match, ok := s.findSegmentWithLowestKey() + if !ok { + break + } + + if match.Frequency == 0 { // tombstone + // the latest version of this key was a tombstone, so we can ignore it + continue + } + + s.output[i] = match + i++ + } + + return s.output[:i], nil +} + +type TermInterface interface { + // doubles as max impact (with tf=1, the max impact would be 1*Idf), if there + // is a boost for a queryTerm, simply apply it here once + Idf() float64 + IdPointer() uint64 + Exhausted() bool + Count() int + QueryTermIndex() int + AdvanceAtLeast(minID uint64) + AdvanceAtLeastShallow(minID uint64) + Advance() + Score(averagePropLength float64, additionalExplanations bool) (uint64, float64, *DocPointerWithScore) + CurrentBlockImpact() float32 + CurrentBlockMaxId() uint64 +} + +type Term struct { + // doubles as max impact (with tf=1, the max impact would be 1*Idf), if there + // is a boost for a queryTerm, simply apply it here once + idf float64 + 
+ idPointer uint64 + posPointer uint64 + Data []DocPointerWithScore + exhausted bool + queryTerm string + queryTermIndex int + propertyBoost float64 + config schema.BM25Config +} + +func NewTerm(queryTerm string, queryTermIndex int, propertyBoost float32, config schema.BM25Config) *Term { + return &Term{ + queryTerm: queryTerm, + queryTermIndex: queryTermIndex, + propertyBoost: float64(propertyBoost), + config: config, + } +} + +func (t *Term) Score(averagePropLength float64, additionalExplanations bool) (uint64, float64, *DocPointerWithScore) { + pair := t.Data[t.posPointer] + freq := float64(pair.Frequency) + tf := freq / (freq + t.config.K1*(1-t.config.B+t.config.B*float64(pair.PropLength)/averagePropLength)) + if !additionalExplanations { + return t.idPointer, tf * t.idf * t.propertyBoost, nil + } + return t.idPointer, tf * t.idf * t.propertyBoost, &pair +} + +func (t *Term) Advance() { + t.posPointer++ + if t.posPointer >= uint64(len(t.Data)) { + t.exhausted = true + t.idPointer = math.MaxUint64 // force them to the end of the term list + } else { + t.idPointer = t.Data[t.posPointer].Id + } +} + +func (t *Term) AdvanceAtLeast(minID uint64) { + for t.idPointer < minID { + t.posPointer++ + if t.posPointer >= uint64(len(t.Data)) { + t.exhausted = true + t.idPointer = math.MaxUint64 // force them to the end of the term list + return + } + t.idPointer = t.Data[t.posPointer].Id + } +} + +func (t *Term) AdvanceAtLeastShallow(minID uint64) { + t.AdvanceAtLeast(minID) + // go back one document, as the advance blockmax implementation relies on going to the document right before on a shallow advance, + // due to the way decoding works in the SegmentBlockMax implementation + t.posPointer-- + t.exhausted = false + t.idPointer = t.Data[t.posPointer].Id +} + +func (t *Term) Count() int { + return len(t.Data) +} + +func (t *Term) Idf() float64 { + return t.idf +} + +func (t *Term) IdPointer() uint64 { + return t.idPointer +} + +func (t *Term) PosPointer() uint64 { + return 
t.posPointer +} + +func (t *Term) Exhausted() bool { + return t.exhausted +} + +func (t *Term) QueryTerm() string { + return t.queryTerm +} + +func (t *Term) QueryTermIndex() int { + return t.queryTermIndex +} + +func (t *Term) SetIdf(idf float64) { + t.idf = idf +} + +func (t *Term) SetPosPointer(posPointer uint64) { + t.posPointer = posPointer +} + +func (t *Term) SetIdPointer(idPointer uint64) { + t.idPointer = idPointer +} + +func (t *Term) CurrentBlockImpact() float32 { + return float32(t.idf * t.propertyBoost) +} + +func (t *Term) CurrentBlockMaxId() uint64 { + return t.idPointer +} + +type Terms struct { + T []TermInterface + Count int +} + +func (t *Terms) CompletelyExhausted() bool { + for i := range t.T { + if !t.T[i].Exhausted() { + return false + } + } + return true +} + +func (t *Terms) FindMinIDWand(minScore float64) (uint64, int, bool) { + cumScore := float64(0) + + for i, term := range t.T { + if term.Exhausted() { + continue + } + cumScore += term.Idf() + if cumScore >= minScore { + return term.IdPointer(), i, false + } + } + + return 0, 0, true +} + +func (t *Terms) Pivot(minScore float64) bool { + minID, pivotPoint, abort := t.FindMinIDWand(minScore) + if abort { + return true + } + if pivotPoint == 0 { + return false + } + + t.AdvanceAllAtLeast(minID, len(t.T)-1) + + // we don't need to sort the entire list, just the first pivotPoint elements + t.SortFirst() + + return false +} + +func (t *Terms) AdvanceAllAtLeast(minID uint64, pivot int) { + for i := range t.T[:pivot] { + t.T[i].AdvanceAtLeast(minID) + } +} + +func (t *Terms) FindMinID(minScore float64) (uint64, int, bool) { + cumScore := float64(0) + for i, term := range t.T { + if term.Exhausted() { + continue + } + cumScore += float64(term.CurrentBlockImpact()) + if cumScore >= minScore { + // find if there is another term with the same id + for j := i + 1; j < len(t.T); j++ { + if t.T[j].IdPointer() != term.IdPointer() { + return t.T[j-1].IdPointer(), j - 1, false + } + } + return 
t.T[len(t.T)-1].IdPointer(), len(t.T) - 1, false + } + } + + return 0, 0, true +} + +func (t *Terms) FindFirstNonExhausted() (int, bool) { + for i := range t.T { + if !t.T[i].Exhausted() { + return i, true + } + } + + return -1, false +} + +func (t *Terms) ScoreNext(averagePropLength float64, additionalExplanations bool, minimumOrTokensMatch int) (uint64, float64, []*DocPointerWithScore, bool) { + var docInfos []*DocPointerWithScore + + pos, ok := t.FindFirstNonExhausted() + if !ok { + // done, nothing left to score + return 0, 0, docInfos, false + } + + if len(t.T) == 0 { + return 0, 0, docInfos, false + } + + if additionalExplanations { + docInfos = make([]*DocPointerWithScore, t.Count) + } + + id := t.T[pos].IdPointer() + var cumScore float64 + + matchedTerms := 0 + + if len(t.T)-pos < minimumOrTokensMatch { + return 0, 0, docInfos, false + } + + for i := pos; i < len(t.T); i++ { + if t.T[i].IdPointer() != id || t.T[i].Exhausted() { + continue + } + matchedTerms++ + term := t.T[i] + _, score, docInfo := term.Score(averagePropLength, additionalExplanations) + term.Advance() + if additionalExplanations { + docInfos[term.QueryTermIndex()] = docInfo + } + cumScore += score + } + + if matchedTerms < minimumOrTokensMatch { + // not enough terms matched, return 0 + return 0, 0, docInfos, false + } + + // t.FullSort() + return id, cumScore, docInfos, true +} + +// provide sort interface +func (t *Terms) Len() int { + return len(t.T) +} + +func (t *Terms) Less(i, j int) bool { + return t.T[i].IdPointer() < t.T[j].IdPointer() +} + +func (t *Terms) Swap(i, j int) { + t.T[i], t.T[j] = t.T[j], t.T[i] +} + +func (t *Terms) SortFull() { + sort.Sort(t) +} + +func (t *Terms) SortFirst() { + min := uint64(0) + minIndex := -1 + for i := 0; i < len(t.T); i++ { + if minIndex == -1 || (t.T[i].IdPointer() < min && !t.T[i].Exhausted()) { + min = t.T[i].IdPointer() + minIndex = i + } + } + if minIndex > 0 { + t.T[0], t.T[minIndex] = t.T[minIndex], t.T[0] + } +} + +func (t *Terms) 
SortPartial(nextList int) { + for i := nextList + 1; i < len(t.T); i++ { + if t.T[i].IdPointer() <= t.T[i-1].IdPointer() { + // swap + t.T[i], t.T[i-1] = t.T[i-1], t.T[i] + } else { + break + } + } +} + +func (t *Terms) GetBlockUpperBound(pivot int, pivotId uint64) float32 { + blockMaxScore := float32(0) + for i := 0; i < pivot+1; i++ { + if t.T[i].Exhausted() { + continue + } + if t.T[i].CurrentBlockMaxId() < pivotId { + t.T[i].AdvanceAtLeastShallow(pivotId) + } + blockMaxScore += t.T[i].CurrentBlockImpact() + } + return blockMaxScore +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/terms/terms_block.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/terms/terms_block.go new file mode 100644 index 0000000000000000000000000000000000000000..130ba6734abff5d20d807a20fbbea955e93940ba --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/terms/terms_block.go @@ -0,0 +1,96 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package terms + +import ( + "encoding/binary" +) + +var ( + BLOCK_SIZE = 128 + // if we are only encoding few documents, we can encode the doc ids and tfs as full bytes. 
+ // Limit for this is currently set to 1 + ENCODE_AS_FULL_BYTES = 1 +) + +type BlockEntry struct { + MaxId uint64 + Offset uint32 + MaxImpactTf uint32 + MaxImpactPropLength uint32 +} + +func (b BlockEntry) Size() int { + return 20 +} + +func (b *BlockEntry) Encode() []byte { + out := make([]byte, 20) + binary.LittleEndian.PutUint64(out, b.MaxId) + binary.LittleEndian.PutUint32(out[8:], b.Offset) + binary.LittleEndian.PutUint32(out[12:], b.MaxImpactTf) + binary.LittleEndian.PutUint32(out[16:], b.MaxImpactPropLength) + return out +} + +func DecodeBlockEntry(data []byte) *BlockEntry { + return &BlockEntry{ + MaxId: binary.LittleEndian.Uint64(data), + Offset: binary.LittleEndian.Uint32(data[8:]), + MaxImpactTf: binary.LittleEndian.Uint32(data[12:]), + MaxImpactPropLength: binary.LittleEndian.Uint32(data[16:]), + } +} + +type BlockDataDecoded struct { + DocIds []uint64 + Tfs []uint64 +} + +type BlockData struct { + DocIds []byte + Tfs []byte +} + +func (b *BlockData) Size() int { + return 2*2 + len(b.DocIds) + len(b.Tfs) +} + +func (b *BlockData) Encode() []byte { + out := make([]byte, len(b.DocIds)+len(b.Tfs)+4) + offset := 0 + // write the lengths of the slices + binary.LittleEndian.PutUint16(out[offset:], uint16(len(b.DocIds))) + offset += 2 + binary.LittleEndian.PutUint16(out[offset:], uint16(len(b.Tfs))) + offset += 2 + + offset += copy(out[offset:], b.DocIds) + copy(out[offset:], b.Tfs) + return out +} + +func DecodeBlockData(data []byte) *BlockData { + docIdsLen := binary.LittleEndian.Uint16(data) + termFreqsLen := binary.LittleEndian.Uint16(data[2:]) + return &BlockData{ + DocIds: data[4 : 4+docIdsLen], + Tfs: data[4+docIdsLen : 4+docIdsLen+termFreqsLen], + } +} + +func DecodeBlockDataReusable(data []byte, out *BlockData) { + docIdsLen := binary.LittleEndian.Uint16(data) + termFreqsLen := binary.LittleEndian.Uint16(data[2:]) + out.DocIds = data[4 : 4+docIdsLen] + out.Tfs = data[4+docIdsLen : 4+docIdsLen+termFreqsLen] +} diff --git 
a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_segment_collection_reusable.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_segment_collection_reusable.go new file mode 100644 index 0000000000000000000000000000000000000000..52be43b3aadfa7f3e0071efa9fc47443ce2b32c8 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_segment_collection_reusable.go @@ -0,0 +1,132 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "errors" + "io" + + "github.com/weaviate/weaviate/entities/lsmkv" +) + +type segmentCursorCollectionReusable struct { + cache *cacheReader + nodeBuf segmentCollectionNode +} + +func (s *segment) newCollectionCursorReusable() *segmentCursorCollectionReusable { + return &segmentCursorCollectionReusable{ + cache: newCacheReader(s), + } +} + +func (s *segmentCursorCollectionReusable) next() ([]byte, []value, error) { + if err := s.cache.CheckPosition(); err != nil { + return nil, nil, err + } + return s.parseCollectionNodeInto() +} + +func (s *segmentCursorCollectionReusable) first() ([]byte, []value, error) { + s.cache.Reset() + if err := s.cache.CheckPosition(); err != nil { + return nil, nil, err + } + return s.parseCollectionNodeInto() +} + +func (s *segmentCursorCollectionReusable) parseCollectionNodeInto() ([]byte, []value, error) { + err := ParseCollectionNodeInto(s.cache, &s.nodeBuf) + if err != nil { + return s.nodeBuf.primaryKey, nil, err + } + + return s.nodeBuf.primaryKey, s.nodeBuf.values, nil +} + +type cacheReader struct { + readCache []byte + positionInCache uint64 + segment *segment + positionInSegment uint64 +} + +func newCacheReader(s *segment) *cacheReader { + cacheSize := uint64(4096) + if 
s.dataEndPos-s.dataStartPos < cacheSize { + cacheSize = s.dataEndPos - s.dataStartPos + } + + return &cacheReader{ + readCache: make([]byte, 0, cacheSize), + segment: s, + positionInSegment: s.dataStartPos, + } +} + +func (c *cacheReader) CheckPosition() error { + if c.positionInSegment >= c.segment.dataEndPos { + return lsmkv.NotFound + } + return nil +} + +func (c *cacheReader) Reset() { + c.positionInCache = 0 + c.positionInSegment = c.segment.dataStartPos + c.readCache = c.readCache[:0] // forces a new read +} + +func (c *cacheReader) Read(p []byte) (n int, err error) { + length := uint64(len(p)) + if c.positionInSegment+length > c.segment.dataEndPos { + return 0, lsmkv.NotFound + } + if c.positionInCache+length > uint64(len(c.readCache)) { + if err := c.loadDataIntoCache(len(p)); err != nil { + return 0, err + } + } + copy(p, c.readCache[c.positionInCache:c.positionInCache+length]) + + c.positionInSegment += length + c.positionInCache += length + + return len(p), nil +} + +func (c *cacheReader) loadDataIntoCache(readLength int) error { + at, err := c.segment.newNodeReader(nodeOffset{start: c.positionInSegment}, "CursorCollectionReusable") + if err != nil { + return err + } + defer at.Release() + + // Restore the original buffer capacity before reading + c.readCache = c.readCache[:cap(c.readCache)] + + if readLength > len(c.readCache) { + c.readCache = make([]byte, readLength) + } + + read, err := at.Read(c.readCache) + if err != nil && (!errors.Is(err, io.EOF) || read == 0) { + return err + } + if read < readLength { + return lsmkv.NotFound + } + + c.readCache = c.readCache[:read] + c.positionInCache = 0 + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_segment_inverted_reusable.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_segment_inverted_reusable.go new file mode 100644 index 0000000000000000000000000000000000000000..f1ed0af4026d540e6ecc75c246d1cc586189d2fc --- /dev/null +++ 
b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_segment_inverted_reusable.go @@ -0,0 +1,140 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "encoding/binary" + + "github.com/weaviate/weaviate/adapters/repos/db/inverted/terms" + "github.com/weaviate/weaviate/entities/lsmkv" +) + +type segmentCursorInvertedReusable struct { + segment *segment + nextOffset uint64 + nodeBuf binarySearchNodeMap + propLengths map[uint64]uint32 +} + +func (s *segment) newInvertedCursorReusable() *segmentCursorInvertedReusable { + propLengths, err := s.GetPropertyLengths() + if err != nil { + return nil + } + return &segmentCursorInvertedReusable{ + segment: s, + propLengths: propLengths, + } +} + +func (s *segmentCursorInvertedReusable) seek(key []byte) ([]byte, []MapPair, error) { + node, err := s.segment.index.Seek(key) + if err != nil { + return nil, nil, err + } + + err = s.parseInvertedNodeInto(nodeOffset{node.Start, node.End}) + if err != nil { + return nil, nil, err + } + + s.nextOffset = node.End + + return s.nodeBuf.key, s.nodeBuf.values, nil +} + +func (s *segmentCursorInvertedReusable) next() ([]byte, []MapPair, error) { + if s.nextOffset >= s.segment.dataEndPos { + return nil, nil, lsmkv.NotFound + } + + err := s.parseInvertedNodeInto(nodeOffset{start: s.nextOffset}) + if err != nil { + return nil, nil, err + } + + return s.nodeBuf.key, s.nodeBuf.values, nil +} + +func (s *segmentCursorInvertedReusable) first() ([]byte, []MapPair, error) { + s.nextOffset = s.segment.dataStartPos + + if s.nextOffset >= s.segment.dataEndPos { + return nil, nil, lsmkv.NotFound + } + + err := s.parseInvertedNodeInto(nodeOffset{start: s.nextOffset}) + if err != nil { + return nil, nil, err + } + return 
s.nodeBuf.key, s.nodeBuf.values, nil +} + +func (s *segmentCursorInvertedReusable) parseInvertedNodeInto(offset nodeOffset) error { + buffer := make([]byte, 16) + r, err := s.segment.newNodeReader(offset, "segmentCursorInvertedReusable") + if err != nil { + return err + } + defer r.Release() + + _, err = r.Read(buffer) + if err != nil { + return err + } + docCount := binary.LittleEndian.Uint64(buffer[:8]) + end := uint64(20) + if docCount > uint64(terms.ENCODE_AS_FULL_BYTES) { + end = binary.LittleEndian.Uint64(buffer[8:16]) + 16 + } + offset.end = offset.start + end + 4 + + r, err = s.segment.newNodeReader(offset, "segmentCursorInvertedReusable") + if err != nil { + return err + } + defer r.Release() + + allBytes := make([]byte, offset.end-offset.start) + + _, err = r.Read(allBytes) + if err != nil { + return err + } + + nodes, _ := decodeAndConvertFromBlocks(allBytes) + + keyLen := binary.LittleEndian.Uint32(allBytes[len(allBytes)-4:]) + + offset.start = offset.end + offset.end += uint64(keyLen) + key := make([]byte, keyLen) + + // empty keys are possible if using non-word tokenizers, so let's handle them + if keyLen > 0 { + r, err = s.segment.newNodeReader(offset, "segmentCursorInvertedReusable") + if err != nil { + return err + } + defer r.Release() + _, err = r.Read(key) + if err != nil { + return err + } + } + s.nodeBuf.key = key + s.nodeBuf.values = nodes + + s.nextOffset = offset.end + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_segment_map.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_segment_map.go new file mode 100644 index 0000000000000000000000000000000000000000..2796111727503de0c63cdd82b4f293390b52e128 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_segment_map.go @@ -0,0 +1,167 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ 
\___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "io" + + "github.com/pkg/errors" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv/segmentindex" + "github.com/weaviate/weaviate/entities/lsmkv" +) + +type segmentCursorMap struct { + segment *segment + nextOffset uint64 +} + +func (s *segment) newMapCursor() *segmentCursorMap { + return &segmentCursorMap{ + segment: s, + } +} + +func (sg *SegmentGroup) newMapCursors() ([]innerCursorMap, func()) { + segments, release := sg.getAndLockSegments() + + out := make([]innerCursorMap, len(segments)) + + for i, segment := range segments { + sgm := segment.getSegment() + if sgm.getStrategy() == segmentindex.StrategyInverted { + out[i] = sgm.newInvertedCursorReusable() + } else { + out[i] = sgm.newMapCursor() + } + } + + return out, release +} + +func (s *segmentCursorMap) decode(parsed segmentCollectionNode) ([]MapPair, error) { + pairs := make([]MapPair, len(parsed.values)) + for i := range pairs { + if s.segment.strategy == segmentindex.StrategyInverted { + if err := pairs[i].FromBytesInverted(parsed.values[i].value, false); err != nil { + return nil, err + } + } else { + if err := pairs[i].FromBytes(parsed.values[i].value, false); err != nil { + return nil, err + } + } + pairs[i].Tombstone = parsed.values[i].tombstone + } + return pairs, nil +} + +func (s *segmentCursorMap) seek(key []byte) ([]byte, []MapPair, error) { + node, err := s.segment.index.Seek(key) + if err != nil { + return nil, nil, err + } + + var parsed segmentCollectionNode + + if s.segment.strategy == segmentindex.StrategyInverted { + parsed, err = s.parseInvertedNode(nodeOffset{node.Start, node.End}) + } else { + parsed, err = s.parseCollectionNode(nodeOffset{node.Start, node.End}) + } + // make sure to set the next offset before checking the error. 
The error + // could be 'Deleted' which would require that the offset is still advanced + // for the next cycle + s.nextOffset = node.End + if err != nil { + return parsed.primaryKey, nil, err + } + + pairs, err := s.decode(parsed) + return parsed.primaryKey, pairs, err +} + +func (s *segmentCursorMap) next() ([]byte, []MapPair, error) { + if s.nextOffset >= s.segment.dataEndPos { + return nil, nil, lsmkv.NotFound + } + + var parsed segmentCollectionNode + var err error + + if s.segment.strategy == segmentindex.StrategyInverted { + parsed, err = s.parseInvertedNode(nodeOffset{start: s.nextOffset}) + } else { + parsed, err = s.parseCollectionNode(nodeOffset{start: s.nextOffset}) + } + // make sure to set the next offset before checking the error. The error + // could be 'Deleted' which would require that the offset is still advanced + // for the next cycle + s.nextOffset = s.nextOffset + uint64(parsed.offset) + if err != nil { + return parsed.primaryKey, nil, err + } + + pairs, err := s.decode(parsed) + return parsed.primaryKey, pairs, err +} + +func (s *segmentCursorMap) first() ([]byte, []MapPair, error) { + if s.segment.dataStartPos == s.segment.dataEndPos { + return nil, nil, lsmkv.NotFound + } + + s.nextOffset = s.segment.dataStartPos + + var parsed segmentCollectionNode + var err error + + if s.segment.strategy == segmentindex.StrategyInverted { + parsed, err = s.parseInvertedNode(nodeOffset{start: s.nextOffset}) + } else { + parsed, err = s.parseCollectionNode(nodeOffset{start: s.nextOffset}) + } + // make sure to set the next offset before checking the error. 
The error + // could be 'Deleted' which would require that the offset is still advanced + // for the next cycle + s.nextOffset = s.nextOffset + uint64(parsed.offset) + if err != nil { + if errors.Is(err, io.EOF) { + // an empty map could have been generated due to an issue in compaction + return nil, nil, lsmkv.NotFound + } + + return parsed.primaryKey, nil, err + } + + pairs, err := s.decode(parsed) + return parsed.primaryKey, pairs, err +} + +func (s *segmentCursorMap) parseCollectionNode(offset nodeOffset) (segmentCollectionNode, error) { + r, err := s.segment.newNodeReader(offset, "segmentCursorMap") + if err != nil { + return segmentCollectionNode{}, err + } + defer r.Release() + + return ParseCollectionNode(r) +} + +func (s *segmentCursorMap) parseInvertedNode(offset nodeOffset) (segmentCollectionNode, error) { + r, err := s.segment.newNodeReader(offset, "segmentCursorMap") + if err != nil { + return segmentCollectionNode{}, err + } + defer r.Release() + + return ParseInvertedNode(r) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_segment_replace.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_segment_replace.go new file mode 100644 index 0000000000000000000000000000000000000000..e7f3cf83d59af12b784ba3f73b9ba6b34027b7fc --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_segment_replace.go @@ -0,0 +1,303 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "github.com/weaviate/weaviate/entities/lsmkv" + "github.com/weaviate/weaviate/usecases/byteops" +) + +type segmentCursorReplace struct { + segment *segment + index diskIndex + keyFn func(n *segmentReplaceNode) []byte + firstOffsetFn func() (uint64, error) + nextOffsetFn func(n *segmentReplaceNode) (uint64, error) + currOffset uint64 + reusableNode *segmentReplaceNode + reusableBORW byteops.ReadWriter +} + +func (s *segment) newCursor() *segmentCursorReplace { + cursor := &segmentCursorReplace{ + segment: s, + index: s.index, + firstOffsetFn: func() (uint64, error) { + if s.dataStartPos == s.dataEndPos { + return 0, lsmkv.NotFound + } + return s.dataStartPos, nil + }, + currOffset: s.dataStartPos, + keyFn: func(n *segmentReplaceNode) []byte { + return n.primaryKey + }, + reusableNode: &segmentReplaceNode{}, + reusableBORW: byteops.NewReadWriter(nil), + } + + cursor.nextOffsetFn = func(n *segmentReplaceNode) (uint64, error) { + return cursor.currOffset + uint64(n.offset), nil + } + + return cursor +} + +// Note: scanning over secondary keys is sub-optimal +// i.e. 
no sequential scan is possible as when scanning over the primary key + +func (s *segment) newCursorWithSecondaryIndex(pos int) *segmentCursorReplace { + return &segmentCursorReplace{ + segment: s, + index: s.secondaryIndices[pos], + keyFn: func(n *segmentReplaceNode) []byte { + return n.secondaryKeys[pos] + }, + firstOffsetFn: func() (uint64, error) { + index := s.secondaryIndices[pos] + n, err := index.Seek(nil) + if err != nil { + return 0, err + } + return n.Start, nil + }, + nextOffsetFn: func(n *segmentReplaceNode) (uint64, error) { + index := s.secondaryIndices[pos] + next, err := index.Next(n.secondaryKeys[pos]) + if err != nil { + return 0, err + } + return next.Start, nil + }, + reusableNode: &segmentReplaceNode{ + secondaryIndexCount: s.secondaryIndexCount, + secondaryKeys: make([][]byte, s.secondaryIndexCount), + }, + reusableBORW: byteops.NewReadWriter(nil), + } +} + +func (sg *SegmentGroup) newCursors() ([]innerCursorReplace, func()) { + segments, release := sg.getAndLockSegments() + + out := make([]innerCursorReplace, len(segments)) + + for i, segment := range segments { + out[i] = segment.newCursor() + } + + return out, release +} + +func (sg *SegmentGroup) newCursorsWithFlushingSupport() ([]innerCursorReplace, func()) { + sg.cursorsLock.Lock() + defer sg.cursorsLock.Unlock() + + sg.activeCursors++ + + sg.maintenanceLock.RLock() + + var segments []Segment + + if len(sg.enqueuedSegments) == 0 { + segments = sg.segments + } else { + segments = make([]Segment, 0, len(sg.segments)+len(sg.enqueuedSegments)) + segments = append(segments, sg.segments...) + segments = append(segments, sg.enqueuedSegments...) 
+ } + + out := make([]innerCursorReplace, 0, len(segments)) + + for _, segment := range segments { + out = append(out, segment.newCursor()) + } + + release := func() { + sg.maintenanceLock.RUnlock() + + sg.cursorsLock.Lock() + defer sg.cursorsLock.Unlock() + + sg.activeCursors-- + + if sg.activeCursors == 0 && len(sg.enqueuedSegments) > 0 { + sg.maintenanceLock.Lock() + defer sg.maintenanceLock.Unlock() + + sg.segments = append(sg.segments, sg.enqueuedSegments...) + sg.enqueuedSegments = nil + } + } + + return out, release +} + +func (sg *SegmentGroup) newCursorsWithSecondaryIndex(pos int) ([]innerCursorReplace, func()) { + segments, release := sg.getAndLockSegments() + out := make([]innerCursorReplace, 0, len(segments)) + + for _, segment := range segments { + if int(segment.getSecondaryIndexCount()) <= pos { + continue + } + out = append(out, segment.newCursorWithSecondaryIndex(pos)) + } + + return out, release +} + +func (s *segmentCursorReplace) seek(key []byte) ([]byte, []byte, error) { + node, err := s.index.Seek(key) + if err != nil { + return nil, nil, err + } + + s.currOffset = node.Start + + err = s.parseReplaceNodeInto(nodeOffset{start: node.Start, end: node.End}, + s.segment.contents[node.Start:node.End]) + if err != nil { + return s.keyFn(s.reusableNode), nil, err + } + + return s.keyFn(s.reusableNode), s.reusableNode.value, nil +} + +func (s *segmentCursorReplace) next() ([]byte, []byte, error) { + nextOffset, err := s.nextOffsetFn(s.reusableNode) + if err != nil { + return nil, nil, err + } + + if nextOffset >= s.segment.dataEndPos { + return nil, nil, lsmkv.NotFound + } + + s.currOffset = nextOffset + + err = s.parseReplaceNodeInto(nodeOffset{start: s.currOffset}, + s.segment.contents[s.currOffset:]) + if err != nil { + return s.keyFn(s.reusableNode), nil, err + } + + return s.keyFn(s.reusableNode), s.reusableNode.value, nil +} + +func (s *segmentCursorReplace) first() ([]byte, []byte, error) { + firstOffset, err := s.firstOffsetFn() + if err != nil 
{ + return nil, nil, err + } + + s.currOffset = firstOffset + + err = s.parseReplaceNodeInto(nodeOffset{start: s.currOffset}, + s.segment.contents[s.currOffset:]) + if err != nil { + return s.keyFn(s.reusableNode), nil, err + } + + return s.keyFn(s.reusableNode), s.reusableNode.value, nil +} + +func (s *segmentCursorReplace) nextWithAllKeys() (n segmentReplaceNode, err error) { + nextOffset, err := s.nextOffsetFn(s.reusableNode) + if err != nil { + return n, err + } + + if nextOffset >= s.segment.dataEndPos { + return n, lsmkv.NotFound + } + + s.currOffset = nextOffset + + n, err = s.parseReplaceNode(nodeOffset{start: s.currOffset}) + + s.reusableNode = &n + + return n, err +} + +func (s *segmentCursorReplace) firstWithAllKeys() (n segmentReplaceNode, err error) { + firstOffset, err := s.firstOffsetFn() + if err != nil { + return n, err + } + + s.currOffset = firstOffset + + n, err = s.parseReplaceNode(nodeOffset{start: s.currOffset}) + + s.reusableNode = &n + + return n, err +} + +func (s *segmentCursorReplace) parseReplaceNode(offset nodeOffset) (segmentReplaceNode, error) { + r, err := s.segment.newNodeReader(offset, "segmentCursorReplace") + if err != nil { + return segmentReplaceNode{}, err + } + defer r.Release() + + out, err := ParseReplaceNode(r, s.segment.secondaryIndexCount) + if out.tombstone { + return out, lsmkv.Deleted + } + return out, err +} + +func (s *segmentCursorReplace) parseReplaceNodeInto(offset nodeOffset, buf []byte) error { + if s.segment.readFromMemory { + return s.parse(buf) + } + + r, err := s.segment.newNodeReader(offset, "segmentCursorReplace") + if err != nil { + return err + } + defer r.Release() + + err = ParseReplaceNodeIntoPread(r, s.segment.secondaryIndexCount, s.reusableNode) + if err != nil { + return err + } + + if s.reusableNode.tombstone { + return lsmkv.Deleted + } + + return nil +} + +func (s *segmentCursorReplace) parse(in []byte) error { + if len(in) == 0 { + return lsmkv.NotFound + } + + s.reusableBORW.ResetBuffer(in) 
+ + err := ParseReplaceNodeIntoMMAP(&s.reusableBORW, s.segment.secondaryIndexCount, + s.reusableNode) + if err != nil { + return err + } + + if s.reusableNode.tombstone { + return lsmkv.Deleted + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_segment_roaring_set.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_segment_roaring_set.go new file mode 100644 index 0000000000000000000000000000000000000000..4d5644a644ec9dc54d4b59d1bbacc562a6eac39f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_segment_roaring_set.go @@ -0,0 +1,54 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv/segmentindex" + "github.com/weaviate/weaviate/adapters/repos/db/roaringset" +) + +func (s *segment) newRoaringSetCursor() *roaringset.SegmentCursor { + return roaringset.NewSegmentCursor(s.contents[s.dataStartPos:s.dataEndPos], + &roaringSetSeeker{s.index}) +} + +func (sg *SegmentGroup) newRoaringSetCursors() ([]roaringset.InnerCursor, func()) { + segments, release := sg.getAndLockSegments() + + out := make([]roaringset.InnerCursor, len(segments)) + + for i, segment := range segments { + out[i] = segment.newRoaringSetCursor() + } + + return out, release +} + +// diskIndex returns node's Start and End offsets +// taking into account HeaderSize. 
SegmentCursor of RoaringSet +// accepts only payload part of underlying segment content, therefore +// offsets should be adjusted and reduced by HeaderSize +type roaringSetSeeker struct { + diskIndex diskIndex +} + +func (s *roaringSetSeeker) Seek(key []byte) (segmentindex.Node, error) { + node, err := s.diskIndex.Seek(key) + if err != nil { + return segmentindex.Node{}, err + } + return segmentindex.Node{ + Key: node.Key, + Start: node.Start - segmentindex.HeaderSize, + End: node.End - segmentindex.HeaderSize, + }, nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_segment_roaring_set_range.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_segment_roaring_set_range.go new file mode 100644 index 0000000000000000000000000000000000000000..d70b529744586fd0da4b5835145bf6581a457c41 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_segment_roaring_set_range.go @@ -0,0 +1,57 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "io" + + "github.com/weaviate/weaviate/adapters/repos/db/roaringsetrange" + "github.com/weaviate/weaviate/entities/concurrency" +) + +func (sg *SegmentGroup) newRoaringSetRangeReaders() ([]roaringsetrange.InnerReader, func()) { + segments, release := sg.getAndLockSegments() + + readers := make([]roaringsetrange.InnerReader, len(segments)) + for i, segment := range segments { + readers[i] = segment.newRoaringSetRangeReader() + } + + return readers, release +} + +func (s *segment) newRoaringSetRangeReader() *roaringsetrange.SegmentReader { + var segmentCursor roaringsetrange.SegmentCursor + if s.readFromMemory { + segmentCursor = roaringsetrange.NewSegmentCursorMmap(s.contents[s.dataStartPos:s.dataEndPos]) + } else { + sectionReader := io.NewSectionReader(s.contentFile, int64(s.dataStartPos), int64(s.dataEndPos)) + // since segment reader concurrenlty fetches next segment and merges bitmaps of previous segments + // at least 2 buffers needs to be used by cursor not to overwrite data before they are consumed. + segmentCursor = roaringsetrange.NewSegmentCursorPread(sectionReader, 2) + } + + return roaringsetrange.NewSegmentReaderConcurrent( + roaringsetrange.NewGaplessSegmentCursor(segmentCursor), + concurrency.SROAR_MERGE) +} + +func (s *segment) newRoaringSetRangeCursor() roaringsetrange.SegmentCursor { + if s.readFromMemory { + return roaringsetrange.NewSegmentCursorMmap(s.contents[s.dataStartPos:s.dataEndPos]) + } + + sectionReader := io.NewSectionReader(s.contentFile, int64(s.dataStartPos), int64(s.dataEndPos)) + // compactor does not work concurrently, next segment is fetched after previous one gets consumed, + // therefore just one buffer is sufficient. 
+ return roaringsetrange.NewSegmentCursorPread(sectionReader, 1) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/doc.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..a458c88b47cec5fe9c95226dba83faf7d92742a5 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/doc.go @@ -0,0 +1,75 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +/* +# LSMKV (= Log-structured Merge-Tree Key-Value Store) + +This package contains Weaviate's custom LSM store. While modeled after the +usecases that are required for Weaviate to be fast, reliable, and scalable, it +is technically completely independent. You could build your own database on top +of this key-value store. + +Covering the architecture of [LSM Stores] in general goes beyond the scope of +this documentation. Therefore things that are specific to this implementation +are highlighted. + +# Strategies + +To understand the different type of buckets in this store, you need to +familiarize yourself with the following strategies. A strategy defines a +different usecase for a [Bucket]. + + - "Replace" + + Replace resembles the classical key-value store. Each key has exactly one + value. A subsequent PUT on an an existing key, replaces the value (hence + the name "replace"). Once replaced a former value can no longer be + retrieved, and will eventually be removed in compactions. + + - "Set" (aka "SetCollection") + + A set behaves like an unordered collection of independent values. In other + words a single key has multiple values. For example, for key "foo", you + could have values "bar1", "bar2", "bazzinga". 
A bucket of this type is + optimized for cheap writes to add new set additions. For example adding + another set element has a fixed cost independent of the number of the + existing set length. This makes it very well suited for building an + inverted index. + + Retrieving a Set has a slight cost to it if a set is spread across multiple + segments. This cost will eventually reduce as more and more compactions + happen. In the ideal case (fully compacted DB), retrieving a Set requires + just a single disk read. + + - "Map" (aka "MapCollection") + + Maps are similar to Sets in the sense that for a single key there are + multiple values. However, each value is in itself a key-value pair. This + makes this type very similar to a dict or hashmap type. For example for + key "foo", you could have value pairs: "bar":17, "baz":19. + + This makes a map a great use case for an inverted index that needs to store + additional info beyond just the docid-pointer, such as in the case of a + BM25 index where the term frequency needs to be stored. + + The same performance-considerations as for sets apply. + +# Navigate around these docs + +Good entrypoints to learn more about how this package works include [Store] +with [New] and [Store.CreateOrLoadBucket], as well as [Bucket] with +[Bucket.Get], [Bucket.GetBySecondary], [Bucket.Put], etc. + +Each strategy also supports cursor types: [CursorReplace] can be created using [Bucket.Cursor], [CursorSet] can be created with [Bucket.SetCursor] , and [CursorMap] can be created with [Bucket.MapCursor]. 
+ +[LSM Stores]: https://en.wikipedia.org/wiki/Log-structured_merge-tree +*/ +package lsmkv diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/entities/doc.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/entities/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..a0a259107c18b13753b380e3896a154994ef722f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/entities/doc.go @@ -0,0 +1,13 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// ent contains common types used throughout various lsmkv (sub-)packages +package entities diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/entities/strategies.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/entities/strategies.go new file mode 100644 index 0000000000000000000000000000000000000000..1718d10b67ee63b48c2eae1b08919dc1ef350a28 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/entities/strategies.go @@ -0,0 +1,48 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package entities + +const ( + // StrategyReplace allows for idem-potent PUT where the latest takes presence + StrategyReplace = "replace" + StrategySetCollection = "setcollection" + StrategyMapCollection = "mapcollection" + StrategyRoaringSet = "roaringset" + StrategyInverted = "inverted" +) + +type SegmentStrategy uint16 + +const ( + SegmentStrategyReplace SegmentStrategy = iota + SegmentStrategySetCollection + SegmentStrategyMapCollection + SegmentStrategyRoaringSet + SegmentStrategyInverted +) + +func SegmentStrategyFromString(in string) SegmentStrategy { + switch in { + case StrategyReplace: + return SegmentStrategyReplace + case StrategySetCollection: + return SegmentStrategySetCollection + case StrategyMapCollection: + return SegmentStrategyMapCollection + case StrategyRoaringSet: + return SegmentStrategyRoaringSet + case StrategyInverted: + return SegmentStrategyInverted + default: + panic("unsupported strategy") + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/fake.wal b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/fake.wal new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/global_bucket_registry.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/global_bucket_registry.go new file mode 100644 index 0000000000000000000000000000000000000000..1a69041d33f8387e9197a25ac08ed8797d117aaf --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/global_bucket_registry.go @@ -0,0 +1,56 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "errors" + "fmt" + "sync" +) + +type globalBucketRegistry struct { + buckets map[string]struct{} + mu sync.Mutex +} + +func newGlobalBucketRegistry() *globalBucketRegistry { + return &globalBucketRegistry{ + buckets: make(map[string]struct{}), + } +} + +var GlobalBucketRegistry *globalBucketRegistry + +func init() { + GlobalBucketRegistry = newGlobalBucketRegistry() +} + +var ErrBucketAlreadyRegistered = errors.New("bucket already registered") + +func (r *globalBucketRegistry) TryAdd(absoluteBucketPath string) error { + r.mu.Lock() + defer r.mu.Unlock() + + if _, ok := r.buckets[absoluteBucketPath]; ok { + return fmt.Errorf("bucket %q: %w", absoluteBucketPath, ErrBucketAlreadyRegistered) + } + + r.buckets[absoluteBucketPath] = struct{}{} + return nil +} + +func (r *globalBucketRegistry) Remove(absoluteBucketPath string) { + r.mu.Lock() + defer r.mu.Unlock() + + delete(r.buckets, absoluteBucketPath) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/helper_for_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/helper_for_test.go new file mode 100644 index 0000000000000000000000000000000000000000..6ad05ec4cdf35620482bc586d3394510272cbb10 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/helper_for_test.go @@ -0,0 +1,24 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest +// +build integrationTest + +package lsmkv + +import ( + "math/rand" + "time" +) + +func getRandomSeed() *rand.Rand { + return rand.New(rand.NewSource(time.Now().UnixNano())) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/lazy_segment.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/lazy_segment.go new file mode 100644 index 0000000000000000000000000000000000000000..120003dfe58f923f2890fae3be81b667d49e5a97 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/lazy_segment.go @@ -0,0 +1,274 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "fmt" + "regexp" + "strconv" + "sync" + + "github.com/weaviate/sroar" + + "github.com/sirupsen/logrus" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv/segmentindex" + "github.com/weaviate/weaviate/adapters/repos/db/roaringset" + "github.com/weaviate/weaviate/adapters/repos/db/roaringsetrange" +) + +type lazySegment struct { + path string + logger logrus.FieldLogger + metrics *Metrics + existsLower existsOnLowerSegmentsFn + cfg segmentConfig + + segment *segment + mux sync.Mutex +} + +func newLazySegment(path string, logger logrus.FieldLogger, metrics *Metrics, + existsLower existsOnLowerSegmentsFn, cfg segmentConfig, +) (*lazySegment, error) { + if metrics != nil && metrics.LazySegmentInit != nil { + metrics.LazySegmentInit.Inc() + } + + return &lazySegment{ + path: path, + logger: logger, + metrics: metrics, + existsLower: existsLower, + cfg: cfg, + }, nil +} + +func (s *lazySegment) load() error { + s.mux.Lock() + defer s.mux.Unlock() + + if s.segment == nil { + segment, err := newSegment(s.path, s.logger, 
s.metrics, s.existsLower, s.cfg) + if err != nil { + return err + } + s.segment = segment + if s.metrics != nil && s.metrics.LazySegmentLoad != nil { + s.metrics.LazySegmentLoad.Inc() + } + } + + return nil +} + +func (s *lazySegment) mustLoad() { + err := s.load() + if err != nil { + panic(fmt.Errorf("error loading segment %q: %w", s.path, err)) + } +} + +func (s *lazySegment) getPath() string { + return s.path +} + +func (s *lazySegment) setPath(path string) { + s.mustLoad() + s.segment.setPath(path) +} + +func (s *lazySegment) getStrategy() segmentindex.Strategy { + strategy, found := s.numberFromPath("s") + if found { + return segmentindex.Strategy(strategy) + } + s.mustLoad() + return s.segment.getStrategy() +} + +func (s *lazySegment) getSecondaryIndexCount() uint16 { + s.mustLoad() + return s.segment.getSecondaryIndexCount() +} + +func (s *lazySegment) getLevel() uint16 { + level, found := s.numberFromPath("l") + if found { + return uint16(level) + } + + s.mustLoad() + return s.segment.getLevel() +} + +func (s *lazySegment) getSize() int64 { + s.mustLoad() + return s.segment.getSize() +} + +func (s *lazySegment) setSize(size int64) { + s.mustLoad() + s.segment.setSize(size) +} + +func (s *lazySegment) PayloadSize() int { + s.mustLoad() + return s.segment.PayloadSize() +} + +func (s *lazySegment) Size() int { + s.mustLoad() + return s.segment.Size() +} + +func (s *lazySegment) close() error { + s.mux.Lock() + defer s.mux.Unlock() + + if s.metrics != nil && s.metrics.LazySegmentClose != nil { + s.metrics.LazySegmentClose.Inc() + } + if s.segment == nil { + return nil + } + if s.metrics != nil && s.metrics.LazySegmentUnLoad != nil { + s.metrics.LazySegmentUnLoad.Inc() + } + return s.segment.close() +} + +func (s *lazySegment) get(key []byte) ([]byte, error) { + s.mustLoad() + return s.segment.get(key) +} + +func (s *lazySegment) getBySecondaryIntoMemory(pos int, key []byte, buffer []byte) ([]byte, []byte, []byte, error) { + s.mustLoad() + return 
s.segment.getBySecondaryIntoMemory(pos, key, buffer) +} + +func (s *lazySegment) getCollection(key []byte) ([]value, error) { + s.mustLoad() + return s.segment.getCollection(key) +} + +func (s *lazySegment) getInvertedData() *segmentInvertedData { + s.mustLoad() + return s.segment.getInvertedData() +} + +func (s *lazySegment) getSegment() *segment { + s.mustLoad() + return s.segment +} + +func (s *lazySegment) isLoaded() bool { + s.mux.Lock() + defer s.mux.Unlock() + + return s.segment != nil +} + +func (s *lazySegment) markForDeletion() error { + s.mustLoad() + return s.segment.markForDeletion() +} + +func (s *lazySegment) MergeTombstones(other *sroar.Bitmap) (*sroar.Bitmap, error) { + s.mustLoad() + return s.segment.MergeTombstones(other) +} + +func (s *lazySegment) newCollectionCursor() *segmentCursorCollection { + s.mustLoad() + return s.segment.newCollectionCursor() +} + +func (s *lazySegment) newCollectionCursorReusable() *segmentCursorCollectionReusable { + s.mustLoad() + return s.segment.newCollectionCursorReusable() +} + +func (s *lazySegment) newCursor() *segmentCursorReplace { + s.mustLoad() + return s.segment.newCursor() +} + +func (s *lazySegment) newCursorWithSecondaryIndex(pos int) *segmentCursorReplace { + s.mustLoad() + return s.segment.newCursorWithSecondaryIndex(pos) +} + +func (s *lazySegment) newMapCursor() *segmentCursorMap { + s.mustLoad() + return s.segment.newMapCursor() +} + +func (s *lazySegment) newNodeReader(offset nodeOffset, operation string) (*nodeReader, error) { + s.mustLoad() + return s.segment.newNodeReader(offset, operation) +} + +func (s *lazySegment) newRoaringSetCursor() *roaringset.SegmentCursor { + s.mustLoad() + return s.segment.newRoaringSetCursor() +} + +func (s *lazySegment) newRoaringSetRangeCursor() roaringsetrange.SegmentCursor { + s.mustLoad() + return s.segment.newRoaringSetRangeCursor() +} + +func (s *lazySegment) newRoaringSetRangeReader() *roaringsetrange.SegmentReader { + s.mustLoad() + return 
s.segment.newRoaringSetRangeReader() +} + +func (s *lazySegment) quantileKeys(q int) [][]byte { + s.mustLoad() + return s.segment.quantileKeys(q) +} + +func (s *lazySegment) ReadOnlyTombstones() (*sroar.Bitmap, error) { + s.mustLoad() + return s.segment.ReadOnlyTombstones() +} + +func (s *lazySegment) replaceStratParseData(in []byte) ([]byte, []byte, error) { + s.mustLoad() + return s.segment.replaceStratParseData(in) +} + +func (s *lazySegment) roaringSetGet(key []byte, bitmapBufPool roaringset.BitmapBufPool, +) (roaringset.BitmapLayer, func(), error) { + s.mustLoad() + return s.segment.roaringSetGet(key, bitmapBufPool) +} + +func (s *lazySegment) roaringSetMergeWith(key []byte, input roaringset.BitmapLayer, bitmapBufPool roaringset.BitmapBufPool, +) error { + s.mustLoad() + return s.segment.roaringSetMergeWith(key, input, bitmapBufPool) +} + +func (s *lazySegment) numberFromPath(str string) (int, bool) { + template := fmt.Sprintf(`\.%s(\d+)\.`, str) + re := regexp.MustCompile(template) + match := re.FindStringSubmatch(s.path) + if len(match) > 1 { + num, err := strconv.Atoi(match[1]) + if err == nil { + return num, true + } + } + return 0, false +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/memtable.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/memtable.go new file mode 100644 index 0000000000000000000000000000000000000000..bfeb919c3bdd7d1c9405e7827cd0b107e97515df --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/memtable.go @@ -0,0 +1,507 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "encoding/binary" + "fmt" + "math" + "path/filepath" + "sync" + "time" + + "github.com/weaviate/weaviate/usecases/memwatch" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/weaviate/sroar" + "github.com/weaviate/weaviate/adapters/repos/db/roaringset" + "github.com/weaviate/weaviate/adapters/repos/db/roaringsetrange" + "github.com/weaviate/weaviate/entities/lsmkv" + "github.com/weaviate/weaviate/entities/models" +) + +type Memtable struct { + sync.RWMutex + key *binarySearchTree + keyMulti *binarySearchTreeMulti + keyMap *binarySearchTreeMap + primaryIndex *binarySearchTree + roaringSet *roaringset.BinarySearchTree + roaringSetRange *roaringsetrange.Memtable + commitlog memtableCommitLogger + allocChecker memwatch.AllocChecker + size uint64 + path string + strategy string + secondaryIndices uint16 + secondaryToPrimary []map[string][]byte + // stores time memtable got dirty to determine when flush is needed + dirtyAt time.Time + createdAt time.Time + metrics *memtableMetrics + writesSinceLastSync bool + + tombstones *sroar.Bitmap + + enableChecksumValidation bool + + bm25config *models.BM25Config + averagePropLength float64 + propLengthCount uint64 + writeSegmentInfoIntoFileName bool +} + +func newMemtable(path string, strategy string, secondaryIndices uint16, + cl memtableCommitLogger, metrics *Metrics, logger logrus.FieldLogger, + enableChecksumValidation bool, bm25config *models.BM25Config, writeSegmentInfoIntoFileName bool, allocChecker memwatch.AllocChecker, +) (*Memtable, error) { + m := &Memtable{ + key: &binarySearchTree{}, + keyMulti: &binarySearchTreeMulti{}, + keyMap: &binarySearchTreeMap{}, + primaryIndex: &binarySearchTree{}, // todo, sort upfront + roaringSet: &roaringset.BinarySearchTree{}, + roaringSetRange: roaringsetrange.NewMemtable(logger), + commitlog: cl, + path: path, + strategy: strategy, + secondaryIndices: secondaryIndices, + dirtyAt: time.Time{}, + 
createdAt: time.Now(), + metrics: newMemtableMetrics(metrics, filepath.Dir(path), strategy), + enableChecksumValidation: enableChecksumValidation, + bm25config: bm25config, + writeSegmentInfoIntoFileName: writeSegmentInfoIntoFileName, + } + + if m.secondaryIndices > 0 { + m.secondaryToPrimary = make([]map[string][]byte, m.secondaryIndices) + for i := range m.secondaryToPrimary { + m.secondaryToPrimary[i] = map[string][]byte{} + } + } + + m.metrics.size(m.size) + + if m.strategy == StrategyInverted { + m.tombstones = sroar.NewBitmap() + } + + return m, nil +} + +func (m *Memtable) get(key []byte) ([]byte, error) { + start := time.Now() + defer m.metrics.get(start.UnixNano()) + + if m.strategy != StrategyReplace { + return nil, errors.Errorf("get only possible with strategy 'replace'") + } + + m.RLock() + defer m.RUnlock() + + return m.key.get(key) +} + +func (m *Memtable) getBySecondary(pos int, key []byte) ([]byte, error) { + start := time.Now() + defer m.metrics.getBySecondary(start.UnixNano()) + + if m.strategy != StrategyReplace { + return nil, errors.Errorf("get only possible with strategy 'replace'") + } + + m.RLock() + defer m.RUnlock() + + primary := m.secondaryToPrimary[pos][string(key)] + if primary == nil { + return nil, lsmkv.NotFound + } + + return m.key.get(primary) +} + +func (m *Memtable) put(key, value []byte, opts ...SecondaryKeyOption) error { + start := time.Now() + defer m.metrics.put(start.UnixNano()) + + if m.strategy != StrategyReplace { + return errors.Errorf("put only possible with strategy 'replace'") + } + + m.Lock() + defer m.Unlock() + m.writesSinceLastSync = true + + var secondaryKeys [][]byte + if m.secondaryIndices > 0 { + secondaryKeys = make([][]byte, m.secondaryIndices) + for _, opt := range opts { + if err := opt(secondaryKeys); err != nil { + return err + } + } + } + + if err := m.commitlog.put(segmentReplaceNode{ + primaryKey: key, + value: value, + secondaryIndexCount: m.secondaryIndices, + secondaryKeys: secondaryKeys, + 
tombstone: false, + }); err != nil { + return errors.Wrap(err, "write into commit log") + } + + netAdditions, previousKeys := m.key.insert(key, value, secondaryKeys) + + for i, sec := range previousKeys { + m.secondaryToPrimary[i][string(sec)] = nil + } + + for i, sec := range secondaryKeys { + m.secondaryToPrimary[i][string(sec)] = key + } + + m.size += uint64(netAdditions) + m.metrics.size(m.size) + m.updateDirtyAt() + + return nil +} + +func (m *Memtable) setTombstone(key []byte, opts ...SecondaryKeyOption) error { + start := time.Now() + defer m.metrics.setTombstone(start.UnixNano()) + + if m.strategy != "replace" { + return errors.Errorf("setTombstone only possible with strategy 'replace'") + } + + m.Lock() + defer m.Unlock() + m.writesSinceLastSync = true + + var secondaryKeys [][]byte + if m.secondaryIndices > 0 { + secondaryKeys = make([][]byte, m.secondaryIndices) + for _, opt := range opts { + if err := opt(secondaryKeys); err != nil { + return err + } + } + } + + if err := m.commitlog.put(segmentReplaceNode{ + primaryKey: key, + value: nil, + secondaryIndexCount: m.secondaryIndices, + secondaryKeys: secondaryKeys, + tombstone: true, + }); err != nil { + return errors.Wrap(err, "write into commit log") + } + + m.key.setTombstone(key, nil, secondaryKeys) + m.size += uint64(len(key)) + 1 // 1 byte for tombstone + m.metrics.size(m.size) + m.updateDirtyAt() + + return nil +} + +func (m *Memtable) setTombstoneWith(key []byte, deletionTime time.Time, opts ...SecondaryKeyOption) error { + start := time.Now() + defer m.metrics.setTombstone(start.UnixNano()) + + if m.strategy != "replace" { + return errors.Errorf("setTombstone only possible with strategy 'replace'") + } + + m.Lock() + defer m.Unlock() + m.writesSinceLastSync = true + + var secondaryKeys [][]byte + if m.secondaryIndices > 0 { + secondaryKeys = make([][]byte, m.secondaryIndices) + for _, opt := range opts { + if err := opt(secondaryKeys); err != nil { + return err + } + } + } + + tombstonedVal := 
tombstonedValue(deletionTime) + + if err := m.commitlog.put(segmentReplaceNode{ + primaryKey: key, + value: tombstonedVal[:], + secondaryIndexCount: m.secondaryIndices, + secondaryKeys: secondaryKeys, + tombstone: true, + }); err != nil { + return errors.Wrap(err, "write into commit log") + } + + m.key.setTombstone(key, tombstonedVal[:], secondaryKeys) + m.size += uint64(len(key)) + 1 // 1 byte for tombstone + m.metrics.size(m.size) + m.updateDirtyAt() + + return nil +} + +func tombstonedValue(deletionTime time.Time) []byte { + var tombstonedVal [1 + 8]byte // version=1 deletionTime + tombstonedVal[0] = 1 + binary.LittleEndian.PutUint64(tombstonedVal[1:], uint64(deletionTime.UnixMilli())) + return tombstonedVal[:] +} + +func errorFromTombstonedValue(tombstonedVal []byte) error { + if len(tombstonedVal) == 0 { + return lsmkv.Deleted + } + + if tombstonedVal[0] != 1 { + return fmt.Errorf("unexpected tomstoned value, unsupported version %d", tombstonedVal[0]) + } + + if len(tombstonedVal) != 9 { + return fmt.Errorf("unexpected tomstoned value, invalid length") + } + + deletionTimeUnixMilli := int64(binary.LittleEndian.Uint64(tombstonedVal[1:])) + + return lsmkv.NewErrDeleted(time.UnixMilli(deletionTimeUnixMilli)) +} + +func (m *Memtable) getCollection(key []byte) ([]value, error) { + start := time.Now() + defer m.metrics.getCollection(start.UnixNano()) + + // TODO amourao: check if this is needed for StrategyInverted + if m.strategy != StrategySetCollection && m.strategy != StrategyMapCollection && m.strategy != StrategyInverted { + return nil, errors.Errorf("getCollection only possible with strategies %q, %q, %q", + StrategySetCollection, StrategyMapCollection, StrategyInverted) + } + + m.RLock() + defer m.RUnlock() + + v, err := m.keyMulti.get(key) + if err != nil { + return nil, err + } + + return v, nil +} + +func (m *Memtable) getMap(key []byte) ([]MapPair, error) { + start := time.Now() + defer m.metrics.getMap(start.UnixNano()) + + if m.strategy != 
StrategyMapCollection && m.strategy != StrategyInverted { + return nil, errors.Errorf("getMap only possible with strategies %q, %q", + StrategyMapCollection, StrategyInverted) + } + + m.RLock() + defer m.RUnlock() + + v, err := m.keyMap.get(key) + if err != nil { + return nil, err + } + + return v, nil +} + +func (m *Memtable) append(key []byte, values []value) error { + start := time.Now() + defer m.metrics.append(start.UnixNano()) + + if m.strategy != StrategySetCollection && m.strategy != StrategyMapCollection { + return errors.Errorf("append only possible with strategies %q, %q", + StrategySetCollection, StrategyMapCollection) + } + + m.Lock() + defer m.Unlock() + m.writesSinceLastSync = true + + if err := m.commitlog.append(segmentCollectionNode{ + primaryKey: key, + values: values, + }); err != nil { + return errors.Wrap(err, "write into commit log") + } + + m.keyMulti.insert(key, values) + m.size += uint64(len(key)) + for _, value := range values { + m.size += uint64(len(value.value)) + } + m.metrics.size(m.size) + m.updateDirtyAt() + + return nil +} + +func (m *Memtable) appendMapSorted(key []byte, pair MapPair) error { + start := time.Now() + defer m.metrics.appendMapSorted(start.UnixNano()) + + if m.strategy != StrategyMapCollection && m.strategy != StrategyInverted { + return errors.Errorf("append only possible with strategy %q, %q", + StrategyMapCollection, StrategyInverted) + } + + valuesForCommitLog, err := pair.Bytes() + if err != nil { + return err + } + + newNode := segmentCollectionNode{ + primaryKey: key, + values: []value{ + { + value: valuesForCommitLog, + tombstone: pair.Tombstone, + }, + }, + } + + m.Lock() + defer m.Unlock() + m.writesSinceLastSync = true + + if err := m.commitlog.append(newNode); err != nil { + return errors.Wrap(err, "write into commit log") + } + + m.keyMap.insert(key, pair) + m.size += uint64(len(key) + len(valuesForCommitLog)) + m.metrics.size(m.size) + m.updateDirtyAt() + + return nil +} + +func (m *Memtable) Size() 
uint64 { + m.RLock() + defer m.RUnlock() + + return m.size +} + +func (m *Memtable) ActiveDuration() time.Duration { + m.RLock() + defer m.RUnlock() + + return time.Since(m.createdAt) +} + +func (m *Memtable) updateDirtyAt() { + if m.dirtyAt.IsZero() { + m.dirtyAt = time.Now() + } +} + +// returns time memtable got dirty (1st write occurred) +// (0 if clean) +func (m *Memtable) DirtyDuration() time.Duration { + m.RLock() + defer m.RUnlock() + + if m.dirtyAt.IsZero() { + return 0 + } + return time.Since(m.dirtyAt) +} + +func (m *Memtable) countStats() *countStats { + m.RLock() + defer m.RUnlock() + return m.key.countStats() +} + +// the WAL uses a buffer and isn't written until the buffer size is crossed or +// this function explicitly called. This allows to safge unnecessary disk +// writes in larger operations, such as batches. It is sufficient to call write +// on the WAL just once. This does not make a batch atomic, but it guarantees +// that the WAL is written before a successful response is returned to the +// user. 
+func (m *Memtable) writeWAL() error { + m.Lock() + defer m.Unlock() + + return m.commitlog.flushBuffers() +} + +func (m *Memtable) ReadOnlyTombstones() (*sroar.Bitmap, error) { + if m.strategy != StrategyInverted { + return nil, errors.Errorf("tombstones only supported for strategy %q", StrategyInverted) + } + + m.RLock() + defer m.RUnlock() + + if m.tombstones != nil { + return m.tombstones.Clone(), nil + } + + return nil, lsmkv.NotFound +} + +func (m *Memtable) SetTombstone(docId uint64) error { + if m.strategy != StrategyInverted { + return errors.Errorf("tombstones only supported for strategy %q", StrategyInverted) + } + + m.Lock() + defer m.Unlock() + + m.tombstones.Set(docId) + + return nil +} + +func (m *Memtable) GetPropLengths() (uint64, uint64, error) { + m.RLock() + flatA := m.keyMap.flattenInOrder() + m.RUnlock() + + docIdsLengths := make(map[uint64]uint32) + propLengthSum := uint64(0) + propLengthCount := uint64(0) + + for _, mapNode := range flatA { + for j := range mapNode.values { + docId := binary.BigEndian.Uint64(mapNode.values[j].Key) + if !mapNode.values[j].Tombstone { + fieldLength := math.Float32frombits(binary.LittleEndian.Uint32(mapNode.values[j].Value[4:])) + if _, ok := docIdsLengths[docId]; !ok { + propLengthSum += uint64(fieldLength) + propLengthCount++ + } + docIdsLengths[docId] = uint32(fieldLength) + } + } + } + + return propLengthSum, propLengthCount, nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/memtable_flush.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/memtable_flush.go new file mode 100644 index 0000000000000000000000000000000000000000..b6d9997cdd52516470924da302dd928575ff75e1 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/memtable_flush.go @@ -0,0 +1,333 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// 
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "bufio" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/prometheus/client_golang/prometheus" + "github.com/weaviate/weaviate/usecases/monitoring" + + "github.com/pkg/errors" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv/segmentindex" + "github.com/weaviate/weaviate/entities/diskio" +) + +func (m *Memtable) flushWAL() error { + if err := m.commitlog.close(); err != nil { + return err + } + + if m.Size() == 0 { + // this is an empty memtable, nothing to do + // however, we still have to cleanup the commit log, otherwise we will + // attempt to recover from it on the next cycle + if err := m.commitlog.delete(); err != nil { + return errors.Wrap(err, "delete commit log file") + } + return nil + } + + // fsync parent directory + err := diskio.Fsync(filepath.Dir(m.path)) + if err != nil { + return err + } + + return nil +} + +func (m *Memtable) flush() (segmentPath string, rerr error) { + // close the commit log first, this also forces it to be fsynced. If + // something fails there, don't proceed with flushing. 
The commit log will + // only be deleted at the very end, if the flush was successful + // (indicated by a successful close of the flush file - which indicates a + // successful fsync) + + if err := m.commitlog.close(); err != nil { + return "", errors.Wrap(err, "close commit log file") + } + + if m.Size() == 0 { + // this is an empty memtable, nothing to do + // however, we still have to cleanup the commit log, otherwise we will + // attempt to recover from it on the next cycle + if err := m.commitlog.delete(); err != nil { + return "", errors.Wrap(err, "delete commit log file") + } + return "", nil + } + var tmpSegmentPath string + if m.writeSegmentInfoIntoFileName { + // new segments are always level 0 + tmpSegmentPath = m.path + segmentExtraInfo(0, SegmentStrategyFromString(m.strategy)) + ".db.tmp" + } else { + tmpSegmentPath = m.path + ".db.tmp" + } + + f, err := os.OpenFile(tmpSegmentPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o666) + if err != nil { + return "", err + } + defer func() { + if rerr != nil { + f.Close() + os.Remove(tmpSegmentPath) + } + }() + + observeWrite := m.metrics.writeMemtable + cb := func(written int64) { + observeWrite(written) + } + meteredF := diskio.NewMeteredWriter(f, cb) + + bufw := bufio.NewWriter(meteredF) + segmentFile := segmentindex.NewSegmentFile( + segmentindex.WithBufferedWriter(bufw), + segmentindex.WithChecksumsDisabled(!m.enableChecksumValidation), + ) + + var keys []segmentindex.Key + skipIndices := false + + switch m.strategy { + case StrategyReplace: + if keys, err = m.flushDataReplace(segmentFile); err != nil { + return "", err + } + + case StrategySetCollection: + if keys, err = m.flushDataSet(segmentFile); err != nil { + return "", err + } + + case StrategyRoaringSet: + if keys, err = m.flushDataRoaringSet(segmentFile); err != nil { + return "", err + } + + case StrategyRoaringSetRange: + if keys, err = m.flushDataRoaringSetRange(segmentFile); err != nil { + return "", err + } + skipIndices = true + + case 
StrategyMapCollection: + if keys, err = m.flushDataMap(segmentFile); err != nil { + return "", err + } + case StrategyInverted: + if keys, _, err = m.flushDataInverted(segmentFile, meteredF, bufw); err != nil { + return "", err + } + skipIndices = true + default: + return "", fmt.Errorf("cannot flush strategy %s", m.strategy) + } + + if !skipIndices { + indexes := &segmentindex.Indexes{ + Keys: keys, + SecondaryIndexCount: m.secondaryIndices, + ScratchSpacePath: m.path + ".scratch.d", + ObserveWrite: monitoring.GetMetrics().FileIOWrites.With(prometheus.Labels{ + "strategy": m.strategy, + "operation": "writeIndices", + }), + AllocChecker: m.allocChecker, + } + + if _, err := segmentFile.WriteIndexes(indexes, int64(m.size)); err != nil { + return "", err + } + } + + if _, err := segmentFile.WriteChecksum(); err != nil { + return "", err + } + + if err := f.Sync(); err != nil { + return "", err + } + + if err := f.Close(); err != nil { + return "", err + } + + segmentPath = strings.TrimSuffix(tmpSegmentPath, ".tmp") + err = os.Rename(tmpSegmentPath, segmentPath) + if err != nil { + return "", err + } + + // fsync parent directory + err = diskio.Fsync(filepath.Dir(m.path)) + if err != nil { + return "", err + } + + // only now that the file has been flushed is it safe to delete the commit log + // TODO: there might be an interest in keeping the commit logs around for + // longer as they might come in handy for replication + return segmentPath, m.commitlog.delete() +} + +func (m *Memtable) flushDataReplace(f *segmentindex.SegmentFile) ([]segmentindex.Key, error) { + flat := m.key.flattenInOrder() + + totalDataLength := totalKeyAndValueSize(flat) + perObjectAdditions := len(flat) * (1 + 8 + 4 + int(m.secondaryIndices)*4) // 1 byte for the tombstone, 8 bytes value length encoding, 4 bytes key length encoding, + 4 bytes key encoding for every secondary index + headerSize := segmentindex.HeaderSize + header := &segmentindex.Header{ + IndexStart: uint64(totalDataLength + 
perObjectAdditions + headerSize), + Level: 0, // always level zero on a new one + Version: segmentindex.ChooseHeaderVersion(m.enableChecksumValidation), + SecondaryIndices: m.secondaryIndices, + Strategy: SegmentStrategyFromString(m.strategy), + } + + n, err := f.WriteHeader(header) + if err != nil { + return nil, err + } + headerSize = int(n) + keys := make([]segmentindex.Key, len(flat)) + + totalWritten := headerSize + for i, node := range flat { + segNode := &segmentReplaceNode{ + offset: totalWritten, + tombstone: node.tombstone, + value: node.value, + primaryKey: node.key, + secondaryKeys: node.secondaryKeys, + secondaryIndexCount: m.secondaryIndices, + } + + ki, err := segNode.KeyIndexAndWriteTo(f.BodyWriter()) + if err != nil { + return nil, errors.Wrapf(err, "write node %d", i) + } + + keys[i] = ki + totalWritten = ki.ValueEnd + } + + return keys, nil +} + +func (m *Memtable) flushDataSet(f *segmentindex.SegmentFile) ([]segmentindex.Key, error) { + flat := m.keyMulti.flattenInOrder() + return m.flushDataCollection(f, flat) +} + +func (m *Memtable) flushDataMap(f *segmentindex.SegmentFile) ([]segmentindex.Key, error) { + m.RLock() + flat := m.keyMap.flattenInOrder() + m.RUnlock() + + // by encoding each map pair we can force the same structure as for a + // collection, which means we can reuse the same flushing logic + asMulti := make([]*binarySearchNodeMulti, len(flat)) + for i, mapNode := range flat { + asMulti[i] = &binarySearchNodeMulti{ + key: mapNode.key, + values: make([]value, len(mapNode.values)), + } + + for j := range asMulti[i].values { + enc, err := mapNode.values[j].Bytes() + if err != nil { + return nil, err + } + + asMulti[i].values[j] = value{ + value: enc, + tombstone: mapNode.values[j].Tombstone, + } + } + + } + return m.flushDataCollection(f, asMulti) +} + +func (m *Memtable) flushDataCollection(f *segmentindex.SegmentFile, + flat []*binarySearchNodeMulti, +) ([]segmentindex.Key, error) { + totalDataLength := 
totalValueSizeCollection(flat) + header := &segmentindex.Header{ + IndexStart: uint64(totalDataLength + segmentindex.HeaderSize), + Level: 0, // always level zero on a new one + Version: segmentindex.ChooseHeaderVersion(m.enableChecksumValidation), + SecondaryIndices: m.secondaryIndices, + Strategy: SegmentStrategyFromString(m.strategy), + } + + n, err := f.WriteHeader(header) + if err != nil { + return nil, err + } + headerSize := int(n) + keys := make([]segmentindex.Key, len(flat)) + + totalWritten := headerSize + for i, node := range flat { + ki, err := (&segmentCollectionNode{ + values: node.values, + primaryKey: node.key, + offset: totalWritten, + }).KeyIndexAndWriteTo(f.BodyWriter()) + if err != nil { + return nil, errors.Wrapf(err, "write node %d", i) + } + + keys[i] = ki + totalWritten = ki.ValueEnd + } + + return keys, nil +} + +func totalKeyAndValueSize(in []*binarySearchNode) int { + var sum int + for _, n := range in { + sum += len(n.value) + sum += len(n.key) + for _, sec := range n.secondaryKeys { + sum += len(sec) + } + } + + return sum +} + +func totalValueSizeCollection(in []*binarySearchNodeMulti) int { + var sum int + for _, n := range in { + sum += 8 // uint64 to indicate array length + for _, v := range n.values { + sum += 1 // bool to indicate value tombstone + sum += 8 // uint64 to indicate value length + sum += len(v.value) + } + + sum += 4 // uint32 to indicate key size + sum += len(n.key) + } + + return sum +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/memtable_flush_inverted.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/memtable_flush_inverted.go new file mode 100644 index 0000000000000000000000000000000000000000..7e7ae827d3be76a7c03370afc3a7a4484b4cd94d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/memtable_flush_inverted.go @@ -0,0 +1,261 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| 
|\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "bufio" + "bytes" + "encoding/binary" + "encoding/gob" + "fmt" + "math" + + "github.com/prometheus/client_golang/prometheus" + "github.com/weaviate/sroar" + "github.com/weaviate/weaviate/adapters/repos/db/compactor" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv/segmentindex" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv/varenc" + "github.com/weaviate/weaviate/entities/diskio" + "github.com/weaviate/weaviate/usecases/config" + "github.com/weaviate/weaviate/usecases/monitoring" +) + +func (m *Memtable) flushDataInverted(f *segmentindex.SegmentFile, ogF *diskio.MeteredWriter, bufw *bufio.Writer) ([]segmentindex.Key, *sroar.Bitmap, error) { + m.RLock() + flatA := m.keyMap.flattenInOrder() + m.RUnlock() + + // by encoding each map pair we can force the same structure as for a + // collection, which means we can reuse the same flushing logic + flat := make([]*binarySearchNodeMap, len(flatA)) + + actuallyWritten := 0 + actuallyWrittenKeys := make(map[string]struct{}) + tombstones := m.tombstones + + docIdsLengths := make(map[uint64]uint32) + propLengthSum := uint64(0) + propLengthCount := uint64(0) + + for i, mapNode := range flatA { + flat[i] = &binarySearchNodeMap{ + key: mapNode.key, + values: make([]MapPair, 0, len(mapNode.values)), + } + + for j := range mapNode.values { + docId := binary.BigEndian.Uint64(mapNode.values[j].Key) + if !mapNode.values[j].Tombstone { + fieldLength := math.Float32frombits(binary.LittleEndian.Uint32(mapNode.values[j].Value[4:])) + flat[i].values = append(flat[i].values, mapNode.values[j]) + actuallyWritten++ + actuallyWrittenKeys[string(mapNode.key)] = struct{}{} + if _, ok := docIdsLengths[docId]; !ok { + propLengthSum += uint64(fieldLength) + propLengthCount++ + } + docIdsLengths[docId] = uint32(fieldLength) + } else 
{ + tombstones.Set(docId) + } + } + + } + + // weighted average of m.averagePropLength and the average of the current flush + // averaged by propLengthCount and m.propLengthCount + if m.averagePropLength == 0 { + m.averagePropLength = float64(propLengthSum) / float64(propLengthCount) + m.propLengthCount = propLengthCount + } else { + m.averagePropLength = (m.averagePropLength*float64(m.propLengthCount) + float64(propLengthSum)) / float64(m.propLengthCount+propLengthCount) + m.propLengthCount += propLengthCount + } + + tombstoneBuffer := make([]byte, 0) + if !tombstones.IsEmpty() { + tombstoneBuffer = tombstones.ToBuffer() + } + + header := segmentindex.Header{ + Version: segmentindex.ChooseHeaderVersion(m.enableChecksumValidation), + IndexStart: 0, // will be updated later + Level: 0, // always level zero on a new one + SecondaryIndices: m.secondaryIndices, + Strategy: SegmentStrategyFromString(StrategyInverted), + } + + headerInverted := segmentindex.HeaderInverted{ + KeysOffset: uint64(segmentindex.HeaderSize + segmentindex.SegmentInvertedDefaultHeaderSize + segmentindex.SegmentInvertedDefaultFieldCount), + TombstoneOffset: 0, + PropertyLengthsOffset: 0, + Version: 0, + BlockSize: uint8(segmentindex.SegmentInvertedDefaultBlockSize), + DataFieldCount: uint8(segmentindex.SegmentInvertedDefaultFieldCount), + DataFields: []varenc.VarEncDataType{varenc.DeltaVarIntUint64, varenc.VarIntUint64}, + } + + docIdEncoder := varenc.GetVarEncEncoder64(headerInverted.DataFields[0]) + tfEncoder := varenc.GetVarEncEncoder64(headerInverted.DataFields[1]) + docIdEncoder.Init(segmentindex.SegmentInvertedDefaultBlockSize) + tfEncoder.Init(segmentindex.SegmentInvertedDefaultBlockSize) + + headerEmpty := make([]byte, headerInverted.KeysOffset) + if _, err := bufw.Write(headerEmpty); err != nil { + return nil, nil, err + } + + totalWritten := len(headerEmpty) + keysStartOffset := totalWritten + + buf := make([]byte, 8) + + keys := make([]segmentindex.Key, len(flat)) + actuallyWritten = 0 
+ + bw := f.BodyWriter() + if bw == nil { + return nil, nil, fmt.Errorf("segment file body writer is nil, cannot write inverted index") + } + + for _, mapNode := range flat { + if len(mapNode.values) > 0 { + + ki := segmentindex.Key{ + Key: mapNode.key, + ValueStart: totalWritten, + } + + b := config.DefaultBM25b + k1 := config.DefaultBM25k1 + if m.bm25config != nil { + b = m.bm25config.B + k1 = m.bm25config.K1 + } + + blocksEncoded, _ := createAndEncodeBlocksWithLengths(mapNode.values, docIdEncoder, tfEncoder, float64(b), float64(k1), m.averagePropLength) + + if _, err := bw.Write(blocksEncoded); err != nil { + return nil, nil, err + } + totalWritten += len(blocksEncoded) + + // write key length + binary.LittleEndian.PutUint32(buf, uint32(len(mapNode.key))) + if _, err := bw.Write(buf[:4]); err != nil { + return nil, nil, err + } + + totalWritten += 4 + + // write key + if _, err := bw.Write(mapNode.key); err != nil { + return nil, nil, err + } + totalWritten += len(mapNode.key) + + ki.ValueEnd = totalWritten + + keys[actuallyWritten] = ki + actuallyWritten++ + } + } + + tombstoneOffset := totalWritten + + binary.LittleEndian.PutUint64(buf, uint64(len(tombstoneBuffer))) + if _, err := bw.Write(buf); err != nil { + return nil, nil, err + } + totalWritten += 8 + + if _, err := bw.Write(tombstoneBuffer); err != nil { + return nil, nil, err + } + totalWritten += len(tombstoneBuffer) + propLengthsOffset := totalWritten + + b := new(bytes.Buffer) + + propLengthAvg := float64(propLengthSum) / float64(propLengthCount) + + binary.LittleEndian.PutUint64(buf, math.Float64bits(propLengthAvg)) + if _, err := bw.Write(buf); err != nil { + return nil, nil, err + } + totalWritten += 8 + + binary.LittleEndian.PutUint64(buf, propLengthCount) + if _, err := bw.Write(buf); err != nil { + return nil, nil, err + } + totalWritten += 8 + + e := gob.NewEncoder(b) + + // Encoding the map + err := e.Encode(docIdsLengths) + if err != nil { + return nil, nil, err + } + + 
binary.LittleEndian.PutUint64(buf, uint64(b.Len())) + if _, err := bw.Write(buf); err != nil { + return nil, nil, err + } + totalWritten += 8 + + if _, err := bw.Write(b.Bytes()); err != nil { + return nil, nil, err + } + + totalWritten += b.Len() + + treeOffset := totalWritten + + header.IndexStart = uint64(treeOffset) + + headerInverted.KeysOffset = uint64(keysStartOffset) + headerInverted.TombstoneOffset = uint64(tombstoneOffset) + headerInverted.PropertyLengthsOffset = uint64(propLengthsOffset) + + f.SetHeader(&header) + f.SetHeaderInverted(&headerInverted) + + indexes := &segmentindex.Indexes{ + Keys: keys, + SecondaryIndexCount: m.secondaryIndices, + ScratchSpacePath: m.path + ".scratch.d", + ObserveWrite: monitoring.GetMetrics().FileIOWrites.With(prometheus.Labels{ + "strategy": m.strategy, + "operation": "writeIndices", + }), + } + + if _, err := f.WriteIndexes(indexes, int64(m.size)); err != nil { + return nil, nil, err + } + + // flush buffered, so we can safely seek on underlying writer + + if err := bufw.Flush(); err != nil { + return nil, nil, fmt.Errorf("flush buffered: %w", err) + } + + version := segmentindex.ChooseHeaderVersion(m.enableChecksumValidation) + if err := compactor.WriteHeaders(nil, ogF, bufw, f, 0, version, + header.SecondaryIndices, header.IndexStart, segmentindex.StrategyInverted, &headerInverted); err != nil { + return nil, nil, fmt.Errorf("write headers: %w", err) + } + + return keys[:actuallyWritten], tombstones, nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/memtable_flush_roaring_set.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/memtable_flush_roaring_set.go new file mode 100644 index 0000000000000000000000000000000000000000..7ce8e9cf5d46f3158cc54d54e17cf07f49c2d458 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/memtable_flush_roaring_set.go @@ -0,0 +1,73 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | 
__/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "fmt" + + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv/segmentindex" + "github.com/weaviate/weaviate/adapters/repos/db/roaringset" +) + +func (m *Memtable) flushDataRoaringSet(f *segmentindex.SegmentFile) ([]segmentindex.Key, error) { + flat := m.roaringSet.FlattenInOrder() + + totalDataLength := totalPayloadSizeRoaringSet(flat) + header := &segmentindex.Header{ + IndexStart: uint64(totalDataLength + segmentindex.HeaderSize), + Level: 0, // always level zero on a new one + Version: segmentindex.ChooseHeaderVersion(m.enableChecksumValidation), + SecondaryIndices: 0, + Strategy: segmentindex.StrategyRoaringSet, + } + + n, err := f.WriteHeader(header) + if err != nil { + return nil, err + } + headerSize := int(n) + keys := make([]segmentindex.Key, len(flat)) + + totalWritten := headerSize + for i, node := range flat { + sn, err := roaringset.NewSegmentNode(node.Key, node.Value.Additions, + node.Value.Deletions) + if err != nil { + return nil, fmt.Errorf("create segment node: %w", err) + } + + ki, err := sn.KeyIndexAndWriteTo(f.BodyWriter(), totalWritten) + if err != nil { + return nil, fmt.Errorf("write node %d: %w", i, err) + } + + keys[i] = ki + totalWritten = ki.ValueEnd + } + + return keys, nil +} + +func totalPayloadSizeRoaringSet(in []*roaringset.BinarySearchNode) int { + var sum int + for _, n := range in { + sum += 8 // uint64 to segment length + sum += 8 // uint64 to indicate length of additions bitmap + sum += len(n.Value.Additions.ToBuffer()) + sum += 8 // uint64 to indicate length of deletions bitmap + sum += len(n.Value.Deletions.ToBuffer()) + sum += 4 // uint32 to indicate key size + sum += len(n.Key) + } + + return sum +} diff --git 
a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/memtable_flush_roaring_set_range.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/memtable_flush_roaring_set_range.go new file mode 100644 index 0000000000000000000000000000000000000000..3219d70bb4120811de230cb461566c29619c9bbe --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/memtable_flush_roaring_set_range.go @@ -0,0 +1,68 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "fmt" + + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv/segmentindex" + "github.com/weaviate/weaviate/adapters/repos/db/roaringsetrange" +) + +func (m *Memtable) flushDataRoaringSetRange(f *segmentindex.SegmentFile) ([]segmentindex.Key, error) { + nodes := m.roaringSetRange.Nodes() + + totalDataLength := totalPayloadSizeRoaringSetRange(nodes) + header := &segmentindex.Header{ + IndexStart: uint64(totalDataLength + segmentindex.HeaderSize), + Level: 0, // always level zero on a new one + Version: segmentindex.ChooseHeaderVersion(m.enableChecksumValidation), + SecondaryIndices: 0, + Strategy: segmentindex.StrategyRoaringSetRange, + } + + _, err := f.WriteHeader(header) + if err != nil { + return nil, err + } + + for i, node := range nodes { + sn, err := roaringsetrange.NewSegmentNode(node.Key, node.Additions, node.Deletions) + if err != nil { + return nil, fmt.Errorf("create segment node: %w", err) + } + + _, err = f.BodyWriter().Write(sn.ToBuffer()) + if err != nil { + return nil, fmt.Errorf("write segment node %d: %w", i, err) + } + } + + return make([]segmentindex.Key, 0), nil +} + +func totalPayloadSizeRoaringSetRange(nodes []*roaringsetrange.MemtableNode) int { + var sum int + for _, node := range nodes { + 
sum += 8 // uint64 to segment length + sum += 1 // key (fixed size) + sum += 8 // uint64 to indicate length of additions bitmap + sum += len(node.Additions.ToBuffer()) + + if node.Key == 0 { + sum += 8 // uint64 to indicate length of deletions bitmap + sum += len(node.Deletions.ToBuffer()) + } + } + + return sum +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/memtable_metrics.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/memtable_metrics.go new file mode 100644 index 0000000000000000000000000000000000000000..701ab6a2c7167bf3f53133ccc806364b80ff3ed0 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/memtable_metrics.go @@ -0,0 +1,43 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +type memtableMetrics struct { + put NsObserver + setTombstone NsObserver + append NsObserver + appendMapSorted NsObserver + get NsObserver + getBySecondary NsObserver + getMap NsObserver + getCollection NsObserver + size Setter + writeMemtable BytesWriteObserver +} + +// newMemtableMetrics curries the prometheus-functions just once to make sure +// they don't have to be curried on the hotpath where we this would lead to a +// lot of allocations. 
+func newMemtableMetrics(metrics *Metrics, path, strategy string) *memtableMetrics { + return &memtableMetrics{ + put: metrics.MemtableOpObserver(path, strategy, "put"), + setTombstone: metrics.MemtableOpObserver(path, strategy, "setTombstone"), + append: metrics.MemtableOpObserver(path, strategy, "append"), + appendMapSorted: metrics.MemtableOpObserver(path, strategy, "appendMapSorted"), + get: metrics.MemtableOpObserver(path, strategy, "get"), + getBySecondary: metrics.MemtableOpObserver(path, strategy, "getBySecondary"), + getMap: metrics.MemtableOpObserver(path, strategy, "getMap"), + getCollection: metrics.MemtableOpObserver(path, strategy, "getCollection"), + size: metrics.MemtableSizeSetter(path, strategy), + writeMemtable: metrics.MemtableWriteObserver(strategy, "flushMemtable"), + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/memtable_roaring_set.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/memtable_roaring_set.go new file mode 100644 index 0000000000000000000000000000000000000000..15522d9c2c34ab1e81e3172f23620afcfd0693b3 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/memtable_roaring_set.go @@ -0,0 +1,149 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "github.com/pkg/errors" + "github.com/weaviate/sroar" + "github.com/weaviate/weaviate/adapters/repos/db/roaringset" +) + +func (m *Memtable) roaringSetAddOne(key []byte, value uint64) error { + return m.roaringSetAddList(key, []uint64{value}) +} + +func (m *Memtable) roaringSetAddList(key []byte, values []uint64) error { + if err := CheckStrategyRoaringSet(m.strategy); err != nil { + return err + } + + m.Lock() + defer m.Unlock() + + if err := m.roaringSetAddCommitLog(key, values, []uint64{}); err != nil { + return err + } + + m.roaringSet.Insert(key, roaringset.Insert{Additions: values}) + + m.roaringSetAdjustMeta(len(values)) + return nil +} + +func (m *Memtable) roaringSetAddBitmap(key []byte, bm *sroar.Bitmap) error { + if err := CheckStrategyRoaringSet(m.strategy); err != nil { + return err + } + + m.Lock() + defer m.Unlock() + + if err := m.roaringSetAddCommitLog(key, bm.ToArray(), []uint64{}); err != nil { + return err + } + + m.roaringSet.Insert(key, roaringset.Insert{Additions: bm.ToArray()}) + + m.roaringSetAdjustMeta(bm.GetCardinality()) + return nil +} + +func (m *Memtable) roaringSetRemoveOne(key []byte, value uint64) error { + return m.roaringSetRemoveList(key, []uint64{value}) +} + +func (m *Memtable) roaringSetRemoveList(key []byte, values []uint64) error { + if err := CheckStrategyRoaringSet(m.strategy); err != nil { + return err + } + + m.Lock() + defer m.Unlock() + + if err := m.roaringSetAddCommitLog(key, []uint64{}, values); err != nil { + return err + } + + m.roaringSet.Insert(key, roaringset.Insert{Deletions: values}) + + m.roaringSetAdjustMeta(len(values)) + return nil +} + +func (m *Memtable) roaringSetRemoveBitmap(key []byte, bm *sroar.Bitmap) error { + if err := CheckStrategyRoaringSet(m.strategy); err != nil { + return err + } + + m.Lock() + defer m.Unlock() + + if err := m.roaringSetAddCommitLog(key, []uint64{}, bm.ToArray()); err != nil { + return err + } + + 
m.roaringSet.Insert(key, roaringset.Insert{Deletions: bm.ToArray()}) + + m.roaringSetAdjustMeta(bm.GetCardinality()) + return nil +} + +func (m *Memtable) roaringSetAddRemoveSlices(key []byte, additions []uint64, deletions []uint64) error { + if err := CheckStrategyRoaringSet(m.strategy); err != nil { + return err + } + + m.Lock() + defer m.Unlock() + + if err := m.roaringSetAddCommitLog(key, additions, deletions); err != nil { + return err + } + + m.roaringSet.Insert(key, roaringset.Insert{ + Additions: additions, + Deletions: deletions, + }) + + m.roaringSetAdjustMeta(len(additions) + len(deletions)) + return nil +} + +// returned bitmaps are cloned and safe to mutate +func (m *Memtable) roaringSetGet(key []byte) (roaringset.BitmapLayer, error) { + if err := CheckStrategyRoaringSet(m.strategy); err != nil { + return roaringset.BitmapLayer{}, err + } + + m.RLock() + defer m.RUnlock() + + return m.roaringSet.Get(key) +} + +func (m *Memtable) roaringSetAdjustMeta(entriesChanged int) { + // in the worst case roaring bitmaps take 2 bytes per entry. A reasonable + // estimation is therefore to take the changed entries and multiply them by + // 2. 
+ m.size += uint64(entriesChanged * 2) + m.metrics.size(m.size) + m.updateDirtyAt() +} + +func (m *Memtable) roaringSetAddCommitLog(key []byte, additions []uint64, deletions []uint64) error { + if node, err := roaringset.NewSegmentNodeList(key, additions, deletions); err != nil { + return errors.Wrap(err, "create node for commit log") + } else if err := m.commitlog.add(node); err != nil { + return errors.Wrap(err, "add node to commit log") + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/memtable_roaring_set_range.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/memtable_roaring_set_range.go new file mode 100644 index 0000000000000000000000000000000000000000..f433c1ca573db4b96531eff31547ce79523f5f92 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/memtable_roaring_set_range.go @@ -0,0 +1,98 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "encoding/binary" + + "github.com/pkg/errors" + "github.com/weaviate/weaviate/adapters/repos/db/roaringset" +) + +func (m *Memtable) roaringSetRangeAdd(key uint64, values ...uint64) error { + if err := CheckStrategyRoaringSetRange(m.strategy); err != nil { + return err + } + + m.Lock() + defer m.Unlock() + + if err := m.roaringSetRangeAddCommitLog(key, values, nil); err != nil { + return err + } + + m.roaringSetRange.Insert(key, values) + + m.roaringSetRangeAdjustMeta(len(values)) + return nil +} + +func (m *Memtable) roaringSetRangeRemove(key uint64, values ...uint64) error { + if err := CheckStrategyRoaringSetRange(m.strategy); err != nil { + return err + } + + m.Lock() + defer m.Unlock() + + if err := m.roaringSetRangeAddCommitLog(key, nil, values); err != nil { + return err + } + + m.roaringSetRange.Delete(key, values) + + m.roaringSetRangeAdjustMeta(len(values)) + return nil +} + +func (m *Memtable) roaringSetRangeAddRemove(key uint64, additions []uint64, deletions []uint64) error { + if err := CheckStrategyRoaringSetRange(m.strategy); err != nil { + return err + } + + m.Lock() + defer m.Unlock() + + if err := m.roaringSetRangeAddCommitLog(key, additions, deletions); err != nil { + return err + } + + m.roaringSetRange.Delete(key, deletions) + m.roaringSetRange.Insert(key, additions) + + m.roaringSetRangeAdjustMeta(len(additions) + len(deletions)) + return nil +} + +func (m *Memtable) roaringSetRangeAdjustMeta(entriesChanged int) { + // TODO roaring-set-range new estimations + + // in the worst case roaring bitmaps take 2 bytes per entry. A reasonable + // estimation is therefore to take the changed entries and multiply them by + // 2. 
+ m.size += uint64(entriesChanged * 2) + m.metrics.size(m.size) + m.updateDirtyAt() +} + +func (m *Memtable) roaringSetRangeAddCommitLog(key uint64, additions []uint64, deletions []uint64) error { + // TODO roaring-set-range improved commit log + + keyBuf := make([]byte, 8) + binary.BigEndian.PutUint64(keyBuf, key) + if node, err := roaringset.NewSegmentNodeList(keyBuf, additions, deletions); err != nil { + return errors.Wrap(err, "create node for commit log") + } else if err := m.commitlog.add(node); err != nil { + return errors.Wrap(err, "add node to commit log") + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/memtable_roaring_set_range_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/memtable_roaring_set_range_test.go new file mode 100644 index 0000000000000000000000000000000000000000..ec8d762af689be9c9ff7f83298e521e5bf0c5cca --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/memtable_roaring_set_range_test.go @@ -0,0 +1,120 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
//
//  CONTACT: hello@weaviate.io
//

package lsmkv

import (
	"context"
	"math/rand"
	"path"
	"testing"
	"time"

	"github.com/sirupsen/logrus/hooks/test"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/weaviate/weaviate/adapters/repos/db/roaringsetrange"
	"github.com/weaviate/weaviate/entities/filters"
)

// TestMemtableRoaringSetRange verifies that a roaring-set-range reader
// created from a memtable keeps returning a stable snapshot while the
// memtable is concurrently mutated.
func TestMemtableRoaringSetRange(t *testing.T) {
	rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
	logger, _ := test.NewNullLogger()
	memPath := func() string {
		return path.Join(t.TempDir(), "memtable")
	}

	t.Run("concurrent writes and search", func(t *testing.T) {
		cl, err := newCommitLogger(memPath(), StrategyRoaringSetRange, 0)
		require.NoError(t, err)
		m, err := newMemtable(memPath(), StrategyRoaringSetRange, 0, cl, nil, logger, false, nil, false, nil)
		require.Nil(t, err)

		// each key k maps to the three values k+1000, k+2000, k+3000
		addKeyVals := func(k uint64) error {
			return m.roaringSetRangeAdd(k, k+1000, k+2000, k+3000)
		}
		removeKeyVals := func(k uint64) error {
			return m.roaringSetRangeRemove(k, k+1000, k+2000, k+3000)
		}
		// asserts the expected additions/deletions for "<= 4" and "> 7" range
		// reads, based on the initial population below (even keys added,
		// odd keys removed)
		assertRead_LTE4_GT7 := func(t *testing.T, reader roaringsetrange.InnerReader) {
			expAddLTE4 := []uint64{
				1000, 2000, 3000,
				1002, 2002, 3002,
				1004, 2004, 3004,
			}
			expAddGT7 := []uint64{
				1008, 2008, 3008,
			}
			// deletions cover every key 0..9 regardless of the range filter
			expDel := []uint64{
				1000, 2000, 3000,
				1001, 2001, 3001,
				1002, 2002, 3002,
				1003, 2003, 3003,
				1004, 2004, 3004,
				1005, 2005, 3005,
				1006, 2006, 3006,
				1007, 2007, 3007,
				1008, 2008, 3008,
				1009, 2009, 3009,
			}

			layerLTE4, release, err := reader.Read(context.Background(), 4, filters.OperatorLessThanEqual)
			require.NoError(t, err)
			defer release()

			layerGT7, release, err := reader.Read(context.Background(), 7, filters.OperatorGreaterThan)
			require.NoError(t, err)
			defer release()

			assert.ElementsMatch(t, expAddLTE4, layerLTE4.Additions.ToArray())
			assert.ElementsMatch(t, expDel, layerLTE4.Deletions.ToArray())
			assert.ElementsMatch(t, expAddGT7, layerGT7.Additions.ToArray())
			assert.ElementsMatch(t, expDel, layerGT7.Deletions.ToArray())
		}

		// populate with initial data
		for i := uint64(0); i < 10; i = i + 2 {
			assert.NoError(t, addKeyVals(i))
		}
		for i := uint64(1); i < 10; i = i + 2 {
			assert.NoError(t, removeKeyVals(i))
		}

		// create reader
		reader := m.newRoaringSetRangeReader()

		// assert data
		assertRead_LTE4_GT7(t, reader)

		// concurrently mutate memtable
		chStart := make(chan struct{})
		chFinish := make(chan struct{})
		go func() {
			chStart <- struct{}{}
			for {
				select {
				case <-chFinish:
					return
				default:
					// NOTE(review): errors intentionally ignored — this loop
					// only generates background mutation load
					addKeyVals(uint64(rnd.Int31n(1000)))
					removeKeyVals(uint64(rnd.Int31n(1000)))
				}
			}
		}()

		// assert search results do not contain mutated data
		<-chStart
		for i := 0; i < 256; i++ {
			assertRead_LTE4_GT7(t, reader)
		}
		chFinish <- struct{}{}
	})
}
//
//  CONTACT: hello@weaviate.io
//

package lsmkv

import (
	"path"
	"testing"

	"github.com/sirupsen/logrus/hooks/test"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/weaviate/weaviate/adapters/repos/db/roaringset"
)

// TestMemtableRoaringSet covers all mutation entry points of the roaring-set
// strategy (single values, lists, bitmaps, combined add/remove) and verifies
// the resulting additions/deletions layers via roaringSetGet.
func TestMemtableRoaringSet(t *testing.T) {
	logger, _ := test.NewNullLogger()
	memPath := func() string {
		return path.Join(t.TempDir(), "fake")
	}

	t.Run("inserting individual entries", func(t *testing.T) {
		cl, err := newCommitLogger(memPath(), StrategyRoaringSet, 0)
		require.NoError(t, err)

		m, err := newMemtable(memPath(), StrategyRoaringSet, 0, cl, nil, logger, false, nil, false, nil)
		require.Nil(t, err)

		key1, key2 := []byte("key1"), []byte("key2")

		assert.Nil(t, m.roaringSetAddOne(key1, 1))
		assert.Nil(t, m.roaringSetAddOne(key1, 2))
		assert.Nil(t, m.roaringSetAddOne(key2, 3))
		assert.Nil(t, m.roaringSetAddOne(key2, 4))
		assert.Greater(t, m.Size(), uint64(0))

		setKey1, err := m.roaringSetGet(key1)
		require.Nil(t, err)
		assert.True(t, setKey1.Additions.Contains(1))
		assert.True(t, setKey1.Additions.Contains(2))
		assert.False(t, setKey1.Additions.Contains(3))
		assert.False(t, setKey1.Additions.Contains(4))

		setKey2, err := m.roaringSetGet(key2)
		require.Nil(t, err)
		assert.False(t, setKey2.Additions.Contains(1))
		assert.False(t, setKey2.Additions.Contains(2))
		assert.True(t, setKey2.Additions.Contains(3))
		assert.True(t, setKey2.Additions.Contains(4))

		require.Nil(t, m.commitlog.close())
	})

	t.Run("inserting lists", func(t *testing.T) {
		cl, err := newCommitLogger(memPath(), StrategyRoaringSet, 0)
		require.NoError(t, err)

		m, err := newMemtable(memPath(), StrategyRoaringSet, 0, cl, nil, logger, false, nil, false, nil)
		require.Nil(t, err)

		key1, key2 := []byte("key1"), []byte("key2")

		assert.Nil(t, m.roaringSetAddList(key1, []uint64{1, 2}))
		assert.Nil(t, m.roaringSetAddList(key2, []uint64{3, 4}))
		assert.Greater(t, m.Size(), uint64(0))

		setKey1, err := m.roaringSetGet(key1)
		require.Nil(t, err)
		assert.True(t, setKey1.Additions.Contains(1))
		assert.True(t, setKey1.Additions.Contains(2))
		assert.False(t, setKey1.Additions.Contains(3))
		assert.False(t, setKey1.Additions.Contains(4))

		setKey2, err := m.roaringSetGet(key2)
		require.Nil(t, err)
		assert.False(t, setKey2.Additions.Contains(1))
		assert.False(t, setKey2.Additions.Contains(2))
		assert.True(t, setKey2.Additions.Contains(3))
		assert.True(t, setK2OrKey2Additions := setKey2.Additions; setKey2.Additions.Contains(4))

		require.Nil(t, m.commitlog.close())
	})

	t.Run("inserting bitmaps", func(t *testing.T) {
		cl, err := newCommitLogger(memPath(), StrategyRoaringSet, 0)
		require.NoError(t, err)

		m, err := newMemtable(memPath(), StrategyRoaringSet, 0, cl, nil, logger, false, nil, false, nil)
		require.Nil(t, err)

		key1, key2 := []byte("key1"), []byte("key2")

		bm1 := roaringset.NewBitmap(1, 2)
		assert.Nil(t, m.roaringSetAddBitmap(key1, bm1))
		bm2 := roaringset.NewBitmap(3, 4)
		assert.Nil(t, m.roaringSetAddBitmap(key2, bm2))
		assert.Greater(t, m.Size(), uint64(0))

		setKey1, err := m.roaringSetGet(key1)
		require.Nil(t, err)
		assert.True(t, setKey1.Additions.Contains(1))
		assert.True(t, setKey1.Additions.Contains(2))
		assert.False(t, setKey1.Additions.Contains(3))
		assert.False(t, setKey1.Additions.Contains(4))

		setKey2, err := m.roaringSetGet(key2)
		require.Nil(t, err)
		assert.False(t, setKey2.Additions.Contains(1))
		assert.False(t, setKey2.Additions.Contains(2))
		assert.True(t, setKey2.Additions.Contains(3))
		assert.True(t, setKey2.Additions.Contains(4))

		require.Nil(t, m.commitlog.close())
	})

	t.Run("removing individual entries", func(t *testing.T) {
		cl, err := newCommitLogger(memPath(), StrategyRoaringSet, 0)
		require.NoError(t, err)

		m, err := newMemtable(memPath(), StrategyRoaringSet, 0, cl, nil, logger, false, nil, false, nil)
		require.Nil(t, err)

		key1, key2 := []byte("key1"), []byte("key2")

		assert.Nil(t, m.roaringSetRemoveOne(key1, 7))
		assert.Nil(t, m.roaringSetRemoveOne(key2, 8))
		assert.Greater(t, m.Size(), uint64(0))

		setKey1, err := m.roaringSetGet(key1)
		require.Nil(t, err)
		assert.False(t, setKey1.Additions.Contains(7))
		assert.True(t, setKey1.Deletions.Contains(7))

		setKey2, err := m.roaringSetGet(key2)
		require.Nil(t, err)
		assert.False(t, setKey2.Additions.Contains(8))
		assert.True(t, setKey2.Deletions.Contains(8))

		require.Nil(t, m.commitlog.close())
	})

	t.Run("removing lists", func(t *testing.T) {
		cl, err := newCommitLogger(memPath(), StrategyRoaringSet, 0)
		require.NoError(t, err)

		m, err := newMemtable(memPath(), StrategyRoaringSet, 0, cl, nil, logger, false, nil, false, nil)
		require.Nil(t, err)

		key1, key2 := []byte("key1"), []byte("key2")

		assert.Nil(t, m.roaringSetRemoveList(key1, []uint64{7, 8}))
		assert.Nil(t, m.roaringSetRemoveList(key2, []uint64{9, 10}))
		assert.Greater(t, m.Size(), uint64(0))

		setKey1, err := m.roaringSetGet(key1)
		require.Nil(t, err)
		assert.Equal(t, 0, setKey1.Additions.GetCardinality())
		assert.Equal(t, 2, setKey1.Deletions.GetCardinality())
		assert.True(t, setKey1.Deletions.Contains(7))
		assert.True(t, setKey1.Deletions.Contains(8))

		setKey2, err := m.roaringSetGet(key2)
		require.Nil(t, err)
		assert.Equal(t, 0, setKey2.Additions.GetCardinality())
		assert.Equal(t, 2, setKey2.Deletions.GetCardinality())
		assert.True(t, setKey2.Deletions.Contains(9))
		assert.True(t, setKey2.Deletions.Contains(10))

		require.Nil(t, m.commitlog.close())
	})

	t.Run("removing bitmaps", func(t *testing.T) {
		cl, err := newCommitLogger(memPath(), StrategyRoaringSet, 0)
		require.NoError(t, err)

		m, err := newMemtable(memPath(), StrategyRoaringSet, 0, cl, nil, logger, false, nil, false, nil)
		require.Nil(t, err)

		key1, key2 := []byte("key1"), []byte("key2")

		assert.Nil(t, m.roaringSetRemoveBitmap(key1, roaringset.NewBitmap(7, 8)))
		assert.Nil(t, m.roaringSetRemoveBitmap(key2, roaringset.NewBitmap(9, 10)))
		assert.Greater(t, m.Size(), uint64(0))

		setKey1, err := m.roaringSetGet(key1)
		require.Nil(t, err)
		assert.Equal(t, 0, setKey1.Additions.GetCardinality())
		assert.Equal(t, 2, setKey1.Deletions.GetCardinality())
		assert.True(t, setKey1.Deletions.Contains(7))
		assert.True(t, setKey1.Deletions.Contains(8))

		setKey2, err := m.roaringSetGet(key2)
		require.Nil(t, err)
		assert.Equal(t, 0, setKey2.Additions.GetCardinality())
		assert.Equal(t, 2, setKey2.Deletions.GetCardinality())
		assert.True(t, setKey2.Deletions.Contains(9))
		assert.True(t, setKey2.Deletions.Contains(10))

		require.Nil(t, m.commitlog.close())
	})

	t.Run("adding/removing slices", func(t *testing.T) {
		cl, err := newCommitLogger(memPath(), StrategyRoaringSet, 0)
		require.NoError(t, err)

		m, err := newMemtable(memPath(), StrategyRoaringSet, 0, cl, nil, logger, false, nil, false, nil)
		require.Nil(t, err)

		key1, key2 := []byte("key1"), []byte("key2")

		assert.Nil(t, m.roaringSetAddRemoveSlices(key1,
			[]uint64{1, 2}, []uint64{7, 8}))
		assert.Nil(t, m.roaringSetAddRemoveSlices(key2,
			[]uint64{3, 4}, []uint64{9, 10}))
		assert.Greater(t, m.Size(), uint64(0))

		setKey1, err := m.roaringSetGet(key1)
		require.Nil(t, err)
		assert.Equal(t, 2, setKey1.Additions.GetCardinality())
		assert.True(t, setKey1.Additions.Contains(1))
		assert.True(t, setKey1.Additions.Contains(2))
		assert.Equal(t, 2, setKey1.Deletions.GetCardinality())
		assert.True(t, setKey1.Deletions.Contains(7))
		assert.True(t, setKey1.Deletions.Contains(8))

		setKey2, err := m.roaringSetGet(key2)
		require.Nil(t, err)
		assert.Equal(t, 2, setKey2.Additions.GetCardinality())
		assert.True(t, setKey2.Additions.Contains(3))
		assert.True(t, setKey2.Additions.Contains(4))
		assert.Equal(t, 2, setKey2.Deletions.GetCardinality())
		assert.True(t, setKey2.Deletions.Contains(9))
		assert.True(t, setKey2.Deletions.Contains(10))

		require.Nil(t, m.commitlog.close())
	})
}

//                           _       _
// __      _____  __ ___   ___  __ _| |_ ___
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
//  \ V  V /  __/ (_| |\ V /| | (_| | ||  __/
//   \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
//
//  Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
//
//  CONTACT: hello@weaviate.io
//

package lsmkv

import "time"

// if not enough config is provided we can fall back to this reasonable default
// value
const reasonableMemtableDefault = 10 * 1024 * 1024

// memtableSizeAdvisorCfg configures the dynamic memtable sizing: the advisor
// grows/shrinks the target size in stepSize increments between initial and
// maxSize, based on how long the last flush cycle took relative to
// minDuration/maxDuration.
type memtableSizeAdvisorCfg struct {
	initial     int
	stepSize    int
	maxSize     int
	minDuration time.Duration
	maxDuration time.Duration
}

type memtableSizeAdvisor struct {
	cfg    memtableSizeAdvisorCfg
	active bool
}

// newMemtableSizeAdvisor returns an advisor that is only active when enough
// config is provided; otherwise it always advises the fallback default.
func newMemtableSizeAdvisor(cfg memtableSizeAdvisorCfg) *memtableSizeAdvisor {
	a := &memtableSizeAdvisor{
		cfg: cfg,
	}

	// only activate if initial size, step size, max size, and max duration are
	// given
	if a.cfg.maxSize > 0 && a.cfg.initial > 0 && a.cfg.stepSize > 0 && a.cfg.maxDuration > 0 {
		a.active = true
	}

	return a
}

// Initial returns the starting memtable target size.
func (m memtableSizeAdvisor) Initial() int {
	if m.active {
		return m.cfg.initial
	} else {
		return reasonableMemtableDefault
	}
}

// NextTarget returns the new target size and whether it changed: flushes that
// complete faster than minDuration grow the target by one step (capped at
// maxSize); flushes slower than maxDuration shrink it (floored at initial).
func (m memtableSizeAdvisor) NextTarget(previousTarget int,
	timeSinceFlush time.Duration,
) (int, bool) {
	if !m.active {
		return reasonableMemtableDefault, false
	}

	if timeSinceFlush < m.cfg.minDuration {
		next := min(previousTarget+m.cfg.stepSize, m.cfg.maxSize)
		return next, next != previousTarget
	}
	if timeSinceFlush > m.cfg.maxDuration {
		next := max(previousTarget-m.cfg.stepSize, m.cfg.initial)
		return next, next != previousTarget
	}
	return previousTarget, false
}

// NOTE(review): these shadow the Go 1.21+ builtins of the same name;
// harmless, but they could be removed once the module targets >= 1.21
func min(a, b int) int {
	if a <= b {
		return a
	}

	return b
}

func max(a, b int) int {
	if a >= b {
		return a
	}

	return b
}

//                           _       _
// __      _____  __ ___   ___  __ _| |_ ___
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
//  \ V  V /  __/ (_| |\ V /| | (_| | ||  __/
//   \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
//
//  Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
//
//  CONTACT: hello@weaviate.io
//

package lsmkv

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

const Megabyte = 1024 * 1024

func TestMemtableSizeAdvisor_Initial(t *testing.T) {
	a := newMemtableSizeAdvisor(memtableSizeAdvisorCfg{
		initial: 10 * Megabyte,
	})

	assert.Equal(t, 10485760, a.Initial())
}

// TestMemtableSizeAdvisor_NextTarget exercises the grow/shrink decision table
// around the min/max duration and size boundaries.
func TestMemtableSizeAdvisor_NextTarget(t *testing.T) {
	a := newMemtableSizeAdvisor(memtableSizeAdvisorCfg{
		initial:     10 * Megabyte,
		minDuration: 10 * time.Second,
		maxDuration: 30 * time.Second,
		stepSize:    10 * Megabyte,
		maxSize:     100 * Megabyte,
	})

	type test struct {
		name            string
		current         int
		lastCycle       time.Duration
		expectedChanged bool
		expectedTarget  int
	}

	tests := []test{
		{
			name:            "completely within range",
			current:         10 * Megabyte,
			lastCycle:       17 * time.Second,
			expectedChanged: false,
			expectedTarget:  10 * Megabyte,
		},
		{
			name:            "cycle too short",
			current:         10 * Megabyte,
			lastCycle:       7 * time.Second,
			expectedChanged: true,
			expectedTarget:  20 * Megabyte,
		},
		{
			name:            "cycle too long",
			current:         100 * Megabyte,
			lastCycle:       47 * time.Second,
			expectedChanged: true,
			expectedTarget:  90 * Megabyte,
		},
		{
			name:            "cycle too short, but approaching limit",
			current:         95 * Megabyte,
			lastCycle:       7 * time.Second,
			expectedChanged: true,
			expectedTarget:  100 * Megabyte, // not 105 (!)
		},
		{
			name:            "cycle too short, but already at limit",
			current:         100 * Megabyte,
			lastCycle:       7 * time.Second,
			expectedChanged: false,
			expectedTarget:  100 * Megabyte,
		},
		{
			name:            "cycle too long, but barely above initial size",
			current:         12 * Megabyte,
			lastCycle:       47 * time.Second,
			expectedChanged: true,
			expectedTarget:  10 * Megabyte, // not 2 (!)
		},
		{
			name:            "cycle too long, but already at initial size",
			current:         10 * Megabyte,
			lastCycle:       47 * time.Second,
			expectedChanged: false,
			expectedTarget:  10 * Megabyte,
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			newTarget, changed := a.NextTarget(test.current, test.lastCycle)
			assert.Equal(t, test.expectedTarget, newTarget, "expect new target")
			assert.Equal(t, test.expectedChanged, changed, "expect changed")
		})
	}

	// NOTE(review): duplicates the "completely within range" table case above
	target, changed := a.NextTarget(10*1024*1024, 17*time.Second)
	assert.False(t, changed)
	assert.Equal(t, 10*1024*1024, target)
}

func TestMemtableSizeAdvisor_MissingConfig(t *testing.T) {
	// even with an all-default value config the advisor should still return
	// reasonable results, for example many integration tests might not provide a
	// reasonable config to the advisor
	a := newMemtableSizeAdvisor(memtableSizeAdvisorCfg{})
	assert.Equal(t, 10485760, a.Initial())
}
//                           _       _
// __      _____  __ ___   ___  __ _| |_ ___
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
//  \ V  V /  __/ (_| |\ V /| | (_| | ||  __/
//   \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
//
//  Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
//
//  CONTACT: hello@weaviate.io
//

package lsmkv

import (
	"path"
	"testing"

	"github.com/sirupsen/logrus/hooks/test"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/weaviate/weaviate/entities/lsmkv"
)

// This test prevents a regression on
// https://www.youtube.com/watch?v=OS8taasZl8k
//
// It verifies that updating a value with a different secondary key invalidates
// the old secondary-key mapping: only the new secondary key must resolve.
func Test_MemtableSecondaryKeyBug(t *testing.T) {
	dir := t.TempDir()

	logger, _ := test.NewNullLogger()
	cl, err := newCommitLogger(dir, StrategyReplace, 0)
	require.NoError(t, err)

	m, err := newMemtable(path.Join(dir, "will-never-flush"), StrategyReplace, 1, cl, nil, logger, false, nil, false, nil)
	require.Nil(t, err)
	t.Cleanup(func() {
		require.Nil(t, m.commitlog.close())
	})

	t.Run("add initial value", func(t *testing.T) {
		err = m.put([]byte("my-key"), []byte("my-value"),
			WithSecondaryKey(0, []byte("secondary-key-initial")))
		require.Nil(t, err)
	})

	t.Run("retrieve by primary", func(t *testing.T) {
		val, err := m.get([]byte("my-key"))
		require.Nil(t, err)
		assert.Equal(t, []byte("my-value"), val)
	})

	t.Run("retrieve by initial secondary", func(t *testing.T) {
		val, err := m.getBySecondary(0, []byte("secondary-key-initial"))
		require.Nil(t, err)
		assert.Equal(t, []byte("my-value"), val)
	})

	t.Run("update value with different secondary key", func(t *testing.T) {
		err = m.put([]byte("my-key"), []byte("my-value-updated"),
			WithSecondaryKey(0, []byte("different-secondary-key")))
		require.Nil(t, err)
	})

	t.Run("retrieve by primary again", func(t *testing.T) {
		val, err := m.get([]byte("my-key"))
		require.Nil(t, err)
		assert.Equal(t, []byte("my-value-updated"), val)
	})

	t.Run("retrieve by updated secondary", func(t *testing.T) {
		val, err := m.getBySecondary(0, []byte("different-secondary-key"))
		require.Nil(t, err)
		assert.Equal(t, []byte("my-value-updated"), val)
	})

	t.Run("retrieve by initial secondary - should not find anything", func(t *testing.T) {
		val, err := m.getBySecondary(0, []byte("secondary-key-initial"))
		assert.Equal(t, lsmkv.NotFound, err)
		assert.Nil(t, val)
	})
}
//
//  CONTACT: hello@weaviate.io
//

package lsmkv

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/weaviate/weaviate/usecases/monitoring"
)

// Observer/setter function types used so hot-path callers don't deal with
// prometheus types directly.
type (
	NsObserver         func(ns int64)
	BytesWriteObserver func(bytes int64)
	BytesReadObserver  func(bytes int64, nanoseconds int64)
	Setter             func(val uint64)
	TimeObserver       func(start time.Time)
)

// Metrics holds all prometheus collectors for one bucket, pre-curried with
// class/shard labels.
type Metrics struct {
	CompactionReplace            *prometheus.GaugeVec
	CompactionSet                *prometheus.GaugeVec
	CompactionMap                *prometheus.GaugeVec
	CompactionRoaringSet         *prometheus.GaugeVec
	CompactionRoaringSetRange    *prometheus.GaugeVec
	ActiveSegments               *prometheus.GaugeVec
	ObjectsBucketSegments        *prometheus.GaugeVec
	CompressedVecsBucketSegments *prometheus.GaugeVec
	bloomFilters                 prometheus.ObserverVec
	SegmentObjects               *prometheus.GaugeVec
	SegmentSize                  *prometheus.GaugeVec
	SegmentCount                 *prometheus.GaugeVec
	SegmentUnloaded              *prometheus.GaugeVec
	startupDurations             prometheus.ObserverVec
	startupDiskIO                prometheus.ObserverVec
	objectCount                  prometheus.Gauge
	memtableDurations            prometheus.ObserverVec
	memtableSize                 *prometheus.GaugeVec
	DimensionSum                 *prometheus.GaugeVec
	IOWrite                      *prometheus.SummaryVec
	IORead                       *prometheus.SummaryVec
	LazySegmentUnLoad            prometheus.Gauge
	LazySegmentLoad              prometheus.Gauge
	LazySegmentClose             prometheus.Gauge
	LazySegmentInit              prometheus.Gauge

	// groupClasses collapses class/shard labels to "n/a" when monitoring
	// grouping is enabled
	groupClasses        bool
	criticalBucketsOnly bool
}

// NewMetrics curries all collectors with the class/shard labels once, so the
// hot paths only pay for a cheap observer call.
func NewMetrics(promMetrics *monitoring.PrometheusMetrics, className,
	shardName string,
) *Metrics {
	if promMetrics.Group {
		className = "n/a"
		shardName = "n/a"
	}

	replace := promMetrics.AsyncOperations.MustCurryWith(prometheus.Labels{
		"operation":  "compact_lsm_segments_stratreplace",
		"class_name": className,
		"shard_name": shardName,
	})

	set := promMetrics.AsyncOperations.MustCurryWith(prometheus.Labels{
		"operation":  "compact_lsm_segments_stratset",
		"class_name": className,
		"shard_name": shardName,
	})

	roaringSet := promMetrics.AsyncOperations.MustCurryWith(prometheus.Labels{
		"operation":  "compact_lsm_segments_stratroaringset",
		"class_name": className,
		"shard_name": shardName,
	})

	roaringSetRange := promMetrics.AsyncOperations.MustCurryWith(prometheus.Labels{
		"operation":  "compact_lsm_segments_stratroaringsetrange",
		"class_name": className,
		"shard_name": shardName,
	})

	stratMap := promMetrics.AsyncOperations.MustCurryWith(prometheus.Labels{
		"operation":  "compact_lsm_segments_stratmap",
		"class_name": className,
		"shard_name": shardName,
	})

	// NOTE(review): the lazy-segment gauges use the global
	// monitoring.GetMetrics() instead of the promMetrics parameter used
	// everywhere else — confirm this asymmetry is intentional
	lazySegmentInit := monitoring.GetMetrics().AsyncOperations.With(prometheus.Labels{
		"operation":  "lazySegmentInit",
		"class_name": className,
		"shard_name": shardName,
		"path":       "n/a",
	})

	lazySegmentLoad := monitoring.GetMetrics().AsyncOperations.With(prometheus.Labels{
		"operation":  "lazySegmentLoad",
		"class_name": className,
		"shard_name": shardName,
		"path":       "n/a",
	})

	lazySegmentClose := monitoring.GetMetrics().AsyncOperations.With(prometheus.Labels{
		"operation":  "lazySegmentClose",
		"class_name": className,
		"shard_name": shardName,
		"path":       "n/a",
	})
	lazySegmentUnload := monitoring.GetMetrics().AsyncOperations.With(prometheus.Labels{
		"operation":  "lazySegmentUnLoad",
		"class_name": className,
		"shard_name": shardName,
		"path":       "n/a",
	})

	return &Metrics{
		groupClasses:              promMetrics.Group,
		criticalBucketsOnly:       promMetrics.LSMCriticalBucketsOnly,
		CompactionReplace:         replace,
		CompactionSet:             set,
		CompactionMap:             stratMap,
		CompactionRoaringSet:      roaringSet,
		CompactionRoaringSetRange: roaringSetRange,
		ActiveSegments: promMetrics.LSMSegmentCount.MustCurryWith(prometheus.Labels{
			"class_name": className,
			"shard_name": shardName,
		}),
		ObjectsBucketSegments: promMetrics.LSMObjectsBucketSegmentCount.MustCurryWith(prometheus.Labels{
			"class_name": className,
			"shard_name": shardName,
		}),
		CompressedVecsBucketSegments: promMetrics.LSMCompressedVecsBucketSegmentCount.MustCurryWith(prometheus.Labels{
			"class_name": className,
			"shard_name": shardName,
		}),
		bloomFilters: promMetrics.LSMBloomFilters.MustCurryWith(prometheus.Labels{
			"class_name": className,
			"shard_name": shardName,
		}),
		SegmentObjects: promMetrics.LSMSegmentObjects.MustCurryWith(prometheus.Labels{
			"class_name": className,
			"shard_name": shardName,
		}),
		SegmentSize: promMetrics.LSMSegmentSize.MustCurryWith(prometheus.Labels{
			"class_name": className,
			"shard_name": shardName,
		}),
		SegmentCount: promMetrics.LSMSegmentCountByLevel.MustCurryWith(prometheus.Labels{
			"class_name": className,
			"shard_name": shardName,
		}),
		SegmentUnloaded: promMetrics.LSMSegmentUnloaded.MustCurryWith(prometheus.Labels{
			"class_name": className,
			"shard_name": shardName,
		}),
		startupDiskIO: promMetrics.StartupDiskIO.MustCurryWith(prometheus.Labels{
			"class_name": className,
			"shard_name": shardName,
		}),
		startupDurations: promMetrics.StartupDurations.MustCurryWith(prometheus.Labels{
			"class_name": className,
			"shard_name": shardName,
		}),
		objectCount: promMetrics.ObjectCount.With(prometheus.Labels{
			"class_name": className,
			"shard_name": shardName,
		}),
		memtableDurations: promMetrics.LSMMemtableDurations.MustCurryWith(prometheus.Labels{
			"class_name": className,
			"shard_name": shardName,
		}),
		memtableSize: promMetrics.LSMMemtableSize.MustCurryWith(prometheus.Labels{
			"class_name": className,
			"shard_name": shardName,
		}),
		DimensionSum: promMetrics.VectorDimensionsSum.MustCurryWith(prometheus.Labels{
			"class_name": className,
			"shard_name": shardName,
		}),
		IOWrite:           promMetrics.FileIOWrites,
		IORead:            promMetrics.FileIOReads,
		LazySegmentLoad:   lazySegmentLoad,
		LazySegmentClose:  lazySegmentClose,
		LazySegmentInit:   lazySegmentInit,
		LazySegmentUnLoad: lazySegmentUnload,
	}
}

func noOpTimeObserver(start time.Time) {
	// do nothing
}

func noOpNsObserver(startNs int64) {
+ // do nothing +} + +func noOpNsReadObserver(startNs int64, time int64) { + // do nothing +} + +func noOpSetter(val uint64) { + // do nothing +} + +func (m *Metrics) MemtableOpObserver(path, strategy, op string) NsObserver { + if m == nil { + return noOpNsObserver + } + + if m.groupClasses { + path = "n/a" + } + + curried := m.memtableDurations.With(prometheus.Labels{ + "operation": op, + "path": path, + "strategy": strategy, + }) + + return func(startNs int64) { + took := float64(time.Now().UnixNano()-startNs) / float64(time.Millisecond) + curried.Observe(took) + } +} + +func (m *Metrics) MemtableWriteObserver(strategy, op string) BytesWriteObserver { + if m == nil { + return noOpNsObserver + } + + curried := m.IOWrite.With(prometheus.Labels{ + "operation": op, + "strategy": strategy, + }) + + return func(bytes int64) { + curried.Observe(float64(bytes)) + } +} + +func (m *Metrics) ReadObserver(op string) BytesReadObserver { + if m == nil { + return noOpNsReadObserver + } + + curried := m.IORead.With(prometheus.Labels{ + "operation": op, + }) + + return func(n int64, nanoseconds int64) { curried.Observe(float64(n)) } +} + +func (m *Metrics) MemtableSizeSetter(path, strategy string) Setter { + if m == nil || m.groupClasses { + // this metric would set absolute values, that's not possible in + // grouped mode, each call would essentially overwrite the last + return noOpSetter + } + + curried := m.memtableSize.With(prometheus.Labels{ + "path": path, + "strategy": strategy, + }) + + return func(size uint64) { + curried.Set(float64(size)) + } +} + +func (m *Metrics) BloomFilterObserver(strategy, operation string) TimeObserver { + if m == nil { + return noOpTimeObserver + } + + curried := m.bloomFilters.With(prometheus.Labels{ + "strategy": strategy, + "operation": operation, + }) + + return func(before time.Time) { + curried.Observe(float64(time.Since(before)) / float64(time.Millisecond)) + } +} + +func (m *Metrics) TrackStartupReadWALDiskIO(read int64, nanoseconds 
int64) { + if m == nil { + return + } + + seconds := float64(nanoseconds) / float64(time.Second) + throughput := float64(read) / float64(seconds) + m.startupDiskIO.With(prometheus.Labels{"operation": "lsm_recover_wal"}).Observe(throughput) +} + +func (m *Metrics) TrackStartupBucket(start time.Time) { + if m == nil { + return + } + + took := float64(time.Since(start)) / float64(time.Millisecond) + m.startupDurations.With(prometheus.Labels{"operation": "lsm_startup_bucket"}).Observe(took) +} + +func (m *Metrics) TrackStartupBucketRecovery(start time.Time) { + if m == nil { + return + } + + took := float64(time.Since(start)) / float64(time.Millisecond) + m.startupDurations.With(prometheus.Labels{"operation": "lsm_startup_bucket_recovery"}).Observe(took) +} + +func (m *Metrics) ObjectCount(count int) { + if m == nil { + return + } + + m.objectCount.Set(float64(count)) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/mmap_vs_read_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/mmap_vs_read_test.go new file mode 100644 index 0000000000000000000000000000000000000000..e0e1056825328bbd83a7c6dcd1b387284945534e --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/mmap_vs_read_test.go @@ -0,0 +1,99 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "crypto/rand" + "io" + "os" + "strconv" + "testing" + + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/usecases/mmap" +) + +func BenchmarkMMap(b *testing.B) { + tests := []struct { + size int + }{ + {size: 100}, + {size: 1000}, + {size: 10000}, + {size: 100000}, + } + + dir := b.TempDir() + f, err := os.Create(dir + "/test.tmp") + require.NoError(b, err) + + bytes := make([]byte, 100000) + read, err := rand.Read(bytes) + require.NoError(b, err) + require.Equal(b, read, len(bytes)) + + written, err := f.Write(bytes) + require.NoError(b, err) + require.Equal(b, written, len(bytes)) + + b.ResetTimer() + + for _, test := range tests { + sum := 0 + for i := range bytes[:test.size] { + sum += int(bytes[i]) + } + + b.Run(strconv.Itoa(test.size)+"mmap", func(b *testing.B) { + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + // not needed here, but we need to do it to have the same overhead in both tests + _, err := f.Seek(0, io.SeekStart) + require.NoError(b, err) + + contents, err := mmap.MapRegion(f, int(test.size), mmap.RDONLY, 0, 0) + require.NoError(b, err) + + innerSum := 0 + for j := range contents { + innerSum += int(contents[j]) + } + require.Equal(b, sum, innerSum) + require.NoError(b, contents.Unmap()) + } + }) + + b.Run(strconv.Itoa(test.size)+"full read", func(b *testing.B) { + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + _, err := f.Seek(0, io.SeekStart) + require.NoError(b, err) + + data := make([]byte, test.size) + n, err := f.Read(data) + if err != nil { + return + } + require.NoError(b, err) + require.Equal(b, n, test.size) + + innerSum := 0 + for j := range data { + innerSum += int(data[j]) + } + require.Equal(b, sum, innerSum) + } + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/mock_bucket_creator.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/mock_bucket_creator.go new file mode 100644 index 
0000000000000000000000000000000000000000..0f427e85aca62d2a84cfca9526eb9ede740c0746 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/mock_bucket_creator.go @@ -0,0 +1,129 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by mockery v2.53.2. DO NOT EDIT. + +package lsmkv + +import ( + context "context" + + logrus "github.com/sirupsen/logrus" + cyclemanager "github.com/weaviate/weaviate/entities/cyclemanager" + + mock "github.com/stretchr/testify/mock" +) + +// MockBucketCreator is an autogenerated mock type for the BucketCreator type +type MockBucketCreator struct { + mock.Mock +} + +type MockBucketCreator_Expecter struct { + mock *mock.Mock +} + +func (_m *MockBucketCreator) EXPECT() *MockBucketCreator_Expecter { + return &MockBucketCreator_Expecter{mock: &_m.Mock} +} + +// NewBucket provides a mock function with given fields: ctx, dir, rootDir, logger, metrics, compactionCallbacks, flushCallbacks, opts +func (_m *MockBucketCreator) NewBucket(ctx context.Context, dir string, rootDir string, logger logrus.FieldLogger, metrics *Metrics, compactionCallbacks cyclemanager.CycleCallbackGroup, flushCallbacks cyclemanager.CycleCallbackGroup, opts ...BucketOption) (*Bucket, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, dir, rootDir, logger, metrics, compactionCallbacks, flushCallbacks) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) 
+ + if len(ret) == 0 { + panic("no return value specified for NewBucket") + } + + var r0 *Bucket + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, logrus.FieldLogger, *Metrics, cyclemanager.CycleCallbackGroup, cyclemanager.CycleCallbackGroup, ...BucketOption) (*Bucket, error)); ok { + return rf(ctx, dir, rootDir, logger, metrics, compactionCallbacks, flushCallbacks, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, string, string, logrus.FieldLogger, *Metrics, cyclemanager.CycleCallbackGroup, cyclemanager.CycleCallbackGroup, ...BucketOption) *Bucket); ok { + r0 = rf(ctx, dir, rootDir, logger, metrics, compactionCallbacks, flushCallbacks, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*Bucket) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, string, logrus.FieldLogger, *Metrics, cyclemanager.CycleCallbackGroup, cyclemanager.CycleCallbackGroup, ...BucketOption) error); ok { + r1 = rf(ctx, dir, rootDir, logger, metrics, compactionCallbacks, flushCallbacks, opts...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockBucketCreator_NewBucket_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'NewBucket' +type MockBucketCreator_NewBucket_Call struct { + *mock.Call +} + +// NewBucket is a helper method to define mock.On call +// - ctx context.Context +// - dir string +// - rootDir string +// - logger logrus.FieldLogger +// - metrics *Metrics +// - compactionCallbacks cyclemanager.CycleCallbackGroup +// - flushCallbacks cyclemanager.CycleCallbackGroup +// - opts ...BucketOption +func (_e *MockBucketCreator_Expecter) NewBucket(ctx interface{}, dir interface{}, rootDir interface{}, logger interface{}, metrics interface{}, compactionCallbacks interface{}, flushCallbacks interface{}, opts ...interface{}) *MockBucketCreator_NewBucket_Call { + return &MockBucketCreator_NewBucket_Call{Call: _e.mock.On("NewBucket", + append([]interface{}{ctx, dir, rootDir, logger, metrics, compactionCallbacks, flushCallbacks}, opts...)...)} +} + +func (_c *MockBucketCreator_NewBucket_Call) Run(run func(ctx context.Context, dir string, rootDir string, logger logrus.FieldLogger, metrics *Metrics, compactionCallbacks cyclemanager.CycleCallbackGroup, flushCallbacks cyclemanager.CycleCallbackGroup, opts ...BucketOption)) *MockBucketCreator_NewBucket_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]BucketOption, len(args)-7) + for i, a := range args[7:] { + if a != nil { + variadicArgs[i] = a.(BucketOption) + } + } + run(args[0].(context.Context), args[1].(string), args[2].(string), args[3].(logrus.FieldLogger), args[4].(*Metrics), args[5].(cyclemanager.CycleCallbackGroup), args[6].(cyclemanager.CycleCallbackGroup), variadicArgs...) 
+ }) + return _c +} + +func (_c *MockBucketCreator_NewBucket_Call) Return(_a0 *Bucket, _a1 error) *MockBucketCreator_NewBucket_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockBucketCreator_NewBucket_Call) RunAndReturn(run func(context.Context, string, string, logrus.FieldLogger, *Metrics, cyclemanager.CycleCallbackGroup, cyclemanager.CycleCallbackGroup, ...BucketOption) (*Bucket, error)) *MockBucketCreator_NewBucket_Call { + _c.Call.Return(run) + return _c +} + +// NewMockBucketCreator creates a new instance of MockBucketCreator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMockBucketCreator(t interface { + mock.TestingT + Cleanup(func()) +}) *MockBucketCreator { + mock := &MockBucketCreator{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/quantile_keys.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/quantile_keys.go new file mode 100644 index 0000000000000000000000000000000000000000..23bbd7fe9a0e32e751b6bc7edc226d3560a926bb --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/quantile_keys.go @@ -0,0 +1,127 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "bytes" + "math" + "sort" +) + +// QuantileKeys returns an approximation of the keys that make up the specified +// quantiles. This can be used to start parallel cursors at fairly evenly +// distributed positions in the segment. 
+// +// To understand the approximation, checkout +// [lsmkv.segmentindex.DiskTree.QuantileKeys] that runs on each segment. +// +// Some things to keep in mind: +// +// 1. It may return fewer keys than requested (including 0) if the segment +// contains fewer entries +// 2. It may return keys that do not exist, for example because they are +// tombstoned. This is acceptable, as a key does not have to exist to be used +// as part of .Seek() in a cursor. +// 3. It will never return duplicates, to make sure all parallel cursors +// return unique values. +func (b *Bucket) QuantileKeys(q int) [][]byte { + if q <= 0 { + return nil + } + + b.flushLock.RLock() + defer b.flushLock.RUnlock() + + keys := b.disk.quantileKeys(q) + return keys +} + +func (sg *SegmentGroup) quantileKeys(q int) [][]byte { + segments, release := sg.getAndLockSegments() + defer release() + + var keys [][]byte + + if len(segments) == 0 { + return keys + } + + for _, s := range segments { + keys = append(keys, s.quantileKeys(q)...) + } + + // re-sort keys + sort.Slice(keys, func(i, j int) bool { + return bytes.Compare(keys[i], keys[j]) < 0 + }) + + // There could be duplicates if a key was modified in multiple segments, we + // need to remove them. Since the list is sorted at this, this is fairly easy + // to do: + uniqueKeys := make([][]byte, 0, len(keys)) + for i := range keys { + if i == 0 || !bytes.Equal(keys[i], keys[i-1]) { + uniqueKeys = append(uniqueKeys, keys[i]) + } + } + + return pickEvenlyDistributedKeys(uniqueKeys, q) +} + +func (s *segment) quantileKeys(q int) [][]byte { + return s.index.QuantileKeys(q) +} + +// pickEvenlyDistributedKeys picks q keys from the input keys, trying to keep +// the distribution as even as possible. The input keys are assumed to be +// sorted. It never returns duplicates, see the unit test proving this. +// +// Important to keep in mind is that our input values do not contain the first +// and last elements, but rather the first quantile points. 
+// This is because they were obtained using +// [lsmkv.segmentindex.DiskTree.QuantileKeys] which traverses the binary tree +// to a certain depth. The first element in the list is the element you get +// from continuously following the left child until you hit the maximum +// traversal depth. Respectively, the last element is the element you get from +// continuously following the right child until you hit the maximum traversal +// depth. +// This means that when a cursor uses those keys, it will need to add two +// special cases: +// +// 1. It needs to start with the actual first element and read to the first +// checkpoint +// 2. When reaching the last checkpoint, it needs to keep reading +// until the cursor no longer returns elements. +// +// As a result our goal here is to keep the gaps as even as possible. For +// example, assume the keys ["A", "B", "C", "D", "E", "F", "G", "H", "I", "J"] +// and we want to pick 3 keys. We would return ["C", "F", "I"], thus keeping +// the spacing fairly even. 
func pickEvenlyDistributedKeys(uniqueKeys [][]byte, q int) [][]byte {
	if q >= len(uniqueKeys) {
		// impossible to pick q distinct keys, simply return the (already
		// sorted, unique) input unchanged
		return uniqueKeys
	}

	// we now have the guarantee that q < len(uniqueKeys) — the original
	// comment stated the inverse of the guard above — which means it is
	// possible to pick q keys without overlap while keeping the distribution
	// as even as possible
	finalKeys := make([][]byte, q)
	stepSize := float64(len(uniqueKeys)) / float64(q)
	for i := range finalKeys {
		// sample the midpoint of the i-th of q equally sized windows; since
		// stepSize > 1 here, the rounded position stays below len(uniqueKeys)
		pos := int(math.Round(float64(i)*stepSize + 0.5*stepSize))

		finalKeys[i] = uniqueKeys[pos]
	}

	return finalKeys
}
+// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "context" + "encoding/binary" + "fmt" + "testing" + + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/cyclemanager" +) + +func TestQuantileKeysSingleSegment(t *testing.T) { + dir := t.TempDir() + ctx := context.Background() + logger, _ := test.NewNullLogger() + + b, err := NewBucketCreator().NewBucket( + ctx, dir, "", logger, nil, cyclemanager.NewCallbackGroupNoop(), + cyclemanager.NewCallbackGroupNoop()) + require.Nil(t, err) + + importConsecutiveKeys(t, b, 0, 1000) + + // all cyclemmanagers are noops, so we need to explicitly flush if we want a + // segment to be built + require.Nil(t, b.FlushAndSwitch()) + + quantiles := b.QuantileKeys(10) + + asNumbers := make([]uint64, len(quantiles)) + for i, q := range quantiles { + asNumbers[i] = binary.BigEndian.Uint64(q) + } + + // validate there are no duplicates, and each key is strictly greater than + // the last + for i, n := range asNumbers { + if i == 0 { + continue + } + + prev := asNumbers[i-1] + assert.Greater(t, n, prev) + } + + // assert on distribution + idealStepSize := float64(1000) / float64(len(asNumbers)+1) + for i, n := range asNumbers { + actualStepSize := float64(n) / float64(i+1) + assert.InEpsilon(t, idealStepSize, actualStepSize, 0.1) + } +} + +func TestQuantileKeysMultipleSegmentsUniqueEntries(t *testing.T) { + dir := t.TempDir() + ctx := context.Background() + logger, _ := test.NewNullLogger() + + b, err := NewBucketCreator().NewBucket( + ctx, dir, "", logger, nil, cyclemanager.NewCallbackGroupNoop(), + cyclemanager.NewCallbackGroupNoop()) + require.Nil(t, err) + + importConsecutiveKeys(t, b, 0, 1000) + + // all cyclemmanagers are noops, so we need to explicitly flush if we want a + // segment to be built + require.Nil(t, b.FlushAndSwitch()) + + importConsecutiveKeys(t, b, 1000, 2000) + + // all cyclemmanagers are 
noops, so we need to explicitly flush if we want a + // segment to be built + require.Nil(t, b.FlushAndSwitch()) + + quantiles := b.QuantileKeys(10) + + asNumbers := make([]uint64, len(quantiles)) + for i, q := range quantiles { + asNumbers[i] = binary.BigEndian.Uint64(q) + } + + // validate there are no duplicates, and each key is strictly greater than + // the last + for i, n := range asNumbers { + if i == 0 { + continue + } + + prev := asNumbers[i-1] + assert.Greater(t, n, prev) + } +} + +func importConsecutiveKeys(t *testing.T, b *Bucket, start, end uint64) { + for i := start; i < end; i++ { + key := make([]byte, 8) + binary.BigEndian.PutUint64(key, i) + err := b.Put(key, key) + require.Nil(t, err) + } +} + +func TestKeyDistributionExample(t *testing.T) { + inputKeyStrings := []string{"A", "B", "C", "D", "E", "F", "G", "H", "I", "J"} + inputKeys := make([][]byte, len(inputKeyStrings)) + for i, s := range inputKeyStrings { + inputKeys[i] = []byte(s) + } + q := 3 + + picked := pickEvenlyDistributedKeys(inputKeys, q) + expectKeyStrings := []string{"C", "F", "I"} + expectKeys := make([][]byte, len(expectKeyStrings)) + for i, s := range expectKeyStrings { + expectKeys[i] = []byte(s) + } + + assert.Equal(t, expectKeys, picked) +} + +func TestPickEvenlyDistributedKeys(t *testing.T) { + for input := 0; input < 100; input++ { + for q := 1; q < 100; q++ { + t.Run(fmt.Sprintf("input=%d, q=%d", input, q), func(t *testing.T) { + keys := make([][]byte, input) + for i := 0; i < input; i++ { + key := make([]byte, 8) + binary.BigEndian.PutUint64(key, uint64(i)) + keys[i] = key + } + + picked := pickEvenlyDistributedKeys(keys, q) + + // make sure there are never more results than q + require.LessOrEqual(t, len(picked), q) + + // make sure that we get q results if there are at least q keys + if input >= q { + require.Equal(t, q, len(picked)) + } else { + // if there are fewer keys than q, we should get all of them + require.Equal(t, input, len(picked)) + } + + // make sure there 
are no duplicates + for i, key := range picked { + if i == 0 { + continue + } + + prev := binary.BigEndian.Uint64(picked[i-1]) + curr := binary.BigEndian.Uint64(key) + + require.Greater(t, curr, prev, "found duplicate picks") + } + }) + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/rbtree/rbtree.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/rbtree/rbtree.go new file mode 100644 index 0000000000000000000000000000000000000000..dfb093f7ecf800957cae061c19acde2e56f78d2a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/rbtree/rbtree.go @@ -0,0 +1,176 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package rbtree + +type Node interface { + Parent() Node + SetParent(Node) + Left() Node + SetLeft(Node) + Right() Node + SetRight(Node) + IsRed() bool + SetRed(bool) + IsNil() bool +} + +// This function rebalances and recolours trees to be valid RB trees. It needs to be called after each node that +// was added to the tree. +// +// Deletions are currently not supported as this is done through the tombstone flag and from the POV of the RB-tree +// tombstone-nodes are just normal nodes that get rebalanced the normal way. 
//
// Throughout this file the following relationships between nodes are used:
// GP = grandparent, P = parent, U = uncle, S = sibling, N = node that was
// just added
//
//	  GP
//	 /  \
//	U    P
//	    / \
//	   S   N
//
// Rebalance returns the new root of the tree if a rotation moved a different
// node into the root position, and nil if the root did not change.
func Rebalance(node Node) Node {
	for {
		parent := node.Parent()

		// if parent is black or the current node is the root node (== parent
		// is nil) there is nothing to do
		if !parent.IsRed() {
			return nil
		}

		grandparent := node.Parent().Parent()
		var uncle Node
		if parent == grandparent.Right() {
			uncle = grandparent.Left()
		} else {
			uncle = grandparent.Right()
		}

		if uncle.IsRed() {
			// if uncle is red, recolouring the tree up to the grandparent
			// results in a valid RB tree. The colour of the grandparent
			// changes to red, so there might be more fixes needed. Therefore
			// go up the tree and repeat.
			recolourNodes(parent, grandparent, uncle)
			node = grandparent
		} else {
			// if uncle is black, there are four possible cases.
			// parent is the right child of grandparent:
			//   1) node is right child of parent => left rotate around GP
			//   2) node is left child of parent  => right rotate around
			//      parent, which reduces to case 1
			// For cases 3 and 4 just swap left and right in the two cases
			// above.
			//
			// In all of these cases the grandparent stays black and there is
			// no need for further fixes up the tree.
			var newRoot Node
			if parent == grandparent.Right() {
				if node == parent.Left() {
					rightRotate(parent)
					// node and parent switch places in the tree, update
					// parent so the recolouring below targets the right node
					parent = node
				}
				newRoot = leftRotate(grandparent)
			} else { // parent == grandparent.Left()
				if node == parent.Right() {
					leftRotate(parent)
					parent = node
				}
				newRoot = rightRotate(grandparent)
			}
			recolourNodes(grandparent, parent)
			return newRoot
		}
	}
}

// recolourNodes flips the colour (red <-> black) of every given node,
// skipping nil sentinel nodes.
func recolourNodes(nodes ...Node) {
	for _, n := range nodes {
		if !n.IsNil() {
			if n.IsRed() {
				n.SetRed(false)
			} else {
				n.SetRed(true)
			}
		}
	}
}

// leftRotate rotates the tree left around the given node.
//
// After this rotation, the former right child (FC) will be the new parent and
// the former parent (FP) will be the left node of the new parent. The left
// child of the former child is transferred to the former parent.
//
//	    FP                      FC
//	   /  \    left rotate     /  \
//	FP_R   FC      =>        FP    FC_R
//	      /  \              /  \
//	   FC_L  FC_R        FP_R  FC_L
//
// In case FP was the root of the tree, FC will be the new root of the tree
// and is returned; otherwise nil is returned.
func leftRotate(rotationNode Node) Node {
	formerChild := rotationNode.Right()
	rootRotate := rotationNode.Parent().IsNil()

	// former child node becomes the new parent unless the rotation is around
	// the root node
	if rootRotate {
		formerChild.SetParent(nil)
	} else {
		// re-point the grandparent's child link at the former child
		if rotationNode.Parent().Left() == rotationNode {
			rotationNode.Parent().SetLeft(formerChild)
		} else {
			rotationNode.Parent().SetRight(formerChild)
		}
		formerChild.SetParent(rotationNode.Parent())
	}

	rotationNode.SetParent(formerChild)

	// transfer the former child's left subtree to the rotation node, then
	// hang the rotation node below the former child
	rotationNode.SetRight(formerChild.Left())
	if formerChild.Left() != nil {
		formerChild.Left().SetParent(rotationNode)
	}
	formerChild.SetLeft(rotationNode)

	if rootRotate {
		return formerChild
	} else {
		return nil
	}
}

// rightRotate is the mirror image of leftRotate: swap left and right
// everywhere. Returns the new root when rotating around the root, else nil.
func rightRotate(rotationNode Node) Node {
	formerChild := rotationNode.Left()
	rootRotate := rotationNode.Parent().IsNil()

	if rootRotate {
		formerChild.SetParent(nil)
	} else {
		if rotationNode.Parent().Left() == rotationNode {
			rotationNode.Parent().SetLeft(formerChild)
		} else {
			rotationNode.Parent().SetRight(formerChild)
		}
		formerChild.SetParent(rotationNode.Parent())
	}
	rotationNode.SetParent(formerChild)

	rotationNode.SetLeft(formerChild.Right())
	if formerChild.Right() != nil {
		formerChild.Right().SetParent(rotationNode)
	}
	formerChild.SetRight(rotationNode)

	if rootRotate {
		return formerChild
	} else {
		return nil
	}
}
a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/recover_from_wal_integration_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/recover_from_wal_integration_test.go new file mode 100644 index 0000000000000000000000000000000000000000..2ffd4bec0dca4f39366794d489ab723ce7cc7661 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/recover_from_wal_integration_test.go @@ -0,0 +1,973 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest +// +build integrationTest + +package lsmkv + +import ( + "bytes" + "context" + "fmt" + "io" + "os" + "os/exec" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/repos/db/roaringset" + "github.com/weaviate/weaviate/entities/cyclemanager" + "github.com/weaviate/weaviate/entities/filters" +) + +func TestReplaceStrategy_RecoverFromWAL(t *testing.T) { + dirNameOriginal := t.TempDir() + dirNameRecovered := t.TempDir() + + t.Run("with some previous state", func(t *testing.T) { + b, err := NewBucketCreator().NewBucket(testCtx(), dirNameOriginal, "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), + WithStrategy(StrategyReplace), WithMinWalThreshold(0)) + require.Nil(t, err) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + t.Run("set one key that will be flushed orderly", func(t *testing.T) { + // the motivation behind flushing this initial segment is to check that + // deletion as part of the recovery also works correctly. 
If we would + // just delete something that was created as part of the same memtable, + // the tests would still pass, even with removing the logic that recovers + // tombstones. + // + // To make sure they fail in this case, this prior state was introduced. + // An entry with key "key-2" is introduced in a previous segment, so if + // the deletion fails as part of the recovery this key would still be + // present later on. With the deletion working correctly it will be gone. + // + // You can test this by commenting the "p.memtable.setTombstone()" line + // in p.doReplace(). This will fail the tests suite, but prior to this + // addition it would have passed. + key2 := []byte("key-2") + orig2 := []byte("delete me later - you should never find me again") + + err = b.Put(key2, orig2) + require.Nil(t, err) + }) + + t.Run("shutdown (orderly) bucket to create first segment", func(t *testing.T) { + b.Shutdown(context.Background()) + + // then recreate bucket + var err error + b, err = NewBucketCreator().NewBucket(testCtx(), dirNameOriginal, "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), + WithStrategy(StrategyReplace), WithMinWalThreshold(0)) + require.Nil(t, err) + }) + + t.Run("set original values", func(t *testing.T) { + key1 := []byte("key-1") + key2 := []byte("key-2") + key3 := []byte("key-3") + orig1 := []byte("original value for key1") + orig2 := []byte("original value for key2") + orig3 := []byte("original value for key3") + + err = b.Put(key1, orig1) + require.Nil(t, err) + err = b.Put(key2, orig2) + require.Nil(t, err) + err = b.Put(key3, orig3) + require.Nil(t, err) + }) + + t.Run("delete one, update one", func(t *testing.T) { + key2 := []byte("key-2") + key3 := []byte("key-3") + updated3 := []byte("updated value for key 3") + + err = b.Delete(key2) + require.Nil(t, err) + + err = b.Put(key3, updated3) + require.Nil(t, err) + }) + + t.Run("verify control", func(t *testing.T) { + key1 := []byte("key-1") + 
key2 := []byte("key-2") + key3 := []byte("key-3") + orig1 := []byte("original value for key1") + updated3 := []byte("updated value for key 3") + res, err := b.Get(key1) + require.Nil(t, err) + assert.Equal(t, res, orig1) + res, err = b.Get(key2) + require.Nil(t, err) + assert.Nil(t, res) + res, err = b.Get(key3) + require.Nil(t, err) + assert.Equal(t, res, updated3) + }) + + t.Run("make sure the WAL is flushed", func(t *testing.T) { + require.Nil(t, b.WriteWAL()) + }) + + t.Run("copy state into recovery folder and destroy original", func(t *testing.T) { + t.Run("copy over wals", func(t *testing.T) { + cmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("cp -r %s/*.wal %s", + dirNameOriginal, dirNameRecovered)) + var out bytes.Buffer + cmd.Stderr = &out + err := cmd.Run() + if err != nil { + fmt.Println(out.String()) + t.Fatal(err) + } + }) + + t.Run("copy over segments", func(t *testing.T) { + cmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("cp -r %s/*.db %s", + dirNameOriginal, dirNameRecovered)) + var out bytes.Buffer + cmd.Stderr = &out + err := cmd.Run() + if err != nil { + fmt.Println(out.String()) + t.Fatal(err) + } + }) + b = nil + require.Nil(t, os.RemoveAll(dirNameOriginal)) + }) + + var bRec *Bucket + + t.Run("create new bucket from existing state", func(t *testing.T) { + b, err := NewBucketCreator().NewBucket(testCtx(), dirNameRecovered, "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), + WithStrategy(StrategyReplace), WithMinWalThreshold(0)) + require.Nil(t, err) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + bRec = b + }) + + t.Run("verify all data is present", func(t *testing.T) { + key1 := []byte("key-1") + key2 := []byte("key-2") + key3 := []byte("key-3") + orig1 := []byte("original value for key1") + updated3 := []byte("updated value for key 3") + res, err := bRec.Get(key1) + require.Nil(t, err) + assert.Equal(t, res, orig1) + res, err = 
bRec.Get(key2) + require.Nil(t, err) + assert.Nil(t, res) + res, err = bRec.Get(key3) + require.Nil(t, err) + assert.Equal(t, res, updated3) + }) + }) +} + +func TestReplaceStrategy_RecoverFromWALWithCorruptLastElement(t *testing.T) { + dirNameOriginal := t.TempDir() + dirNameRecovered := t.TempDir() + + t.Run("without previous state", func(t *testing.T) { + b, err := NewBucketCreator().NewBucket(testCtx(), dirNameOriginal, "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), + WithStrategy(StrategyReplace)) + require.Nil(t, err) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + t.Run("set original values", func(t *testing.T) { + key1 := []byte("key-1") + key2 := []byte("key-2") + key3 := []byte("key-3") + orig1 := []byte("original value for key1") + orig2 := []byte("original value for key2") + orig3 := []byte("original value for key3") + + err = b.Put(key1, orig1) + require.Nil(t, err) + err = b.Put(key2, orig2) + require.Nil(t, err) + err = b.Put(key3, orig3) + require.Nil(t, err) + }) + + t.Run("delete one, update one", func(t *testing.T) { + key2 := []byte("key-2") + key3 := []byte("key-3") + updated3 := []byte("updated value for key 3") + + err = b.Delete(key2) + require.Nil(t, err) + + err = b.Put(key3, updated3) + require.Nil(t, err) + }) + + t.Run("verify control", func(t *testing.T) { + key1 := []byte("key-1") + key2 := []byte("key-2") + key3 := []byte("key-3") + orig1 := []byte("original value for key1") + updated3 := []byte("updated value for key 3") + res, err := b.Get(key1) + require.Nil(t, err) + assert.Equal(t, res, orig1) + res, err = b.Get(key2) + require.Nil(t, err) + assert.Nil(t, res) + res, err = b.Get(key3) + require.Nil(t, err) + assert.Equal(t, res, updated3) + }) + + t.Run("make sure the WAL is flushed", func(t *testing.T) { + require.Nil(t, b.WriteWAL()) + }) + + t.Run("copy state into recovery folder and destroy original", func(t *testing.T) { + 
cmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("cp -r %s/*.wal %s", + dirNameOriginal, dirNameRecovered)) + var out bytes.Buffer + cmd.Stderr = &out + err := cmd.Run() + if err != nil { + fmt.Println(out.String()) + t.Fatal(err) + } + b = nil + require.Nil(t, os.RemoveAll(dirNameOriginal)) + }) + + t.Run("corrupt WAL by removing some bytes at the very end", func(t *testing.T) { + entries, err := os.ReadDir(dirNameRecovered) + require.Nil(t, err) + require.Len(t, entries, 1, "there should be exactly one .wal file") + + oldFileName := filepath.Join(dirNameRecovered, entries[0].Name()) + tmpFileName := oldFileName + ".tmp" + + err = os.Rename(oldFileName, tmpFileName) + require.Nil(t, err) + + orig, err := os.Open(tmpFileName) + require.Nil(t, err) + + correctLog, err := io.ReadAll(orig) + require.Nil(t, err) + err = orig.Close() + require.Nil(t, err) + + corruptLog := correctLog[:len(correctLog)-6] + + err = os.Remove(tmpFileName) + require.Nil(t, err) + + corrupt, err := os.Create(oldFileName) + require.Nil(t, err) + + _, err = corrupt.Write(corruptLog) + require.Nil(t, err) + + err = corrupt.Close() + require.Nil(t, err) + }) + + var bRec *Bucket + + t.Run("create new bucket from existing state", func(t *testing.T) { + b, err := NewBucketCreator().NewBucket(testCtx(), dirNameRecovered, "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), + WithStrategy(StrategyReplace)) + require.Nil(t, err) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + bRec = b + }) + + t.Run("verify all data prior to the corruption is present", func(t *testing.T) { + key1 := []byte("key-1") + key2 := []byte("key-2") + key3 := []byte("key-3") + orig1 := []byte("original value for key1") + notUpdated3 := []byte("original value for key3") + + // the last operation we performed (that now got corrupted) was an update + // on key3. 
So now that we're expecting all state prior to the corruption + // to be present, we would expect the original value for key3 + + res, err := bRec.Get(key1) + require.Nil(t, err) + assert.Equal(t, res, orig1) + res, err = bRec.Get(key2) + require.Nil(t, err) + assert.Nil(t, res) + res, err = bRec.Get(key3) + require.Nil(t, err) + assert.Equal(t, res, notUpdated3) + }) + }) +} + +func TestSetStrategy_RecoverFromWAL(t *testing.T) { + dirNameOriginal := t.TempDir() + dirNameRecovered := t.TempDir() + + t.Run("without prior state", func(t *testing.T) { + b, err := NewBucketCreator().NewBucket(testCtx(), dirNameOriginal, "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), + WithStrategy(StrategySetCollection)) + require.Nil(t, err) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + key1 := []byte("test1-key-1") + key2 := []byte("test1-key-2") + key3 := []byte("test1-key-3") + + t.Run("set original values and verify", func(t *testing.T) { + orig1 := [][]byte{[]byte("value 1.1"), []byte("value 1.2")} + orig2 := [][]byte{[]byte("value 2.1"), []byte("value 2.2")} + orig3 := [][]byte{[]byte("value 3.1"), []byte("value 3.2")} + + err = b.SetAdd(key1, orig1) + require.Nil(t, err) + err = b.SetAdd(key2, orig2) + require.Nil(t, err) + err = b.SetAdd(key3, orig3) + require.Nil(t, err) + + res, err := b.SetList(key1) + require.Nil(t, err) + assert.Equal(t, orig1, res) + res, err = b.SetList(key2) + require.Nil(t, err) + assert.Equal(t, orig2, res) + res, err = b.SetList(key3) + require.Nil(t, err) + assert.Equal(t, orig3, res) + }) + + t.Run("delete individual keys", func(t *testing.T) { + delete2 := []byte("value 2.1") + delete3 := []byte("value 3.2") + + err = b.SetDeleteSingle(key2, delete2) + require.Nil(t, err) + err = b.SetDeleteSingle(key3, delete3) + require.Nil(t, err) + }) + + t.Run("re-add keys which were previously deleted and new ones", func(t *testing.T) { + readd2 := 
[][]byte{[]byte("value 2.1"), []byte("value 2.3")} + readd3 := [][]byte{[]byte("value 3.2"), []byte("value 3.3")} + + err = b.SetAdd(key2, readd2) + require.Nil(t, err) + err = b.SetAdd(key3, readd3) + require.Nil(t, err) + }) + + t.Run("validate the results prior to recovery", func(t *testing.T) { + expected1 := [][]byte{[]byte("value 1.1"), []byte("value 1.2")} // unchanged + expected2 := [][]byte{ + []byte("value 2.2"), // from original import + []byte("value 2.1"), // added again after initial deletion + []byte("value 2.3"), // newly added + } + expected3 := [][]byte{ + []byte("value 3.1"), // form original import + []byte("value 3.2"), // added again after initial deletion + []byte("value 3.3"), // newly added + } // value2 deleted + + res, err := b.SetList(key1) + require.Nil(t, err) + assert.Equal(t, expected1, res) + res, err = b.SetList(key2) + require.Nil(t, err) + assert.Equal(t, expected2, res) + res, err = b.SetList(key3) + require.Nil(t, err) + assert.Equal(t, expected3, res) + }) + + t.Run("make sure the WAL is flushed", func(t *testing.T) { + require.Nil(t, b.WriteWAL()) + }) + + t.Run("copy state into recovery folder and destroy original", func(t *testing.T) { + cmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("cp -r %s/*.wal %s", + dirNameOriginal, dirNameRecovered)) + var out bytes.Buffer + cmd.Stderr = &out + err := cmd.Run() + if err != nil { + fmt.Println(out.String()) + t.Fatal(err) + } + b = nil + require.Nil(t, os.RemoveAll(dirNameOriginal)) + }) + + var bRec *Bucket + + t.Run("create new bucket from existing state", func(t *testing.T) { + b, err := NewBucketCreator().NewBucket(testCtx(), dirNameRecovered, "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), + WithStrategy(StrategySetCollection)) + require.Nil(t, err) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + bRec = b + }) + + t.Run("validate the results after recovery", func(t 
*testing.T) { + expected1 := [][]byte{[]byte("value 1.1"), []byte("value 1.2")} // unchanged + expected2 := [][]byte{ + []byte("value 2.2"), // from original import + []byte("value 2.1"), // added again after initial deletion + []byte("value 2.3"), // newly added + } + expected3 := [][]byte{ + []byte("value 3.1"), // form original import + []byte("value 3.2"), // added again after initial deletion + []byte("value 3.3"), // newly added + } // value2 deleted + + res, err := bRec.SetList(key1) + require.Nil(t, err) + assert.Equal(t, expected1, res) + res, err = bRec.SetList(key2) + require.Nil(t, err) + assert.Equal(t, expected2, res) + res, err = bRec.SetList(key3) + require.Nil(t, err) + assert.Equal(t, expected3, res) + }) + }) +} + +func TestRoaringSetStrategy_RecoverFromWAL(t *testing.T) { + dirNameOriginal := t.TempDir() + dirNameRecovered := t.TempDir() + + t.Run("without prior state", func(t *testing.T) { + b, err := NewBucketCreator().NewBucket(testCtx(), dirNameOriginal, "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), + WithStrategy(StrategyRoaringSet), WithBitmapBufPool(roaringset.NewBitmapBufPoolNoop())) + require.Nil(t, err) + + key1 := []byte("test1-key-1") + key2 := []byte("test1-key-2") + key3 := []byte("test1-key-3") + + t.Run("set original values and verify", func(t *testing.T) { + orig1 := []uint64{11, 12} + orig2 := []uint64{21, 22} + orig3 := []uint64{31, 32} + + err = b.RoaringSetAddList(key1, orig1) + require.NoError(t, err) + err = b.RoaringSetAddList(key2, orig2) + require.NoError(t, err) + err = b.RoaringSetAddList(key3, orig3) + require.NoError(t, err) + + bm1, release, err := b.RoaringSetGet(key1) + require.NoError(t, err) + defer release() + assert.ElementsMatch(t, orig1, bm1.ToArray()) + + bm2, release, err := b.RoaringSetGet(key2) + require.NoError(t, err) + defer release() + assert.ElementsMatch(t, orig2, bm2.ToArray()) + + bm3, release, err := b.RoaringSetGet(key3) + require.NoError(t, 
err) + defer release() + assert.ElementsMatch(t, orig3, bm3.ToArray()) + }) + + t.Run("delete individual keys", func(t *testing.T) { + delete2 := uint64(21) + delete3 := uint64(32) + + err = b.RoaringSetRemoveOne(key2, delete2) + require.NoError(t, err) + err = b.RoaringSetRemoveOne(key3, delete3) + require.NoError(t, err) + }) + + t.Run("re-add keys which were previously deleted and new ones", func(t *testing.T) { + reAdd2 := []uint64{21, 23} + reAdd3 := []uint64{31, 33} + + err = b.RoaringSetAddList(key2, reAdd2) + require.NoError(t, err) + err = b.RoaringSetAddList(key3, reAdd3) + require.NoError(t, err) + }) + + t.Run("validate the results prior to recovery", func(t *testing.T) { + expected1 := []uint64{11, 12} // unchanged + expected2 := []uint64{ + 22, // from original import + 21, // added again after initial deletion + 23, // newly added + } + expected3 := []uint64{ + 31, // form original import + 33, // newly added + } // 32 deleted + + bm1, release, err := b.RoaringSetGet(key1) + require.NoError(t, err) + defer release() + assert.ElementsMatch(t, expected1, bm1.ToArray()) + + bm2, release, err := b.RoaringSetGet(key2) + require.NoError(t, err) + defer release() + assert.ElementsMatch(t, expected2, bm2.ToArray()) + + bm3, release, err := b.RoaringSetGet(key3) + require.NoError(t, err) + defer release() + assert.ElementsMatch(t, expected3, bm3.ToArray()) + }) + + t.Run("make sure the WAL is flushed", func(t *testing.T) { + require.Nil(t, b.WriteWAL()) + }) + + t.Run("copy state into recovery folder and destroy original", func(t *testing.T) { + cmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("cp -r %s/*.wal %s", + dirNameOriginal, dirNameRecovered)) + var out bytes.Buffer + cmd.Stderr = &out + err := cmd.Run() + if err != nil { + fmt.Println(out.String()) + t.Fatal(err) + } + b = nil + require.Nil(t, os.RemoveAll(dirNameOriginal)) + }) + + var bRec *Bucket + + t.Run("create new bucket from existing state", func(t *testing.T) { + b, err := 
NewBucketCreator().NewBucket(testCtx(), dirNameRecovered, "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), + WithStrategy(StrategyRoaringSet), WithBitmapBufPool(roaringset.NewBitmapBufPoolNoop())) + require.Nil(t, err) + + bRec = b + }) + + t.Run("validate the results after recovery", func(t *testing.T) { + expected1 := []uint64{11, 12} // unchanged + expected2 := []uint64{ + 22, // from original import + 21, // added again after initial deletion + 23, // newly added + } + expected3 := []uint64{ + 31, // form original import + 33, // newly added + } // 32 deleted + + bm1, release, err := bRec.RoaringSetGet(key1) + require.NoError(t, err) + defer release() + assert.ElementsMatch(t, expected1, bm1.ToArray()) + + bm2, release, err := bRec.RoaringSetGet(key2) + require.NoError(t, err) + defer release() + assert.ElementsMatch(t, expected2, bm2.ToArray()) + + bm3, release, err := bRec.RoaringSetGet(key3) + require.NoError(t, err) + defer release() + assert.ElementsMatch(t, expected3, bm3.ToArray()) + }) + }) +} + +func TestRoaringSetRangeStrategy_RecoverFromWAL(t *testing.T) { + dirNameOriginal := t.TempDir() + dirNameRecovered := t.TempDir() + + t.Run("without prior state", func(t *testing.T) { + b, err := NewBucketCreator().NewBucket(testCtx(), dirNameOriginal, "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), + WithStrategy(StrategyRoaringSetRange)) + require.Nil(t, err) + + key1 := uint64(1) + key2 := uint64(2) + key3 := uint64(3) + + t.Run("set original values and verify", func(t *testing.T) { + orig1 := []uint64{11, 12} + orig2 := []uint64{21, 22} + orig3 := []uint64{31, 32} + + err = b.RoaringSetRangeAdd(key1, orig1...) + require.NoError(t, err) + err = b.RoaringSetRangeAdd(key2, orig2...) + require.NoError(t, err) + err = b.RoaringSetRangeAdd(key3, orig3...) 
+ require.NoError(t, err) + + reader := b.ReaderRoaringSetRange() + defer reader.Close() + + bm1, release1, err := reader.Read(testCtx(), key1, filters.OperatorEqual) + require.NoError(t, err) + defer release1() + assert.ElementsMatch(t, orig1, bm1.ToArray()) + + bm2, release2, err := reader.Read(testCtx(), key2, filters.OperatorEqual) + require.NoError(t, err) + defer release2() + assert.ElementsMatch(t, orig2, bm2.ToArray()) + + bm3, release3, err := reader.Read(testCtx(), key3, filters.OperatorEqual) + require.NoError(t, err) + defer release3() + assert.ElementsMatch(t, orig3, bm3.ToArray()) + }) + + t.Run("delete individual keys", func(t *testing.T) { + delete2 := uint64(21) + delete3 := uint64(32) + + err = b.RoaringSetRangeRemove(key2, delete2) + require.NoError(t, err) + err = b.RoaringSetRangeRemove(key3, delete3) + require.NoError(t, err) + }) + + t.Run("re-add keys which were previously deleted and new ones", func(t *testing.T) { + reAdd2 := []uint64{21, 23} + reAdd3 := []uint64{31, 33} + + err = b.RoaringSetRangeAdd(key2, reAdd2...) + require.NoError(t, err) + err = b.RoaringSetRangeAdd(key3, reAdd3...) 
+ require.NoError(t, err) + }) + + t.Run("validate the results prior to recovery", func(t *testing.T) { + expected1 := []uint64{11, 12} // unchanged + expected2 := []uint64{ + 22, // from original import + 21, // added again after initial deletion + 23, // newly added + } + expected3 := []uint64{ + 31, // form original import + 33, // newly added + } // 32 deleted + + reader := b.ReaderRoaringSetRange() + defer reader.Close() + + bm1, release1, err := reader.Read(testCtx(), key1, filters.OperatorEqual) + require.NoError(t, err) + defer release1() + assert.ElementsMatch(t, expected1, bm1.ToArray()) + + bm2, release2, err := reader.Read(testCtx(), key2, filters.OperatorEqual) + require.NoError(t, err) + defer release2() + assert.ElementsMatch(t, expected2, bm2.ToArray()) + + bm3, release3, err := reader.Read(testCtx(), key3, filters.OperatorEqual) + require.NoError(t, err) + defer release3() + assert.ElementsMatch(t, expected3, bm3.ToArray()) + }) + + t.Run("make sure the WAL is flushed", func(t *testing.T) { + require.Nil(t, b.WriteWAL()) + }) + + t.Run("copy state into recovery folder and destroy original", func(t *testing.T) { + cmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("cp -r %s/*.wal %s", + dirNameOriginal, dirNameRecovered)) + var out bytes.Buffer + cmd.Stderr = &out + err := cmd.Run() + if err != nil { + fmt.Println(out.String()) + t.Fatal(err) + } + b = nil + require.Nil(t, os.RemoveAll(dirNameOriginal)) + }) + + var bRec *Bucket + + t.Run("create new bucket from existing state", func(t *testing.T) { + b, err := NewBucketCreator().NewBucket(testCtx(), dirNameRecovered, "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), + WithStrategy(StrategyRoaringSetRange)) + require.Nil(t, err) + + bRec = b + }) + + t.Run("validate the results after recovery", func(t *testing.T) { + expected1 := []uint64{11, 12} // unchanged + expected2 := []uint64{ + 22, // from original import + 21, // added again after initial 
deletion + 23, // newly added + } + expected3 := []uint64{ + 31, // form original import + 33, // newly added + } // 32 deleted + + reader := bRec.ReaderRoaringSetRange() + defer reader.Close() + + bm1, release1, err := reader.Read(testCtx(), key1, filters.OperatorEqual) + require.NoError(t, err) + defer release1() + assert.ElementsMatch(t, expected1, bm1.ToArray()) + + bm2, release2, err := reader.Read(testCtx(), key2, filters.OperatorEqual) + require.NoError(t, err) + defer release2() + assert.ElementsMatch(t, expected2, bm2.ToArray()) + + bm3, release3, err := reader.Read(testCtx(), key3, filters.OperatorEqual) + require.NoError(t, err) + defer release3() + assert.ElementsMatch(t, expected3, bm3.ToArray()) + }) + }) +} + +func TestMapStrategy_RecoverFromWAL(t *testing.T) { + dirNameOriginal := t.TempDir() + dirNameRecovered := t.TempDir() + + t.Run("without prior state", func(t *testing.T) { + b, err := NewBucketCreator().NewBucket(testCtx(), dirNameOriginal, "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), + WithStrategy(StrategyMapCollection)) + require.Nil(t, err) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + rowKey1 := []byte("test1-key-1") + rowKey2 := []byte("test1-key-2") + + t.Run("set original values and verify", func(t *testing.T) { + row1Map := []MapPair{ + { + Key: []byte("row1-key1"), + Value: []byte("row1-key1-value1"), + }, { + Key: []byte("row1-key2"), + Value: []byte("row1-key2-value1"), + }, + } + + row2Map := []MapPair{ + { + Key: []byte("row2-key1"), + Value: []byte("row2-key1-value1"), + }, { + Key: []byte("row2-key2"), + Value: []byte("row2-key2-value1"), + }, + } + + for _, pair := range row1Map { + err = b.MapSet(rowKey1, pair) + require.Nil(t, err) + } + + for _, pair := range row2Map { + err = b.MapSet(rowKey2, pair) + require.Nil(t, err) + } + + res, err := b.MapList(context.Background(), rowKey1) + require.Nil(t, err) + 
assert.Equal(t, row1Map, res) + res, err = b.MapList(context.Background(), rowKey2) + require.Nil(t, err) + assert.Equal(t, res, row2Map) + }) + + t.Run("replace an existing map key", func(t *testing.T) { + err = b.MapSet(rowKey1, MapPair{ + Key: []byte("row1-key1"), // existing key + Value: []byte("row1-key1-value2"), // updated value + }) + require.Nil(t, err) + + row1Updated := []MapPair{ + { + Key: []byte("row1-key1"), + Value: []byte("row1-key1-value2"), // <--- updated, rest unchanged + }, { + Key: []byte("row1-key2"), + Value: []byte("row1-key2-value1"), + }, + } + + row2Unchanged := []MapPair{ + { + Key: []byte("row2-key1"), + Value: []byte("row2-key1-value1"), + }, { + Key: []byte("row2-key2"), + Value: []byte("row2-key2-value1"), + }, + } + + res, err := b.MapList(context.Background(), rowKey1) + require.Nil(t, err) + assert.Equal(t, row1Updated, res) + res, err = b.MapList(context.Background(), rowKey2) + require.Nil(t, err) + assert.Equal(t, res, row2Unchanged) + }) + + t.Run("validate the results prior to recovery", func(t *testing.T) { + rowKey1 := []byte("test1-key-1") + rowKey2 := []byte("test1-key-2") + + expectedRow1 := []MapPair{ + { + Key: []byte("row1-key1"), + Value: []byte("row1-key1-value2"), + }, { + Key: []byte("row1-key2"), + Value: []byte("row1-key2-value1"), + }, + } + + expectedRow2 := []MapPair{ + { + Key: []byte("row2-key1"), + Value: []byte("row2-key1-value1"), + }, { + Key: []byte("row2-key2"), + Value: []byte("row2-key2-value1"), + }, + } + + res, err := b.MapList(context.Background(), rowKey1) + require.Nil(t, err) + assert.Equal(t, expectedRow1, res) + res, err = b.MapList(context.Background(), rowKey2) + require.Nil(t, err) + assert.Equal(t, expectedRow2, res) + }) + + t.Run("make sure the WAL is flushed", func(t *testing.T) { + require.Nil(t, b.WriteWAL()) + }) + + t.Run("copy state into recovery folder and destroy original", func(t *testing.T) { + cmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("cp -r %s/*.wal %s", + 
dirNameOriginal, dirNameRecovered)) + var out bytes.Buffer + cmd.Stderr = &out + err := cmd.Run() + if err != nil { + fmt.Println(out.String()) + t.Fatal(err) + } + b = nil + require.Nil(t, os.RemoveAll(dirNameOriginal)) + }) + + var bRec *Bucket + + t.Run("create new bucket from existing state", func(t *testing.T) { + b, err := NewBucketCreator().NewBucket(testCtx(), dirNameRecovered, "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), + WithStrategy(StrategyMapCollection)) + require.Nil(t, err) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + bRec = b + }) + + t.Run("validate the results after recovery", func(t *testing.T) { + rowKey1 := []byte("test1-key-1") + rowKey2 := []byte("test1-key-2") + + expectedRow1 := []MapPair{ + { + Key: []byte("row1-key1"), + Value: []byte("row1-key1-value2"), + }, { + Key: []byte("row1-key2"), + Value: []byte("row1-key2-value1"), + }, + } + + expectedRow2 := []MapPair{ + { + Key: []byte("row2-key1"), + Value: []byte("row2-key1-value1"), + }, { + Key: []byte("row2-key2"), + Value: []byte("row2-key2-value1"), + }, + } + + res, err := bRec.MapList(context.Background(), rowKey1) + require.Nil(t, err) + assert.Equal(t, expectedRow1, res) + res, err = bRec.MapList(context.Background(), rowKey2) + require.Nil(t, err) + assert.Equal(t, expectedRow2, res) + }) + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/red_black_tree_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/red_black_tree_test.go new file mode 100644 index 0000000000000000000000000000000000000000..1b474106f7c9ead7d39ded1aedc4d5442e4a4153 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/red_black_tree_test.go @@ -0,0 +1,424 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ 
|_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "crypto/rand" + "fmt" + "math" + "math/big" + "reflect" + "testing" + + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv/rbtree" +) + +const ( + R = true + B = false +) + +// This test adds keys to the RB tree. Afterwards the same nodes are added in the expected order, eg in the way +// the RB tree is expected to re-order the nodes +var rbTests = []struct { + name string + keys []uint + ReorderedKeys []uint + expectedColors []bool // with respect to the original keys +}{ + { + "Requires recoloring but no reordering", + []uint{61, 52, 83, 93}, + []uint{61, 52, 83, 93}, + []bool{B, B, B, R}, + }, + { + "Requires left rotate around root", + []uint{61, 83, 99}, + []uint{83, 61, 99}, + []bool{R, B, R}, + }, + { + "Requires left rotate with more nodes", + []uint{61, 52, 85, 93, 99}, + []uint{61, 52, 93, 85, 99}, + []bool{B, B, R, B, R}, + }, + { + "Requires right and then left rotate", + []uint{61, 52, 85, 93, 87}, + []uint{61, 52, 87, 85, 93}, + []bool{B, B, R, R, B}, + }, + { + "Requires right rotate around root", + []uint{61, 30, 10}, + []uint{30, 10, 61}, + []bool{R, B, R}, + }, + { + "Requires right rotate with more nodes", + []uint{61, 52, 85, 21, 10}, + []uint{61, 85, 21, 10, 52}, + []bool{B, R, B, B, R}, + }, + { + "Requires left and then right rotate", + []uint{61, 52, 85, 21, 36}, + []uint{61, 85, 36, 21, 52}, + []bool{B, R, B, R, B}, + }, + { + "Require reordering for two nodes", + []uint{61, 52, 40, 85, 105, 110}, + []uint{52, 40, 85, 61, 105, 110}, + []bool{B, B, B, R, B, R}, + }, + { + "Ordered nodes increasing", + []uint{1, 2, 3, 4, 5, 6, 7, 8}, + []uint{4, 2, 6, 1, 3, 5, 7, 8}, + []bool{B, R, B, B, B, R, B, R}, + }, + { + "Ordered nodes decreasing", + []uint{8, 7, 6, 5, 4, 3, 2, 1}, + []uint{5, 3, 7, 2, 4, 6, 8, 1}, + []bool{B, R, B, B, B, R, B, R}, + }, + { 
+ "Multiple rotations along the tree and colour changes", + []uint{166, 92, 33, 133, 227, 236, 71, 183, 18, 139, 245, 161}, + []uint{166, 92, 227, 33, 139, 183, 236, 18, 71, 133, 161, 245}, + []bool{B, R, B, R, R, B, R, B, R, B, R, R}, + }, +} + +func TestRBTree(t *testing.T) { + for _, tt := range rbTests { + t.Run(tt.name, func(t *testing.T) { + tree := &binarySearchTree{} + for _, key := range tt.keys { + iByte := []byte{uint8(key)} + tree.insert(iByte, iByte, nil) + require.Empty(t, tree.root.parent) + } + validateRBTree(t, tree.root) + + flattenTree := tree.flattenInOrder() + require.Equal(t, len(tt.keys), len(flattenTree)) // no entries got lost + + // add tree with the same nodes in the "optimal" order to be able to compare their order afterwards + treeCorrectOrder := &binarySearchTree{} + for _, key := range tt.ReorderedKeys { + iByte := []byte{uint8(key)} + treeCorrectOrder.insert(iByte, iByte, nil) + } + + flattenTreeInput := treeCorrectOrder.flattenInOrder() + for i := range flattenTree { + byteKey := flattenTree[i].key + originalIndex := getIndexInSlice(tt.keys, byteKey) + require.Equal(t, byteKey, flattenTreeInput[i].key) + require.Equal(t, flattenTree[i].colourIsRed, tt.expectedColors[originalIndex]) + } + }) + } +} + +func TestRBTreeMap(t *testing.T) { + for _, tt := range rbTests { + t.Run(tt.name, func(t *testing.T) { + tree := &binarySearchTreeMap{} + for _, key := range tt.keys { + tree.insert([]byte{uint8(key)}, MapPair{ + Key: []byte("map-key-1"), + Value: []byte("map-value-1"), + }) + require.Empty(t, tree.root.parent) + } + validateRBTree(t, tree.root) + + flatten_tree := tree.flattenInOrder() + require.Equal(t, len(tt.keys), len(flatten_tree)) // no entries got lost + + // add tree with the same nodes in the "optimal" order to be able to compare their order afterwards + treeCorrectOrder := &binarySearchTreeMap{} + for _, key := range tt.ReorderedKeys { + treeCorrectOrder.insert([]byte{uint8(key)}, MapPair{ + Key: []byte("map-key-1"), + 
Value: []byte("map-value-1"), + }) + } + + flatten_tree_input := treeCorrectOrder.flattenInOrder() + for i := range flatten_tree { + byte_key := flatten_tree[i].key + originalIndex := getIndexInSlice(tt.keys, byte_key) + require.Equal(t, byte_key, flatten_tree_input[i].key) + require.Equal(t, flatten_tree[i].colourIsRed, tt.expectedColors[originalIndex]) + } + }) + } +} + +func TestRBTreeMulti(t *testing.T) { + for _, tt := range rbTests { + t.Run(tt.name, func(t *testing.T) { + tree := &binarySearchTreeMulti{} + for _, key := range tt.keys { + values := []value{} + for j := uint(0); j < 5; j++ { + values = append(values, value{value: []byte{uint8(key * j)}, tombstone: false}) + } + tree.insert([]byte{uint8(key)}, values) + require.Empty(t, tree.root.parent) + } + validateRBTree(t, tree.root) + + flatten_tree := tree.flattenInOrder() + require.Equal(t, len(tt.keys), len(flatten_tree)) // no entries got lost + + // add tree with the same nodes in the "optimal" order to be able to compare their order afterwards + treeCorrectOrder := &binarySearchTreeMulti{} + for _, key := range tt.ReorderedKeys { + values := []value{} + for j := uint(0); j < 5; j++ { + values = append(values, value{value: []byte{uint8(key * j)}, tombstone: false}) + } + treeCorrectOrder.insert([]byte{uint8(key)}, values) + } + + flatten_tree_input := treeCorrectOrder.flattenInOrder() + for i := range flatten_tree { + byte_key := flatten_tree[i].key + originalIndex := getIndexInSlice(tt.keys, byte_key) + require.Equal(t, byte_key, flatten_tree_input[i].key) + require.Equal(t, flatten_tree[i].colourIsRed, tt.expectedColors[originalIndex]) + } + }) + } +} + +// add keys as a) normal keys b) tombstone keys and c) half tombstone, half normal. 
+// The resulting (rebalanced) trees must have the same order and colors +var tombstoneTests = []struct { + name string + keys []uint +}{ + {"Rotate left around root", []uint{61, 83, 99}}, + {"Rotate right around root", []uint{61, 30, 10}}, + {"Multiple rotations along the tree and colour changes", []uint{166, 92, 33, 133, 227, 236, 71, 183, 18, 139, 245, 161}}, + {"Ordered nodes increasing", []uint{1, 2, 3, 4, 5, 6, 7, 8}}, + {"Ordered nodes decreasing", []uint{8, 7, 6, 5, 4, 3, 2, 1}}, +} + +func TestRBTrees_Tombstones(t *testing.T) { + for _, tt := range tombstoneTests { + t.Run(tt.name, func(t *testing.T) { + treeNormal := &binarySearchTree{} + treeTombstone := &binarySearchTree{} + treeHalfHalf := &binarySearchTree{} + for i, key := range tt.keys { + iByte := []byte{uint8(key)} + treeNormal.insert(iByte, iByte, nil) + treeTombstone.setTombstone(iByte, nil, nil) + if i%2 == 0 { + treeHalfHalf.insert(iByte, iByte, nil) + } else { + treeHalfHalf.setTombstone(iByte, nil, nil) + } + } + validateRBTree(t, treeNormal.root) + validateRBTree(t, treeTombstone.root) + validateRBTree(t, treeHalfHalf.root) + + treeNormalFlatten := treeNormal.flattenInOrder() + treeTombstoneFlatten := treeTombstone.flattenInOrder() + treeHalfHalfFlatten := treeHalfHalf.flattenInOrder() + require.Equal(t, len(tt.keys), len(treeNormalFlatten)) + require.Equal(t, len(tt.keys), len(treeTombstoneFlatten)) + require.Equal(t, len(tt.keys), len(treeHalfHalfFlatten)) + + for i := range treeNormalFlatten { + require.Equal(t, treeNormalFlatten[i].key, treeTombstoneFlatten[i].key) + require.Equal(t, treeNormalFlatten[i].key, treeHalfHalfFlatten[i].key) + require.Equal(t, treeNormalFlatten[i].colourIsRed, treeTombstoneFlatten[i].colourIsRed) + require.Equal(t, treeNormalFlatten[i].colourIsRed, treeHalfHalfFlatten[i].colourIsRed) + } + }) + } +} + +type void struct{} + +var member void + +func mustRandIntn(max int64) int { + randInt, err := rand.Int(rand.Reader, big.NewInt(max)) + if err != nil { + 
panic(fmt.Sprintf("mustRandIntn error: %v", err)) + } + return int(randInt.Int64()) +} + +func TestRBTrees_Random(t *testing.T) { + tree := &binarySearchTree{} + amount := mustRandIntn(100000) + keySize := mustRandIntn(100) + uniqueKeys := make(map[string]void) + for i := 0; i < amount; i++ { + key := make([]byte, keySize) + rand.Read(key) + uniqueKeys[string(key)] = member + if mustRandIntn(5) == 1 { // add 20% of all entries as tombstone + tree.setTombstone(key, nil, nil) + } else { + tree.insert(key, key, nil) + } + } + + // all added keys are still part of the tree + treeFlattened := tree.flattenInOrder() + require.Equal(t, len(uniqueKeys), len(treeFlattened)) + for _, entry := range treeFlattened { + _, ok := uniqueKeys[string(entry.key)] + require.True(t, ok) + } + validateRBTree(t, tree.root) +} + +func TestRBTreesMap_Random(t *testing.T) { + tree := &binarySearchTreeMap{} + amount := mustRandIntn(100000) + keySize := mustRandIntn(100) + uniqueKeys := make(map[string]void) + for i := 0; i < amount; i++ { + key := make([]byte, keySize) + rand.Read(key) + uniqueKeys[string(key)] = member + tree.insert(key, MapPair{ + Key: []byte("map-key-1"), + Value: []byte("map-value-1"), + }) + } + + // all added keys are still part of the tree + treeFlattened := tree.flattenInOrder() + require.Equal(t, len(uniqueKeys), len(treeFlattened)) + for _, entry := range treeFlattened { + _, ok := uniqueKeys[string(entry.key)] + require.True(t, ok) + } + validateRBTree(t, tree.root) +} + +func TestRBTreesMulti_Random(t *testing.T) { + tree := &binarySearchTreeMulti{} + amount := mustRandIntn(100000) + keySize := mustRandIntn(100) + uniqueKeys := make(map[string]void) + for i := 0; i < amount; i++ { + key := make([]byte, keySize) + rand.Read(key) + uniqueKeys[string(key)] = member + values := []value{} + for j := 0; j < 5; j++ { + values = append(values, value{value: []byte{uint8(i * j)}, tombstone: false}) + } + tree.insert(key, values) + } + + // all added keys are still part of 
the tree + treeFlattened := tree.flattenInOrder() + require.Equal(t, len(uniqueKeys), len(treeFlattened)) + for _, entry := range treeFlattened { + _, ok := uniqueKeys[string(entry.key)] + require.True(t, ok) + } + validateRBTree(t, tree.root) +} + +func getIndexInSlice(reorderedKeys []uint, key []byte) int { + for i, v := range reorderedKeys { + if v == uint(key[0]) { + return i + } + } + return -1 +} + +// Checks if a tree is a RB tree +// +// There are several properties that valid RB trees follow: +// 1) The root node is always black +// 2) The max depth of a tree is 2* Log2(N+1), where N is the number of nodes +// 3) Every path from root to leave has the same number of _black_ nodes +// 4) Red nodes only have black (or nil) children +// +// In addition this also validates some general tree properties: +// - root has no parent +// - if node A is a child of B, B must be the parent of A) +func validateRBTree(t *testing.T, rootNode rbtree.Node) { + require.False(t, rootNode.IsRed()) + require.True(t, rootNode.Parent().IsNil()) + + treeDepth, nodeCount, _ := walkTree(t, rootNode) + maxDepth := 2 * math.Log2(float64(nodeCount)+1) + require.True(t, treeDepth <= int(maxDepth)) +} + +// Walks through the tree and counts the depth, number of nodes and number of black nodes +func walkTree(t *testing.T, node rbtree.Node) (int, int, int) { + if reflect.ValueOf(node).IsNil() { + return 0, 0, 0 + } + leftNode := node.Left() + leftNodeIsNil := reflect.ValueOf(leftNode).IsNil() + rightNode := node.Right() + rightNodeIsNil := reflect.ValueOf(rightNode).IsNil() + + // validate parent/child connections + if !rightNodeIsNil { + require.Equal(t, rightNode.Parent(), node) + } + if !leftNodeIsNil { + require.Equal(t, leftNode.Parent(), node) + } + + // red nodes need black (or nil) children + if node.IsRed() { + require.True(t, leftNodeIsNil || !node.Left().IsRed()) + require.True(t, rightNodeIsNil || !node.Left().IsRed()) + } + + blackNode := int(1) + if node.IsRed() { + blackNode = 
0 + } + + if node.Right().IsNil() && node.Left().IsNil() { + return 1, 1, blackNode + } + + depthRight, nodeCountRight, blackNodesDepthRight := walkTree(t, node.Right()) + depthLeft, nodeCountLeft, blackNodesDepthLeft := walkTree(t, node.Left()) + require.Equal(t, blackNodesDepthRight, blackNodesDepthLeft) + + nodeCount := nodeCountLeft + nodeCountRight + 1 + if depthRight > depthLeft { + return depthRight + 1, nodeCount, blackNodesDepthRight + blackNode + } else { + return depthLeft + 1, nodeCount, blackNodesDepthRight + blackNode + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/search_segment.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/search_segment.go new file mode 100644 index 0000000000000000000000000000000000000000..b3e428514c7a34427429b07326bfb7cf8bf07837 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/search_segment.go @@ -0,0 +1,370 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "context" + "fmt" + "math" + "sort" + "strconv" + + "github.com/sirupsen/logrus" + "github.com/weaviate/weaviate/adapters/repos/db/inverted/terms" + "github.com/weaviate/weaviate/adapters/repos/db/priorityqueue" +) + +func DoBlockMaxWand(ctx context.Context, limit int, results Terms, averagePropLength float64, additionalExplanations bool, + termCount, minimumOrTokensMatch int, logger logrus.FieldLogger, +) (*priorityqueue.Queue[[]*terms.DocPointerWithScore], error) { + var docInfos []*terms.DocPointerWithScore + topKHeap := priorityqueue.NewMinWithId[[]*terms.DocPointerWithScore](limit) + worstDist := float64(-10000) // tf score can be negative + sort.Sort(results) + iterations := 0 + var firstNonExhausted int + pivotID := uint64(0) + var pivotPoint int + upperBound := float32(0) + + for { + iterations++ + + if iterations%100000 == 0 && ctx != nil && ctx.Err() != nil { + segmentPath := "" + terms := "" + filterCardinality := -1 + for _, r := range results { + if r == nil { + continue + } + if r.segment != nil { + segmentPath = r.segment.path + if r.filterDocIds != nil { + filterCardinality = r.filterDocIds.GetCardinality() + } + } + terms += r.QueryTerm() + ":" + strconv.Itoa(int(r.IdPointer())) + ":" + strconv.Itoa(r.Count()) + ", " + } + logger.WithFields(logrus.Fields{ + "segment": segmentPath, + "iterations": iterations, + "pivotID": pivotID, + "firstNonExhausted": firstNonExhausted, + "lenResults": len(results), + "pivotPoint": pivotPoint, + "upperBound": upperBound, + "terms": terms, + "filterCardinality": filterCardinality, + "limit": limit, + }).Warnf("DoBlockMaxWand: search timed out, returning partial results") + return topKHeap, fmt.Errorf("DoBlockMaxWand: search timed out, returning partial results") + } + + cumScore := float64(0) + firstNonExhausted = -1 + pivotID = math.MaxUint64 + + for pivotPoint = 0; pivotPoint < len(results); pivotPoint++ { + if results[pivotPoint].exhausted { + 
continue + } + if firstNonExhausted == -1 { + firstNonExhausted = pivotPoint + } + cumScore += float64(results[pivotPoint].Idf()) + if cumScore >= worstDist { + pivotID = results[pivotPoint].idPointer + for i := pivotPoint + 1; i < len(results); i++ { + if results[i].idPointer != pivotID { + break + } + pivotPoint = i + } + break + } + } + if firstNonExhausted == -1 || pivotID == math.MaxUint64 { + return topKHeap, nil + } + + upperBound = float32(0) + for i := 0; i <= pivotPoint; i++ { + if results[i].exhausted { + continue + } + if results[i].currentBlockMaxId < pivotID { + results[i].AdvanceAtLeastShallow(pivotID) + } + upperBound += results[i].currentBlockImpact + } + + if topKHeap.ShouldEnqueue(upperBound, limit) { + if additionalExplanations { + docInfos = make([]*terms.DocPointerWithScore, termCount) + } + if pivotID == results[firstNonExhausted].idPointer { + score := 0.0 + termsMatched := 0 + for _, term := range results { + if term.idPointer != pivotID { + break + } + termsMatched++ + _, s, d := term.Score(averagePropLength, additionalExplanations) + score += s + upperBound -= term.currentBlockImpact - float32(s) + + if additionalExplanations { + docInfos[term.QueryTermIndex()] = d + } + + } + for _, term := range results { + if !term.exhausted && term.idPointer != pivotID { + break + } + term.Advance() + } + if topKHeap.ShouldEnqueue(float32(score), limit) && termsMatched >= minimumOrTokensMatch { + topKHeap.InsertAndPop(pivotID, score, limit, &worstDist, docInfos) + } + + sort.Sort(results) + + } else { + nextList := pivotPoint + for results[nextList].idPointer == pivotID { + nextList-- + } + results[nextList].AdvanceAtLeast(pivotID) + + // sort partial + for i := nextList + 1; i < len(results); i++ { + if results[i].idPointer < results[i-1].idPointer { + // swap + results[i], results[i-1] = results[i-1], results[i] + } else { + break + } + } + + } + } else { + nextList := pivotPoint + maxWeight := results[nextList].Idf() + + for i := 0; i < pivotPoint; 
i++ { + if results[i].Idf() > maxWeight { + nextList = i + maxWeight = results[i].Idf() + } + } + + // max uint + next := uint64(math.MaxUint64) + + for i := 0; i <= pivotPoint; i++ { + if results[i].currentBlockMaxId < next { + next = results[i].currentBlockMaxId + } + } + + next += 1 + + if pivotPoint+1 < len(results) && results[pivotPoint+1].idPointer < next { + next = results[pivotPoint+1].idPointer + } + + if next <= pivotID { + next = pivotID + 1 + } + results[nextList].AdvanceAtLeast(next) + + for i := nextList + 1; i < len(results); i++ { + if results[i].idPointer < results[i-1].idPointer { + // swap + results[i], results[i-1] = results[i-1], results[i] + } else if results[i].exhausted && i < len(results)-1 { + results[i], results[i+1] = results[i+1], results[i] + } + } + + } + + } +} + +func DoBlockMaxAnd(ctx context.Context, limit int, resultsByTerm Terms, averagePropLength float64, additionalExplanations bool, + termCount int, minimumOrTokensMatch int, logger logrus.FieldLogger, +) *priorityqueue.Queue[[]*terms.DocPointerWithScore] { + results := TermsBySize(resultsByTerm) + var docInfos []*terms.DocPointerWithScore + topKHeap := priorityqueue.NewMinWithId[[]*terms.DocPointerWithScore](limit) + worstDist := float64(-10000) // tf score can be negative + sort.Sort(results) + iterations := 0 + pivotID := uint64(0) + upperBound := float32(0) + + if minimumOrTokensMatch > len(results) { + return topKHeap + } + + for { + iterations++ + + if iterations%100000 == 0 && ctx != nil && ctx.Err() != nil { + segmentPath := "" + terms := "" + filterCardinality := -1 + for _, r := range results { + if r == nil { + continue + } + if r.segment != nil { + segmentPath = r.segment.path + if r.filterDocIds != nil { + filterCardinality = r.filterDocIds.GetCardinality() + } + } + terms += r.QueryTerm() + ":" + strconv.Itoa(int(r.IdPointer())) + ":" + strconv.Itoa(r.Count()) + ", " + } + logger.WithFields(logrus.Fields{ + "segment": segmentPath, + "iterations": iterations, + 
"pivotID": pivotID, + "lenResults": len(results), + "upperBound": upperBound, + "terms": terms, + "filterCardinality": filterCardinality, + "limit": limit, + }).Warnf("DoBlockMaxAnd: search timed out, returning partial results") + return topKHeap + } + + for i := 0; i < len(results); i++ { + if results[i].exhausted { + return topKHeap + } + } + + results[0].AdvanceAtLeast(pivotID) + + if results[0].idPointer == math.MaxUint64 { + return topKHeap + } + + pivotID = results[0].idPointer + + for i := 1; i < len(results); i++ { + results[i].AdvanceAtLeastShallow(pivotID) + } + + upperBound = float32(0) + for i := 0; i < len(results); i++ { + upperBound += results[i].currentBlockImpact + } + + if topKHeap.ShouldEnqueue(upperBound, limit) { + isCandidate := true + for i := 1; i < len(results); i++ { + results[i].AdvanceAtLeast(pivotID) + if results[i].idPointer != pivotID { + isCandidate = false + break + } + } + if isCandidate { + score := 0.0 + if additionalExplanations { + docInfos = make([]*terms.DocPointerWithScore, termCount) + } + for _, term := range results { + _, s, d := term.Score(averagePropLength, additionalExplanations) + score += s + if additionalExplanations { + docInfos[term.QueryTermIndex()] = d + } + term.Advance() + } + if topKHeap.ShouldEnqueue(float32(score), limit) { + topKHeap.InsertAndPop(pivotID, score, limit, &worstDist, docInfos) + } + } else { + pivotID += 1 + } + } else { + + // max uint + pivotID = uint64(math.MaxUint64) + + for i := 0; i < len(results); i++ { + if results[i].currentBlockMaxId < pivotID { + pivotID = results[i].currentBlockMaxId + } + } + + pivotID += 1 + } + } +} + +func DoWand(limit int, results *terms.Terms, averagePropLength float64, additionalExplanations bool, + minimumOrTokensMatch int, +) *priorityqueue.Queue[[]*terms.DocPointerWithScore] { + topKHeap := priorityqueue.NewMinWithId[[]*terms.DocPointerWithScore](limit) + worstDist := float64(-10000) // tf score can be negative + sort.Sort(results) + for { + + if 
results.CompletelyExhausted() || results.Pivot(worstDist) { + return topKHeap + } + + id, score, additional, ok := results.ScoreNext(averagePropLength, additionalExplanations, minimumOrTokensMatch) + results.SortFull() + if topKHeap.ShouldEnqueue(float32(score), limit) && ok { + topKHeap.InsertAndPop(id, score, limit, &worstDist, additional) + } + } +} + +type Terms []*SegmentBlockMax + +// provide sort interface for +func (t Terms) Len() int { + return len(t) +} + +func (t Terms) Less(i, j int) bool { + return t[i].idPointer < t[j].idPointer +} + +func (t Terms) Swap(i, j int) { + t[i], t[j] = t[j], t[i] +} + +type TermsBySize []*SegmentBlockMax + +// provide sort interface for +func (t TermsBySize) Len() int { + return len(t) +} + +func (t TermsBySize) Less(i, j int) bool { + return t[i].Count() < t[j].Count() +} + +func (t TermsBySize) Swap(i, j int) { + t[i], t[j] = t[j], t[i] +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment.go new file mode 100644 index 0000000000000000000000000000000000000000..72b6dd163264bc07bb6032dd4ff3635ae549167b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment.go @@ -0,0 +1,683 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "bufio" + "bytes" + "fmt" + "io" + "os" + "sync" + + "github.com/weaviate/weaviate/adapters/repos/db/roaringset" + "github.com/weaviate/weaviate/adapters/repos/db/roaringsetrange" + + "github.com/pkg/errors" + + "github.com/bits-and-blooms/bloom/v3" + "github.com/prometheus/client_golang/prometheus" + "github.com/sirupsen/logrus" + "github.com/weaviate/sroar" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv/segmentindex" + "github.com/weaviate/weaviate/entities/diskio" + "github.com/weaviate/weaviate/entities/lsmkv" + entsentry "github.com/weaviate/weaviate/entities/sentry" + "github.com/weaviate/weaviate/usecases/memwatch" + "github.com/weaviate/weaviate/usecases/mmap" + "github.com/weaviate/weaviate/usecases/monitoring" +) + +type Segment interface { + getPath() string + setPath(path string) + getStrategy() segmentindex.Strategy + getSecondaryIndexCount() uint16 + getLevel() uint16 + getSize() int64 + setSize(size int64) + + PayloadSize() int + close() error + get(key []byte) ([]byte, error) + getBySecondaryIntoMemory(pos int, key []byte, buffer []byte) ([]byte, []byte, []byte, error) + getCollection(key []byte) ([]value, error) + getInvertedData() *segmentInvertedData + getSegment() *segment + isLoaded() bool + markForDeletion() error + MergeTombstones(other *sroar.Bitmap) (*sroar.Bitmap, error) + newCollectionCursor() *segmentCursorCollection + newCollectionCursorReusable() *segmentCursorCollectionReusable + newCursor() *segmentCursorReplace + newCursorWithSecondaryIndex(pos int) *segmentCursorReplace + newMapCursor() *segmentCursorMap + newNodeReader(offset nodeOffset, operation string) (*nodeReader, error) + newRoaringSetCursor() *roaringset.SegmentCursor + newRoaringSetRangeCursor() roaringsetrange.SegmentCursor + newRoaringSetRangeReader() *roaringsetrange.SegmentReader + quantileKeys(q int) [][]byte + ReadOnlyTombstones() (*sroar.Bitmap, error) + replaceStratParseData(in []byte) 
([]byte, []byte, error) + roaringSetGet(key []byte, bitmapBufPool roaringset.BitmapBufPool) (roaringset.BitmapLayer, func(), error) + roaringSetMergeWith(key []byte, input roaringset.BitmapLayer, bitmapBufPool roaringset.BitmapBufPool) error +} + +type segment struct { + path string + metaPaths []string + level uint16 + secondaryIndexCount uint16 + version uint16 + segmentStartPos uint64 + segmentEndPos uint64 + dataStartPos uint64 + dataEndPos uint64 + contents []byte + contentFile *os.File + strategy segmentindex.Strategy + index diskIndex + secondaryIndices []diskIndex + logger logrus.FieldLogger + metrics *Metrics + size int64 + readFromMemory bool + unMapContents bool + + useBloomFilter bool // see bucket for more datails + bloomFilter *bloom.BloomFilter + secondaryBloomFilters []*bloom.BloomFilter + bloomFilterMetrics *bloomFilterMetrics + + // the net addition this segment adds with respect to all previous segments + calcCountNetAdditions bool // see bucket for more datails + countNetAdditions int + + invertedHeader *segmentindex.HeaderInverted + invertedData *segmentInvertedData + + observeMetaWrite diskio.MeteredWriterCallback // used for precomputing meta (cna + bloom) +} + +type diskIndex interface { + // Get return lsmkv.NotFound in case no node can be found + Get(key []byte) (segmentindex.Node, error) + + // Seek returns lsmkv.NotFound in case the seek value is larger than + // the highest value in the collection, otherwise it returns the next highest + // value (or the exact value if present) + Seek(key []byte) (segmentindex.Node, error) + + Next(key []byte) (segmentindex.Node, error) + + // AllKeys in no specific order, e.g. 
for building a bloom filter + AllKeys() ([][]byte, error) + + // Size of the index in bytes + Size() int + + QuantileKeys(q int) [][]byte +} + +type segmentConfig struct { + mmapContents bool + useBloomFilter bool + calcCountNetAdditions bool + overwriteDerived bool + enableChecksumValidation bool + MinMMapSize int64 + allocChecker memwatch.AllocChecker + fileList map[string]int64 + precomputedCountNetAdditions *int + writeMetadata bool +} + +// newSegment creates a new segment structure, representing an LSM disk segment. +// +// This function is partially copied by a function called preComputeSegmentMeta. +// Any changes made here should likely be made in preComputeSegmentMeta as well, +// and vice versa. This is absolutely not ideal, but in the short time I was able +// to consider this, I wasn't able to find a way to unify the two -- there are +// subtle differences. +func newSegment(path string, logger logrus.FieldLogger, metrics *Metrics, + existsLower existsOnLowerSegmentsFn, cfg segmentConfig, +) (_ *segment, rerr error) { + defer func() { + p := recover() + if p == nil { + return + } + entsentry.Recover(p) + rerr = fmt.Errorf("unexpected error loading segment %q: %v", path, p) + }() + + file, err := os.Open(path) + if err != nil { + return nil, fmt.Errorf("open file: %w", err) + } + + // The lifetime of the `file` exceeds this constructor as we store the open file for later use in `contentFile`. + // invariant: We close **only** if any error happened after successfully opening the file. To avoid leaking open file descriptor. + // NOTE: This `defer` works even with `err` being shadowed in the whole function because defer checks for named `rerr` return value. 
+ defer func() { + if rerr != nil { + file.Close() + } + }() + + var size int64 + if cfg.fileList != nil { + if fileSize, ok := cfg.fileList[file.Name()]; ok { + size = fileSize + } + } + + // fallback to getting the filesize from disk in case it wasn't prefetched (for example, for new segments after compaction) + if size == 0 { + fileInfo, err := file.Stat() + if err != nil { + return nil, fmt.Errorf("stat file: %w", err) + } + size = fileInfo.Size() + } + + // mmap has some overhead, we can read small files directly to memory + var contents []byte + var unMapContents bool + var allocCheckerErr error + + if size <= cfg.MinMMapSize { // check if it is a candidate for full reading + if cfg.allocChecker == nil { + logger.WithFields(logrus.Fields{ + "path": path, + "size": size, + "minMMapSize": cfg.MinMMapSize, + }).Info("allocChecker is nil, skipping memory pressure check for new segment") + } else { + allocCheckerErr = cfg.allocChecker.CheckAlloc(size) // check if we have enough memory + if allocCheckerErr != nil { + logger.Debugf("memory pressure: cannot fully read segment") + } + } + } + + useBloomFilter := cfg.useBloomFilter + readFromMemory := cfg.mmapContents + if size > cfg.MinMMapSize || cfg.allocChecker == nil || allocCheckerErr != nil { // mmap the file if it's too large or if we have memory pressure + contents2, err := mmap.MapRegion(file, int(size), mmap.RDONLY, 0, 0) + if err != nil { + return nil, fmt.Errorf("mmap file: %w", err) + } + contents = contents2 + unMapContents = true + } else { // read the file into memory if it's small enough and we have enough memory + meteredF := diskio.NewMeteredReader(file, diskio.MeteredReaderCallback(metrics.ReadObserver("readSegmentFile"))) + bufio.NewReader(meteredF) + contents, err = io.ReadAll(meteredF) + if err != nil { + return nil, fmt.Errorf("read file: %w", err) + } + unMapContents = false + readFromMemory = true + useBloomFilter = false + } + header, err := 
segmentindex.ParseHeader(contents[:segmentindex.HeaderSize]) + if err != nil { + return nil, fmt.Errorf("parse header: %w", err) + } + + if err := segmentindex.CheckExpectedStrategy(header.Strategy); err != nil { + return nil, fmt.Errorf("unsupported strategy in segment: %w", err) + } + + if header.Version >= segmentindex.SegmentV1 && cfg.enableChecksumValidation { + file.Seek(0, io.SeekStart) + headerSize := int64(segmentindex.HeaderSize) + if header.Strategy == segmentindex.StrategyInverted { + headerSize += int64(segmentindex.HeaderInvertedSize) + } + segmentFile := segmentindex.NewSegmentFile(segmentindex.WithReader(file)) + if err := segmentFile.ValidateChecksum(size, headerSize); err != nil { + return nil, fmt.Errorf("validate segment %q: %w", path, err) + } + } + + primaryIndex, err := header.PrimaryIndex(contents) + if err != nil { + return nil, fmt.Errorf("extract primary index position: %w", err) + } + + // if there are no secondary indices and checksum validation is enabled, + // we need to remove the checksum bytes from the primary index + // See below for the same logic if there are secondary indices + if header.Version >= segmentindex.SegmentV1 && cfg.enableChecksumValidation && header.SecondaryIndices == 0 { + primaryIndex = primaryIndex[:len(primaryIndex)-segmentindex.ChecksumSize] + } + + primaryDiskIndex := segmentindex.NewDiskTree(primaryIndex) + + dataStartPos := uint64(segmentindex.HeaderSize) + dataEndPos := header.IndexStart + + var invertedHeader *segmentindex.HeaderInverted + if header.Strategy == segmentindex.StrategyInverted { + invertedHeader, err = segmentindex.LoadHeaderInverted(contents[segmentindex.HeaderSize : segmentindex.HeaderSize+segmentindex.HeaderInvertedSize]) + if err != nil { + return nil, errors.Wrap(err, "load inverted header") + } + dataStartPos = invertedHeader.KeysOffset + dataEndPos = invertedHeader.TombstoneOffset + } + + stratLabel := header.Strategy.String() + observeWrite := 
monitoring.GetMetrics().FileIOWrites.With(prometheus.Labels{ + "strategy": stratLabel, + "operation": "segmentMetadata", + }) + + if unMapContents { + // a map was created, track it + monitoring.GetMetrics().MmapOperations.With(prometheus.Labels{ + "operation": "mmap", + "strategy": stratLabel, + }).Inc() + } + + seg := &segment{ + level: header.Level, + path: path, + contents: contents, + version: header.Version, + secondaryIndexCount: header.SecondaryIndices, + segmentStartPos: header.IndexStart, + segmentEndPos: uint64(size), + strategy: header.Strategy, + dataStartPos: dataStartPos, + dataEndPos: dataEndPos, + index: primaryDiskIndex, + logger: logger, + metrics: metrics, + size: size, + readFromMemory: readFromMemory, + useBloomFilter: useBloomFilter, + calcCountNetAdditions: cfg.calcCountNetAdditions, + invertedHeader: invertedHeader, + invertedData: &segmentInvertedData{ + tombstones: sroar.NewBitmap(), + }, + unMapContents: unMapContents, + observeMetaWrite: func(n int64) { observeWrite.Observe(float64(n)) }, + } + + // Using pread strategy requires file to remain open for segment lifetime + if seg.readFromMemory { + defer file.Close() + } else { + seg.contentFile = file + } + + if seg.secondaryIndexCount > 0 { + seg.secondaryIndices = make([]diskIndex, seg.secondaryIndexCount) + for i := range seg.secondaryIndices { + secondary, err := header.SecondaryIndex(contents, uint16(i)) + if err != nil { + return nil, fmt.Errorf("get position for secondary index at %d: %w", i, err) + } + // if we are on the last secondary index and checksum validation is enabled, + // we need to remove the checksum bytes from the secondary index + if header.Version >= segmentindex.SegmentV1 && cfg.enableChecksumValidation && i == int(seg.secondaryIndexCount-1) { + secondary = secondary[:len(secondary)-segmentindex.ChecksumSize] + } + seg.secondaryIndices[i] = segmentindex.NewDiskTree(secondary) + } + } + + metadataRead, err := seg.initMetadata(metrics, cfg.overwriteDerived, 
existsLower, cfg.precomputedCountNetAdditions, cfg.fileList, cfg.writeMetadata) + if err != nil { + return nil, fmt.Errorf("init metadata: %w", err) + } + + if !metadataRead { + if seg.useBloomFilter { + if err := seg.initBloomFilters(metrics, cfg.overwriteDerived, cfg.fileList); err != nil { + return nil, err + } + } + if seg.calcCountNetAdditions { + if err := seg.initCountNetAdditions(existsLower, cfg.overwriteDerived, cfg.precomputedCountNetAdditions, cfg.fileList); err != nil { + return nil, err + } + } + } + + if seg.strategy == segmentindex.StrategyInverted { + _, err := seg.loadTombstones() + if err != nil { + return nil, fmt.Errorf("load tombstones: %w", err) + } + + _, err = seg.loadPropertyLengths() + if err != nil { + return nil, fmt.Errorf("load property lengths: %w", err) + } + + } + + return seg, nil +} + +func (s *segment) close() error { + var munmapErr, fileCloseErr error + if s.unMapContents { + m := mmap.MMap(s.contents) + munmapErr = m.Unmap() + stratLabel := s.strategy.String() + monitoring.GetMetrics().MmapOperations.With(prometheus.Labels{ + "operation": "munmap", + "strategy": stratLabel, + }).Inc() + } + if s.contentFile != nil { + fileCloseErr = s.contentFile.Close() + } + + if munmapErr != nil || fileCloseErr != nil { + return fmt.Errorf("close segment: munmap: %w, close contents file: %w", munmapErr, fileCloseErr) + } + + return nil +} + +func (s *segment) dropImmediately() error { + // support for persisting bloom filters and cnas was added in v1.17, + // therefore the files may not be present on segments created with previous + // versions. 
By using RemoveAll, which does not error on NotExists, these + // drop calls are backward-compatible: + if err := os.RemoveAll(s.bloomFilterPath()); err != nil { + return fmt.Errorf("drop bloom filter: %w", err) + } + + for i := 0; i < int(s.secondaryIndexCount); i++ { + if err := os.RemoveAll(s.bloomFilterSecondaryPath(i)); err != nil { + return fmt.Errorf("drop bloom filter: %w", err) + } + } + + if err := os.RemoveAll(s.countNetPath()); err != nil { + return fmt.Errorf("drop count net additions file: %w", err) + } + + if err := os.RemoveAll(s.metadataPath()); err != nil { + return fmt.Errorf("drop metadata file: %w", err) + } + + // for the segment itself, we're not using RemoveAll, but Remove. If there + // was a NotExists error here, something would be seriously wrong, and we + // don't want to ignore it. + if err := os.Remove(s.path); err != nil { + return fmt.Errorf("drop segment: %w", err) + } + + return nil +} + +func (s *segment) dropMarked() error { + // support for persisting bloom filters and cnas was added in v1.17, + // therefore the files may not be present on segments created with previous + // versions. 
By using RemoveAll, which does not error on NotExists, these + // drop calls are backward-compatible: + if err := os.RemoveAll(s.bloomFilterPath() + DeleteMarkerSuffix); err != nil { + return fmt.Errorf("drop previously marked bloom filter: %w", err) + } + + for i := 0; i < int(s.secondaryIndexCount); i++ { + if err := os.RemoveAll(s.bloomFilterSecondaryPath(i) + DeleteMarkerSuffix); err != nil { + return fmt.Errorf("drop previously marked secondary bloom filter: %w", err) + } + } + + if err := os.RemoveAll(s.countNetPath() + DeleteMarkerSuffix); err != nil { + return fmt.Errorf("drop previously marked count net additions file: %w", err) + } + + if err := os.RemoveAll(s.metadataPath() + DeleteMarkerSuffix); err != nil { + return fmt.Errorf("drop previously marked metadata file: %w", err) + } + + // for the segment itself, we're not using RemoveAll, but Remove. If there + // was a NotExists error here, something would be seriously wrong, and we + // don't want to ignore it. + if err := os.Remove(s.path + DeleteMarkerSuffix); err != nil { + return fmt.Errorf("drop previously marked segment: %w", err) + } + + return nil +} + +const DeleteMarkerSuffix = ".deleteme" + +func markDeleted(path string) error { + return os.Rename(path, path+DeleteMarkerSuffix) +} + +func (s *segment) markForDeletion() error { + // support for persisting bloom filters and cnas was added in v1.17, + // therefore the files may not be present on segments created with previous + // versions. If we get a not exist error, we ignore it. 
+ if err := markDeleted(s.bloomFilterPath()); err != nil { + if !os.IsNotExist(err) { + return fmt.Errorf("mark bloom filter deleted: %w", err) + } + } + + for i := 0; i < int(s.secondaryIndexCount); i++ { + if err := markDeleted(s.bloomFilterSecondaryPath(i)); err != nil { + if !os.IsNotExist(err) { + return fmt.Errorf("mark secondary bloom filter deleted: %w", err) + } + } + } + + if err := markDeleted(s.countNetPath()); err != nil { + if !os.IsNotExist(err) { + return fmt.Errorf("mark count net additions file deleted: %w", err) + } + } + + if err := markDeleted(s.metadataPath()); err != nil { + if !os.IsNotExist(err) { + return fmt.Errorf("mark metadata file deleted: %w", err) + } + } + + // for the segment itself, we're not accepting a NotExists error. If there + // was a NotExists error here, something would be seriously wrong, and we + // don't want to ignore it. + if err := markDeleted(s.path); err != nil { + return fmt.Errorf("mark segment deleted: %w", err) + } + + return nil +} + +// Size returns the total size of the segment in bytes, including the header +// and index +func (s *segment) Size() int { + return int(s.size) +} + +func (s *segment) getPath() string { + return s.path +} + +func (s *segment) setPath(path string) { + s.path = path +} + +func (s *segment) getStrategy() segmentindex.Strategy { + return s.strategy +} + +func (s *segment) getSecondaryIndexCount() uint16 { + return s.secondaryIndexCount +} + +func (s *segment) getCountNetAdditions() int { + return s.countNetAdditions +} + +func (s *segment) getLevel() uint16 { + return s.level +} + +func (s *segment) getSize() int64 { + return s.size +} + +func (s *segment) setSize(size int64) { + s.size = size +} + +func (s *segment) getInvertedData() *segmentInvertedData { + return s.invertedData +} + +func (s *segment) getSegment() *segment { + return s +} + +func (s *segment) isLoaded() bool { + return true +} + +// PayloadSize is only the payload of the index, excluding the index +func (s 
*segment) PayloadSize() int { + return int(s.dataEndPos) +} + +type nodeReader struct { + r io.Reader + releaseFn func() +} + +func (n *nodeReader) Read(b []byte) (int, error) { + if n.r == nil { + panic("nodeReader.Read called after Release") + } + return n.r.Read(b) +} + +func (n *nodeReader) Release() { + n.r = nil + n.releaseFn() +} + +type nodeOffset struct { + start, end uint64 +} + +func (s *segment) newNodeReader(offset nodeOffset, operation string) (*nodeReader, error) { + var ( + r io.Reader + err error + release = func() {} // no-op function for un-pooled readers + ) + + if s.readFromMemory { + contents := s.contents[offset.start:] + if offset.end != 0 { + contents = s.contents[offset.start:offset.end] + } + r, err = s.bytesReaderFrom(contents) + } else { + r, release, err = s.bufferedReaderAt(offset.start, "ReadFromSegment"+operation) + } + if err != nil { + return nil, fmt.Errorf("new nodeReader: %w", err) + } + return &nodeReader{r: r, releaseFn: release}, nil +} + +func (s *segment) copyNode(b []byte, offset nodeOffset) error { + if s.readFromMemory { + copy(b, s.contents[offset.start:offset.end]) + return nil + } + n, err := s.newNodeReader(offset, "copyNode") + if err != nil { + return fmt.Errorf("copy node: %w", err) + } + defer n.Release() + + _, err = io.ReadFull(n, b) + return err +} + +func (s *segment) bytesReaderFrom(in []byte) (*bytes.Reader, error) { + if len(in) == 0 { + return nil, lsmkv.NotFound + } + return bytes.NewReader(in), nil +} + +func (s *segment) bufferedReaderAt(offset uint64, operation string) (io.Reader, func(), error) { + if s.contentFile == nil { + return nil, nil, fmt.Errorf("nil contentFile for segment at %s", s.path) + } + + meteredF := diskio.NewMeteredReader(s.contentFile, diskio.MeteredReaderCallback(readObserver.GetOrCreate(operation, s.metrics))) + r := io.NewSectionReader(meteredF, int64(offset), s.size) + + bufioR := bufReaderPool.Get().(*bufio.Reader) + bufioR.Reset(r) + + releaseFn := func() { + 
bufReaderPool.Put(bufioR) + } + + return bufioR, releaseFn, nil +} + +var ( + bufReaderPool *sync.Pool + readObserver *readObserverCache +) + +func init() { + bufReaderPool = &sync.Pool{ + New: func() interface{} { + return bufio.NewReader(nil) + }, + } + + readObserver = &readObserverCache{} +} + +type readObserverCache struct { + sync.Map +} + +// GetOrCreate returns a BytesReadObserver for the given key if it exists or +// creates one if it doesn't. +// +// Note that the design is not atomic, so it is possible that a single key will +// be initialize multiple times. This is not a problem, it only adds a slight +// re-allocation penalty, but does not alter the behavior +func (c *readObserverCache) GetOrCreate(key string, metrics *Metrics) BytesReadObserver { + if v, ok := c.Load(key); ok { + return v.(BytesReadObserver) + } + + observer := metrics.ReadObserver(key) + c.Store(key, observer) + return observer +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_blockmax.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_blockmax.go new file mode 100644 index 0000000000000000000000000000000000000000..ec406d509b230bf867e3a98dfbbf84ac233e0c88 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_blockmax.go @@ -0,0 +1,603 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "encoding/binary" + "io" + "math" + + "github.com/weaviate/sroar" + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/adapters/repos/db/inverted/terms" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv/segmentindex" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv/varenc" + "github.com/weaviate/weaviate/entities/schema" +) + +var blockMaxBufferSize = 4096 + +func (s *segment) loadBlockEntries(node segmentindex.Node) ([]*terms.BlockEntry, uint64, *terms.BlockDataDecoded, error) { + var buf []byte + if s.readFromMemory { + buf = s.contents[node.Start : node.Start+uint64(8+12*terms.ENCODE_AS_FULL_BYTES)] + } else { + // read first 8 bytes to get + buf = make([]byte, 8+12*terms.ENCODE_AS_FULL_BYTES) + r, err := s.newNodeReader(nodeOffset{node.Start, node.Start + uint64(8+12*terms.ENCODE_AS_FULL_BYTES)}, "loadBMW") + if err != nil { + return nil, 0, nil, err + } + defer r.Release() + + _, err = r.Read(buf) + if err != nil { + return nil, 0, nil, err + } + } + + docCount := binary.LittleEndian.Uint64(buf) + + if docCount <= uint64(terms.ENCODE_AS_FULL_BYTES) { + data := convertFixedLengthFromMemory(buf, int(docCount)) + entries := make([]*terms.BlockEntry, 1) + propLength := s.invertedData.propertyLengths[data.DocIds[0]] + tf := data.Tfs[0] + entries[0] = &terms.BlockEntry{ + Offset: 0, + MaxId: data.DocIds[len(data.DocIds)-1], + MaxImpactTf: uint32(tf), + MaxImpactPropLength: uint32(propLength), + } + + return entries, docCount, data, nil + } + + blockCount := (docCount + uint64(terms.BLOCK_SIZE-1)) / uint64(terms.BLOCK_SIZE) + + entries := make([]*terms.BlockEntry, blockCount) + if s.readFromMemory { + buf = s.contents[node.Start+16 : node.Start+16+uint64(blockCount*20)] + } else { + r, err := s.newNodeReader(nodeOffset{node.Start + 16, node.Start + 16 + uint64(blockCount*20)}, "loadBMW") + if err != nil { + return nil, 0, nil, err + } + defer 
r.Release() + + buf = make([]byte, blockCount*20) + _, err = r.Read(buf) + if err != nil { + return nil, 0, nil, err + } + } + + for i := 0; i < int(blockCount); i++ { + entries[i] = terms.DecodeBlockEntry(buf[i*20 : (i+1)*20]) + } + + return entries, docCount, nil, nil +} + +// todo: check if there is a performance impact of starting to sectionReader at offset and not have to pass offset here +func (s *segment) loadBlockDataReusable(sectionReader *io.SectionReader, blockDataBufferOffset, offset, offsetStart, offsetEnd uint64, buf []byte, encoded *terms.BlockData) (uint64, error) { + if s.readFromMemory { + terms.DecodeBlockDataReusable(s.contents[offsetStart:offsetEnd], encoded) + return offsetStart, nil + } else { + if offsetStart < blockDataBufferOffset || offsetEnd > blockDataBufferOffset+uint64(len(buf)) { + sectionReader.Seek(int64(offsetStart-offset), io.SeekStart) + _, err := sectionReader.Read(buf) + // EOF is expected when the last block + tree are smaller than the buffer + if err != nil && err.Error() != "EOF" { + return 0, err + } + // readBytes += int64(n) + // readCounts++ + blockDataBufferOffset = offsetStart + } + + bufOffsetStart := offsetStart - blockDataBufferOffset + bufOffsetEnd := offsetEnd - blockDataBufferOffset + terms.DecodeBlockDataReusable(buf[bufOffsetStart:bufOffsetEnd], encoded) + return blockDataBufferOffset, nil + } +} + +type BlockMetrics struct { + BlockCountTotal uint64 + BlockCountDecodedDocIds uint64 + BlockCountDecodedFreqs uint64 + DocCountTotal uint64 + DocCountDecodedDocIds uint64 + DocCountDecodedFreqs uint64 + DocCountScored uint64 + QueryCount uint64 + LastAddedBlock int +} + +type SegmentBlockMax struct { + segment *segment + node segmentindex.Node + docCount uint64 + blockEntries []*terms.BlockEntry + blockEntryIdx int + blockDataBufferOffset uint64 + blockDataBuffer []byte + blockDataEncoded *terms.BlockData + blockDataDecoded *terms.BlockDataDecoded + blockDataIdx int + blockDataSize int + blockDataStartOffset uint64 
+ blockDataEndOffset uint64 + idPointer uint64 + idf float64 + exhausted bool + decoded bool + freqDecoded bool + queryTermIndex int + Metrics BlockMetrics + averagePropLength float64 + b float64 + k1 float64 + propertyBoost float64 + + currentBlockImpact float32 + currentBlockMaxId uint64 + tombstones *sroar.Bitmap + filterDocIds *sroar.Bitmap + + // at position 0 we have the doc ids decoder, at position 1 is the tfs decoder + decoders []varenc.VarEncEncoder[uint64] + + propLengths map[uint64]uint32 + blockDatasTest []*terms.BlockData + + sectionReader *io.SectionReader +} + +func generateSingleFilter(tombstones *sroar.Bitmap, filterDocIds helpers.AllowList) (*sroar.Bitmap, *sroar.Bitmap) { + if tombstones != nil && tombstones.IsEmpty() { + tombstones = nil + } + + var filterSroar *sroar.Bitmap + // if we don't have an allow list filter, tombstones are the only needed filter + if filterDocIds != nil { + // the ok check should always succeed, but we keep it for safety + bm, ok := filterDocIds.(*helpers.BitmapAllowList) + // if we have a (allow list) filter and a (block list) tombstones filter, we can combine them into a single allowlist filter filter + if ok && tombstones != nil { + filterSroar = bm.Bm.AndNot(tombstones) + tombstones = nil + } else if ok && tombstones == nil { + filterSroar = bm.Bm + } + } + return tombstones, filterSroar +} + +func NewSegmentBlockMax(s *segment, key []byte, queryTermIndex int, idf float64, propertyBoost float32, tombstones *sroar.Bitmap, filterDocIds helpers.AllowList, averagePropLength float64, config schema.BM25Config) *SegmentBlockMax { + node, err := s.index.Get(key) + if err != nil { + return nil + } + + tombstones, filterSroar := generateSingleFilter(tombstones, filterDocIds) + + // if filter is empty after checking for tombstones, + // we can skip it and return nil for the segment + if filterSroar != nil && filterSroar.IsEmpty() { + return nil + } + + codecs := s.invertedHeader.DataFields + decoders := 
make([]varenc.VarEncEncoder[uint64], len(codecs)) + + for i, codec := range codecs { + decoders[i] = varenc.GetVarEncEncoder64(codec) + decoders[i].Init(terms.BLOCK_SIZE) + } + + var sectionReader *io.SectionReader + + if !s.readFromMemory { + sectionReader = io.NewSectionReader(s.contentFile, int64(node.Start), int64(node.End)) + } + + output := &SegmentBlockMax{ + segment: s, + node: node, + idf: idf, + queryTermIndex: queryTermIndex, + averagePropLength: averagePropLength, + + b: config.B, + k1: config.K1, + decoders: decoders, + propertyBoost: float64(propertyBoost), + filterDocIds: filterSroar, + tombstones: tombstones, + sectionReader: sectionReader, + } + + err = output.reset() + if err != nil { + return nil + } + output.Metrics.BlockCountTotal += uint64(len(output.blockEntries)) + output.Metrics.DocCountTotal += output.docCount + output.Metrics.LastAddedBlock = -1 + + return output +} + +func NewSegmentBlockMaxTest(docCount uint64, blockEntries []*terms.BlockEntry, blockDatas []*terms.BlockData, propLengths map[uint64]uint32, key []byte, queryTermIndex int, idf float64, propertyBoost float32, tombstones *sroar.Bitmap, filterDocIds helpers.AllowList, averagePropLength float64, config schema.BM25Config, codecs []varenc.VarEncDataType) *SegmentBlockMax { + decoders := make([]varenc.VarEncEncoder[uint64], len(codecs)) + + for i, codec := range codecs { + decoders[i] = varenc.GetVarEncEncoder64(codec) + } + + tombstones, filterSroar := generateSingleFilter(tombstones, filterDocIds) + + // if filter is empty after checking for tombstones, + // we can skip it and return nil for the segment + if filterSroar != nil && filterSroar.IsEmpty() { + return nil + } + + output := &SegmentBlockMax{ + blockEntries: blockEntries, + node: segmentindex.Node{Key: key}, + idf: idf, + queryTermIndex: queryTermIndex, + averagePropLength: averagePropLength, + b: config.B, + k1: config.K1, + decoders: decoders, + propertyBoost: float64(propertyBoost), + filterDocIds: filterSroar, + 
tombstones: tombstones, + propLengths: propLengths, + blockDatasTest: blockDatas, + blockEntryIdx: 0, + blockDataIdx: 0, + docCount: docCount, + blockDataDecoded: &terms.BlockDataDecoded{ + DocIds: make([]uint64, terms.BLOCK_SIZE), + Tfs: make([]uint64, terms.BLOCK_SIZE), + }, + } + + output.decodeBlock() + + output.advanceOnTombstoneOrFilter() + + output.Metrics.BlockCountTotal += uint64(len(output.blockEntries)) + output.Metrics.DocCountTotal += output.docCount + output.Metrics.LastAddedBlock = -1 + + return output +} + +func NewSegmentBlockMaxDecoded(key []byte, queryTermIndex int, propertyBoost float32, filterDocIds helpers.AllowList, averagePropLength float64, config schema.BM25Config) *SegmentBlockMax { + _, filterSroar := generateSingleFilter(nil, filterDocIds) + + output := &SegmentBlockMax{ + queryTermIndex: queryTermIndex, + node: segmentindex.Node{Key: key}, + averagePropLength: averagePropLength, + b: config.B, + k1: config.K1, + propertyBoost: float64(propertyBoost), + filterDocIds: filterSroar, + blockEntryIdx: 0, + blockDataIdx: 0, + decoded: true, + freqDecoded: true, + exhausted: true, + } + + output.Metrics.BlockCountTotal += uint64(len(output.blockEntries)) + output.Metrics.DocCountTotal += output.docCount + output.Metrics.LastAddedBlock = -1 + + return output +} + +func (s *SegmentBlockMax) advanceOnTombstoneOrFilter() { + if (s.filterDocIds == nil && s.tombstones == nil) || s.exhausted { + if !s.exhausted { + s.idPointer = s.blockDataDecoded.DocIds[s.blockDataIdx] + } + return + } + + for (s.filterDocIds != nil && !s.filterDocIds.Contains(s.blockDataDecoded.DocIds[s.blockDataIdx])) || + (s.tombstones != nil && s.tombstones.Contains(s.blockDataDecoded.DocIds[s.blockDataIdx])) { + s.blockDataIdx++ + if s.blockDataIdx > s.blockDataSize-1 { + if s.blockEntryIdx >= len(s.blockEntries)-1 { + s.exhaust() + return + } + s.blockEntryIdx++ + s.blockDataIdx = 0 + s.decodeBlock() + } + } + + if !s.exhausted { + s.idPointer = 
s.blockDataDecoded.DocIds[s.blockDataIdx] + } +} + +func (s *SegmentBlockMax) reset() error { + var err error + + s.propLengths, err = s.segment.GetPropertyLengths() + if err != nil { + return err + } + + s.blockEntries, s.docCount, s.blockDataDecoded, err = s.segment.loadBlockEntries(s.node) + if err != nil { + return err + } + + if s.blockDataDecoded == nil { + s.blockDataBuffer = make([]byte, blockMaxBufferSize) + s.blockDataDecoded = &terms.BlockDataDecoded{ + DocIds: make([]uint64, terms.BLOCK_SIZE), + Tfs: make([]uint64, terms.BLOCK_SIZE), + } + s.blockDataEncoded = &terms.BlockData{} + } + + s.blockEntryIdx = 0 + s.blockDataIdx = 0 + s.blockDataStartOffset = s.node.Start + 16 + uint64(len(s.blockEntries)*20) + s.blockDataEndOffset = s.node.End - uint64(len(s.node.Key)+4) + + s.blockDataBufferOffset = s.blockDataStartOffset + 1 + s.decodeBlock() + + s.advanceOnTombstoneOrFilter() + + return nil +} + +func (s *SegmentBlockMax) decodeBlock() error { + if s.exhausted { + return nil + } + + var err error + if s.blockEntries == nil { + return nil + } + + if s.blockEntryIdx >= len(s.blockEntries) { + s.exhaust() + return nil + } + + s.blockDataIdx = 0 + if s.docCount <= uint64(terms.ENCODE_AS_FULL_BYTES) { + s.idPointer = s.blockDataDecoded.DocIds[s.blockDataIdx] + s.blockDataSize = int(s.docCount) + s.freqDecoded = true + s.decoded = true + s.Metrics.BlockCountDecodedDocIds++ + s.Metrics.DocCountDecodedDocIds += uint64(s.blockDataSize) + return nil + } + if s.segment != nil { + startOffset := uint64(s.blockEntries[s.blockEntryIdx].Offset) + s.blockDataStartOffset + endOffset := s.blockDataEndOffset + + if s.blockEntryIdx < len(s.blockEntries)-1 { + endOffset = uint64(s.blockEntries[s.blockEntryIdx+1].Offset) + s.blockDataStartOffset + } + s.blockDataBufferOffset, err = s.segment.loadBlockDataReusable(s.sectionReader, s.blockDataBufferOffset, s.node.Start, startOffset, endOffset, s.blockDataBuffer, s.blockDataEncoded) + if err != nil { + return err + } + } else { + 
s.blockDataEncoded = s.blockDatasTest[s.blockEntryIdx] + } + + s.blockDataSize = terms.BLOCK_SIZE + if s.blockEntryIdx == len(s.blockEntries)-1 { + s.blockDataSize = int(s.docCount) - terms.BLOCK_SIZE*s.blockEntryIdx + } + s.decoders[0].DecodeReusable(s.blockDataEncoded.DocIds, s.blockDataDecoded.DocIds[:s.blockDataSize]) + s.Metrics.BlockCountDecodedDocIds++ + s.Metrics.DocCountDecodedDocIds += uint64(s.blockDataSize) + s.idPointer = s.blockDataDecoded.DocIds[s.blockDataIdx] + s.freqDecoded = false + s.decoded = true + s.currentBlockImpact = s.computeCurrentBlockImpact() + s.currentBlockMaxId = s.blockEntries[s.blockEntryIdx].MaxId + return nil +} + +func (s *SegmentBlockMax) AdvanceAtLeast(docId uint64) { + if s.exhausted { + return + } + + for s.blockEntryIdx < len(s.blockEntries) && docId > s.blockEntries[s.blockEntryIdx].MaxId { + s.blockEntryIdx++ + s.decoded = false + s.freqDecoded = false + } + + if (s.blockEntryIdx == len(s.blockEntries)-1 && docId > s.blockEntries[s.blockEntryIdx].MaxId) || s.blockEntryIdx >= len(s.blockEntries) { + s.exhaust() + return + } + + if !s.decoded { + s.decodeBlock() + } + + for s.blockDataIdx < s.blockDataSize-1 && docId > s.blockDataDecoded.DocIds[s.blockDataIdx] { + s.blockDataIdx++ + } + + s.advanceOnTombstoneOrFilter() +} + +func (s *SegmentBlockMax) AdvanceAtLeastShallow(docId uint64) { + if s.exhausted { + return + } + if docId <= s.blockEntries[s.blockEntryIdx].MaxId { + return + } + + for s.blockEntryIdx < len(s.blockEntries) && docId > s.blockEntries[s.blockEntryIdx].MaxId { + + s.blockEntryIdx++ + s.blockDataIdx = 0 + s.decoded = false + s.freqDecoded = false + if s.blockEntryIdx >= len(s.blockEntries) { + s.exhaust() + return + } + } + + if (s.blockEntryIdx == len(s.blockEntries)-1 && docId > s.blockEntries[s.blockEntryIdx].MaxId) || s.blockEntryIdx >= len(s.blockEntries) { + s.exhaust() + return + } + s.idPointer = s.blockEntries[s.blockEntryIdx-1].MaxId + s.currentBlockMaxId = s.blockEntries[s.blockEntryIdx].MaxId 
+ s.currentBlockImpact = s.computeCurrentBlockImpact() +} + +func (s *SegmentBlockMax) Idf() float64 { + return s.idf +} + +func (s *SegmentBlockMax) IdPointer() uint64 { + return s.idPointer +} + +func (s *SegmentBlockMax) Exhausted() bool { + return s.exhausted +} + +func (s *SegmentBlockMax) Count() int { + return int(s.docCount) +} + +func (s *SegmentBlockMax) QueryTermIndex() int { + return s.queryTermIndex +} + +func (s *SegmentBlockMax) QueryTerm() string { + return string(s.node.Key) +} + +func (s *SegmentBlockMax) Score(averagePropLength float64, additionalExplanation bool) (uint64, float64, *terms.DocPointerWithScore) { + if s.exhausted { + return 0, 0, nil + } + + var doc *terms.DocPointerWithScore + + if !s.freqDecoded { + s.decoders[1].DecodeReusable(s.blockDataEncoded.Tfs, s.blockDataDecoded.Tfs[:s.blockDataSize]) + s.freqDecoded = true + } + + freq := float64(s.blockDataDecoded.Tfs[s.blockDataIdx]) + propLength := s.propLengths[s.idPointer] + tf := freq / (freq + s.k1*((1-s.b)+s.b*(float64(propLength)/s.averagePropLength))) + s.Metrics.DocCountScored++ + if s.blockEntryIdx != s.Metrics.LastAddedBlock { + s.Metrics.BlockCountDecodedFreqs++ + s.Metrics.DocCountDecodedFreqs += uint64(s.blockDataSize) + s.Metrics.LastAddedBlock = s.blockEntryIdx + } + + if additionalExplanation { + doc = &terms.DocPointerWithScore{ + Id: s.idPointer, + Frequency: float32(freq), + PropLength: float32(propLength), + } + } + score := tf * s.idf * s.propertyBoost + return s.idPointer, score, doc +} + +func (s *SegmentBlockMax) Advance() { + if s.exhausted { + return + } + + if !s.decoded { + s.decodeBlock() + return + } + + s.blockDataIdx++ + if s.blockDataIdx >= s.blockDataSize { + s.blockEntryIdx++ + s.blockDataIdx = 0 + s.decodeBlock() + if s.exhausted { + return + } + } + + s.advanceOnTombstoneOrFilter() +} + +func (s *SegmentBlockMax) computeCurrentBlockImpact() float32 { + if s.exhausted { + return 0 + } + // for the fully decode blocks return the idf + if 
len(s.blockEntries) == 0 { + return float32(s.idf) + } + freq := float64(s.blockEntries[s.blockEntryIdx].MaxImpactTf) + propLength := float64(s.blockEntries[s.blockEntryIdx].MaxImpactPropLength) + return float32(s.idf * (freq / (freq + s.k1*(1-s.b+s.b*(propLength/s.averagePropLength)))) * s.propertyBoost) +} + +func (s *SegmentBlockMax) CurrentBlockImpact() float32 { + return s.currentBlockImpact +} + +func (s *SegmentBlockMax) CurrentBlockMaxId() uint64 { + return s.currentBlockMaxId +} + +func (s *SegmentBlockMax) exhaust() { + s.idPointer = math.MaxUint64 + s.currentBlockImpact = 0 + s.idf = 0 + s.currentBlockMaxId = math.MaxUint64 + s.exhausted = true +} + +func (s *SegmentBlockMax) SetIdf(idf float64) { + s.idf = idf + s.currentBlockImpact = s.computeCurrentBlockImpact() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_blockmax_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_blockmax_test.go new file mode 100644 index 0000000000000000000000000000000000000000..deedf2f2ff64f3d13aa553a9a8d69f7212975f09 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_blockmax_test.go @@ -0,0 +1,47 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "fmt" + "testing" + + "github.com/weaviate/weaviate/entities/schema" +) + +func TestSerializeAndParseInvertedNodeTest(t *testing.T) { + t.Skip() + path := "/Users/amourao/code/weaviate/weaviate/data-weaviate-0/" + + "msmarco/6Jx2gaSLtsnd/lsm/property_text_searchable/segment-1729794337023372000.db" + cfg := segmentConfig{ + mmapContents: false, + useBloomFilter: false, + calcCountNetAdditions: false, + overwriteDerived: true, + enableChecksumValidation: false, + } + seg, err := newSegment(path, nil, nil, nil, cfg) + if err != nil { + t.Fatalf("error creating segment: %v", err) + } + + sbm := NewSegmentBlockMax(seg, []byte("and"), 0, 1, 1, nil, nil, 10, schema.BM25Config{K1: 1.2, B: 0.75}) + + sbm.AdvanceAtLeast(100) + id, score, pair := sbm.Score(1, false) + sbm.Advance() + fmt.Println(id, score, pair) + sbm.AdvanceAtLeast(16000) + sbm.AdvanceAtLeast(160000000) + + fmt.Println(sbm) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_bloom_filters.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_bloom_filters.go new file mode 100644 index 0000000000000000000000000000000000000000..b07dec35fe259d8be3a8207679338295476fe4df --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_bloom_filters.go @@ -0,0 +1,331 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "bytes" + "encoding/binary" + "fmt" + "hash/crc32" + "io" + "os" + "path/filepath" + "strings" + "time" + + "github.com/weaviate/weaviate/usecases/byteops" + + "github.com/bits-and-blooms/bloom/v3" + "github.com/pkg/errors" + "github.com/weaviate/weaviate/entities/diskio" +) + +func (s *segment) buildPath(template string) string { + isTmpFile := filepath.Ext(s.path) == ".tmp" + + extless := strings.TrimSuffix(s.path, filepath.Ext(s.path)) + if isTmpFile { // remove second extension + extless = strings.TrimSuffix(extless, filepath.Ext(extless)) + } + + path := fmt.Sprintf(template, extless) + if isTmpFile { + path = fmt.Sprintf("%s.tmp", path) + } + return path +} + +func (s *segment) bloomFilterPath() string { + return s.buildPath("%s.bloom") +} + +func (s *segment) bloomFilterSecondaryPath(pos int) string { + posTemplate := fmt.Sprintf(".%d.bloom", pos) + return s.buildPath("%s.secondary" + posTemplate) +} + +func (s *segment) initBloomFilters(metrics *Metrics, overwrite bool, existingFilesList map[string]int64) error { + if err := s.initBloomFilter(overwrite, existingFilesList); err != nil { + return fmt.Errorf("init bloom filter for primary index: %w", err) + } + if s.secondaryIndexCount > 0 { + s.secondaryBloomFilters = make([]*bloom.BloomFilter, s.secondaryIndexCount) + for i := range s.secondaryBloomFilters { + if err := s.initSecondaryBloomFilter(i, overwrite, existingFilesList); err != nil { + return fmt.Errorf("init bloom filter for secondary index at %d: %w", i, err) + } + } + } + s.bloomFilterMetrics = newBloomFilterMetrics(metrics) + return nil +} + +func (s *segment) initBloomFilter(overwrite bool, existingFilesList map[string]int64) error { + path := s.bloomFilterPath() + s.metaPaths = append(s.metaPaths, path) + + loadFromDisk, err := fileExistsInList(existingFilesList, filepath.Base(path)) + if err != nil { + return err + } + if loadFromDisk { + if overwrite { + err := 
os.Remove(path) + if err != nil { + return fmt.Errorf("delete existing bloom filter %s: %w", path, err) + } + } else { + err = s.loadBloomFilterFromDisk() + if err == nil { + return nil + } + + if !errors.Is(err, ErrInvalidChecksum) { + // not a recoverable error + return err + } + + // now continue re-calculating + } + } + + before := time.Now() + + if err := s.computeAndStoreBloomFilter(path); err != nil { + return err + } + + took := time.Since(before) + + s.logger.WithField("action", "lsm_init_disk_segment_build_bloom_filter_primary"). + WithField("path", s.path). + WithField("took", took). + Debugf("building bloom filter took %s\n", took) + + return nil +} + +func (s *segment) computeAndStoreBloomFilter(path string) error { + keys, err := s.index.AllKeys() + if err != nil { + return err + } + + s.bloomFilter = bloom.NewWithEstimates(uint(len(keys)), 0.001) + for _, key := range keys { + s.bloomFilter.Add(key) + } + + if err := s.storeBloomFilterOnDisk(path); err != nil { + return fmt.Errorf("store bloom filter on disk: %w", err) + } + + return nil +} + +func (s *segment) storeBloomFilterOnDisk(path string) error { + bfSize := getBloomFilterSize(s.bloomFilter) + + rw := byteops.NewReadWriter(make([]byte, bfSize+byteops.Uint32Len)) + rw.MoveBufferPositionForward(byteops.Uint32Len) // leave space for checksum + _, err := s.bloomFilter.WriteTo(&rw) + if err != nil { + return fmt.Errorf("write bloom filter: %w", err) + } + + return writeWithChecksum(rw, path, s.observeMetaWrite) +} + +func (s *segment) loadBloomFilterFromDisk() error { + data, err := loadWithChecksum(s.bloomFilterPath(), -1, s.metrics.ReadObserver("loadBloomfilter")) + if err != nil { + return err + } + + s.bloomFilter = new(bloom.BloomFilter) + _, err = s.bloomFilter.ReadFrom(bytes.NewReader(data)) + if err != nil { + return fmt.Errorf("read bloom filter from disk: %w", err) + } + + return nil +} + +func (s *segment) initSecondaryBloomFilter(pos int, overwrite bool, existingFilesList 
map[string]int64) error { + before := time.Now() + + path := s.bloomFilterSecondaryPath(pos) + s.metaPaths = append(s.metaPaths, path) + + loadFromDisk, err := fileExistsInList(existingFilesList, filepath.Base(path)) + if err != nil { + return err + } + if loadFromDisk { + if overwrite { + err := os.Remove(path) + if err != nil { + return fmt.Errorf("deleting existing secondary bloom filter %s: %w", path, err) + } + } else { + err = s.loadBloomFilterSecondaryFromDisk(pos) + if err == nil { + return nil + } + + if !errors.Is(err, ErrInvalidChecksum) { + // not a recoverable error + return err + } + + // now continue re-calculating + } + } + + if err := s.computeAndStoreSecondaryBloomFilter(path, pos); err != nil { + return err + } + + took := time.Since(before) + + s.logger.WithField("action", "lsm_init_disk_segment_build_bloom_filter_secondary"). + WithField("secondary_index_position", pos). + WithField("path", s.path). + WithField("took", took). + Debugf("building bloom filter took %s\n", took) + + return nil +} + +func (s *segment) computeAndStoreSecondaryBloomFilter(path string, pos int) error { + keys, err := s.secondaryIndices[pos].AllKeys() + if err != nil { + return err + } + + s.secondaryBloomFilters[pos] = bloom.NewWithEstimates(uint(len(keys)), 0.001) + for _, key := range keys { + s.secondaryBloomFilters[pos].Add(key) + } + + if err := s.storeBloomFilterSecondaryOnDisk(path, pos); err != nil { + return fmt.Errorf("store secondary bloom filter on disk: %w", err) + } + + return nil +} + +func (s *segment) storeBloomFilterSecondaryOnDisk(path string, pos int) error { + bfSize := getBloomFilterSize(s.bloomFilter) + + rw := byteops.NewReadWriter(make([]byte, bfSize+byteops.Uint32Len)) + rw.MoveBufferPositionForward(byteops.Uint32Len) // leave space for checksum + _, err := s.secondaryBloomFilters[pos].WriteTo(&rw) + if err != nil { + return fmt.Errorf("write bloom filter: %w", err) + } + + return writeWithChecksum(rw, path, s.observeMetaWrite) +} + +func (s 
*segment) loadBloomFilterSecondaryFromDisk(pos int) error { + data, err := loadWithChecksum(s.bloomFilterSecondaryPath(pos), -1, s.metrics.ReadObserver("loadSecondaryBloomFilter")) + if err != nil { + return err + } + + s.secondaryBloomFilters[pos] = new(bloom.BloomFilter) + _, err = s.secondaryBloomFilters[pos].ReadFrom(bytes.NewReader(data)) + if err != nil { + return fmt.Errorf("read bloom filter from disk: %w", err) + } + + return nil +} + +func fileExistsInList(nameList map[string]int64, filePath string) (bool, error) { + if nameList != nil { + _, ok := nameList[filePath] + return ok, nil + } else { + return fileExists(filePath) + } +} + +// writeWithChecksum expects the data in the buffer to start at position byteops.Uint32Len so the +// checksum can be added into the same buffer at its start and everything can be written to the file +// in one go +func writeWithChecksum(bufWriter byteops.ReadWriter, path string, observeFileWriter diskio.MeteredWriterCallback) error { + // checksum needs to be at the start of the file + chksm := crc32.ChecksumIEEE(bufWriter.Buffer[byteops.Uint32Len:]) + bufWriter.MoveBufferToAbsolutePosition(0) + bufWriter.WriteUint32(chksm) + f, err := os.Create(path) + if err != nil { + return fmt.Errorf("open file for writing: %w", err) + } + + meteredW := diskio.NewMeteredWriter(f, observeFileWriter) + + if _, err := meteredW.Write(bufWriter.Buffer); err != nil { + // ignoring f.Close() error here, as we don't care about whether the file + // was flushed, the call is mainly intended to prevent a file descriptor + // leak. We still want to return the original error below. 
+ f.Close() + return fmt.Errorf("write bloom filter to disk: %w", err) + } + + if err := f.Close(); err != nil { + return fmt.Errorf("close bloom filter file: %w", err) + } + + return nil +} + +// use negative length check to indicate that no length check should be +// performed +func loadWithChecksum(path string, lengthCheck int, observeFileReader BytesReadObserver) ([]byte, error) { + f, err := os.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + meteredF := diskio.NewMeteredReader(f, diskio.MeteredReaderCallback(observeFileReader)) + + data, err := io.ReadAll(meteredF) + if err != nil { + return nil, err + } + if lengthCheck > 0 && len(data) != lengthCheck { + return nil, ErrInvalidChecksum + } + + if len(data) < 4 { + // the file does not even contain the full checksum, we must consider it corrupt + return nil, ErrInvalidChecksum + } + + chcksm := binary.LittleEndian.Uint32(data[:4]) + actual := crc32.ChecksumIEEE(data[4:]) + if chcksm != actual { + return nil, ErrInvalidChecksum + } + + return data[4:], nil +} + +func getBloomFilterSize(bf *bloom.BloomFilter) int { + // size of the bloom filter is size of the underlying bitSet and two uint64 parameters + bs := bf.BitSet() + bsSize := bs.BinaryStorageSize() + return bsSize + 2*byteops.Uint64Len +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_bloom_filters_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_bloom_filters_test.go new file mode 100644 index 0000000000000000000000000000000000000000..59589acd48bfc95427fd1665fcd2c4b1f65b6ced --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_bloom_filters_test.go @@ -0,0 +1,642 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "context" + "encoding/binary" + "fmt" + "hash/crc32" + "io" + "math/rand" + "os" + "path" + "testing" + + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/cyclemanager" +) + +func TestCreateBloomOnFlush(t *testing.T) { + ctx := context.Background() + dirName := t.TempDir() + + logger, _ := test.NewNullLogger() + + b, err := NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), + WithStrategy(StrategyReplace), WithSecondaryIndices(1)) + require.Nil(t, err) + + require.Nil(t, b.Put([]byte("hello"), []byte("world"), + WithSecondaryKey(0, []byte("bonjour")))) + require.Nil(t, b.FlushMemtable()) + + files, err := os.ReadDir(dirName) + require.Nil(t, err) + + _, ok := findFileWithExt(files, ".bloom") + assert.True(t, ok) + + _, ok = findFileWithExt(files, "secondary.0.bloom") + assert.True(t, ok) + // on Windows we have to shutdown the bucket before opening it again + require.Nil(t, b.Shutdown(ctx)) + + b2, err := NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), + WithStrategy(StrategyReplace), WithSecondaryIndices(1)) + require.Nil(t, err) + defer b2.Shutdown(ctx) + + valuePrimary, err := b2.Get([]byte("hello")) + require.Nil(t, err) + valueSecondary, err := b2.GetBySecondary(0, []byte("bonjour")) + require.Nil(t, err) + + assert.Equal(t, []byte("world"), valuePrimary) + assert.Equal(t, []byte("world"), valueSecondary) +} + +func TestCreateBloomInit(t *testing.T) { + // this test deletes the initial bloom and makes sure it gets recreated after + // the bucket is initialized + ctx := context.Background() + dirName := t.TempDir() + + logger, _ := test.NewNullLogger() + + b, err := NewBucketCreator().NewBucket(ctx, dirName, "", 
logger, nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), + WithStrategy(StrategyReplace), WithSecondaryIndices(1)) + require.Nil(t, err) + defer b.Shutdown(ctx) + + require.Nil(t, b.Put([]byte("hello"), []byte("world"), + WithSecondaryKey(0, []byte("bonjour")))) + require.Nil(t, b.FlushMemtable()) + + for _, ext := range []string{".secondary.0.bloom", ".bloom"} { + files, err := os.ReadDir(dirName) + require.Nil(t, err) + fname, ok := findFileWithExt(files, ext) + require.True(t, ok) + + err = os.RemoveAll(path.Join(dirName, fname)) + require.Nil(t, err) + + files, err = os.ReadDir(dirName) + require.Nil(t, err) + _, ok = findFileWithExt(files, ext) + require.False(t, ok, "verify the file is really gone") + } + + require.Nil(t, b.Shutdown(ctx)) + + // now create a new bucket and assert that the file is re-created on init + b2, err := NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), + WithStrategy(StrategyReplace)) + require.Nil(t, err) + defer b2.Shutdown(ctx) + + // just to ensure segments are loaded + cursor := b2.Cursor() + cursor.Close() + + files, err := os.ReadDir(dirName) + require.Nil(t, err) + _, ok := findFileWithExt(files, ".bloom") + require.True(t, ok) + _, ok = findFileWithExt(files, ".secondary.0.bloom") + require.True(t, ok) +} + +func TestRepairCorruptedBloomOnInit(t *testing.T) { + ctx := context.Background() + dirName := t.TempDir() + + logger, _ := test.NewNullLogger() + + b, err := NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), + WithStrategy(StrategyReplace)) + require.Nil(t, err) + + require.Nil(t, b.Put([]byte("hello"), []byte("world"))) + require.Nil(t, b.FlushMemtable()) + + files, err := os.ReadDir(dirName) + require.Nil(t, err) + fname, ok := findFileWithExt(files, ".bloom") + require.True(t, ok) + + // now corrupt the bloom filter 
by randomly overriding data + require.Nil(t, corruptBloomFile(path.Join(dirName, fname))) + // on Windows we have to shutdown the bucket before opening it again + require.Nil(t, b.Shutdown(ctx)) + + // now create a new bucket and assert that the file is ignored, re-created on + // init, and the count matches + b2, err := NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), + WithStrategy(StrategyReplace)) + require.Nil(t, err) + defer b2.Shutdown(ctx) + + value, err := b2.Get([]byte("hello")) + assert.Nil(t, err) + assert.Equal(t, []byte("world"), value) +} + +func TestRepairTooShortBloomOnInit(t *testing.T) { + ctx := context.Background() + dirName := t.TempDir() + + logger, _ := test.NewNullLogger() + + b, err := NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), + WithStrategy(StrategyReplace)) + require.Nil(t, err) + + require.Nil(t, b.Put([]byte("hello"), []byte("world"))) + require.Nil(t, b.FlushMemtable()) + + files, err := os.ReadDir(dirName) + require.Nil(t, err) + fname, ok := findFileWithExt(files, ".bloom") + require.True(t, ok) + b.Shutdown(ctx) + + // now corrupt the bloom filter by randomly overriding data + require.Nil(t, corruptBloomFileByTruncatingIt(path.Join(dirName, fname))) + + // now create a new bucket and assert that the file is ignored, re-created on + // init, and the count matches + b2, err := NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), + WithStrategy(StrategyReplace)) + require.Nil(t, err) + defer b2.Shutdown(ctx) + + value, err := b2.Get([]byte("hello")) + assert.Nil(t, err) + assert.Equal(t, []byte("world"), value) +} + +func TestRepairCorruptedBloomSecondaryOnInit(t *testing.T) { + ctx := context.Background() + dirName := t.TempDir() + + logger, _ := test.NewNullLogger() + + b, 
err := NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), + WithStrategy(StrategyReplace), WithSecondaryIndices(1)) + require.Nil(t, err) + + require.Nil(t, b.Put([]byte("hello"), []byte("world"), + WithSecondaryKey(0, []byte("bonjour")))) + require.Nil(t, b.FlushMemtable()) + + files, err := os.ReadDir(dirName) + require.Nil(t, err) + fname, ok := findFileWithExt(files, "secondary.0.bloom") + require.True(t, ok) + + // now corrupt the file by replacing the count value without adapting the checksum + require.Nil(t, corruptBloomFile(path.Join(dirName, fname))) + // on Windows we have to shutdown the bucket before opening it again + require.Nil(t, b.Shutdown(ctx)) + + // now create a new bucket and assert that the file is ignored, re-created on + // init, and the count matches + b2, err := NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), + WithStrategy(StrategyReplace), WithSecondaryIndices(1)) + require.Nil(t, err) + defer b2.Shutdown(ctx) + + value := make([]byte, 5) + value, _, err = b2.GetBySecondaryIntoMemory(0, []byte("bonjour"), value) + assert.Nil(t, err) + assert.Equal(t, []byte("world"), value) + + err = b2.Delete([]byte("hello")) + assert.Nil(t, err) + + v, err := b2.Get([]byte("hello")) + assert.Nil(t, err) + assert.Nil(t, v) + + value, _, err = b2.GetBySecondaryIntoMemory(0, []byte("bonjour"), value) + assert.Nil(t, err) + assert.Nil(t, value) +} + +func TestRepairCorruptedBloomSecondaryOnInitIntoMemory(t *testing.T) { + ctx := context.Background() + dirName := t.TempDir() + + logger, _ := test.NewNullLogger() + + b, err := NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), + WithStrategy(StrategyReplace), WithSecondaryIndices(1)) + require.Nil(t, err) + + require.Nil(t, b.Put([]byte("hello"), 
[]byte("world"), + WithSecondaryKey(0, []byte("bonjour")))) + require.Nil(t, b.FlushMemtable()) + + files, err := os.ReadDir(dirName) + require.Nil(t, err) + fname, ok := findFileWithExt(files, "secondary.0.bloom") + require.True(t, ok) + + b.Shutdown(ctx) + + // now corrupt the file by replacing the count value without adapting the checksum + require.Nil(t, corruptBloomFile(path.Join(dirName, fname))) + + // now create a new bucket and assert that the file is ignored, re-created on + // init, and the count matches + b2, err := NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), + WithStrategy(StrategyReplace), WithSecondaryIndices(1)) + require.Nil(t, err) + defer b2.Shutdown(ctx) + + value, err := b2.GetBySecondary(0, []byte("bonjour")) + assert.Nil(t, err) + assert.Equal(t, []byte("world"), value) +} + +func TestRepairTooShortBloomSecondaryOnInit(t *testing.T) { + ctx := context.Background() + dirName := t.TempDir() + + logger, _ := test.NewNullLogger() + + b, err := NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), + WithStrategy(StrategyReplace), WithSecondaryIndices(1)) + require.Nil(t, err) + + require.Nil(t, b.Put([]byte("hello"), []byte("world"), + WithSecondaryKey(0, []byte("bonjour")))) + require.Nil(t, b.FlushMemtable()) + + files, err := os.ReadDir(dirName) + require.Nil(t, err) + fname, ok := findFileWithExt(files, "secondary.0.bloom") + require.True(t, ok) + + b.Shutdown(ctx) + // now corrupt the file by replacing the count value without adapting the checksum + require.Nil(t, corruptBloomFileByTruncatingIt(path.Join(dirName, fname))) + + // now create a new bucket and assert that the file is ignored, re-created on + // init, and the count matches + b2, err := NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil, + cyclemanager.NewCallbackGroupNoop(), 
cyclemanager.NewCallbackGroupNoop(), + WithStrategy(StrategyReplace), WithSecondaryIndices(1)) + require.Nil(t, err) + defer b2.Shutdown(ctx) + + value, err := b2.GetBySecondary(0, []byte("bonjour")) + assert.Nil(t, err) + assert.Equal(t, []byte("world"), value) +} + +func TestLoadWithChecksumErrorCases(t *testing.T) { + t.Run("file does not exist", func(t *testing.T) { + dirName := t.TempDir() + _, err := loadWithChecksum(path.Join(dirName, "my-file"), -1, nil) + assert.NotNil(t, err) + }) + + t.Run("file has incorrect length", func(t *testing.T) { + dirName := t.TempDir() + fName := path.Join(dirName, "my-file") + f, err := os.Create(fName) + require.Nil(t, err) + + _, err = f.Write(make([]byte, 13)) + require.Nil(t, err) + + require.Nil(t, f.Close()) + + _, err = loadWithChecksum(path.Join(dirName, "my-file"), 17, nil) + assert.NotNil(t, err) + }) +} + +func BenchmarkLoading(b *testing.B) { + for _, val := range []int{10, 100, 1000, 10000} { + b.Run(fmt.Sprintf("%d", val), func(b *testing.B) { + dirName := b.TempDir() + fName := path.Join(dirName, fmt.Sprintf("my-file-%d", val)) + f, err := os.Create(fName) + require.Nil(b, err) + data := make([]byte, val) + for i := 0; i < len(data); i++ { + data[i] = byte(rand.Intn(100)) + } + chmsum := crc32.ChecksumIEEE(data[4:]) + binary.LittleEndian.PutUint32(data[:4], chmsum) + _, err = f.Write(data) + require.NoError(b, err) + + require.NoError(b, f.Sync()) + require.NoError(b, f.Close()) + b.ResetTimer() + for i := 0; i < b.N; i++ { + loadedData, err := loadWithChecksum(fName, len(data), nil) + require.NoError(b, err) + require.Equal(b, loadedData, data[4:]) + } + }) + } +} + +func TestBloom_OFF(t *testing.T) { + ctx := context.Background() + tests := bucketTests{ + { + name: "dontCreateBloom", + f: dontCreateBloom, + opts: []BucketOption{ + WithStrategy(StrategyReplace), + WithSecondaryIndices(1), + WithUseBloomFilter(false), + }, + }, + { + name: "dontRecreateBloom", + f: dontRecreateBloom, + opts: []BucketOption{ + 
WithStrategy(StrategyReplace), + WithSecondaryIndices(1), + WithUseBloomFilter(false), + }, + }, + { + name: "dontPrecomputeBloom", + f: dontPrecomputeBloom, + opts: []BucketOption{ + WithStrategy(StrategyReplace), + WithSecondaryIndices(1), + WithUseBloomFilter(false), + }, + }, + } + tests.run(ctx, t) +} + +func dontCreateBloom(ctx context.Context, t *testing.T, opts []BucketOption) { + dirName := t.TempDir() + logger, _ := test.NewNullLogger() + + b, err := NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), + opts...) + require.NoError(t, err) + defer b.Shutdown(ctx) + + t.Run("populate", func(t *testing.T) { + require.NoError(t, b.Put([]byte("hello"), []byte("world"), + WithSecondaryKey(0, []byte("bonjour")))) + require.NoError(t, b.FlushMemtable()) + }) + + t.Run("check files", func(t *testing.T) { + files, err := os.ReadDir(dirName) + require.NoError(t, err) + + _, ok := findFileWithExt(files, ".bloom") + assert.False(t, ok) + _, ok = findFileWithExt(files, "secondary.0.bloom") + assert.False(t, ok) + }) + + t.Run("search", func(t *testing.T) { + valuePrimary, err := b.Get([]byte("hello")) + require.NoError(t, err) + valueSecondary, err := b.GetBySecondary(0, []byte("bonjour")) + require.NoError(t, err) + + assert.Equal(t, []byte("world"), valuePrimary) + assert.Equal(t, []byte("world"), valueSecondary) + }) +} + +func dontRecreateBloom(ctx context.Context, t *testing.T, opts []BucketOption) { + dirName := t.TempDir() + logger, _ := test.NewNullLogger() + + t.Run("create, populate, shutdown", func(t *testing.T) { + b, err := NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), + opts...) 
+ require.NoError(t, err) + defer b.Shutdown(ctx) + + require.NoError(t, b.Put([]byte("hello"), []byte("world"), + WithSecondaryKey(0, []byte("bonjour")))) + require.NoError(t, b.FlushMemtable()) + }) + + b2, err := NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), + opts...) + require.NoError(t, err) + defer b2.Shutdown(ctx) + + t.Run("check files", func(t *testing.T) { + files, err := os.ReadDir(dirName) + require.NoError(t, err) + + _, ok := findFileWithExt(files, ".bloom") + assert.False(t, ok) + _, ok = findFileWithExt(files, "secondary.0.bloom") + assert.False(t, ok) + }) + + t.Run("search", func(t *testing.T) { + valuePrimary, err := b2.Get([]byte("hello")) + require.NoError(t, err) + valueSecondary, err := b2.GetBySecondary(0, []byte("bonjour")) + require.NoError(t, err) + + assert.Equal(t, []byte("world"), valuePrimary) + assert.Equal(t, []byte("world"), valueSecondary) + }) +} + +func dontPrecomputeBloom(ctx context.Context, t *testing.T, opts []BucketOption) { + dirName := t.TempDir() + logger, _ := test.NewNullLogger() + + b, err := NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), + opts...) 
+ require.NoError(t, err) + defer b.Shutdown(ctx) + + t.Run("populate, compact", func(t *testing.T) { + require.NoError(t, b.Put([]byte("hello"), []byte("world"), + WithSecondaryKey(0, []byte("bonjour")))) + require.NoError(t, b.FlushMemtable()) + + require.NoError(t, b.Put([]byte("hello2"), []byte("world2"), + WithSecondaryKey(0, []byte("bonjour2")))) + require.NoError(t, b.FlushMemtable()) + + compacted, err := b.disk.compactOnce() + require.NoError(t, err) + require.True(t, compacted) + }) + + t.Run("check files", func(t *testing.T) { + files, err := os.ReadDir(dirName) + require.NoError(t, err) + + _, ok := findFileWithExt(files, ".bloom") + assert.False(t, ok) + _, ok = findFileWithExt(files, "secondary.0.bloom") + assert.False(t, ok) + }) + + t.Run("search", func(t *testing.T) { + valuePrimary, err := b.Get([]byte("hello")) + require.NoError(t, err) + valueSecondary, err := b.GetBySecondary(0, []byte("bonjour")) + require.NoError(t, err) + value2Primary, err := b.Get([]byte("hello2")) + require.NoError(t, err) + value2Secondary, err := b.GetBySecondary(0, []byte("bonjour2")) + require.NoError(t, err) + + assert.Equal(t, []byte("world"), valuePrimary) + assert.Equal(t, []byte("world"), valueSecondary) + assert.Equal(t, []byte("world2"), value2Primary) + assert.Equal(t, []byte("world2"), value2Secondary) + }) +} + +func corruptBloomFile(fname string) error { + f, err := os.Open(fname) + if err != nil { + return err + } + + data, err := io.ReadAll(f) + if err != nil { + return err + } + + if err := f.Close(); err != nil { + return err + } + + // corrupt it by setting all data bytes to 0x01 + for i := 5; i < len(data); i++ { + data[i] = 0x01 + } + + f, err = os.Create(fname) + if err != nil { + return err + } + + _, err = f.Write(data) + if err != nil { + return err + } + + return f.Close() +} + +func corruptBloomFileByTruncatingIt(fname string) error { + f, err := os.Open(fname) + if err != nil { + return err + } + + data, err := io.ReadAll(f) + if err != nil { 
+ return err + } + + if err := f.Close(); err != nil { + return err + } + + data = data[:2] + + f, err = os.Create(fname) + if err != nil { + return err + } + + _, err = f.Write(data) + if err != nil { + return err + } + + return f.Close() +} + +func BenchmarkName(b *testing.B) { + logger, _ := test.NewNullLogger() + fn := func(key []byte) (bool, error) { return true, nil } + + for _, val := range []int{10, 100, 1000, 10000} { + b.Run(fmt.Sprintf("%d", val), func(b *testing.B) { + dirName := b.TempDir() + ctx := context.Background() + bu, err := NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), + WithStrategy(StrategyReplace)) + require.Nil(b, err) + + for i := 0; i < val; i++ { + require.Nil(b, bu.Put([]byte(fmt.Sprintf("hello-%v", i)), []byte(fmt.Sprintf("world-%v", i)))) + } + + require.Nil(b, bu.FlushMemtable()) + bu.Shutdown(ctx) + + files, err := os.ReadDir(dirName) + require.NoError(b, err) + + fnames, ok := findFileWithExt(files, ".db") + assert.True(b, ok) + assert.NotNil(b, fnames) + + b.ResetTimer() + b.ReportAllocs() + for i := 0; i < b.N; i++ { + _, err := newSegment(path.Join(dirName, fnames), logger, nil, fn, segmentConfig{ + mmapContents: false, + useBloomFilter: true, + overwriteDerived: true, + }) + require.NoError(b, err) + } + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_cleaner_replace.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_cleaner_replace.go new file mode 100644 index 0000000000000000000000000000000000000000..748aa358eb8630e1e0ab32495bb9c3588f2b8923 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_cleaner_replace.go @@ -0,0 +1,201 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 
2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "bufio" + "errors" + "fmt" + "io" + "math" + + "github.com/prometheus/client_golang/prometheus" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv/segmentindex" + "github.com/weaviate/weaviate/entities/cyclemanager" + "github.com/weaviate/weaviate/entities/lsmkv" + "github.com/weaviate/weaviate/usecases/monitoring" +) + +type segmentCleanerReplace struct { + w io.WriteSeeker + bufw *bufio.Writer + cursor *segmentCursorReplace + keyExistsFn keyExistsOnUpperSegmentsFunc + version uint16 + level uint16 + secondaryIndexCount uint16 + scratchSpacePath string + enableChecksumValidation bool +} + +func newSegmentCleanerReplace(w io.WriteSeeker, cursor *segmentCursorReplace, + keyExistsFn keyExistsOnUpperSegmentsFunc, level, secondaryIndexCount uint16, + scratchSpacePath string, enableChecksumValidation bool, +) *segmentCleanerReplace { + return &segmentCleanerReplace{ + w: w, + bufw: bufio.NewWriterSize(w, 256*1024), + cursor: cursor, + keyExistsFn: keyExistsFn, + version: segmentindex.ChooseHeaderVersion(enableChecksumValidation), + level: level, + secondaryIndexCount: secondaryIndexCount, + scratchSpacePath: scratchSpacePath, + enableChecksumValidation: enableChecksumValidation, + } +} + +func (p *segmentCleanerReplace) do(shouldAbort cyclemanager.ShouldAbortCallback) error { + if err := p.init(); err != nil { + return fmt.Errorf("init: %w", err) + } + + segmentFile := segmentindex.NewSegmentFile( + segmentindex.WithBufferedWriter(p.bufw), + segmentindex.WithChecksumsDisabled(!p.enableChecksumValidation), + ) + + indexKeys, err := p.writeKeys(segmentFile, shouldAbort) + if err != nil { + return fmt.Errorf("write keys: %w", err) + } + + if err := p.writeIndexes(segmentFile, indexKeys); err != nil { + return fmt.Errorf("write indices: %w", err) + } + + // flush buffered, so we can safely seek on underlying writer + if err := p.bufw.Flush(); err != nil { + 
return fmt.Errorf("flush buffered: %w", err) + } + + var dataEnd uint64 = segmentindex.HeaderSize + if l := len(indexKeys); l > 0 { + dataEnd = uint64(indexKeys[l-1].ValueEnd) + } + + if err := p.writeHeader(segmentFile, dataEnd); err != nil { + return fmt.Errorf("write header: %w", err) + } + + if _, err := segmentFile.WriteChecksum(); err != nil { + return fmt.Errorf("write compactorSet segment checksum: %w", err) + } + + return nil +} + +func (p *segmentCleanerReplace) init() error { + // write a dummy header as its contents are not known yet. + // file will be sought to the beginning and overwritten with actual header + // at the very end + + if _, err := p.bufw.Write(make([]byte, segmentindex.HeaderSize)); err != nil { + return fmt.Errorf("write empty header: %w", err) + } + return nil +} + +func (p *segmentCleanerReplace) writeKeys(f *segmentindex.SegmentFile, + shouldAbort cyclemanager.ShouldAbortCallback, +) ([]segmentindex.Key, error) { + // the (dummy) header was already written, this is our initial offset + offset := segmentindex.HeaderSize + + var indexKeys []segmentindex.Key + var indexKey segmentindex.Key + var node segmentReplaceNode + var err error + var keyExists bool + + i := 0 + for node, err = p.cursor.firstWithAllKeys(); err == nil || errors.Is(err, lsmkv.Deleted); node, err = p.cursor.nextWithAllKeys() { + i++ + if i%100 == 0 && shouldAbort() { + return nil, fmt.Errorf("should abort requested") + } + + keyExists, err = p.keyExistsFn(node.primaryKey) + if err != nil { + break + } + if keyExists { + continue + } + nodeCopy := node + nodeCopy.offset = offset + indexKey, err = nodeCopy.KeyIndexAndWriteTo(f.BodyWriter()) + if err != nil { + break + } + offset = indexKey.ValueEnd + indexKeys = append(indexKeys, indexKey) + } + + if !errors.Is(err, lsmkv.NotFound) { + return nil, err + } + return indexKeys, nil +} + +func (p *segmentCleanerReplace) writeIndexes(f *segmentindex.SegmentFile, + keys []segmentindex.Key, +) error { + indexes := 
&segmentindex.Indexes{ + Keys: keys, + SecondaryIndexCount: p.secondaryIndexCount, + ScratchSpacePath: p.scratchSpacePath, + ObserveWrite: monitoring.GetMetrics().FileIOWrites.With(prometheus.Labels{ + "strategy": StrategyReplace, + "operation": "cleanupWriteIndices", + }), + } + _, err := f.WriteIndexes(indexes, math.MaxInt64) // segment cleaner only runs for big files + return err +} + +// writeHeader assumes that everything has been written to the underlying +// writer and it is now safe to seek to the beginning and override the initial +// header +func (p *segmentCleanerReplace) writeHeader(f *segmentindex.SegmentFile, + startOfIndex uint64, +) error { + if _, err := p.w.Seek(0, io.SeekStart); err != nil { + return fmt.Errorf("seek to beginning to write header: %w", err) + } + + h := &segmentindex.Header{ + Level: p.level, + Version: p.version, + SecondaryIndices: p.secondaryIndexCount, + Strategy: segmentindex.StrategyReplace, + IndexStart: startOfIndex, + } + // We have to write directly to compactor writer, + // since it has seeked back to start. The following + // call to f.WriteHeader will not write again. 
+ if _, err := h.WriteTo(p.w); err != nil { + return err + } + + if _, err := f.WriteHeader(h); err != nil { + return err + } + + if _, err := p.w.Seek(0, io.SeekEnd); err != nil { + return fmt.Errorf("seek to end after writing header: %w", err) + } + + p.bufw.Reset(p.w) + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_collection_strategy.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_collection_strategy.go new file mode 100644 index 0000000000000000000000000000000000000000..a8011f3b67afd7b68bbeb49a253ea8c16c5495a8 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_collection_strategy.go @@ -0,0 +1,118 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "encoding/binary" + "fmt" + + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv/segmentindex" + "github.com/weaviate/weaviate/entities/lsmkv" +) + +func (s *segment) getCollection(key []byte) ([]value, error) { + if s.strategy != segmentindex.StrategySetCollection && + s.strategy != segmentindex.StrategyMapCollection && + s.strategy != segmentindex.StrategyInverted { + return nil, fmt.Errorf("get only possible for strategies %q, %q and %q, got %q", + StrategySetCollection, StrategyMapCollection, StrategyInverted, s.strategy) + } + + if s.useBloomFilter && !s.bloomFilter.Test(key) { + return nil, lsmkv.NotFound + } + + node, err := s.index.Get(key) + if err != nil { + return nil, err + } + + // We need to copy the data we read from the segment exactly once in this + // place. This means that future processing can share this memory as much as + // it wants to, as it can now be considered immutable. 
If we didn't copy in + // this place it would only be safe to hold this data while still under the + // protection of the segmentGroup.maintenanceLock. This lock makes sure that + // no compaction is started during an ongoing read. However, as we could show + // as part of https://github.com/weaviate/weaviate/issues/1837 + // further processing, such as map-decoding and eventually map-merging would + // happen inside the bucket.MapList() method. This scope has its own lock, + // but that lock can only protecting against flushing (i.e. changing the + // active/flushing memtable), not against removing the disk segment. If a + // compaction completes and the old segment is removed, we would be accessing + // invalid memory without the copy, thus leading to a SEGFAULT. + contentsCopy := make([]byte, node.End-node.Start) + if err = s.copyNode(contentsCopy, nodeOffset{node.Start, node.End}); err != nil { + return nil, err + } + if s.strategy == segmentindex.StrategyInverted { + return s.collectionStratParseDataInverted(contentsCopy) + } + + return s.collectionStratParseData(contentsCopy) +} + +func (s *segment) collectionStratParseData(in []byte) ([]value, error) { + if len(in) == 0 { + return nil, lsmkv.NotFound + } + + offset := 0 + + valuesLen := binary.LittleEndian.Uint64(in[offset : offset+8]) + offset += 8 + + values := make([]value, valuesLen) + valueIndex := 0 + for valueIndex < int(valuesLen) { + values[valueIndex].tombstone = in[offset] == 0x01 + offset += 1 + + valueLen := binary.LittleEndian.Uint64(in[offset : offset+8]) + offset += 8 + + values[valueIndex].value = in[offset : offset+int(valueLen)] + offset += int(valueLen) + + valueIndex++ + } + + return values, nil +} + +func (s *segment) collectionStratParseDataInverted(in []byte) ([]value, error) { + if len(in) == 0 { + return nil, lsmkv.NotFound + } + + offset := 0 + + valuesLen := binary.LittleEndian.Uint64(in[offset : offset+8]) + // offset += 8 + + values := make([]value, valuesLen) + + nodes, _ := 
decodeAndConvertFromBlocks(in) + + valueIndex := 0 + for _, node := range nodes { + buf := make([]byte, 16) + copy(buf, node.Key) + copy(buf[8:], node.Value) + values[valueIndex].tombstone = node.Tombstone + values[valueIndex].value = buf + + valueIndex++ + + } + + return values, nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_group.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_group.go new file mode 100644 index 0000000000000000000000000000000000000000..748b0453a5a3c9b13e08c1d15d0e8f3ba1f8630d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_group.go @@ -0,0 +1,1052 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "context" + "errors" + "fmt" + "io/fs" + "os" + "path/filepath" + "sort" + "strings" + "sync" + "time" + + "github.com/sirupsen/logrus" + + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv/segmentindex" + "github.com/weaviate/weaviate/adapters/repos/db/roaringset" + "github.com/weaviate/weaviate/adapters/repos/db/roaringsetrange" + "github.com/weaviate/weaviate/entities/cyclemanager" + "github.com/weaviate/weaviate/entities/diskio" + "github.com/weaviate/weaviate/entities/lsmkv" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/storagestate" + "github.com/weaviate/weaviate/usecases/memwatch" +) + +type SegmentGroup struct { + segments []Segment + + // Lock() for changing the currently active segments, RLock() for normal + // operation + maintenanceLock sync.RWMutex + dir string + + cursorsLock sync.RWMutex + activeCursors int + enqueuedSegments []Segment + + // flushVsCompactLock is a 
simple synchronization mechanism between the + // compaction and flush cycle. In general, those are independent, however, + // there are parts of it that are not. See the comments of the routines + // interacting with this lock for more details. + flushVsCompactLock sync.Mutex + + strategy string + + compactionCallbackCtrl cyclemanager.CycleCallbackCtrl + + logger logrus.FieldLogger + + // for backward-compatibility with states where the disk state for maps was + // not guaranteed to be sorted yet + mapRequiresSorting bool + + status storagestate.Status + statusLock sync.Mutex + metrics *Metrics + + // all "replace" buckets support counting through net additions, but not all + // produce a meaningful count. Typically, the only count we're interested in + // is that of the bucket that holds objects + monitorCount bool + + mmapContents bool + keepTombstones bool // see bucket for more details + useBloomFilter bool // see bucket for more details + calcCountNetAdditions bool // see bucket for more details + compactLeftOverSegments bool // see bucket for more details + enableChecksumValidation bool + MinMMapSize int64 + keepLevelCompaction bool // see bucket for more details + + allocChecker memwatch.AllocChecker + maxSegmentSize int64 + + segmentCleaner segmentCleaner + cleanupInterval time.Duration + lastCleanupCall time.Time + lastCompactionCall time.Time + + roaringSetRangeSegmentInMemory *roaringsetrange.SegmentInMemory + bitmapBufPool roaringset.BitmapBufPool + bm25config *schema.BM25Config + writeSegmentInfoIntoFileName bool + writeMetadata bool +} + +type sgConfig struct { + dir string + strategy string + mapRequiresSorting bool + monitorCount bool + mmapContents bool + keepTombstones bool + useBloomFilter bool + calcCountNetAdditions bool + forceCompaction bool + keepLevelCompaction bool + maxSegmentSize int64 + cleanupInterval time.Duration + enableChecksumValidation bool + keepSegmentsInMemory bool + MinMMapSize int64 + bm25config *models.BM25Config + 
writeSegmentInfoIntoFileName bool + writeMetadata bool +} + +func newSegmentGroup(ctx context.Context, logger logrus.FieldLogger, metrics *Metrics, cfg sgConfig, + compactionCallbacks cyclemanager.CycleCallbackGroup, b *Bucket, files map[string]int64, +) (*SegmentGroup, error) { + now := time.Now() + sg := &SegmentGroup{ + segments: make([]Segment, len(files)), + dir: cfg.dir, + logger: logger, + metrics: metrics, + monitorCount: cfg.monitorCount, + mapRequiresSorting: cfg.mapRequiresSorting, + strategy: cfg.strategy, + mmapContents: cfg.mmapContents, + keepTombstones: cfg.keepTombstones, + useBloomFilter: cfg.useBloomFilter, + calcCountNetAdditions: cfg.calcCountNetAdditions, + compactLeftOverSegments: cfg.forceCompaction, + maxSegmentSize: cfg.maxSegmentSize, + cleanupInterval: cfg.cleanupInterval, + enableChecksumValidation: cfg.enableChecksumValidation, + allocChecker: b.allocChecker, + lastCompactionCall: now, + lastCleanupCall: now, + MinMMapSize: cfg.MinMMapSize, + writeSegmentInfoIntoFileName: cfg.writeSegmentInfoIntoFileName, + writeMetadata: cfg.writeMetadata, + bitmapBufPool: b.bitmapBufPool, + } + + segmentIndex := 0 + + segmentsAlreadyRecoveredFromCompaction := make(map[string]struct{}) + + // Note: it's important to process first the compacted segments + // TODO: a single iteration may be possible + + for entry := range files { + if filepath.Ext(entry) != ".tmp" { + continue + } + + potentialCompactedSegmentFileName := strings.TrimSuffix(entry, ".tmp") + + if filepath.Ext(potentialCompactedSegmentFileName) != ".db" { + // another kind of temporal file, ignore at this point but it may need to be deleted... 
+ continue + } + + jointSegments := segmentID(potentialCompactedSegmentFileName) + jointSegmentsIDs := strings.Split(jointSegments, "_") + + if len(jointSegmentsIDs) == 1 { + // cleanup leftover, to be removed + if err := os.Remove(filepath.Join(sg.dir, entry)); err != nil { + return nil, fmt.Errorf("delete partially cleaned segment %q: %w", entry, err) + } + continue + } + + if len(jointSegmentsIDs) != 2 { + logger.WithField("action", "lsm_segment_init"). + WithField("path", filepath.Join(sg.dir, entry)). + Warn("ignored (partially written) LSM compacted segment generated with a version older than v1.24.0") + + continue + } + + // jointSegmentsIDs[0] is the left segment, jointSegmentsIDs[1] is the right segment + leftSegmentFound, _ := segmentExistsWithID(jointSegmentsIDs[0], files) + rightSegmentFound, rightSegmentFilename := segmentExistsWithID(jointSegmentsIDs[1], files) + + rightSegmentPath := filepath.Join(sg.dir, rightSegmentFilename) + + if leftSegmentFound && rightSegmentFound { + delete(files, entry) + if err := os.Remove(filepath.Join(sg.dir, entry)); err != nil { + return nil, fmt.Errorf("delete partially compacted segment %q: %w", entry, err) + } + continue + } + + if leftSegmentFound && !rightSegmentFound { + return nil, fmt.Errorf("missing right segment %q", rightSegmentFilename) + } + + var rightSegmentMetadata *struct { + Level uint16 + Strategy segmentindex.Strategy + } + if !leftSegmentFound && rightSegmentFound { + // segment is initialized just to be erased + // there is no need of bloom filters nor net addition counter re-calculation + rightSegment, err := newSegment(rightSegmentPath, logger, + metrics, sg.makeExistsOn(nil), + segmentConfig{ + mmapContents: sg.mmapContents, + useBloomFilter: sg.useBloomFilter, + calcCountNetAdditions: sg.calcCountNetAdditions, + overwriteDerived: false, + enableChecksumValidation: sg.enableChecksumValidation, + MinMMapSize: sg.MinMMapSize, + allocChecker: sg.allocChecker, + fileList: make(map[string]int64), // 
empty to not check if bloom/cna files already exist + writeMetadata: sg.writeMetadata, + }) + if err != nil { + return nil, fmt.Errorf("init already compacted right segment %s: %w", rightSegmentFilename, err) + } + + rightSegmentMetadata = &struct { + Level uint16 + Strategy segmentindex.Strategy + }{ + Level: rightSegment.getLevel(), + Strategy: rightSegment.getStrategy(), + } + + err = rightSegment.close() + if err != nil { + return nil, fmt.Errorf("close already compacted right segment %s: %w", rightSegmentFilename, err) + } + + // https://github.com/weaviate/weaviate/pull/6128 introduces the ability + // to drop segments delayed by renaming them first and then dropping them + // later. + // + // The existing functionality (previously .drop) was renamed to + // .dropImmediately. We are keeping the old behavior in this mainly for + // backward compatbility, but also because the motivation behind the + // delayed deletion does not apply here: + // + // The new behavior is meant to split the deletion into two steps, to + // reduce the time that an expensive lock – which could block readers - + // is held. In this scenario, the segment has not been initialized yet, + // so there is no one we could be blocking. + // + // The total time is the same, so we can also just drop it immediately. 
+ err = rightSegment.dropImmediately() + if err != nil { + return nil, fmt.Errorf("delete already compacted right segment %s: %w", rightSegmentFilename, err) + } + delete(files, rightSegmentFilename) + + err = diskio.Fsync(sg.dir) + if err != nil { + return nil, fmt.Errorf("fsync segment directory %s: %w", sg.dir, err) + } + } + + var newRightSegmentFileName string + if cfg.writeSegmentInfoIntoFileName && rightSegmentMetadata != nil { + newRightSegmentFileName = fmt.Sprintf("segment-%s%s.db", jointSegmentsIDs[1], segmentExtraInfo(rightSegmentMetadata.Level, rightSegmentMetadata.Strategy)) + } else { + newRightSegmentFileName = fmt.Sprintf("segment-%s.db", jointSegmentsIDs[1]) + } + newRightSegmentPath := filepath.Join(sg.dir, newRightSegmentFileName) + + if err := os.Rename(filepath.Join(sg.dir, entry), newRightSegmentPath); err != nil { + return nil, fmt.Errorf("rename compacted segment file %q as %q: %w", entry, newRightSegmentFileName, err) + } + + var segment Segment + var err error + sgConf := segmentConfig{ + mmapContents: sg.mmapContents, + useBloomFilter: sg.useBloomFilter, + calcCountNetAdditions: sg.calcCountNetAdditions, + overwriteDerived: true, + enableChecksumValidation: sg.enableChecksumValidation, + MinMMapSize: sg.MinMMapSize, + allocChecker: sg.allocChecker, + fileList: files, + writeMetadata: sg.writeMetadata, + } + if b.lazySegmentLoading { + segment, err = newLazySegment(newRightSegmentPath, logger, + metrics, sg.makeExistsOn(sg.segments[:segmentIndex]), sgConf, + ) + if err != nil { + return nil, fmt.Errorf("init lazy segment %s: %w", newRightSegmentFileName, err) + } + } else { + segment, err = newSegment(newRightSegmentPath, logger, + metrics, sg.makeExistsOn(sg.segments[:segmentIndex]), sgConf, + ) + if err != nil { + return nil, fmt.Errorf("init segment %s: %w", newRightSegmentFileName, err) + } + } + + sg.segments[segmentIndex] = segment + segmentIndex++ + + segmentsAlreadyRecoveredFromCompaction[newRightSegmentFileName] = struct{}{} + } 
+ + for entry := range files { + if filepath.Ext(entry) == DeleteMarkerSuffix { + // marked for deletion, but never actually deleted. Delete now. + if err := os.Remove(filepath.Join(sg.dir, entry)); err != nil { + // don't abort if the delete fails, we can still continue (albeit + // without freeing disk space that should have been freed) + sg.logger.WithError(err).WithFields(logrus.Fields{ + "action": "lsm_segment_init_deleted_previously_marked_files", + "file": entry, + }).Error("failed to delete file already marked for deletion") + } + continue + + } + + if filepath.Ext(entry) != ".db" { + // skip, this could be commit log, etc. + continue + } + + _, alreadyRecoveredFromCompaction := segmentsAlreadyRecoveredFromCompaction[entry] + if alreadyRecoveredFromCompaction { + // the .db file was already removed and restored from a compacted segment + continue + } + + // before we can mount this file, we need to check if a WAL exists for it. + // If yes, we must assume that the flush never finished, as otherwise the + // WAL would have been deleted. Thus we must remove it. + walFileName, _, _ := strings.Cut(entry, ".") + walFileName += ".wal" + _, ok := files[walFileName] + if ok { + // the segment will be recovered from the WAL + err := os.Remove(filepath.Join(sg.dir, entry)) + if err != nil { + return nil, fmt.Errorf("delete partially written segment %s: %w", entry, err) + } + + logger.WithField("action", "lsm_segment_init"). + WithField("path", filepath.Join(sg.dir, entry)). + WithField("wal_path", walFileName). + Info("discarded (partially written) LSM segment, because an active WAL for " + + "the same segment was found. 
A recovery from the WAL will follow.") + + continue + } + + var segment Segment + segConf := segmentConfig{ + mmapContents: sg.mmapContents, + useBloomFilter: sg.useBloomFilter, + calcCountNetAdditions: sg.calcCountNetAdditions, + overwriteDerived: false, + enableChecksumValidation: sg.enableChecksumValidation, + MinMMapSize: sg.MinMMapSize, + allocChecker: sg.allocChecker, + fileList: files, + writeMetadata: sg.writeMetadata, + } + var err error + if b.lazySegmentLoading { + segment, err = newLazySegment(filepath.Join(sg.dir, entry), logger, + metrics, sg.makeExistsOn(sg.segments[:segmentIndex]), segConf, + ) + if err != nil { + return nil, fmt.Errorf("init lazy segment %s: %w", filepath.Join(sg.dir, entry), err) + } + } else { + segment, err = newSegment(filepath.Join(sg.dir, entry), logger, + metrics, sg.makeExistsOn(sg.segments[:segmentIndex]), segConf, + ) + if err != nil { + return nil, fmt.Errorf("init segment %s: %w", filepath.Join(sg.dir, entry), err) + } + } + sg.segments[segmentIndex] = segment + segmentIndex++ + } + + sg.segments = sg.segments[:segmentIndex] + + // segment load order is as follows: + // - find .tmp files and recover them first + // - find .db files and load them + // - if there is a .wal file exists for a .db, remove the .db file + // - find .wal files and load them into a memtable + // - flush the memtable to a segment file + // Thus, files may be loaded in a different order than they were created, + // and we need to re-sort them to ensure the order is correct, as compations + // and other operations are based on the creation order of the segments + sort.Slice(sg.segments, func(i, j int) bool { + return sg.segments[i].getPath() < sg.segments[j].getPath() + }) + + // Actual strategy is stored in segment files. 
In case it is SetCollection, + // while new implementation uses bitmaps and supposed to be RoaringSet, + // bucket and segmentgroup strategy is changed back to SetCollection + // (memtables will be created later on, with already modified strategy) + // TODO what if only WAL files exists, and there is no segment to get actual strategy? + if b.strategy == StrategyRoaringSet && len(sg.segments) > 0 && + sg.segments[0].getStrategy() == segmentindex.StrategySetCollection { + b.strategy = StrategySetCollection + b.desiredStrategy = StrategyRoaringSet + sg.strategy = StrategySetCollection + } + // As of v1.19 property's IndexInterval setting is replaced with + // IndexFilterable (roaring set) + IndexSearchable (map) and enabled by default. + // Buckets for text/text[] inverted indexes created before 1.19 have strategy + // map and name that since 1.19 is used by filterable indeverted index. + // Those buckets (roaring set by configuration, but in fact map) have to be + // renamed on startup by migrator. Here actual strategy is set based on + // data found in segment files + if b.strategy == StrategyRoaringSet && len(sg.segments) > 0 && + sg.segments[0].getStrategy() == segmentindex.StrategyMapCollection { + b.strategy = StrategyMapCollection + b.desiredStrategy = StrategyRoaringSet + sg.strategy = StrategyMapCollection + } + + // Inverted segments share a lot of their logic as the MapCollection, + // and the main difference is in the way they store their data. + // Setting the desired strategy to Inverted will make sure that we can + // distinguish between the two strategies for search. + // The changes only apply when we have segments on disk, + // as the memtables will always be created with the MapCollection strategy. 
+ if b.strategy == StrategyInverted && len(sg.segments) > 0 && + sg.segments[0].getStrategy() == segmentindex.StrategyMapCollection { + b.strategy = StrategyMapCollection + b.desiredStrategy = StrategyInverted + sg.strategy = StrategyMapCollection + } else if b.strategy == StrategyMapCollection && len(sg.segments) > 0 && + sg.segments[0].getStrategy() == segmentindex.StrategyInverted { + // TODO amourao: blockmax "else" to be removed before final release + // in case bucket was created as inverted and default strategy was reverted to map + // by unsetting corresponding env variable + b.strategy = StrategyInverted + b.desiredStrategy = StrategyMapCollection + sg.strategy = StrategyInverted + } + + if err := b.mayRecoverFromCommitLogs(ctx, sg, files); err != nil { + return nil, err + } + + if sg.monitorCount { + sg.metrics.ObjectCount(sg.count()) + } + + sc, err := newSegmentCleaner(sg) + if err != nil { + return nil, err + } + sg.segmentCleaner = sc + + // if a segment exists of the map collection strategy, we need to + // convert the inverted strategy to a map collection strategy + // as it is done on the bucket level + if sg.strategy == StrategyInverted && len(sg.segments) > 0 && + sg.segments[0].getStrategy() == segmentindex.StrategyMapCollection { + sg.strategy = StrategyMapCollection + } + + switch sg.strategy { + case StrategyInverted: + // start with last but one segment, as the last one doesn't need tombstones for now + for i := len(sg.segments) - 2; i >= 0; i-- { + // avoid crashing if segment has no tombstones + tombstonesNext, err := sg.segments[i+1].ReadOnlyTombstones() + if err != nil { + return nil, fmt.Errorf("init segment %s: load tombstones %w", sg.segments[i+1].getPath(), err) + } + if _, err := sg.segments[i].MergeTombstones(tombstonesNext); err != nil { + return nil, fmt.Errorf("init segment %s: merge tombstones %w", sg.segments[i].getPath(), err) + } + } + + case StrategyRoaringSetRange: + if cfg.keepSegmentsInMemory { + t := time.Now() + 
sg.roaringSetRangeSegmentInMemory = roaringsetrange.NewSegmentInMemory() + for _, seg := range sg.segments { + cursor := seg.newRoaringSetRangeCursor() + if err := sg.roaringSetRangeSegmentInMemory.MergeSegmentByCursor(cursor); err != nil { + return nil, fmt.Errorf("build segment-in-memory of strategy '%s': %w", sg.strategy, err) + } + } + logger.WithFields(logrus.Fields{ + "took": time.Since(t).String(), + "bucket": filepath.Base(cfg.dir), + "size_mb": fmt.Sprintf("%.3f", float64(sg.roaringSetRangeSegmentInMemory.Size())/1024/1024), + }).Debug("rangeable segment-in-memory built") + } + } + + id := "segmentgroup/compaction/" + sg.dir + sg.compactionCallbackCtrl = compactionCallbacks.Register(id, sg.compactOrCleanup) + + return sg, nil +} + +func (sg *SegmentGroup) makeExistsOn(segments []Segment) existsOnLowerSegmentsFn { + return func(key []byte) (bool, error) { + if len(segments) == 0 { + // this is already the lowest possible segment, we can guarantee that + // any key in this segment is previously unseen. 
+ return false, nil + } + + v, err := sg.getWithUpperSegmentBoundary(key, segments) + if err != nil { + return false, fmt.Errorf("check exists on segments: %w", err) + } + + return v != nil, nil + } +} + +func (sg *SegmentGroup) add(path string) error { + sg.maintenanceLock.Lock() + defer sg.maintenanceLock.Unlock() + + segment, err := newSegment(path, sg.logger, + sg.metrics, sg.makeExistsOn(sg.segments), + segmentConfig{ + mmapContents: sg.mmapContents, + useBloomFilter: sg.useBloomFilter, + calcCountNetAdditions: sg.calcCountNetAdditions, + overwriteDerived: true, + enableChecksumValidation: sg.enableChecksumValidation, + MinMMapSize: sg.MinMMapSize, + allocChecker: sg.allocChecker, + writeMetadata: sg.writeMetadata, + }) + if err != nil { + return fmt.Errorf("init segment %s: %w", path, err) + } + + sg.segments = append(sg.segments, segment) + return nil +} + +func (sg *SegmentGroup) getAndLockSegments() (segments []Segment, release func()) { + sg.cursorsLock.RLock() + sg.maintenanceLock.RLock() + + if len(sg.enqueuedSegments) == 0 { + return sg.segments, func() { + sg.cursorsLock.RUnlock() + sg.maintenanceLock.RUnlock() + } + } + + segments = make([]Segment, 0, len(sg.segments)+len(sg.enqueuedSegments)) + + segments = append(segments, sg.segments...) + segments = append(segments, sg.enqueuedSegments...) 
+ + return segments, func() { + sg.cursorsLock.RUnlock() + sg.maintenanceLock.RUnlock() + } +} + +func (sg *SegmentGroup) addInitializedSegment(segment *segment) error { + sg.cursorsLock.Lock() + defer sg.cursorsLock.Unlock() + + if sg.activeCursors > 0 { + sg.enqueuedSegments = append(sg.enqueuedSegments, segment) + return nil + } + + sg.maintenanceLock.Lock() + defer sg.maintenanceLock.Unlock() + + sg.segments = append(sg.segments, segment) + return nil +} + +func (sg *SegmentGroup) get(key []byte) ([]byte, error) { + beforeMaintenanceLock := time.Now() + segments, release := sg.getAndLockSegments() + defer release() + + if time.Since(beforeMaintenanceLock) > 100*time.Millisecond { + sg.logger.WithField("duration", time.Since(beforeMaintenanceLock)). + WithField("action", "lsm_segment_group_get_obtain_maintenance_lock"). + Debug("waited over 100ms to obtain maintenance lock in segment group get()") + } + + return sg.getWithUpperSegmentBoundary(key, segments) +} + +// not thread-safe on its own, as the assumption is that this is called from a +// lockholder, e.g. within .get() +func (sg *SegmentGroup) getWithUpperSegmentBoundary(key []byte, segments []Segment) ([]byte, error) { + // assumes "replace" strategy + + // start with latest and exit as soon as something is found, thus making sure + // the latest takes presence + for i := len(segments) - 1; i >= 0; i-- { + beforeSegment := time.Now() + v, err := segments[i].get(key) + if time.Since(beforeSegment) > 100*time.Millisecond { + sg.logger.WithField("duration", time.Since(beforeSegment)). + WithField("action", "lsm_segment_group_get_individual_segment"). + WithError(err). + WithField("segment_pos", i). 
+ Debug("waited over 100ms to get result from individual segment") + } + if err != nil { + if errors.Is(err, lsmkv.NotFound) { + continue + } + + if errors.Is(err, lsmkv.Deleted) { + return nil, nil + } + + panic(fmt.Sprintf("unsupported error in segmentGroup.get(): %v", err)) + } + + return v, nil + } + + return nil, nil +} + +func (sg *SegmentGroup) getErrDeleted(key []byte) ([]byte, error) { + segments, release := sg.getAndLockSegments() + defer release() + + return sg.getWithUpperSegmentBoundaryErrDeleted(key, segments) +} + +func (sg *SegmentGroup) getWithUpperSegmentBoundaryErrDeleted(key []byte, segments []Segment) ([]byte, error) { + // assumes "replace" strategy + + // start with latest and exit as soon as something is found, thus making sure + // the latest takes presence + for i := len(segments) - 1; i >= 0; i-- { + v, err := segments[i].get(key) + if err != nil { + if errors.Is(err, lsmkv.NotFound) { + continue + } + + if errors.Is(err, lsmkv.Deleted) { + return nil, err + } + + panic(fmt.Sprintf("unsupported error in segmentGroup.get(): %v", err)) + } + + return v, nil + } + + return nil, lsmkv.NotFound +} + +func (sg *SegmentGroup) getBySecondaryIntoMemory(pos int, key []byte, buffer []byte) ([]byte, []byte, []byte, error) { + segments, release := sg.getAndLockSegments() + defer release() + + // assumes "replace" strategy + + // start with latest and exit as soon as something is found, thus making sure + // the latest takes presence + for i := len(segments) - 1; i >= 0; i-- { + k, v, allocatedBuff, err := segments[i].getBySecondaryIntoMemory(pos, key, buffer) + if err != nil { + if errors.Is(err, lsmkv.NotFound) { + continue + } + + if errors.Is(err, lsmkv.Deleted) { + return nil, nil, nil, nil + } + + panic(fmt.Sprintf("unsupported error in segmentGroup.get(): %v", err)) + } + + return k, v, allocatedBuff, nil + } + + return nil, nil, nil, nil +} + +func (sg *SegmentGroup) getCollection(key []byte) ([]value, error) { + segments, release := 
sg.getAndLockSegments() + defer release() + + var out []value + + // start with first and do not exit + for _, segment := range segments { + v, err := segment.getCollection(key) + if err != nil { + if errors.Is(err, lsmkv.NotFound) { + continue + } + + return nil, err + } + + if len(out) == 0 { + out = v + } else { + out = append(out, v...) + } + } + + return out, nil +} + +func (sg *SegmentGroup) getCollectionAndSegments(key []byte) ([][]value, []Segment, func(), error) { + segments, release := sg.getAndLockSegments() + + out := make([][]value, len(segments)) + outSegments := make([]Segment, len(segments)) + + i := 0 + // start with first and do not exit + for _, segment := range segments { + v, err := segment.getCollection(key) + if err != nil { + if !errors.Is(err, lsmkv.NotFound) { + release() + return nil, nil, func() {}, err + } + // inverted segments need to be loaded anyway, even if they don't have + // the key, as we need to know if they have tombstones + if segment.getStrategy() != segmentindex.StrategyInverted { + continue + } + } + + out[i] = v + outSegments[i] = segment + i++ + } + + return out[:i], outSegments[:i], release, nil +} + +func (sg *SegmentGroup) roaringSetGet(key []byte) (out roaringset.BitmapLayers, release func(), err error) { + segments, sgRelease := sg.getAndLockSegments() + defer sgRelease() + + ln := len(segments) + if ln == 0 { + return nil, noopRelease, nil + } + + release = noopRelease + // use bigger buffer for first layer, to make space for further merges + // with following layers + bitmapBufPool := roaringset.NewBitmapBufPoolFactorWrapper(sg.bitmapBufPool, 1.25) + + i := 0 + for ; i < ln; i++ { + layer, layerRelease, err := segments[i].roaringSetGet(key, bitmapBufPool) + if err == nil { + out = append(out, layer) + release = layerRelease + i++ + break + } + if !errors.Is(err, lsmkv.NotFound) { + return nil, noopRelease, err + } + } + defer func() { + if err != nil { + release() + } + }() + + for ; i < ln; i++ { + if err := 
segments[i].roaringSetMergeWith(key, out[0], sg.bitmapBufPool); err != nil { + return nil, noopRelease, err + } + } + + return out, release, nil +} + +func (sg *SegmentGroup) count() int { + segments, release := sg.getAndLockSegments() + defer release() + + count := 0 + for _, seg := range segments { + count += seg.getSegment().getCountNetAdditions() + } + + return count +} + +func (sg *SegmentGroup) Size() int64 { + segments, release := sg.getAndLockSegments() + defer release() + + totalSize := int64(0) + for _, seg := range segments { + totalSize += int64(seg.getSize()) + } + + return totalSize +} + +// MetadataSize returns the total size of metadata files (.bloom and .cna) from segments in memory +// MetadataSize returns the total size of metadata files for all segments. +// The calculation differs based on the writeMetadata setting: +// +// When writeMetadata is enabled: +// - Counts the actual file size of .metadata files on disk +// - Each .metadata file contains: header + bloom filters + count net additions +// - Header includes: checksum (4 bytes) + version (1 byte) + bloom len (4 bytes) + cna len (4 bytes) = 13 bytes +// - Bloom filters are serialized and stored inline +// - CNA data includes: uint64 count (8 bytes) + length indicator (4 bytes) = 12 bytes +// +// When writeMetadata is disabled: +// - Counts bloom filters in memory (getBloomFilterSize) +// - Counts .cna files separately (12 bytes each: 8 bytes data + 4 bytes checksum) +// - This represents the legacy behavior where metadata was stored separately +// +// The total size should be equivalent between both modes, accounting for the +// metadata file header overhead when writeMetadata is enabled. 
+func (sg *SegmentGroup) MetadataSize() int64 { + segments, release := sg.getAndLockSegments() + defer release() + + var totalSize int64 + for _, segment := range segments { + if sg.writeMetadata { + // When writeMetadata is enabled, count .metadata files + // Each .metadata file contains bloom filters + count net additions + if seg := segment.getSegment(); seg != nil { + // Check if segment has metadata file + metadataPath := seg.metadataPath() + if metadataPath != "" { + exists, err := fileExists(metadataPath) + if err == nil && exists { + // Get the actual file size of the metadata file + if info, err := os.Stat(metadataPath); err == nil { + totalSize += info.Size() + } + } + } + } + } else { + // When writeMetadata is disabled, count bloom filters and .cna files separately + if seg := segment.getSegment(); seg != nil { + // Count bloom filters in memory + if seg.bloomFilter != nil { + totalSize += int64(getBloomFilterSize(seg.bloomFilter)) + } + // Count secondary bloom filters + for _, bf := range seg.secondaryBloomFilters { + if bf != nil { + totalSize += int64(getBloomFilterSize(bf)) + } + } + } + + // Count .cna files (12 bytes each) + if segment.getSegment().countNetPath() != "" { + // .cna files: uint64 count (8 bytes) + uint32 checksum (4 bytes) = 12 bytes + totalSize += 12 + } + } + } + + return totalSize +} + +func (sg *SegmentGroup) shutdown(ctx context.Context) error { + if err := sg.compactionCallbackCtrl.Unregister(ctx); err != nil { + return fmt.Errorf("long-running compaction in progress: %w", ctx.Err()) + } + if err := sg.segmentCleaner.close(); err != nil { + return err + } + + sg.cursorsLock.Lock() + defer sg.cursorsLock.Unlock() + + for _, seg := range sg.enqueuedSegments { + seg.close() + } + + // Lock acquirement placed after compaction cycle stop request, due to occasional deadlock, + // because compaction logic used in cycle also requires maintenance lock. 
+ // + // If lock is grabbed by shutdown method and compaction in cycle loop starts right after, + // it is blocked waiting for the same lock, eventually blocking entire cycle loop and preventing to read stop signal. + // If stop signal can not be read, shutdown will not receive stop result and will not proceed with further execution. + // Maintenance lock will then never be released. + sg.maintenanceLock.Lock() + defer sg.maintenanceLock.Unlock() + + for _, seg := range sg.segments { + if err := seg.close(); err != nil { + return err + } + } + + // make sure the segment list itself is set to nil. In case a memtable will + // still flush after closing, it might try to read from a disk segment list + // otherwise and run into nil-pointer problems. + sg.segments = nil + + return nil +} + +func (sg *SegmentGroup) UpdateStatus(status storagestate.Status) { + sg.statusLock.Lock() + defer sg.statusLock.Unlock() + + sg.status = status +} + +func (sg *SegmentGroup) isReadyOnly() bool { + sg.statusLock.Lock() + defer sg.statusLock.Unlock() + + return sg.status == storagestate.StatusReadOnly +} + +func fileExists(path string) (bool, error) { + _, err := os.Stat(path) + if err == nil { + return true, nil + } + + if errors.Is(err, fs.ErrNotExist) { + return false, nil + } + + return false, err +} + +func segmentExistsWithID(segmentID string, files map[string]int64) (bool, string) { + // segment file format is "segment-{segmentID}.EXT" where EXT is either + // - ".db" if extra infos in filename are not used + // - ".{extra_infos}.db" if extra infos in filename are used + match := fmt.Sprintf("segment-%s.", segmentID) + for fileName := range files { + if strings.HasPrefix(fileName, match) && strings.HasSuffix(fileName, ".db") { + return true, fileName + } + } + return false, "" +} + +func (sg *SegmentGroup) compactOrCleanup(shouldAbort cyclemanager.ShouldAbortCallback) bool { + sg.monitorSegments() + + compact := func() bool { + sg.lastCompactionCall = time.Now() + compacted, 
err := sg.compactOnce() + if err != nil { + sg.logger.WithField("action", "lsm_compaction"). + WithField("path", sg.dir). + WithError(err). + Errorf("compaction failed") + } else if !compacted { + sg.logger.WithField("action", "lsm_compaction"). + WithField("path", sg.dir). + Trace("no segments eligible for compaction") + } + return compacted + } + cleanup := func() bool { + sg.lastCleanupCall = time.Now() + cleaned, err := sg.segmentCleaner.cleanupOnce(shouldAbort) + if err != nil { + sg.logger.WithField("action", "lsm_cleanup"). + WithField("path", sg.dir). + WithError(err). + Errorf("cleanup failed") + } + return cleaned + } + + // alternatively run compaction or cleanup first + // if 1st one called succeeds, 2nd one is skipped, otherwise 2nd one is called as well + // + // compaction has the precedence over cleanup, however if cleanup + // was not called for over [forceCleanupInterval], force at least one execution + // in between compactions. + // (ignore if compaction was not called within that time either) + forceCleanupInterval := time.Hour * 12 + + if time.Since(sg.lastCleanupCall) > forceCleanupInterval && sg.lastCleanupCall.Before(sg.lastCompactionCall) { + return cleanup() || compact() + } + return compact() || cleanup() +} + +func (sg *SegmentGroup) Len() int { + segments, release := sg.getAndLockSegments() + defer release() + + return len(segments) +} + +func (sg *SegmentGroup) GetAveragePropertyLength() (float64, uint64) { + segments, release := sg.getAndLockSegments() + defer release() + + if len(segments) == 0 { + return 0, 0 + } + + totalDocCount := uint64(0) + for _, segment := range segments { + invertedData := segment.getInvertedData() + totalDocCount += invertedData.avgPropertyLengthsCount + } + + if totalDocCount == 0 { + return defaultAveragePropLength, 0 + } + + weightedAverage := 0.0 + for _, segment := range segments { + invertedData := segment.getInvertedData() + weightedAverage += float64(invertedData.avgPropertyLengthsCount) / 
float64(totalDocCount) * invertedData.avgPropertyLengthsAvg + } + + return weightedAverage, totalDocCount +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_group_cleanup.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_group_cleanup.go new file mode 100644 index 0000000000000000000000000000000000000000..c7ed39866a162a9145a07dad27353b21e1b1f433 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_group_cleanup.go @@ -0,0 +1,681 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "encoding/binary" + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/pkg/errors" + + "github.com/sirupsen/logrus" + "github.com/weaviate/weaviate/entities/cyclemanager" + "github.com/weaviate/weaviate/entities/diskio" + bolt "go.etcd.io/bbolt" +) + +const ( + cleanupDbFileName = "cleanup.db.bolt" + emptyIdx = -1 + minCleanupSizePercent = 10 +) + +var ( + cleanupDbBucketSegments = []byte("segments") + cleanupDbBucketMeta = []byte("meta") + cleanupDbKeyMetaNextAllowedTs = []byte("nextAllowedTs") +) + +type segmentCleaner interface { + close() error + cleanupOnce(shouldAbort cyclemanager.ShouldAbortCallback) (cleaned bool, err error) +} + +func newSegmentCleaner(sg *SegmentGroup) (segmentCleaner, error) { + if sg.cleanupInterval <= 0 { + return &segmentCleanerNoop{}, nil + } + + switch sg.strategy { + case StrategyReplace: + cleaner := &segmentCleanerCommon{sg: sg} + if err := cleaner.init(); err != nil { + return nil, err + } + return cleaner, nil + case StrategyMapCollection, + StrategySetCollection, + StrategyRoaringSet, + StrategyRoaringSetRange, + StrategyInverted: + return &segmentCleanerNoop{}, nil + 
default: + return nil, fmt.Errorf("unrecognized strategy %q", sg.strategy) + } +} + +// ================================================================ + +type segmentCleanerNoop struct{} + +func (c *segmentCleanerNoop) close() error { + return nil +} + +func (c *segmentCleanerNoop) cleanupOnce(shouldAbort cyclemanager.ShouldAbortCallback) (bool, error) { + return false, nil +} + +// ================================================================ + +// segmentCleanerCommon uses bolt db to persist data relevant to cleanup +// progress. +// db is stored in file named [cleanupDbFileName] in bucket directory, next to +// segment files. +// +// db uses 2 buckets: +// - [cleanupDbBucketMeta] to store global cleanup data +// - [cleanupDbBucketSegments] to store each segments cleanup data +// +// [cleanupDbBucketMeta] holds single key [cleanupDbKeyMetaNextAllowedTs] with value of +// timestamp of earliest of last segments' cleanups or last execution timestamp of findCandidate +// if no eligible cleanup candidate was found. +// [cleanupDbBucketSegments] holds multiple keys (being segment ids) with values being combined: +// - timestamp of current segment's cleanup +// - segmentId of last segment used in current segment's cleanup +// - size of current segment after cleanup +// Entries of segmentIds of segments that were removed (left segments after compaction) +// are regularly removed from cleanup db while next cleanup candidate is searched. +// +// cleanupInterval indicates minimal interval that have to pass for segment to be cleaned again. +// Each segment has stored its last cleanup timestamp in cleanup bolt db. +// Additionally "global" earliest cleanup timestamp is stored ([cleanupDbKeyMetaNextAllowedTs]) +// or last execution timestamp of findCandiate method. This timeout is used to quickly exit +// findCandidate method without necessity to verify if interval passed for each segment. 
type segmentCleanerCommon struct {
	sg *SegmentGroup
	db *bolt.DB
}

// init opens (or creates) the cleanup bolt db in the segment group's
// directory and ensures both the segments and meta buckets exist.
func (c *segmentCleanerCommon) init() error {
	path := filepath.Join(c.sg.dir, cleanupDbFileName)
	var db *bolt.DB
	var err error

	if db, err = bolt.Open(path, 0o600, nil); err != nil {
		return fmt.Errorf("open cleanup bolt db %q: %w", path, err)
	}

	if err = db.Update(func(tx *bolt.Tx) error {
		if _, err := tx.CreateBucketIfNotExists(cleanupDbBucketSegments); err != nil {
			return err
		}
		if _, err := tx.CreateBucketIfNotExists(cleanupDbBucketMeta); err != nil {
			return err
		}
		return nil
	}); err != nil {
		return fmt.Errorf("create bucket cleanup bolt db %q: %w", path, err)
	}

	c.db = db
	return nil
}

// close closes the cleanup bolt db opened by init.
func (c *segmentCleanerCommon) close() error {
	if err := c.db.Close(); err != nil {
		return fmt.Errorf("close cleanup bolt db %q: %w", c.db.Path(), err)
	}
	return nil
}

// findCandidate returns the index of the segment that should be cleaned next,
// the index of the first newer segment to start cleanup from, the index of the
// last newer segment to finish cleanup on, a callback to be executed after
// cleanup completed successfully, and an error in case of issues while finding
// the candidate. All indexes are emptyIdx (and the callback nil) when no
// candidate is currently eligible.
func (c *segmentCleanerCommon) findCandidate() (int, int, int, onCompletedFunc, error) {
	nowTs := time.Now().UnixNano()
	// a segment becomes eligible again only once cleanupInterval has passed
	// since its previous cleanup
	nextAllowedTs := nowTs - int64(c.sg.cleanupInterval)
	nextAllowedStoredTs := c.readNextAllowed()

	if nextAllowedStoredTs > nextAllowedTs {
		// too soon for next cleanup
		return emptyIdx, emptyIdx, emptyIdx, nil, nil
	}

	ids, sizes, err := c.getSegmentIdsAndSizes()
	if err != nil {
		return emptyIdx, emptyIdx, emptyIdx, nil, err
	}
	if count := len(ids); count <= 1 {
		// too few segments for cleanup, update next allowed timestamp for cleanup to now
		if err := c.storeNextAllowed(nowTs); err != nil {
			return emptyIdx, emptyIdx, emptyIdx, nil, err
		}
		return emptyIdx, emptyIdx, emptyIdx, nil, nil
	}

	// get idx and cleanup timestamp of earliest cleaned segment,
	// take the opportunity to find obsolete segment keys to be deleted later from cleanup db
	candidateIdx, startIdx, lastIdx, earliestCleanedTs, nonExistentSegmentKeys := c.readEarliestCleaned(ids, sizes, nowTs)

	if err := c.deleteSegmentMetas(nonExistentSegmentKeys); err != nil {
		return emptyIdx, emptyIdx, emptyIdx, nil, err
	}

	if candidateIdx != emptyIdx && earliestCleanedTs <= nextAllowedTs {
		// candidate found and ready for cleanup
		id := ids[candidateIdx]
		lastProcessedId := ids[len(ids)-1]
		// persisted only on success: records cleanup time, the newest segment
		// consulted, and the segment's post-cleanup size
		onCompleted := func(size int64) error {
			return c.storeSegmentMeta(id, lastProcessedId, size, nowTs)
		}
		return candidateIdx, startIdx, lastIdx, onCompleted, nil
	}

	// candidate not found or not ready for cleanup, update next allowed timestamp to earliest cleaned segment
	// (which is "now" if candidate was not found)
	if err := c.storeNextAllowed(earliestCleanedTs); err != nil {
		return emptyIdx, emptyIdx, emptyIdx, nil, err
	}

	return emptyIdx, emptyIdx, emptyIdx, nil, nil
}

// getSegmentIdsAndSizes snapshots the numeric ids (parsed from segment file
// names) and sizes of the current segments, in segment order. Both slices stay
// nil when there are fewer than two segments, since cleanup needs at least two.
func (c *segmentCleanerCommon) getSegmentIdsAndSizes() ([]int64, []int64, error) {
	segments, release := c.sg.getAndLockSegments()
	defer release()

	var ids []int64
	var sizes []int64
	if count := len(segments); count > 1 {
		ids = make([]int64, count)
		sizes = make([]int64, count)

		for i, seg := range segments {
			idStr := segmentID(seg.getPath())
			id, err := strconv.ParseInt(idStr, 10, 64)
			if err != nil {
				return nil, nil, fmt.Errorf("parse segment id %q: %w", idStr, err)
			}
			ids[i] = id
			sizes[i] = seg.getSize()
		}
	}

	return ids, sizes, nil
}

// readNextAllowed reads the stored [cleanupDbKeyMetaNextAllowedTs] timestamp.
// The View error is ignored: a missing or unreadable value falls back to 0,
// which findCandidate treats as "cleanup allowed".
func (c *segmentCleanerCommon) readNextAllowed() int64 {
	ts := int64(0)
	c.db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket(cleanupDbBucketMeta)
		v := b.Get(cleanupDbKeyMetaNextAllowedTs)
		if v != nil {
			ts = int64(binary.BigEndian.Uint64(v))
		}
		return nil
	})
	return ts
}

// storeNextAllowed persists ts as the new [cleanupDbKeyMetaNextAllowedTs]
// value (big-endian uint64).
func (c *segmentCleanerCommon) storeNextAllowed(ts int64) error {
	if err := c.db.Update(func(tx *bolt.Tx) error {
		b := tx.Bucket(cleanupDbBucketMeta)
		bufV := make([]byte, 8)

		binary.BigEndian.PutUint64(bufV, uint64(ts))
		return b.Put(cleanupDbKeyMetaNextAllowedTs, bufV)
	}); err != nil {
		return fmt.Errorf("updating cleanup bolt db %q: %w", c.db.Path(), err)
	}
	return nil
}

// deleteSegmentMetas removes the given segment keys (entries of segments that
// no longer exist in the filesystem) from the segments bucket in one
// transaction. A no-op for an empty key list.
func (c *segmentCleanerCommon) deleteSegmentMetas(segIds [][]byte) error {
	if len(segIds) > 0 {
		if err := c.db.Update(func(tx *bolt.Tx) error {
			b := tx.Bucket(cleanupDbBucketSegments)
			for _, k := range segIds {
				if err := b.Delete(k); err != nil {
					return err
				}
			}
			return nil
		}); err != nil {
			return fmt.Errorf("deleting from cleanup bolt db %q: %w", c.db.Path(), err)
		}
	}
	return nil
}

// readEarliestCleaned, based on data stored in the cleanup bolt db and the
// segments existing in the filesystem, returns:
// - index of candidate segment best suitable for cleanup,
// - index of segment, cleanup of candidate should be started from,
// - index of segment, cleanup of candidate should be finished on,
// - time of previous candidate's cleanup,
// - list of segmentIds stored in cleanup bolt db that no longer exist in filesystem.
//
// First candidate to be returned is a segment that was not cleaned before (if multiple
// uncleaned segments exist - the oldest one is returned).
// If there is no uncleaned segment, the segment that was cleaned the earliest is returned.
// For a segment already cleaned before to be returned, new segments must have been created
// after the previous cleanup and the sum of their sizes should be greater than
// [minCleanupSizePercent] percent of the size of the cleaned segment, to increase the
// chance of the segment being actually cleaned, not just copied.
+func (c *segmentCleanerCommon) readEarliestCleaned(ids, sizes []int64, nowTs int64, +) (int, int, int, int64, [][]byte) { + earliestCleanedTs := nowTs + candidateIdx := emptyIdx + startIdx := emptyIdx + lastIdx := emptyIdx + + count := len(ids) + nonExistentSegmentKeys := [][]byte{} + emptyId := int64(-1) + + c.db.View(func(tx *bolt.Tx) error { + b := tx.Bucket(cleanupDbBucketSegments) + cur := b.Cursor() + + // Loop through all segmentIds, the ones stored in cleanup db (cur) + // and ones currently existing in filesystem (ids). + // Note: both sets of segmentIds may have unique elements: + // - cursor can contain segmentIds of segments already removed (by compaction) + // - ids can contain segmentIds of newly created segments + // Note: both sets are ordered, therefore in case of one element is missing + // in set, only this set advances to next element + idx := 0 + key, val := cur.First() + for idx < count-1 || key != nil { + id := emptyId + storedId := emptyId + + if idx < count-1 { + id = ids[idx] + } + if key != nil { + storedId = int64(binary.BigEndian.Uint64(key)) + } + + // segment with segmentId stored in cleanup db (storedId) no longer exists, + if id == emptyId || (storedId != emptyId && id > storedId) { + // entry to be deleted + nonExistentSegmentKeys = append(nonExistentSegmentKeys, key) + // advance cursor + key, val = cur.Next() + continue + } + + // segment with segmentId in filesystem (id) has no entry in cleanup db, + if storedId == emptyId || (id != emptyId && id < storedId) { + // as segment was not cleaned before (timestamp == 0), it becomes best + // candidate for next cleanup. 
+ // (if there are more segments not yet cleaned, 1st one is selected) + if earliestCleanedTs > 0 { + earliestCleanedTs = 0 + candidateIdx = idx + startIdx = idx + 1 + lastIdx = count - 1 + } + // advance index + idx++ + continue + } + + // segmentId present in both sets, had to be cleaned before + // id == cid + + storedCleanedTs := int64(binary.BigEndian.Uint64(val[0:8])) + // check if cleaned before current candidate + if earliestCleanedTs > storedCleanedTs { + lastId := ids[count-1] + storedLastId := int64(binary.BigEndian.Uint64(val[8:16])) + // check if new segments created after last cleanup + if storedLastId < lastId { + // last segment's id in filesystem is higher than last id used for cleanup + size := sizes[idx] + storedSize := int64(binary.BigEndian.Uint64(val[16:24])) + + // In general segment could be cleaned considering only segments created + // after its last cleanup. One exception is when segment was compacted + // (previous and current sizes differ). + // As after compaction cleanup db will contain only entry of right segment, + // not the left one, it is unknown what was last segment used for cleanup of removed + // left segment, therefore compacted segment will be cleaned again using all newer segments. 
+ possibleStartIdx := idx + 1 + // in case of using segments that were already used for cleanup, process them in reverse + // order starting with newest ones, to maximize the chance of finding redundant entries + // as soon as possible (leaving segments that were already used for cleanup as last ones) + reverseOrder := true + if size == storedSize { + reverseOrder = false + // size not changed (not compacted), clean using only newly created segments, + // skipping segments already processed in previous cleanup + for i := idx + 1; i < count; i++ { + possibleStartIdx = i + if ids[i] > storedLastId { + break + } + } + } + + // segment should be cleaned only if sum of sizes of segments to be cleaned + // with exceeds [minCleanupSizePercent] of its current size, to increase + // probability of redunand keys. + sumSize := int64(0) + for i := possibleStartIdx; i < count; i++ { + sumSize += sizes[i] + } + if size*minCleanupSizePercent/100 <= sumSize { + earliestCleanedTs = storedCleanedTs + candidateIdx = idx + startIdx = possibleStartIdx + lastIdx = count - 1 + + if reverseOrder { + startIdx, lastIdx = lastIdx, startIdx + } + } + } + } + // advance cursor and index + key, val = cur.Next() + idx++ + } + return nil + }) + return candidateIdx, startIdx, lastIdx, earliestCleanedTs, nonExistentSegmentKeys +} + +func (c *segmentCleanerCommon) storeSegmentMeta(id, lastProcessedId, size, cleanedTs int64) error { + bufK := make([]byte, 8) + binary.BigEndian.PutUint64(bufK, uint64(id)) + + bufV := make([]byte, 24) + binary.BigEndian.PutUint64(bufV[0:8], uint64(cleanedTs)) + binary.BigEndian.PutUint64(bufV[8:16], uint64(lastProcessedId)) + binary.BigEndian.PutUint64(bufV[16:24], uint64(size)) + + if err := c.db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket(cleanupDbBucketSegments) + return b.Put(bufK, bufV) + }); err != nil { + return fmt.Errorf("updating cleanup bolt db %q: %w", c.db.Path(), err) + } + return nil +} + +func (c *segmentCleanerCommon) cleanupOnce(shouldAbort 
cyclemanager.ShouldAbortCallback, +) (bool, error) { + if c.sg.isReadyOnly() { + return false, nil + } + + var err error + candidateIdx, startIdx, lastIdx, onCompleted, err := c.findCandidate() + if err != nil { + return false, err + } + if candidateIdx == emptyIdx { + return false, nil + } + + if c.sg.allocChecker != nil { + // allocChecker is optional + if err := c.sg.allocChecker.CheckAlloc(100 * 1024 * 1024); err != nil { + // if we don't have at least 100MB to spare, don't start a cleanup. A + // cleanup does not actually need a 100MB, but it will create garbage + // that needs to be cleaned up. If we're so close to the memory limit, we + // can increase stability by preventing anything that's not strictly + // necessary. Cleanup can simply resume when the cluster has been + // scaled. + c.sg.logger.WithFields(logrus.Fields{ + "action": "lsm_cleanup", + "event": "cleanup_skipped_oom", + "path": c.sg.dir, + }).WithError(err). + Warnf("skipping cleanup due to memory pressure") + + return false, nil + } + } + + if shouldAbort() { + c.sg.logger.WithFields(logrus.Fields{ + "action": "lsm_cleanup", + "path": c.sg.dir, + }).Warnf("skipping cleanup due to shouldAbort") + return false, nil + } + + oldSegment := c.sg.segmentAtPos(candidateIdx) + segmentId := segmentID(oldSegment.path) + tmpSegmentPath := filepath.Join(c.sg.dir, "segment-"+segmentId+segmentExtraInfo(oldSegment.level, oldSegment.strategy)+".db.tmp") + scratchSpacePath := oldSegment.path + "cleanup.scratch.d" + + start := time.Now() + c.sg.logger.WithFields(logrus.Fields{ + "action": "lsm_cleanup", + "path": c.sg.dir, + "candidateIdx": candidateIdx, + "startIdx": startIdx, + "lastIdx": lastIdx, + "segmentId": segmentId, + }).Info("cleanup started with candidate") + defer func() { + l := c.sg.logger.WithFields(logrus.Fields{ + "action": "lsm_cleanup", + "path": c.sg.dir, + "segmentId": segmentId, + "took": time.Since(start), + }) + if err == nil { + l.Info("clenaup finished") + } else { + 
l.WithError(err).Error("cleanup failed") + } + }() + + file, err := os.Create(tmpSegmentPath) + if err != nil { + return false, err + } + + switch c.sg.strategy { + case StrategyReplace: + c := newSegmentCleanerReplace(file, oldSegment.newCursor(), + c.sg.makeKeyExistsOnUpperSegments(startIdx, lastIdx), oldSegment.level, + oldSegment.secondaryIndexCount, scratchSpacePath, c.sg.enableChecksumValidation) + if err = c.do(shouldAbort); err != nil { + return false, err + } + default: + err = fmt.Errorf("unsported strategy %q", c.sg.strategy) + return false, err + } + + if err = file.Sync(); err != nil { + err = fmt.Errorf("fsync cleaned segment file: %w", err) + return false, err + } + if err = file.Close(); err != nil { + err = fmt.Errorf("close cleaned segment file: %w", err) + return false, err + } + + segment, err := c.sg.replaceSegment(candidateIdx, tmpSegmentPath) + if err != nil { + err = fmt.Errorf("replace compacted segments: %w", err) + return false, err + } + if err = onCompleted(segment.size); err != nil { + err = fmt.Errorf("callback cleaned segment file: %w", err) + return false, err + } + + return true, nil +} + +type onCompletedFunc func(size int64) error + +// ================================================================ + +type keyExistsOnUpperSegmentsFunc func(key []byte) (bool, error) + +func (sg *SegmentGroup) makeKeyExistsOnUpperSegments(startIdx, lastIdx int) keyExistsOnUpperSegmentsFunc { + return func(key []byte) (bool, error) { + // asc order by default + i := startIdx + updateI := func() { i++ } + if startIdx > lastIdx { + // dest order + i = lastIdx + updateI = func() { i-- } + } + + segAtPos := func() *segment { + segments, release := sg.getAndLockSegments() + defer release() + + if i >= startIdx && i <= lastIdx { + j := i + updateI() + return segments[j].getSegment() + } + return nil + } + + for seg := segAtPos(); seg != nil; seg = segAtPos() { + if exists, err := seg.exists(key); err != nil { + return false, err + } else if exists { + 
return true, nil + } + } + return false, nil + } +} + +func (sg *SegmentGroup) replaceSegment(segmentIdx int, tmpSegmentPath string, +) (*segment, error) { + oldSegment := sg.segmentAtPos(segmentIdx) + countNetAdditions := oldSegment.countNetAdditions + + // as a guardrail validate that the segment is considered a .tmp segment. + // This way we can be sure that we're not accidentally operating on a live + // segment as the segment group completely ignores .tmp segment files + if !strings.HasSuffix(tmpSegmentPath, ".tmp") { + return nil, fmt.Errorf("pre computing a segment expects a .tmp segment path") + } + + seg, err := newSegment(tmpSegmentPath, sg.logger, sg.metrics, nil, + segmentConfig{ + mmapContents: sg.mmapContents, + useBloomFilter: sg.useBloomFilter, + calcCountNetAdditions: sg.calcCountNetAdditions, + overwriteDerived: true, + enableChecksumValidation: sg.enableChecksumValidation, + MinMMapSize: sg.MinMMapSize, + allocChecker: sg.allocChecker, + precomputedCountNetAdditions: &countNetAdditions, + writeMetadata: sg.writeMetadata, + }) + if err != nil { + return nil, fmt.Errorf("precompute segment meta: %w", err) + } + + newSegment, err := sg.replaceSegmentBlocking(segmentIdx, oldSegment, seg) + if err != nil { + return nil, fmt.Errorf("replace segment (blocking): %w", err) + } + + if err := sg.deleteOldSegmentsNonBlocking(oldSegment); err != nil { + // don't abort if the delete fails, we can still continue (albeit + // without freeing disk space that should have been freed). The + // compaction itself was successful. 
+ sg.logger.WithError(err).WithFields(logrus.Fields{ + "action": "lsm_replace_segments_delete_file", + "file": oldSegment.path, + }).Error("failed to delete file already marked for deletion") + } + + return newSegment, nil +} + +func (sg *SegmentGroup) replaceSegmentBlocking( + segmentIdx int, oldSegment *segment, newSegment *segment, +) (*segment, error) { + sg.maintenanceLock.Lock() + defer sg.maintenanceLock.Unlock() + + start := time.Now() + + if err := oldSegment.close(); err != nil { + return nil, fmt.Errorf("close disk segment %q: %w", oldSegment.path, err) + } + if err := oldSegment.markForDeletion(); err != nil { + return nil, fmt.Errorf("drop disk segment %q: %w", oldSegment.path, err) + } + if err := diskio.Fsync(sg.dir); err != nil { + return nil, fmt.Errorf("fsync segment directory %q: %w", sg.dir, err) + } + + segmentId := segmentID(oldSegment.path) + newPath, err := sg.stripTmpExtension(newSegment.path, segmentId, segmentId) + if err != nil { + return nil, errors.Wrap(err, "strip .tmp extension of new segment") + } + newSegment.path = newPath + + // the old segment have been deleted, we can now safely remove the .tmp + // extension from the new segment itself and the pre-computed files + for i, tmpPath := range newSegment.metaPaths { + path, err := sg.stripTmpExtension(tmpPath, segmentId, segmentId) + if err != nil { + return nil, fmt.Errorf("strip .tmp extension of new segment %q: %w", tmpPath, err) + } + newSegment.metaPaths[i] = path + } + + sg.segments[segmentIdx] = newSegment + + sg.observeReplaceDuration(start, segmentIdx, oldSegment, newSegment) + return newSegment, nil +} + +func (sg *SegmentGroup) observeReplaceDuration( + start time.Time, segmentIdx int, oldSegment, newSegment *segment, +) { + // observe duration - warn if it took too long + took := time.Since(start) + fields := sg.logger.WithFields(logrus.Fields{ + "action": "lsm_replace_segment_blocking", + "segment_index": segmentIdx, + "path_old": oldSegment.path, + "path_new": 
newSegment.path, + "took": took, + }) + msg := fmt.Sprintf("replacing segment took %s", took) + if took > replaceSegmentWarnThreshold { + fields.Warn(msg) + } else { + fields.Debug(msg) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_group_cleanup_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_group_cleanup_test.go new file mode 100644 index 0000000000000000000000000000000000000000..4ec578fd03c0dde7451932c4560d1379b13cb29f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_group_cleanup_test.go @@ -0,0 +1,847 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "encoding/binary" + "path/filepath" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + bolt "go.etcd.io/bbolt" +) + +func TestSegmentGroup_CleanupCandidates(t *testing.T) { + assertSegment := func(t *testing.T, sg *SegmentGroup, idx int, expectedName string, expectedSize int64) { + seg := sg.segments[idx].getSegment() + assert.Equal(t, filepath.Join(sg.dir, expectedName), seg.path) + assert.Equal(t, expectedSize, seg.size) + } + requireCandidateFound := func(t *testing.T, idx, expIdx, startIdx, expStartIdx, lastIdx, expLastIdx int) { + require.Equal(t, expIdx, idx) + require.Equal(t, expStartIdx, startIdx) + require.Equal(t, expLastIdx, lastIdx) + } + requireCandidateNotFound := func(t *testing.T, idx, startIdx, lastIdx int) { + require.Equal(t, emptyIdx, idx) + require.Equal(t, emptyIdx, startIdx) + require.Equal(t, emptyIdx, lastIdx) + } + assertBoltDbKeys := func(t *testing.T, db *bolt.DB, expectedIds []int64) { + ids := make([]int64, 0, len(expectedIds)) + + db.View(func(tx 
*bolt.Tx) error { + b := tx.Bucket(cleanupDbBucketSegments) + c := b.Cursor() + + for ck, _ := c.First(); ck != nil; ck, _ = c.Next() { + ids = append(ids, int64(binary.BigEndian.Uint64(ck))) + } + return nil + }) + + assert.ElementsMatch(t, expectedIds, ids) + } + + t.Run("no segments", func(t *testing.T) { + dir := t.TempDir() + sg := &SegmentGroup{ + dir: dir, + segments: []Segment{}, + cleanupInterval: time.Millisecond * 75, + } + sc := &segmentCleanerCommon{sg: sg} + require.NoError(t, sc.init()) + defer sc.close() + + idx, startIdx, lastIdx, onCompleted, err := sc.findCandidate() + require.NoError(t, err) + requireCandidateNotFound(t, idx, startIdx, lastIdx) + assert.Nil(t, onCompleted) + assertBoltDbKeys(t, sc.db, []int64{}) + }) + + t.Run("single segment", func(t *testing.T) { + dir := t.TempDir() + sg := &SegmentGroup{ + dir: dir, + segments: []Segment{ + &segment{ + path: filepath.Join(dir, "segment-0001.db"), + size: 10001, + }, + }, + cleanupInterval: time.Millisecond * 75, + } + sc := &segmentCleanerCommon{sg: sg} + require.NoError(t, sc.init()) + defer sc.close() + + idx, startIdx, lastIdx, onCompleted, err := sc.findCandidate() + require.NoError(t, err) + requireCandidateNotFound(t, idx, startIdx, lastIdx) + assert.Nil(t, onCompleted) + assertBoltDbKeys(t, sc.db, []int64{}) + }) + + t.Run("multilpe segments, segments in order oldest to newest, last one skipped", func(t *testing.T) { + dir := t.TempDir() + sg := &SegmentGroup{ + dir: dir, + segments: []Segment{ + &segment{ + path: filepath.Join(dir, "segment-0001.db"), + size: 10001, + }, + &segment{ + path: filepath.Join(dir, "segment-0002.db"), + size: 10002, + }, + &segment{ + path: filepath.Join(dir, "segment-0003.db"), + size: 10003, + }, + &segment{ + path: filepath.Join(dir, "segment-0004.db"), + size: 10004, + }, + }, + cleanupInterval: time.Millisecond * 75, + } + sc := &segmentCleanerCommon{sg: sg} + sc.init() + defer sc.close() + + idx1, startIdx1, lastIdx1, onCompleted1, err1 := 
sc.findCandidate() + require.NoError(t, err1) + requireCandidateFound(t, idx1, 0, startIdx1, 1, lastIdx1, 3) + assertSegment(t, sg, idx1, "segment-0001.db", 10001) + require.NotNil(t, onCompleted1) + onCompleted1(9001) + sg.segments[idx1].setSize(9001) + + idx2, startIdx2, lastIdx2, onCompleted2, err2 := sc.findCandidate() + require.NoError(t, err2) + requireCandidateFound(t, idx2, 1, startIdx2, 2, lastIdx2, 3) + assertSegment(t, sg, idx2, "segment-0002.db", 10002) + require.NotNil(t, onCompleted2) + onCompleted2(9002) + sg.segments[idx2].setSize(9002) + + idx3, startIdx3, lastIdx3, onCompleted3, err3 := sc.findCandidate() + require.NoError(t, err3) + requireCandidateFound(t, idx3, 2, startIdx3, 3, lastIdx3, 3) + assertSegment(t, sg, idx3, "segment-0003.db", 10003) + require.NotNil(t, onCompleted3) + onCompleted3(9003) + sg.segments[idx3].setSize(9003) + + idx4, startIdx4, lastIdx4, onCompleted4, err4 := sc.findCandidate() + require.NoError(t, err4) + requireCandidateNotFound(t, idx4, startIdx4, lastIdx4) + assert.Nil(t, onCompleted4) + + assertBoltDbKeys(t, sc.db, []int64{1, 2, 3}) + }) + + t.Run("multilpe segments, no candidates after interval if no new segments", func(t *testing.T) { + dir := t.TempDir() + sg := &SegmentGroup{ + dir: dir, + segments: []Segment{ + &segment{ + path: filepath.Join(dir, "segment-0001.db"), + size: 10001, + }, + &segment{ + path: filepath.Join(dir, "segment-0002.db"), + size: 10002, + }, + &segment{ + path: filepath.Join(dir, "segment-0003.db"), + size: 10003, + }, + &segment{ + path: filepath.Join(dir, "segment-0004.db"), + size: 10004, + }, + }, + cleanupInterval: time.Millisecond * 75, + } + sc := &segmentCleanerCommon{sg: sg} + require.NoError(t, sc.init()) + defer sc.close() + + t.Run("1st round, all but last cleaned", func(t *testing.T) { + idx1, startIdx1, lastIdx1, onCompleted1, err1 := sc.findCandidate() + require.NoError(t, err1) + requireCandidateFound(t, idx1, 0, startIdx1, 1, lastIdx1, 3) + assertSegment(t, sg, idx1, 
"segment-0001.db", 10001) + require.NotNil(t, onCompleted1) + onCompleted1(9001) + sg.segments[idx1].setSize(9001) + + idx2, startIdx2, lastIdx2, onCompleted2, err2 := sc.findCandidate() + require.NoError(t, err2) + requireCandidateFound(t, idx2, 1, startIdx2, 2, lastIdx2, 3) + assertSegment(t, sg, idx2, "segment-0002.db", 10002) + require.NotNil(t, onCompleted2) + onCompleted2(9002) + sg.segments[idx2].setSize(9002) + + idx3, startIdx3, lastIdx3, onCompleted3, err3 := sc.findCandidate() + require.NoError(t, err3) + requireCandidateFound(t, idx3, 2, startIdx3, 3, lastIdx3, 3) + assertSegment(t, sg, idx3, "segment-0003.db", 10003) + require.NotNil(t, onCompleted3) + onCompleted3(9003) + sg.segments[idx3].setSize(9003) + + idx4, startIdx4, lastIdx4, onCompleted4, err4 := sc.findCandidate() + require.NoError(t, err4) + requireCandidateNotFound(t, idx4, startIdx4, lastIdx4) + assert.Nil(t, onCompleted4) + }) + + t.Run("no candidates before interval", func(t *testing.T) { + idx, startIdx, lastIdx, onCompleted, err := sc.findCandidate() + require.NoError(t, err) + requireCandidateNotFound(t, idx, startIdx, lastIdx) + assert.Nil(t, onCompleted) + }) + + t.Run("wait interval for next round", func(t *testing.T) { + time.Sleep(sg.cleanupInterval * 3 / 2) + }) + + t.Run("2nd round, no candiates due to no new segments", func(t *testing.T) { + idx, startIdx, lastIdx, onCompleted, err := sc.findCandidate() + require.NoError(t, err) + requireCandidateNotFound(t, idx, startIdx, lastIdx) + assert.Nil(t, onCompleted) + }) + + assertBoltDbKeys(t, sc.db, []int64{1, 2, 3}) + }) + + t.Run("multilpe segments, candidates after interval if new segments created", func(t *testing.T) { + dir := t.TempDir() + sg := &SegmentGroup{ + dir: dir, + segments: []Segment{ + &segment{ + path: filepath.Join(dir, "segment-0001.db"), + size: 10001, + }, + &segment{ + path: filepath.Join(dir, "segment-0002.db"), + size: 10002, + }, + &segment{ + path: filepath.Join(dir, "segment-0003.db"), + size: 10003, + 
}, + &segment{ + path: filepath.Join(dir, "segment-0004.db"), + size: 10004, + }, + }, + cleanupInterval: time.Millisecond * 75, + } + sc := &segmentCleanerCommon{sg: sg} + require.NoError(t, sc.init()) + defer sc.close() + + t.Run("1st round, all but last cleaned", func(t *testing.T) { + idx1, startIdx1, lastIdx1, onCompleted1, err1 := sc.findCandidate() + require.NoError(t, err1) + requireCandidateFound(t, idx1, 0, startIdx1, 1, lastIdx1, 3) + assertSegment(t, sg, idx1, "segment-0001.db", 10001) + require.NotNil(t, onCompleted1) + onCompleted1(9001) + sg.segments[idx1].setSize(9001) + + idx2, startIdx2, lastIdx2, onCompleted2, err2 := sc.findCandidate() + require.NoError(t, err2) + requireCandidateFound(t, idx2, 1, startIdx2, 2, lastIdx2, 3) + assertSegment(t, sg, idx2, "segment-0002.db", 10002) + require.NotNil(t, onCompleted2) + onCompleted2(9002) + sg.segments[idx2].setSize(9002) + + idx3, startIdx3, lastIdx3, onCompleted3, err3 := sc.findCandidate() + require.NoError(t, err3) + requireCandidateFound(t, idx3, 2, startIdx3, 3, lastIdx3, 3) + assertSegment(t, sg, idx3, "segment-0003.db", 10003) + require.NotNil(t, onCompleted3) + onCompleted3(9003) + sg.segments[idx3].setSize(9003) + + idx4, startIdx4, lastIdx4, onCompleted4, err4 := sc.findCandidate() + require.NoError(t, err4) + requireCandidateNotFound(t, idx4, startIdx4, lastIdx4) + assert.Nil(t, onCompleted4) + }) + + t.Run("no candidates before interval", func(t *testing.T) { + idx, startIdx, lastIdx, onCompleted, err := sc.findCandidate() + require.NoError(t, err) + requireCandidateNotFound(t, idx, startIdx, lastIdx) + assert.Nil(t, onCompleted) + }) + + t.Run("wait interval for next round", func(t *testing.T) { + time.Sleep(sg.cleanupInterval * 3 / 2) + }) + + t.Run("new segments created", func(t *testing.T) { + sg.segments = append(sg.segments, + &segment{ + path: filepath.Join(dir, "segment-0005.db"), + size: 10005, + observeMetaWrite: func(n int64) {}, + }, + &segment{ + path: filepath.Join(dir, 
"segment-0006.db"), + size: 10006, + observeMetaWrite: func(n int64) {}, + }, + ) + }) + + t.Run("2nd round, new candidates then same candiates again", func(t *testing.T) { + idx1, startIdx1, lastIdx1, onCompleted1, err1 := sc.findCandidate() + require.NoError(t, err1) + requireCandidateFound(t, idx1, 3, startIdx1, 4, lastIdx1, 5) + assertSegment(t, sg, idx1, "segment-0004.db", 10004) + require.NotNil(t, onCompleted1) + onCompleted1(9004) + sg.segments[idx1].setSize(9004) + + idx2, startIdx2, lastIdx2, onCompleted2, err2 := sc.findCandidate() + require.NoError(t, err2) + requireCandidateFound(t, idx2, 4, startIdx2, 5, lastIdx2, 5) + assertSegment(t, sg, idx2, "segment-0005.db", 10005) + require.NotNil(t, onCompleted2) + onCompleted2(9005) + sg.segments[idx2].setSize(9005) + + idx3, startIdx3, lastIdx3, onCompleted3, err3 := sc.findCandidate() + require.NoError(t, err3) + requireCandidateFound(t, idx3, 0, startIdx3, 4, lastIdx3, 5) + assertSegment(t, sg, idx3, "segment-0001.db", 9001) + require.NotNil(t, onCompleted3) + onCompleted3(8001) + sg.segments[idx3].setSize(8001) + + idx4, startIdx4, lastIdx4, onCompleted4, err4 := sc.findCandidate() + require.NoError(t, err4) + requireCandidateFound(t, idx4, 1, startIdx4, 4, lastIdx4, 5) + assertSegment(t, sg, idx4, "segment-0002.db", 9002) + require.NotNil(t, onCompleted4) + onCompleted4(8002) + sg.segments[idx4].setSize(8002) + + idx5, startIdx5, lastIdx5, onCompleted5, err5 := sc.findCandidate() + require.NoError(t, err5) + requireCandidateFound(t, idx5, 2, startIdx5, 4, lastIdx5, 5) + assertSegment(t, sg, idx5, "segment-0003.db", 9003) + require.NotNil(t, onCompleted5) + onCompleted5(8003) + sg.segments[idx5].setSize(8003) + + idx6, startIdx6, lastIdx6, onCompleted6, err6 := sc.findCandidate() + require.NoError(t, err6) + requireCandidateNotFound(t, idx6, startIdx6, lastIdx6) + assert.Nil(t, onCompleted6) + }) + + t.Run("wait interval for next round", func(t *testing.T) { + time.Sleep(sg.cleanupInterval * 3 / 2) + }) + 
+ t.Run("3rd round, no candidates due to no new segments", func(t *testing.T) { + idx, startIdx, lastIdx, onCompleted, err := sc.findCandidate() + require.NoError(t, err) + requireCandidateNotFound(t, idx, startIdx, lastIdx) + assert.Nil(t, onCompleted) + }) + + assertBoltDbKeys(t, sc.db, []int64{1, 2, 3, 4, 5}) + }) + + t.Run("multilpe segments, candidates after interval dependant on new segments sizes", func(t *testing.T) { + dir := t.TempDir() + sg := &SegmentGroup{ + dir: dir, + segments: []Segment{ + &segment{ + path: filepath.Join(dir, "segment-0001.db"), + size: 10001, + }, + &segment{ + path: filepath.Join(dir, "segment-0002.db"), + size: 10002, + }, + &segment{ + path: filepath.Join(dir, "segment-0003.db"), + size: 10003, + }, + &segment{ + path: filepath.Join(dir, "segment-0004.db"), + size: 10004, + }, + }, + cleanupInterval: time.Millisecond * 75, + } + sc := &segmentCleanerCommon{sg: sg} + require.NoError(t, sc.init()) + defer sc.close() + + t.Run("1st round, all but last cleaned", func(t *testing.T) { + // not cleaned before, cleaning considering 2+3+4 + idx1, startIdx1, lastIdx1, onCompleted1, err1 := sc.findCandidate() + require.NoError(t, err1) + requireCandidateFound(t, idx1, 0, startIdx1, 1, lastIdx1, 3) + assertSegment(t, sg, idx1, "segment-0001.db", 10001) + require.NotNil(t, onCompleted1) + onCompleted1(9001) + sg.segments[idx1].setSize(9001) + + // not cleaned before, cleaning considering 3+4 + idx2, startIdx2, lastIdx2, onCompleted2, err2 := sc.findCandidate() + require.NoError(t, err2) + requireCandidateFound(t, idx2, 1, startIdx2, 2, lastIdx2, 3) + assertSegment(t, sg, idx2, "segment-0002.db", 10002) + require.NotNil(t, onCompleted2) + onCompleted2(9002) + sg.segments[idx2].setSize(9002) + + // not cleaned before, cleaning considering 4 + idx3, startIdx3, lastIdx3, onCompleted3, err3 := sc.findCandidate() + require.NoError(t, err3) + requireCandidateFound(t, idx3, 2, startIdx3, 3, lastIdx3, 3) + assertSegment(t, sg, idx3, 
"segment-0003.db", 10003) + require.NotNil(t, onCompleted3) + onCompleted3(9003) + sg.segments[idx3].setSize(9003) + + // skipping 4 as last one + idx4, startIdx4, lastIdx4, onCompleted4, err4 := sc.findCandidate() + require.NoError(t, err4) + requireCandidateNotFound(t, idx4, startIdx4, lastIdx4) + assert.Nil(t, onCompleted4) + }) + + t.Run("no candidates before interval", func(t *testing.T) { + idx, startIdx, lastIdx, onCompleted, err := sc.findCandidate() + require.NoError(t, err) + requireCandidateNotFound(t, idx, startIdx, lastIdx) + assert.Nil(t, onCompleted) + }) + + t.Run("wait interval for next round", func(t *testing.T) { + time.Sleep(sg.cleanupInterval * 3 / 2) + }) + + t.Run("new segments created", func(t *testing.T) { + sg.segments = append(sg.segments, + &segment{ + path: filepath.Join(dir, "segment-0005.db"), + size: 405, + observeMetaWrite: func(n int64) {}, + }, + &segment{ + path: filepath.Join(dir, "segment-0006.db"), + size: 406, + observeMetaWrite: func(n int64) {}, + }, + ) + }) + + t.Run("2nd round, only new candidates due to sum of new sizes not big enough", func(t *testing.T) { + // not cleaned before, cleaning considering 5+6 + idx1, startIdx1, lastIdx1, onCompleted1, err1 := sc.findCandidate() + require.NoError(t, err1) + requireCandidateFound(t, idx1, 3, startIdx1, 4, lastIdx1, 5) + assertSegment(t, sg, idx1, "segment-0004.db", 10004) + require.NotNil(t, onCompleted1) + onCompleted1(9004) + sg.segments[idx1].setSize(9004) + + // not cleaned before, cleaning considering 6 + idx2, startIdx2, lastIdx2, onCompleted2, err2 := sc.findCandidate() + require.NoError(t, err2) + requireCandidateFound(t, idx2, 4, startIdx2, 5, lastIdx2, 5) + assertSegment(t, sg, idx2, "segment-0005.db", 405) + require.NotNil(t, onCompleted2) + onCompleted2(305) + sg.segments[idx2].setSize(305) + + // skipping 6 as last one + // skipping 1,2,3 due to sum of new sizes (5+6) not big enough compared to old segments' sizes + idx3, startIdx3, lastIdx3, onCompleted3, err3 
:= sc.findCandidate() + require.NoError(t, err3) + requireCandidateNotFound(t, idx3, startIdx3, lastIdx3) + assert.Nil(t, onCompleted3) + }) + + t.Run("wait interval for next round", func(t *testing.T) { + time.Sleep(sg.cleanupInterval * 3 / 2) + }) + + t.Run("3rd round, no candidates due to no new segments", func(t *testing.T) { + // no changes in segments + idx, startIdx, lastIdx, onCompleted, err := sc.findCandidate() + require.NoError(t, err) + requireCandidateNotFound(t, idx, startIdx, lastIdx) + assert.Nil(t, onCompleted) + }) + + t.Run("new segments created", func(t *testing.T) { + sg.segments = append(sg.segments, + &segment{ + path: filepath.Join(dir, "segment-0007.db"), + size: 407, + observeMetaWrite: func(n int64) {}, + }, + &segment{ + path: filepath.Join(dir, "segment-0008.db"), + size: 408, + observeMetaWrite: func(n int64) {}, + }, + ) + }) + + t.Run("wait interval for next round", func(t *testing.T) { + time.Sleep(sg.cleanupInterval * 3 / 2) + }) + + t.Run("4th round, new and old candidates due to sum of new sizes big enough", func(t *testing.T) { + // not cleaned before, cleaning considering 7+8 + idx1, startIdx1, lastIdx1, onCompleted1, err1 := sc.findCandidate() + require.NoError(t, err1) + requireCandidateFound(t, idx1, 5, startIdx1, 6, lastIdx1, 7) + assertSegment(t, sg, idx1, "segment-0006.db", 406) + require.NotNil(t, onCompleted1) + onCompleted1(306) + sg.segments[idx1].setSize(306) + + // not cleaned before, cleaning considering 8 + idx2, startIdx2, lastIdx2, onCompleted2, err2 := sc.findCandidate() + require.NoError(t, err2) + requireCandidateFound(t, idx2, 6, startIdx2, 7, lastIdx2, 7) + assertSegment(t, sg, idx2, "segment-0007.db", 407) + require.NotNil(t, onCompleted2) + onCompleted2(307) + sg.segments[idx2].setSize(307) + + // sum of sizes (5+6+7+8) big enough compared to segment's size, cleaning considering 5+6+7+8 + idx3, startIdx3, lastIdx3, onCompleted3, err3 := sc.findCandidate() + require.NoError(t, err3) + 
requireCandidateFound(t, idx3, 0, startIdx3, 4, lastIdx3, 7) + assertSegment(t, sg, idx3, "segment-0001.db", 9001) + require.NotNil(t, onCompleted3) + onCompleted3(8001) + sg.segments[idx3].setSize(8001) + + // sum of sizes (5+6+7+8) big enough compared to segment's size, cleaning considering 5+6+7+8 + idx4, startIdx4, lastIdx4, onCompleted4, err4 := sc.findCandidate() + require.NoError(t, err4) + requireCandidateFound(t, idx4, 1, startIdx4, 4, lastIdx4, 7) + assertSegment(t, sg, idx4, "segment-0002.db", 9002) + require.NotNil(t, onCompleted4) + onCompleted4(8002) + sg.segments[idx4].setSize(8002) + + // sum of sizes (5+6+7+8) big enough compared to segment's size, cleaning considering 5+6+7+8 + idx5, startIdx5, lastIdx5, onCompleted5, err5 := sc.findCandidate() + require.NoError(t, err5) + requireCandidateFound(t, idx5, 2, startIdx5, 4, lastIdx5, 7) + assertSegment(t, sg, idx5, "segment-0003.db", 9003) + require.NotNil(t, onCompleted5) + onCompleted5(8003) + sg.segments[idx5].setSize(8003) + + // skipping 4 due to sum of new sizes (7+8) not big enough compared to segment's size + // sum of sizes (7+8) big enough compared to segment's size, cleaning considering 7+8 + idx6, startIdx6, lastIdx6, onCompleted6, err6 := sc.findCandidate() + require.NoError(t, err6) + requireCandidateFound(t, idx6, 4, startIdx6, 6, lastIdx6, 7) + assertSegment(t, sg, idx6, "segment-0005.db", 305) + require.NotNil(t, onCompleted6) + onCompleted6(205) + sg.segments[idx6].setSize(205) + + idx7, startIdx7, lastIdx7, onCompleted7, err7 := sc.findCandidate() + require.NoError(t, err7) + requireCandidateNotFound(t, idx7, startIdx7, lastIdx7) + assert.Nil(t, onCompleted7) + }) + + assertBoltDbKeys(t, sc.db, []int64{1, 2, 3, 4, 5, 6, 7}) + }) + + t.Run("multilpe segments, cleanup and compaction", func(t *testing.T) { + dir := t.TempDir() + sg := &SegmentGroup{ + dir: dir, + segments: []Segment{ + &segment{ + path: filepath.Join(dir, "segment-0001.db"), + size: 10001, + }, + &segment{ + path: 
filepath.Join(dir, "segment-0002.db"), + size: 10002, + }, + &segment{ + path: filepath.Join(dir, "segment-0003.db"), + size: 10003, + }, + &segment{ + path: filepath.Join(dir, "segment-0004.db"), + size: 10004, + }, + &segment{ + path: filepath.Join(dir, "segment-0005.db"), + size: 10005, + }, + }, + cleanupInterval: time.Millisecond * 75, + } + sc := &segmentCleanerCommon{sg: sg} + require.NoError(t, sc.init()) + defer sc.close() + + t.Run("1st round, all but last cleaned", func(t *testing.T) { + // not cleaned before, cleaning considering 2+3+4+5 + idx1, startIdx1, lastIdx1, onCompleted1, err1 := sc.findCandidate() + require.NoError(t, err1) + requireCandidateFound(t, idx1, 0, startIdx1, 1, lastIdx1, 4) + assertSegment(t, sg, idx1, "segment-0001.db", 10001) + require.NotNil(t, onCompleted1) + onCompleted1(9001) + sg.segments[idx1].setSize(9001) + + // not cleaned before, cleaning considering 3+4+5 + idx2, startIdx2, lastIdx2, onCompleted2, err2 := sc.findCandidate() + require.NoError(t, err2) + requireCandidateFound(t, idx2, 1, startIdx2, 2, lastIdx2, 4) + assertSegment(t, sg, idx2, "segment-0002.db", 10002) + require.NotNil(t, onCompleted2) + onCompleted2(9002) + sg.segments[idx2].setSize(9002) + + // not cleaned before, cleaning considering 4+5 + idx3, startIdx3, lastIdx3, onCompleted3, err3 := sc.findCandidate() + require.NoError(t, err3) + requireCandidateFound(t, idx3, 2, startIdx3, 3, lastIdx3, 4) + assertSegment(t, sg, idx3, "segment-0003.db", 10003) + require.NotNil(t, onCompleted3) + onCompleted3(9003) + sg.segments[idx3].setSize(9003) + + // not cleaned before, cleaning considering 5 + idx4, startIdx4, lastIdx4, onCompleted4, err4 := sc.findCandidate() + require.NoError(t, err4) + requireCandidateFound(t, idx4, 3, startIdx4, 4, lastIdx4, 4) + assertSegment(t, sg, idx4, "segment-0004.db", 10004) + require.NotNil(t, onCompleted4) + onCompleted4(9004) + sg.segments[idx4].setSize(9004) + + // skipping 5 as last one + idx5, startIdx5, lastIdx5, 
onCompleted5, err5 := sc.findCandidate() + require.NoError(t, err5) + requireCandidateNotFound(t, idx5, startIdx5, lastIdx5) + assert.Nil(t, onCompleted5) + }) + + t.Run("no candidates before interval", func(t *testing.T) { + idx, startIdx, lastIdx, onCompleted, err := sc.findCandidate() + require.NoError(t, err) + requireCandidateNotFound(t, idx, startIdx, lastIdx) + assert.Nil(t, onCompleted) + }) + + t.Run("compact", func(t *testing.T) { + seg2 := sg.segments[1] + seg2.setSize(20002) + seg4 := sg.segments[3] + seg4.setSize(20004) + seg5 := sg.segments[4] + + sg.segments = []Segment{seg2, seg4, seg5} + }) + + t.Run("wait interval for next round", func(t *testing.T) { + time.Sleep(sg.cleanupInterval * 3 / 2) + }) + + t.Run("2nd round, no candidates due to no new segments", func(t *testing.T) { + // no new segments + idx, startIdx, lastIdx, onCompleted, err := sc.findCandidate() + require.NoError(t, err) + requireCandidateNotFound(t, idx, startIdx, lastIdx) + assert.Nil(t, onCompleted) + }) + + t.Run("new segments created", func(t *testing.T) { + sg.segments = append(sg.segments, + &segment{ + path: filepath.Join(dir, "segment-0006.db"), + size: 10006, + observeMetaWrite: func(n int64) {}, + }, + ) + }) + + t.Run("wait interval for next round", func(t *testing.T) { + time.Sleep(sg.cleanupInterval * 3 / 2) + }) + + t.Run("3rd round, new segments cleaned and some old ones", func(t *testing.T) { + // not cleaned before, cleaning considering 6 + idx1, startIdx1, lastIdx1, onCompleted1, err1 := sc.findCandidate() + require.NoError(t, err1) + requireCandidateFound(t, idx1, 2, startIdx1, 3, lastIdx1, 3) + assertSegment(t, sg, idx1, "segment-0005.db", 10005) + require.NotNil(t, onCompleted1) + onCompleted1(9005) + sg.segments[idx1].setSize(9005) + + // size changed, cleanup considering all next segments, including new ones + // sum of sizes (4+5+6) big enough compared to segment's size, cleaning considering 4+5+6 + idx2, startIdx2, lastIdx2, onCompleted2, err2 := 
sc.findCandidate() + require.NoError(t, err2) + requireCandidateFound(t, idx2, 0, startIdx2, 3, lastIdx2, 1) + assertSegment(t, sg, idx2, "segment-0002.db", 20002) + require.NotNil(t, onCompleted2) + onCompleted2(19002) + sg.segments[idx2].setSize(19002) + }) + + t.Run("new segments created while 3rd round", func(t *testing.T) { + sg.segments = append(sg.segments, + &segment{ + path: filepath.Join(dir, "segment-0007.db"), + size: 10007, + observeMetaWrite: func(n int64) {}, + }, + ) + }) + + t.Run("3rd round ongoing, new segments cleaned and some old ones", func(t *testing.T) { + // not cleaned before, cleaning considering 7 + idx1, startIdx1, lastIdx1, onCompleted1, err1 := sc.findCandidate() + require.NoError(t, err1) + requireCandidateFound(t, idx1, 3, startIdx1, 4, lastIdx1, 4) + assertSegment(t, sg, idx1, "segment-0006.db", 10006) + require.NotNil(t, onCompleted1) + onCompleted1(9006) + sg.segments[idx1].setSize(9006) + + // size changed, cleanup considering all next segments, including new ones + // sum of sizes (5+6+7) big enough compared to segment's size, cleaning considering 5+6+7 + idx2, startIdx2, lastIdx2, onCompleted2, err2 := sc.findCandidate() + require.NoError(t, err2) + requireCandidateFound(t, idx2, 1, startIdx2, 4, lastIdx2, 2) + assertSegment(t, sg, idx2, "segment-0004.db", 20004) + require.NotNil(t, onCompleted2) + onCompleted2(19004) + sg.segments[idx2].setSize(19004) + + // skipping 7 as last one + idx3, startIdx3, lastIdx3, onCompleted3, err3 := sc.findCandidate() + require.NoError(t, err3) + requireCandidateNotFound(t, idx3, startIdx3, lastIdx3) + assert.Nil(t, onCompleted3) + }) + + t.Run("compact", func(t *testing.T) { + seg4 := sg.segments[1] + seg4.setSize(40004) + seg6 := sg.segments[3] + seg6.setSize(20006) + seg7 := sg.segments[4] + + sg.segments = []Segment{seg4, seg6, seg7} + }) + + t.Run("wait interval for next round", func(t *testing.T) { + time.Sleep(sg.cleanupInterval * 3 / 2) + }) + + t.Run("4th round, no candidates due to no 
new segments", func(t *testing.T) { + idx, startIdx, lastIdx, onCompleted, err := sc.findCandidate() + require.NoError(t, err) + requireCandidateNotFound(t, idx, startIdx, lastIdx) + assert.Nil(t, onCompleted) + }) + + t.Run("new segments created", func(t *testing.T) { + sg.segments = append(sg.segments, + &segment{ + path: filepath.Join(dir, "segment-0008.db"), + size: 10008, + observeMetaWrite: func(n int64) {}, + }, + ) + }) + + t.Run("wait interval for next round", func(t *testing.T) { + time.Sleep(sg.cleanupInterval * 3 / 2) + }) + + t.Run("5th round, new segments cleaned and some old ones", func(t *testing.T) { + // not cleaned before, cleaning considering 8 + idx1, startIdx1, lastIdx1, onCompleted1, err1 := sc.findCandidate() + require.NoError(t, err1) + requireCandidateFound(t, idx1, 2, startIdx1, 3, lastIdx1, 3) + assertSegment(t, sg, idx1, "segment-0007.db", 10007) + require.NotNil(t, onCompleted1) + onCompleted1(9007) + sg.segments[idx1].setSize(9007) + + // size changed, cleanup considering all next segments, including new ones + // sum of sizes (7+8) big enough compared to segment's size, cleaning considering 7+8 + idx2, startIdx2, lastIdx2, onCompleted2, err2 := sc.findCandidate() + require.NoError(t, err2) + requireCandidateFound(t, idx2, 1, startIdx2, 3, lastIdx2, 2) + assertSegment(t, sg, idx2, "segment-0006.db", 20006) + require.NotNil(t, onCompleted2) + onCompleted2(19006) + sg.segments[idx2].setSize(19006) + + // size changed, cleanup considering all next segments, including new ones + // sum of sizes (6+7+8) big enough compared to segment's size, cleaning considering 6+7+8 + idx3, startIdx3, lastIdx3, onCompleted3, err3 := sc.findCandidate() + require.NoError(t, err3) + requireCandidateFound(t, idx3, 0, startIdx3, 3, lastIdx3, 1) + assertSegment(t, sg, idx3, "segment-0004.db", 40004) + require.NotNil(t, onCompleted3) + onCompleted3(39004) + sg.segments[idx3].setSize(39004) + + // skipping 8 as last one + idx4, startIdx4, lastIdx4, onCompleted4, 
err4 := sc.findCandidate() + require.NoError(t, err4) + requireCandidateNotFound(t, idx4, startIdx4, lastIdx4) + assert.Nil(t, onCompleted4) + }) + + assertBoltDbKeys(t, sc.db, []int64{4, 6, 7}) + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_group_compaction.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_group_compaction.go new file mode 100644 index 0000000000000000000000000000000000000000..40e15f43d275800e8e190fa0be598bf1dd32efc4 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_group_compaction.go @@ -0,0 +1,735 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "fmt" + "os" + "path" + "path/filepath" + "strings" + "time" + + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/sirupsen/logrus" + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv/segmentindex" + "github.com/weaviate/weaviate/adapters/repos/db/roaringset" + "github.com/weaviate/weaviate/adapters/repos/db/roaringsetrange" + "github.com/weaviate/weaviate/entities/diskio" + "github.com/weaviate/weaviate/usecases/config" +) + +// findCompactionCandidates looks for pair of segments eligible for compaction +// into single segment. +// Segments use level property to mark how many times they were compacted. +// +// By default pair of segments with lowest matching levels is searched. If there are more +// than 2 segments of the same level, the oldest ones are picked. 
+// Behaviour of the method can be changed with 2 segment group settings:
+// - maxSegmentSize (prevents segments being compacted into single segment of too large size)
+// - compactLeftOverSegments (allows picking for compaction segments of not equal levels)
+// Regardless of compaction settings, following constraints have to be met:
+// - only consecutive segments can be merged to keep order of creations/updates/deletions of data stored
+// - newer segments have levels lower or equal than levels of older segments (descending order is kept)
+//
+// E.g. out of segments: s1(4), s2(3), s3(3), s4(2), s5(2), s6(2), s7(1), s8(0), s4+s5 will be
+// selected for compaction first producing single segment s4s5(3), then s2+s3 producing s2s3(4),
+// then s1+s2s3 producing s1s2s3(5). Until new segment s9(0) will be added to segment group,
+// no next pair will be returned, as all segments will have different levels.
+//
+// If maxSegmentSize is set, estimated total size of compacted segment must not exceed given limit.
+// Only pair of segments having sum of sizes <= maxSegmentSize can be returned by the method. If there
+// exist older segments with same level as level of selected pair, level of compacted segment will
+// not be changed to ensure no new segment have higher level than older ones. If there is no segment
+// having same level as selected pair, new segment's level will be incremented.
+// E.g. out of segments: s1(4), s2(3), s3(3), s4(2), s5(2), s6(2), s7(1), s8(0),
+// when s4.size+s5.size > maxSegmentSize, but s5.size+s6.size <= maxSegmentSize, s5+s6 will be selected
+// first for compaction producing segment s5s6(2) (note same level, due to older s2(2)).
+// If s2.size+s3.size <= maxSegmentSize, then s2+s3 producing s2s3(4) (note incremented level,
+// due to no older segment of level 3). If s1.size+s2s3.size <= maxSegmentSize s1s2s3(5) will be produced,
+// if not, no other pair will be returned until new segments will be added to segment group.
+// +// If compactLeftOverSegments is set, pair of segments of lowest levels, though similar in sizes +// can be returned if no pair of matching levels will be found. Segment sizes need to be close to each +// other to prevent merging large segments (GiB) with tiny one (KiB). Level of newly produced segment +// will be the same as level of larger(left) segment. +// maxSegmentSize ise respected for pair of leftover segments. +func (sg *SegmentGroup) findCompactionCandidates() (pair []int, level uint16) { + // if true, the parent shard has indicated that it has + // entered an immutable state. During this time, the + // SegmentGroup should refrain from flushing until its + // shard indicates otherwise + if sg.isReadyOnly() { + sg.logger.WithField("action", "lsm_compaction"). + WithField("path", sg.dir). + Debug("compaction halted due to shard READONLY status") + return nil, 0 + } + + sg.maintenanceLock.RLock() + defer sg.maintenanceLock.RUnlock() + + // Nothing to compact + if len(sg.segments) < 2 { + return nil, 0 + } + + matchingPairFound := false + leftoverPairFound := false + var matchingLeftId, leftoverLeftId int + var matchingLevel, leftoverLevel uint16 + + // as newest segments are prioritized, loop in reverse order + for leftId := len(sg.segments) - 2; leftId >= 0; leftId-- { + left, right := sg.segments[leftId], sg.segments[leftId+1] + + if left.getLevel() == right.getLevel() { + leftS, rightS := sg.segments[leftId].getSegment(), sg.segments[leftId+1].getSegment() + if leftS.secondaryIndexCount != rightS.secondaryIndexCount { + // only pair of segments with the same secondary indexes are compacted + continue + } + if sg.compactionFitsSizeLimit(leftS, rightS) { + // max size not exceeded + matchingPairFound = true + matchingLeftId = leftId + + // this is for bucket migrations with re-ingestion, specifically + // for the new incoming data (ingest) bucket. 
+ // we don't want to change the level of the segments on ingest data, + // so that, when we copy the segments to the bucket with the reingested + // data, the levels are all still at zero, and they can be compacted + // with the existing re-ingested segments. + if sg.keepLevelCompaction { + matchingLevel = leftS.level + } else { + matchingLevel = leftS.level + 1 + } + } else if matchingPairFound { + // older segment of same level as pair's level exist. + // keep unchanged level + matchingLevel = leftS.level + } + } else { + if matchingPairFound { + // moving to segments of higher level, but matching pair is already found. + // stop further search + break + } + if sg.compactLeftOverSegments && !leftoverPairFound { + leftS, rightS := sg.segments[leftId].getSegment(), sg.segments[leftId+1].getSegment() + if leftS.secondaryIndexCount != rightS.secondaryIndexCount { + // only pair of segments with the same secondary indexes are compacted + continue + } + // leftover segments enabled, none leftover pair found yet + if sg.compactionFitsSizeLimit(leftS, rightS) && isSimilarSegmentSizes(leftS.size, rightS.size) { + // max size not exceeded, segment sizes similar despite different levels + leftoverPairFound = true + leftoverLeftId = leftId + leftoverLevel = leftS.level + } + } + } + } + + if matchingPairFound { + return []int{matchingLeftId, matchingLeftId + 1}, matchingLevel + } + if leftoverPairFound { + return []int{leftoverLeftId, leftoverLeftId + 1}, leftoverLevel + } + return nil, 0 +} + +func isSimilarSegmentSizes(leftSize, rightSize int64) bool { + MiB := int64(1024 * 1024) + GiB := 1024 * MiB + + threshold1 := 10 * MiB + threshold2 := 100 * MiB + threshold3 := GiB + threshold4 := 10 * GiB + + factor2 := int64(10) + factor3 := int64(5) + factor4 := int64(3) + factorDef := int64(2) + + // if both sizes less then 10 MiB + if leftSize <= threshold1 && rightSize <= threshold1 { + return true + } + + lowerSize, higherSize := leftSize, rightSize + if leftSize > rightSize 
{ + lowerSize, higherSize = rightSize, leftSize + } + + // if higher size less than 100 MiB and not 10x bigger than lower + if higherSize <= threshold2 && lowerSize*factor2 >= higherSize { + return true + } + // if higher size less than 1 GiB and not 5x bigger than lower + if higherSize <= threshold3 && lowerSize*factor3 >= higherSize { + return true + } + // if higher size less than 10 GiB and not 3x bigger than lower + if higherSize <= threshold4 && lowerSize*factor4 >= higherSize { + return true + } + // if higher size not 2x bigger than lower + return lowerSize*factorDef >= higherSize +} + +// segmentAtPos retrieves the segment for the given position using a read-lock +func (sg *SegmentGroup) segmentAtPos(pos int) *segment { + sg.maintenanceLock.RLock() + defer sg.maintenanceLock.RUnlock() + + return sg.segments[pos].getSegment() +} + +func segmentID(path string) string { + filename := filepath.Base(path) + filename, _, _ = strings.Cut(filename, ".") + return strings.TrimPrefix(filename, "segment-") +} + +func segmentExtraInfo(level uint16, strategy segmentindex.Strategy) string { + return fmt.Sprintf(".l%d.s%d", level, strategy) +} + +func (sg *SegmentGroup) compactOnce() (bool, error) { + // Is it safe to only occasionally lock instead of the entire duration? Yes, + // because other than compaction the only change to the segments array could + // be an append because of a new flush cycle, so we do not need to guarantee + // that the array contents stay stable over the duration of an entire + // compaction. We do however need to protect against a read-while-write (race + // condition) on the array. Thus any read from sg.segments need to protected + + pair, level := sg.findCompactionCandidates() + if pair == nil { + // nothing to do + return false, nil + } + + if sg.allocChecker != nil { + // allocChecker is optional + if err := sg.allocChecker.CheckAlloc(100 * 1024 * 1024); err != nil { + // if we don't have at least 100MB to spare, don't start a compaction. 
A + // compaction does not actually need a 100MB, but it will create garbage + // that needs to be cleaned up. If we're so close to the memory limit, we + // can increase stability by preventing anything that's not strictly + // necessary. Compactions can simply resume when the cluster has been + // scaled. + sg.logger.WithFields(logrus.Fields{ + "action": "lsm_compaction", + "event": "compaction_skipped_oom", + "path": sg.dir, + }).WithError(err). + Warnf("skipping compaction due to memory pressure") + + return false, nil + } + } + + leftSegment := sg.segmentAtPos(pair[0]) + rightSegment := sg.segmentAtPos(pair[1]) + var path string + if sg.writeSegmentInfoIntoFileName { + path = filepath.Join(sg.dir, "segment-"+segmentID(leftSegment.path)+"_"+segmentID(rightSegment.path)+segmentExtraInfo(level, leftSegment.strategy)+".db.tmp") + } else { + path = filepath.Join(sg.dir, "segment-"+segmentID(leftSegment.path)+"_"+segmentID(rightSegment.path)+".db.tmp") + } + + f, err := os.Create(path) + if err != nil { + return false, err + } + + scratchSpacePath := rightSegment.path + "compaction.scratch.d" + + strategy := leftSegment.strategy + secondaryIndices := leftSegment.secondaryIndexCount + cleanupTombstones := !sg.keepTombstones && pair[0] == 0 + + pathLabel := "n/a" + if sg.metrics != nil && !sg.metrics.groupClasses { + pathLabel = sg.dir + } + + maxNewFileSize := leftSegment.size + rightSegment.size + + switch strategy { + + // TODO: call metrics just once with variable strategy label + + case segmentindex.StrategyReplace: + + c := newCompactorReplace(f, leftSegment.newCursor(), + rightSegment.newCursor(), level, secondaryIndices, + scratchSpacePath, cleanupTombstones, sg.enableChecksumValidation, maxNewFileSize, sg.allocChecker) + + if sg.metrics != nil { + sg.metrics.CompactionReplace.With(prometheus.Labels{"path": pathLabel}).Inc() + defer sg.metrics.CompactionReplace.With(prometheus.Labels{"path": pathLabel}).Dec() + } + + if err := c.do(); err != nil { + return 
false, err + } + case segmentindex.StrategySetCollection: + c := newCompactorSetCollection(f, leftSegment.newCollectionCursor(), + rightSegment.newCollectionCursor(), level, secondaryIndices, + scratchSpacePath, cleanupTombstones, sg.enableChecksumValidation, maxNewFileSize, sg.allocChecker) + + if sg.metrics != nil { + sg.metrics.CompactionSet.With(prometheus.Labels{"path": pathLabel}).Inc() + defer sg.metrics.CompactionSet.With(prometheus.Labels{"path": pathLabel}).Dec() + } + + if err := c.do(); err != nil { + return false, err + } + case segmentindex.StrategyMapCollection: + c := newCompactorMapCollection(f, + leftSegment.newCollectionCursorReusable(), + rightSegment.newCollectionCursorReusable(), + level, secondaryIndices, scratchSpacePath, + sg.mapRequiresSorting, cleanupTombstones, + sg.enableChecksumValidation, maxNewFileSize, sg.allocChecker) + + if sg.metrics != nil { + sg.metrics.CompactionMap.With(prometheus.Labels{"path": pathLabel}).Inc() + defer sg.metrics.CompactionMap.With(prometheus.Labels{"path": pathLabel}).Dec() + } + + if err := c.do(); err != nil { + return false, err + } + case segmentindex.StrategyRoaringSet: + leftCursor := leftSegment.newRoaringSetCursor() + rightCursor := rightSegment.newRoaringSetCursor() + + c := roaringset.NewCompactor(f, leftCursor, rightCursor, + level, scratchSpacePath, cleanupTombstones, + sg.enableChecksumValidation, maxNewFileSize, sg.allocChecker) + + if sg.metrics != nil { + sg.metrics.CompactionRoaringSet.With(prometheus.Labels{"path": pathLabel}).Set(1) + defer sg.metrics.CompactionRoaringSet.With(prometheus.Labels{"path": pathLabel}).Set(0) + } + if err := c.Do(); err != nil { + return false, err + } + + case segmentindex.StrategyRoaringSetRange: + leftCursor := leftSegment.newRoaringSetRangeCursor() + rightCursor := rightSegment.newRoaringSetRangeCursor() + + c := roaringsetrange.NewCompactor(f, leftCursor, rightCursor, + level, cleanupTombstones, sg.enableChecksumValidation, maxNewFileSize) + + if 
sg.metrics != nil { + sg.metrics.CompactionRoaringSetRange.With(prometheus.Labels{"path": pathLabel}).Set(1) + defer sg.metrics.CompactionRoaringSetRange.With(prometheus.Labels{"path": pathLabel}).Set(0) + } + + if err := c.Do(); err != nil { + return false, err + } + case segmentindex.StrategyInverted: + avgPropLen, _ := sg.GetAveragePropertyLength() + b := float64(config.DefaultBM25b) + k1 := float64(config.DefaultBM25k1) + if sg.bm25config != nil { + b = sg.bm25config.B + k1 = sg.bm25config.K1 + } + + c := newCompactorInverted(f, + leftSegment.newInvertedCursorReusable(), + rightSegment.newInvertedCursorReusable(), + level, secondaryIndices, scratchSpacePath, cleanupTombstones, k1, b, avgPropLen, maxNewFileSize, sg.allocChecker, sg.enableChecksumValidation) + if sg.metrics != nil { + sg.metrics.CompactionMap.With(prometheus.Labels{"path": pathLabel}).Inc() + defer sg.metrics.CompactionMap.With(prometheus.Labels{"path": pathLabel}).Dec() + } + + if err := c.do(); err != nil { + return false, err + } + default: + return false, errors.Errorf("unrecognized strategy %v", strategy) + } + + if err := f.Sync(); err != nil { + return false, errors.Wrap(err, "fsync compacted segment file") + } + + if err := f.Close(); err != nil { + return false, errors.Wrap(err, "close compacted segment file") + } + + if err := sg.replaceCompactedSegments(pair[0], pair[1], path); err != nil { + return false, errors.Wrap(err, "replace compacted segments") + } + + return true, nil +} + +func (sg *SegmentGroup) replaceCompactedSegments(old1, old2 int, + newPathTmp string, +) error { + sg.maintenanceLock.RLock() + updatedCountNetAdditions := sg.segments[old1].getSegment().getCountNetAdditions() + + sg.segments[old2].getSegment().getCountNetAdditions() + sg.maintenanceLock.RUnlock() + + // WIP: we could add a random suffix to the tmp file to avoid conflicts + + // as a guardrail validate that the segment is considered a .tmp segment. 
+ // This way we can be sure that we're not accidentally operating on a live + // segment as the segment group completely ignores .tmp segment files + if !strings.HasSuffix(newPathTmp, ".tmp") { + return fmt.Errorf("pre computing a segment expects a .tmp segment path") + } + + seg, err := newSegment(newPathTmp, sg.logger, sg.metrics, nil, + segmentConfig{ + mmapContents: sg.mmapContents, + useBloomFilter: sg.useBloomFilter, + calcCountNetAdditions: sg.calcCountNetAdditions, + overwriteDerived: true, + enableChecksumValidation: sg.enableChecksumValidation, + MinMMapSize: sg.MinMMapSize, + allocChecker: sg.allocChecker, + precomputedCountNetAdditions: &updatedCountNetAdditions, + fileList: make(map[string]int64), // empty to not check if bloom/cna files already exist + writeMetadata: sg.writeMetadata, + }) + if err != nil { + return errors.Wrap(err, "create new segment") + } + + oldL, oldR, err := sg.replaceCompactedSegmentsBlocking(old1, old2, seg) + if err != nil { + return fmt.Errorf("replace compacted segments (blocking): %w", err) + } + + if err := sg.deleteOldSegmentsNonBlocking(oldL, oldR); err != nil { + // don't abort if the delete fails, we can still continue (albeit + // without freeing disk space that should have been freed). The + // compaction itself was successful. + sg.logger.WithError(err).WithFields(logrus.Fields{ + "action": "lsm_replace_compacted_segments_delete_files", + "file_left": oldL.path, + "file_right": oldR.path, + }).Error("failed to delete file already marked for deletion") + } + + return nil +} + +const replaceSegmentWarnThreshold = 300 * time.Millisecond + +func (sg *SegmentGroup) replaceCompactedSegmentsBlocking( + old1, old2 int, newSeg *segment, +) (*segment, *segment, error) { + // We need a maintenanceLock.Lock() to switch segments, however, we can't + // simply call Lock(). 
Due to the write-preferring nature of the RWMutex this + // would mean that if any RLock() holder still holds the lock, all future + // RLock() holders would be blocked until we release the Lock() again. + // + // Typical RLock() holders are user operations that are short-lived. However, + // the flush routine also requires an RLock() and could potentially hold it + // for minutes. This is problematic, so we need to synchronize with the flush + // routine by obtaining the flushVsCompactLock. + // + // This gives us the guarantee that – until we have released the + // flushVsCompactLock – no flush routine will try to obtain a long-lived + // maintenanceLock.RLock(). + sg.flushVsCompactLock.Lock() + defer sg.flushVsCompactLock.Unlock() + + start := time.Now() + beforeMaintenanceLock := time.Now() + sg.maintenanceLock.Lock() + if time.Since(beforeMaintenanceLock) > 100*time.Millisecond { + sg.logger.WithField("duration", time.Since(beforeMaintenanceLock)). + Debug("compaction took more than 100ms to acquire maintenance lock") + } + defer sg.maintenanceLock.Unlock() + + leftSegment := sg.segments[old1] + rightSegment := sg.segments[old2] + + if err := leftSegment.close(); err != nil { + return nil, nil, errors.Wrap(err, "close disk segment") + } + + if err := rightSegment.close(); err != nil { + return nil, nil, errors.Wrap(err, "close disk segment") + } + + if err := leftSegment.markForDeletion(); err != nil { + return nil, nil, errors.Wrap(err, "drop disk segment") + } + + if err := rightSegment.markForDeletion(); err != nil { + return nil, nil, errors.Wrap(err, "drop disk segment") + } + + err := diskio.Fsync(sg.dir) + if err != nil { + return nil, nil, fmt.Errorf("fsync segment directory %s: %w", sg.dir, err) + } + + sg.segments[old1] = nil + sg.segments[old2] = nil + + // the old segments have been deleted, we can now safely remove the .tmp + // extension from the new segment itself and the pre-computed files which + // carried the name of the second old segment + 
+ newPath, err := sg.stripTmpExtension(newSeg.path, segmentID(leftSegment.getPath()), segmentID(rightSegment.getPath())) + if err != nil { + return nil, nil, errors.Wrap(err, "strip .tmp extension of new segment") + } + newSeg.path = newPath + + for i, pth := range newSeg.metaPaths { + updated, err := sg.stripTmpExtension(pth, segmentID(leftSegment.getPath()), segmentID(rightSegment.getPath())) + if err != nil { + return nil, nil, errors.Wrap(err, "strip .tmp extension of new segment") + } + newSeg.metaPaths[i] = updated + } + + sg.segments[old2] = newSeg + + sg.segments = append(sg.segments[:old1], sg.segments[old1+1:]...) + + sg.observeReplaceCompactedDuration(start, old1, leftSegment.getSegment(), rightSegment.getSegment()) + return leftSegment.getSegment(), rightSegment.getSegment(), nil +} + +func (sg *SegmentGroup) observeReplaceCompactedDuration( + start time.Time, segmentIdx int, left, right *segment, +) { + // observe duration - warn if it took too long + took := time.Since(start) + fields := sg.logger.WithFields(logrus.Fields{ + "action": "lsm_replace_compacted_segments_blocking", + "segment_index": segmentIdx, + "path_left": left.path, + "path_right": right.path, + "took": took, + }) + msg := fmt.Sprintf("replacing compacted segments took %s", took) + if took > replaceSegmentWarnThreshold { + fields.Warn(msg) + } else { + fields.Debug(msg) + } +} + +func (sg *SegmentGroup) deleteOldSegmentsNonBlocking(segments ...*segment) error { + // At this point those segments are no longer used, so we can drop them + // without holding the maintenance lock and therefore not block readers. 
+ + for pos, seg := range segments { + if err := seg.dropMarked(); err != nil { + return fmt.Errorf("drop segment at pos %d: %w", pos, err) + } + } + + return nil +} + +func (sg *SegmentGroup) stripTmpExtension(oldPath, left, right string) (string, error) { + ext := filepath.Ext(oldPath) + if ext != ".tmp" { + return "", errors.Errorf("segment %q did not have .tmp extension", oldPath) + } + newPath := oldPath[:len(oldPath)-len(ext)] + + newPath = strings.ReplaceAll(newPath, fmt.Sprintf("%s_%s", left, right), right) + + if err := os.Rename(oldPath, newPath); err != nil { + return "", errors.Wrapf(err, "rename %q -> %q", oldPath, newPath) + } + + return newPath, nil +} + +func (sg *SegmentGroup) monitorSegments() { + if sg.metrics == nil || sg.metrics.groupClasses { + return + } + + // Keeping metering to only the critical buckets helps + // cut down on noise when monitoring + if sg.metrics.criticalBucketsOnly { + bucket := path.Base(sg.dir) + if bucket != helpers.ObjectsBucketLSM && + bucket != helpers.VectorsCompressedBucketLSM { + return + } + if bucket == helpers.ObjectsBucketLSM { + sg.metrics.ObjectsBucketSegments.With(prometheus.Labels{ + "strategy": sg.strategy, + "path": sg.dir, + }).Set(float64(sg.Len())) + } + if bucket == helpers.VectorsCompressedBucketLSM { + sg.metrics.CompressedVecsBucketSegments.With(prometheus.Labels{ + "strategy": sg.strategy, + "path": sg.dir, + }).Set(float64(sg.Len())) + } + sg.reportSegmentStats() + return + } + + sg.metrics.ActiveSegments.With(prometheus.Labels{ + "strategy": sg.strategy, + "path": sg.dir, + }).Set(float64(sg.Len())) + sg.reportSegmentStats() +} + +func (sg *SegmentGroup) reportSegmentStats() { + stats := sg.segmentLevelStats() + stats.fillMissingLevels() + stats.report(sg.metrics, sg.strategy, sg.dir) +} + +type segmentLevelStats struct { + indexes map[uint16]int + payloads map[uint16]int + count map[uint16]int + unloaded int +} + +func newSegmentLevelStats() segmentLevelStats { + return segmentLevelStats{ + 
indexes: map[uint16]int{}, + payloads: map[uint16]int{}, + count: map[uint16]int{}, + unloaded: 0, + } +} + +func (sg *SegmentGroup) segmentLevelStats() segmentLevelStats { + sg.maintenanceLock.RLock() + defer sg.maintenanceLock.RUnlock() + + stats := newSegmentLevelStats() + + for _, seg := range sg.segments { + if !seg.isLoaded() { + stats.unloaded++ + continue + } + sgm := seg.getSegment() + stats.count[sgm.level]++ + + cur := stats.indexes[sgm.level] + cur += sgm.index.Size() + stats.indexes[sgm.level] = cur + + cur = stats.payloads[sgm.level] + cur += seg.PayloadSize() + stats.payloads[sgm.level] = cur + } + + return stats +} + +// fill missing levels +// +// Imagine we had exactly two segments of level 4 before, and there were just +// compacted to single segment of level 5. As a result, there should be no +// more segments of level 4. However, our current logic only loops over +// existing segments. As a result, we need to check what the highest level +// is, then for every level lower than the highest check if we are missing +// data. If yes, we need to explicitly set the gauges to 0. 
+func (s *segmentLevelStats) fillMissingLevels() { + maxLevel := uint16(0) + for level := range s.count { + if level > maxLevel { + maxLevel = level + } + } + + if maxLevel > 0 { + for level := uint16(0); level < maxLevel; level++ { + if _, ok := s.count[level]; ok { + continue + } + + // there is no entry for this level, we must explicitly set it to 0 + s.count[level] = 0 + s.indexes[level] = 0 + s.payloads[level] = 0 + } + } +} + +func (s *segmentLevelStats) report(metrics *Metrics, + strategy, dir string, +) { + for level, size := range s.indexes { + metrics.SegmentSize.With(prometheus.Labels{ + "strategy": strategy, + "unit": "index", + "level": fmt.Sprint(level), + "path": dir, + }).Set(float64(size)) + } + + for level, size := range s.payloads { + metrics.SegmentSize.With(prometheus.Labels{ + "strategy": strategy, + "unit": "payload", + "level": fmt.Sprint(level), + "path": dir, + }).Set(float64(size)) + } + + for level, count := range s.count { + metrics.SegmentCount.With(prometheus.Labels{ + "strategy": strategy, + "level": fmt.Sprint(level), + "path": dir, + }).Set(float64(count)) + } + + metrics.SegmentUnloaded.With(prometheus.Labels{ + "strategy": strategy, + "path": dir, + }).Set(float64(s.unloaded)) +} + +func (sg *SegmentGroup) compactionFitsSizeLimit(left, right *segment) bool { + if sg.maxSegmentSize == 0 { + // no limit is set, always return true + return true + } + + totalSize := left.size + right.size + return totalSize <= sg.maxSegmentSize +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_group_compaction_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_group_compaction_test.go new file mode 100644 index 0000000000000000000000000000000000000000..b71b703ae45a8362d72b3bb9bf67a9d1e849408a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_group_compaction_test.go @@ -0,0 +1,2612 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ 
_` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + KiB = int64(1024) + MiB = 1024 * KiB + GiB = 1024 * MiB +) + +func TestSegmentGroup_BestCompactionPair(t *testing.T) { + var maxSegmentSize int64 = 10000 + + tests := []struct { + name string + segments []Segment + expectedPair []string + }{ + { + name: "single segment", + segments: []Segment{ + &segment{size: 1000, path: "segment0", level: 0}, + }, + expectedPair: nil, + }, + { + name: "two segments, same level", + segments: []Segment{ + &segment{size: 1000, path: "segment0", level: 0}, + &segment{size: 1000, path: "segment1", level: 0}, + }, + expectedPair: []string{"segment0", "segment1"}, + }, + { + name: "multiple segments, multiple levels, lowest level is picked", + segments: []Segment{ + &segment{size: 4000, path: "segment0", level: 2}, + &segment{size: 4000, path: "segment1", level: 2}, + &segment{size: 2000, path: "segment2", level: 1}, + &segment{size: 2000, path: "segment3", level: 1}, + &segment{size: 1000, path: "segment4", level: 0}, + &segment{size: 1000, path: "segment5", level: 0}, + }, + expectedPair: []string{"segment4", "segment5"}, + }, + { + name: "two segments that don't fit the max size, but eliglbe segments of a lower level are present", + segments: []Segment{ + &segment{size: 8000, path: "segment0", level: 3}, + &segment{size: 8000, path: "segment1", level: 3}, + &segment{size: 4000, path: "segment2", level: 2}, + &segment{size: 4000, path: "segment3", level: 2}, + }, + expectedPair: []string{"segment2", "segment3"}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + sg := &SegmentGroup{ + segments: test.segments, + maxSegmentSize: maxSegmentSize, + 
} + pair, level := sg.findCompactionCandidates() + if test.expectedPair == nil { + assert.Nil(t, pair) + assert.Equal(t, uint16(0), level) + } else { + leftPath := test.segments[pair[0]].getPath() + rightPath := test.segments[pair[1]].getPath() + assert.Equal(t, test.expectedPair, []string{leftPath, rightPath}) + } + }) + } +} + +func TestSegmenGroup_CompactionLargerThanMaxSize(t *testing.T) { + maxSegmentSize := int64(10000) + // this test only tests the unhappy path which has an early exist condition, + // meaning we don't need real segments, it is only metadata that is evaluated + // here. + sg := &SegmentGroup{ + segments: []Segment{ + &segment{size: 8000, path: "segment0", level: 3}, + &segment{size: 8000, path: "segment1", level: 3}, + }, + maxSegmentSize: maxSegmentSize, + } + + ok, err := sg.compactOnce() + assert.False(t, ok, "segments are too large to run") + assert.Nil(t, err) +} + +func TestSegmentGroup_CompactionCandidates(t *testing.T) { + compactionResizeFactor := float32(1) + sg := &SegmentGroup{ + segments: createSegments(), + } + + t.Run("existing segments", func(t *testing.T) { + testCases := []testCaseCompactionCandidates{ + { + expectedPair: []int{3, 4}, + expectedLevel: 13, + controlPath: "seg_04+seg_05", + }, + { + expectedPair: []int{4, 5}, + expectedLevel: 13, + controlPath: "seg_06+seg_07", + }, + { + expectedPair: []int{5, 6}, + expectedLevel: 13, + controlPath: "seg_08+seg_09", + }, + { + expectedPair: []int{6, 7}, + expectedLevel: 13, + controlPath: "seg_10+seg_11", + }, + { + expectedPair: []int{7, 8}, + expectedLevel: 13, + controlPath: "seg_12+seg_13", + }, + { + expectedPair: []int{8, 9}, + expectedLevel: 13, + controlPath: "seg_14+seg_15", + }, + { + expectedPair: []int{9, 10}, + expectedLevel: 13, + controlPath: "seg_16+seg_17", + }, + { + expectedPair: []int{10, 11}, + expectedLevel: 13, + controlPath: "seg_18+seg_19", + }, + { + expectedPair: []int{11, 12}, + expectedLevel: 13, + controlPath: "seg_20+seg_21", + }, + { + 
expectedPair: []int{12, 13}, + expectedLevel: 13, + controlPath: "seg_22+seg_23", + }, + { + expectedPair: []int{13, 14}, + expectedLevel: 13, + controlPath: "seg_24+seg_25", + }, + { + expectedPair: []int{14, 15}, + expectedLevel: 13, + controlPath: "seg_26+seg_27", + }, + { + expectedPair: []int{15, 16}, + expectedLevel: 13, + controlPath: "seg_28+seg_29", + }, + { + expectedPair: []int{16, 17}, + expectedLevel: 13, + controlPath: "seg_30+seg_31", + }, + { + expectedPair: []int{17, 18}, + expectedLevel: 13, + controlPath: "seg_32+seg_33", + }, + { + expectedPair: []int{18, 19}, + expectedLevel: 13, + controlPath: "seg_34+seg_35", + }, + { + expectedPair: []int{19, 20}, + expectedLevel: 13, + controlPath: "seg_36+seg_37", + }, + { + expectedPair: []int{20, 21}, + expectedLevel: 13, + controlPath: "seg_38+seg_39", + }, + { + expectedPair: []int{21, 22}, + expectedLevel: 13, + controlPath: "seg_40+seg_41", + }, + { + expectedPair: []int{1, 2}, + expectedLevel: 14, + controlPath: "seg_02+seg_03", + }, + { + expectedPair: []int{2, 3}, + expectedLevel: 14, + controlPath: "seg_04+seg_05+seg_06+seg_07", + }, + { + expectedPair: []int{3, 4}, + expectedLevel: 14, + controlPath: "seg_08+seg_09+seg_10+seg_11", + }, + { + expectedPair: []int{4, 5}, + expectedLevel: 14, + controlPath: "seg_12+seg_13+seg_14+seg_15", + }, + { + expectedPair: []int{5, 6}, + expectedLevel: 14, + controlPath: "seg_16+seg_17+seg_18+seg_19", + }, + { + expectedPair: []int{6, 7}, + expectedLevel: 14, + controlPath: "seg_20+seg_21+seg_22+seg_23", + }, + { + expectedPair: []int{7, 8}, + expectedLevel: 14, + controlPath: "seg_24+seg_25+seg_26+seg_27", + }, + { + expectedPair: []int{8, 9}, + expectedLevel: 14, + controlPath: "seg_28+seg_29+seg_30+seg_31", + }, + { + expectedPair: []int{9, 10}, + expectedLevel: 14, + controlPath: "seg_32+seg_33+seg_34+seg_35", + }, + { + expectedPair: []int{10, 11}, + expectedLevel: 14, + controlPath: "seg_36+seg_37+seg_38+seg_39", + }, + { + expectedPair: []int{0, 1}, + 
expectedLevel: 15, + controlPath: "seg_01+seg_02+seg_03", + }, + { + expectedPair: []int{1, 2}, + expectedLevel: 15, + controlPath: "seg_04+seg_05+seg_06+seg_07+seg_08+seg_09+seg_10+seg_11", + }, + { + expectedPair: []int{2, 3}, + expectedLevel: 15, + controlPath: "seg_12+seg_13+seg_14+seg_15+seg_16+seg_17+seg_18+seg_19", + }, + { + expectedPair: []int{3, 4}, + expectedLevel: 15, + controlPath: "seg_20+seg_21+seg_22+seg_23+seg_24+seg_25+seg_26+seg_27", + }, + { + expectedPair: []int{4, 5}, + expectedLevel: 15, + controlPath: "seg_28+seg_29+seg_30+seg_31+seg_32+seg_33+seg_34+seg_35", + }, + { + expectedPair: []int{0, 1}, + expectedLevel: 16, + controlPath: "seg_01+seg_02+seg_03+seg_04+seg_05+seg_06+seg_07+seg_08+seg_09+seg_10+seg_11", + }, + { + expectedPair: []int{1, 2}, + expectedLevel: 16, + controlPath: "seg_12+seg_13+seg_14+seg_15+seg_16+seg_17+seg_18+seg_19+seg_20+seg_21+seg_22+seg_23+seg_24+seg_25+seg_26+seg_27", + }, + { + expectedPair: []int{0, 1}, + expectedLevel: 17, + controlPath: "seg_01+seg_02+seg_03+seg_04+seg_05+seg_06+seg_07+seg_08+seg_09+seg_10+seg_11+seg_12+seg_13+seg_14+seg_15+seg_16+seg_17+seg_18+seg_19+seg_20+seg_21+seg_22+seg_23+seg_24+seg_25+seg_26+seg_27", + }, + { + expectedPair: nil, + expectedLevel: 0, + }, + } + + runCompactionCandidatesTestCases(t, testCases, sg, compactionResizeFactor) + }) + + t.Run("new segment", func(t *testing.T) { + testCases := []testCaseCompactionCandidates{ + { + expectedPair: []int{11, 12}, + expectedLevel: 1, + controlPath: "seg_49+seg_50", + }, + { + expectedPair: []int{10, 11}, + expectedLevel: 2, + controlPath: "seg_48+seg_49+seg_50", + }, + { + expectedPair: []int{9, 10}, + expectedLevel: 3, + controlPath: "seg_47+seg_48+seg_49+seg_50", + }, + { + expectedPair: []int{8, 9}, + expectedLevel: 4, + controlPath: "seg_46+seg_47+seg_48+seg_49+seg_50", + }, + { + expectedPair: []int{7, 8}, + expectedLevel: 5, + controlPath: "seg_45+seg_46+seg_47+seg_48+seg_49+seg_50", + }, + { + expectedPair: nil, + 
expectedLevel: 0, + }, + } + + sg.segments = append(sg.segments, &segment{path: "seg_50", level: 0, size: 20 * MiB}) + runCompactionCandidatesTestCases(t, testCases, sg, compactionResizeFactor) + }) +} + +func TestSegmentGroup_CompactionCandidates_MaxSize300_Resize08(t *testing.T) { + compactionResizeFactor := float32(.8) + sg := &SegmentGroup{ + segments: createSegments(), + maxSegmentSize: 300 * GiB, + } + + t.Run("existing segments", func(t *testing.T) { + testCases := []testCaseCompactionCandidates{ + { + expectedPair: []int{4, 5}, + expectedLevel: 12, + controlPath: "seg_05+seg_06", + }, + { + expectedPair: []int{4, 5}, + expectedLevel: 12, + controlPath: "seg_05+seg_06+seg_07", + }, + { + expectedPair: []int{6, 7}, + expectedLevel: 12, + controlPath: "seg_09+seg_10", + }, + { + expectedPair: []int{6, 7}, + expectedLevel: 12, + controlPath: "seg_09+seg_10+seg_11", + }, + { + expectedPair: []int{6, 7}, + expectedLevel: 12, + controlPath: "seg_09+seg_10+seg_11+seg_12", + }, + { + expectedPair: []int{6, 7}, + expectedLevel: 12, + controlPath: "seg_09+seg_10+seg_11+seg_12+seg_13", + }, + { + expectedPair: []int{7, 8}, + expectedLevel: 12, + controlPath: "seg_14+seg_15", + }, + { + expectedPair: []int{7, 8}, + expectedLevel: 12, + controlPath: "seg_14+seg_15+seg_16", + }, + { + expectedPair: []int{7, 8}, + expectedLevel: 12, + controlPath: "seg_14+seg_15+seg_16+seg_17", + }, + { + expectedPair: []int{8, 9}, + expectedLevel: 12, + controlPath: "seg_18+seg_19", + }, + { + expectedPair: []int{8, 9}, + expectedLevel: 12, + controlPath: "seg_18+seg_19+seg_20", + }, + { + expectedPair: []int{9, 10}, + expectedLevel: 12, + controlPath: "seg_21+seg_22", + }, + { + expectedPair: []int{10, 11}, + expectedLevel: 12, + controlPath: "seg_23+seg_24", + }, + { + expectedPair: []int{10, 11}, + expectedLevel: 12, + controlPath: "seg_23+seg_24+seg_25", + }, + { + expectedPair: []int{11, 12}, + expectedLevel: 12, + controlPath: "seg_26+seg_27", + }, + { + expectedPair: []int{11, 12}, 
+ expectedLevel: 12, + controlPath: "seg_26+seg_27+seg_28", + }, + { + expectedPair: []int{11, 12}, + expectedLevel: 12, + controlPath: "seg_26+seg_27+seg_28+seg_29", + }, + { + expectedPair: []int{11, 12}, + expectedLevel: 12, + controlPath: "seg_26+seg_27+seg_28+seg_29+seg_30", + }, + { + expectedPair: []int{12, 13}, + expectedLevel: 12, + controlPath: "seg_31+seg_32", + }, + { + expectedPair: []int{12, 13}, + expectedLevel: 12, + controlPath: "seg_31+seg_32+seg_33", + }, + { + expectedPair: []int{13, 14}, + expectedLevel: 12, + controlPath: "seg_34+seg_35", + }, + { + expectedPair: []int{14, 15}, + expectedLevel: 12, + controlPath: "seg_36+seg_37", + }, + { + expectedPair: []int{14, 15}, + expectedLevel: 12, + controlPath: "seg_36+seg_37+seg_38", + }, + { + expectedPair: []int{15, 16}, + expectedLevel: 12, + controlPath: "seg_39+seg_40", + }, + { + expectedPair: []int{15, 16}, + expectedLevel: 12, + controlPath: "seg_39+seg_40+seg_41", + }, + { + expectedPair: nil, + expectedLevel: 0, + }, + } + + runCompactionCandidatesTestCases(t, testCases, sg, compactionResizeFactor) + }) + + t.Run("new segment", func(t *testing.T) { + testCases := []testCaseCompactionCandidates{ + { + expectedPair: []int{23, 24}, + expectedLevel: 1, + controlPath: "seg_49+seg_50", + }, + { + expectedPair: []int{22, 23}, + expectedLevel: 2, + controlPath: "seg_48+seg_49+seg_50", + }, + { + expectedPair: []int{21, 22}, + expectedLevel: 3, + controlPath: "seg_47+seg_48+seg_49+seg_50", + }, + { + expectedPair: []int{20, 21}, + expectedLevel: 4, + controlPath: "seg_46+seg_47+seg_48+seg_49+seg_50", + }, + { + expectedPair: []int{19, 20}, + expectedLevel: 5, + controlPath: "seg_45+seg_46+seg_47+seg_48+seg_49+seg_50", + }, + { + expectedPair: nil, + expectedLevel: 0, + }, + } + + sg.segments = append(sg.segments, &segment{path: "seg_50", level: 0, size: 20 * MiB}) + runCompactionCandidatesTestCases(t, testCases, sg, compactionResizeFactor) + }) +} + +func 
TestSegmentGroup_CompactionCandidates_MaxSize400_Resize09(t *testing.T) { + compactionResizeFactor := float32(.9) + sg := &SegmentGroup{ + segments: createSegments(), + maxSegmentSize: 400 * GiB, + } + + t.Run("existing segments", func(t *testing.T) { + testCases := []testCaseCompactionCandidates{ + { + expectedPair: []int{3, 4}, + expectedLevel: 13, + controlPath: "seg_04+seg_05", + }, + { + expectedPair: []int{4, 5}, + expectedLevel: 13, + controlPath: "seg_06+seg_07", + }, + { + expectedPair: []int{5, 6}, + expectedLevel: 13, + controlPath: "seg_08+seg_09", + }, + { + expectedPair: []int{6, 7}, + expectedLevel: 13, + controlPath: "seg_10+seg_11", + }, + { + expectedPair: []int{7, 8}, + expectedLevel: 13, + controlPath: "seg_12+seg_13", + }, + { + expectedPair: []int{8, 9}, + expectedLevel: 13, + controlPath: "seg_14+seg_15", + }, + { + expectedPair: []int{9, 10}, + expectedLevel: 13, + controlPath: "seg_16+seg_17", + }, + { + expectedPair: []int{10, 11}, + expectedLevel: 13, + controlPath: "seg_18+seg_19", + }, + { + expectedPair: []int{11, 12}, + expectedLevel: 13, + controlPath: "seg_20+seg_21", + }, + { + expectedPair: []int{12, 13}, + expectedLevel: 13, + controlPath: "seg_22+seg_23", + }, + { + expectedPair: []int{13, 14}, + expectedLevel: 13, + controlPath: "seg_24+seg_25", + }, + { + expectedPair: []int{14, 15}, + expectedLevel: 13, + controlPath: "seg_26+seg_27", + }, + { + expectedPair: []int{15, 16}, + expectedLevel: 13, + controlPath: "seg_28+seg_29", + }, + { + expectedPair: []int{16, 17}, + expectedLevel: 13, + controlPath: "seg_30+seg_31", + }, + { + expectedPair: []int{17, 18}, + expectedLevel: 13, + controlPath: "seg_32+seg_33", + }, + { + expectedPair: []int{18, 19}, + expectedLevel: 13, + controlPath: "seg_34+seg_35", + }, + { + expectedPair: []int{19, 20}, + expectedLevel: 13, + controlPath: "seg_36+seg_37", + }, + { + expectedPair: []int{20, 21}, + expectedLevel: 13, + controlPath: "seg_38+seg_39", + }, + { + expectedPair: []int{21, 22}, + 
expectedLevel: 13, + controlPath: "seg_40+seg_41", + }, + { + expectedPair: []int{3, 4}, + expectedLevel: 13, + controlPath: "seg_04+seg_05+seg_06+seg_07", + }, + { + expectedPair: []int{5, 6}, + expectedLevel: 13, + controlPath: "seg_10+seg_11+seg_12+seg_13", + }, + { + expectedPair: []int{6, 7}, + expectedLevel: 13, + controlPath: "seg_14+seg_15+seg_16+seg_17", + }, + { + expectedPair: []int{7, 8}, + expectedLevel: 13, + controlPath: "seg_18+seg_19+seg_20+seg_21", + }, + { + expectedPair: []int{8, 9}, + expectedLevel: 13, + controlPath: "seg_22+seg_23+seg_24+seg_25", + }, + { + expectedPair: []int{9, 10}, + expectedLevel: 13, + controlPath: "seg_26+seg_27+seg_28+seg_29", + }, + { + expectedPair: []int{10, 11}, + expectedLevel: 13, + controlPath: "seg_30+seg_31+seg_32+seg_33", + }, + { + expectedPair: []int{11, 12}, + expectedLevel: 13, + controlPath: "seg_34+seg_35+seg_36+seg_37", + }, + { + expectedPair: nil, + expectedLevel: 0, + }, + } + + runCompactionCandidatesTestCases(t, testCases, sg, compactionResizeFactor) + }) + + t.Run("new segment", func(t *testing.T) { + testCases := []testCaseCompactionCandidates{ + { + expectedPair: []int{21, 22}, + expectedLevel: 1, + controlPath: "seg_49+seg_50", + }, + { + expectedPair: []int{20, 21}, + expectedLevel: 2, + controlPath: "seg_48+seg_49+seg_50", + }, + { + expectedPair: []int{19, 20}, + expectedLevel: 3, + controlPath: "seg_47+seg_48+seg_49+seg_50", + }, + { + expectedPair: []int{18, 19}, + expectedLevel: 4, + controlPath: "seg_46+seg_47+seg_48+seg_49+seg_50", + }, + { + expectedPair: []int{17, 18}, + expectedLevel: 5, + controlPath: "seg_45+seg_46+seg_47+seg_48+seg_49+seg_50", + }, + { + expectedPair: nil, + expectedLevel: 0, + }, + } + + sg.segments = append(sg.segments, &segment{path: "seg_50", level: 0, size: 20 * MiB}) + runCompactionCandidatesTestCases(t, testCases, sg, compactionResizeFactor) + }) +} + +func TestSegmentGroup_CompactionCandidates_MaxSize500_Resize08(t *testing.T) { + compactionResizeFactor 
:= float32(.8) + sg := &SegmentGroup{ + segments: createSegments(), + maxSegmentSize: 500 * GiB, + } + + t.Run("existing segments", func(t *testing.T) { + testCases := []testCaseCompactionCandidates{ + { + expectedPair: []int{3, 4}, + expectedLevel: 13, + controlPath: "seg_04+seg_05", + }, + { + expectedPair: []int{4, 5}, + expectedLevel: 13, + controlPath: "seg_06+seg_07", + }, + { + expectedPair: []int{5, 6}, + expectedLevel: 13, + controlPath: "seg_08+seg_09", + }, + { + expectedPair: []int{6, 7}, + expectedLevel: 13, + controlPath: "seg_10+seg_11", + }, + { + expectedPair: []int{7, 8}, + expectedLevel: 13, + controlPath: "seg_12+seg_13", + }, + { + expectedPair: []int{8, 9}, + expectedLevel: 13, + controlPath: "seg_14+seg_15", + }, + { + expectedPair: []int{9, 10}, + expectedLevel: 13, + controlPath: "seg_16+seg_17", + }, + { + expectedPair: []int{10, 11}, + expectedLevel: 13, + controlPath: "seg_18+seg_19", + }, + { + expectedPair: []int{11, 12}, + expectedLevel: 13, + controlPath: "seg_20+seg_21", + }, + { + expectedPair: []int{12, 13}, + expectedLevel: 13, + controlPath: "seg_22+seg_23", + }, + { + expectedPair: []int{13, 14}, + expectedLevel: 13, + controlPath: "seg_24+seg_25", + }, + { + expectedPair: []int{14, 15}, + expectedLevel: 13, + controlPath: "seg_26+seg_27", + }, + { + expectedPair: []int{15, 16}, + expectedLevel: 13, + controlPath: "seg_28+seg_29", + }, + { + expectedPair: []int{16, 17}, + expectedLevel: 13, + controlPath: "seg_30+seg_31", + }, + { + expectedPair: []int{17, 18}, + expectedLevel: 13, + controlPath: "seg_32+seg_33", + }, + { + expectedPair: []int{18, 19}, + expectedLevel: 13, + controlPath: "seg_34+seg_35", + }, + { + expectedPair: []int{19, 20}, + expectedLevel: 13, + controlPath: "seg_36+seg_37", + }, + { + expectedPair: []int{20, 21}, + expectedLevel: 13, + controlPath: "seg_38+seg_39", + }, + { + expectedPair: []int{21, 22}, + expectedLevel: 13, + controlPath: "seg_40+seg_41", + }, + { + expectedPair: []int{2, 3}, + 
expectedLevel: 13, + controlPath: "seg_03+seg_04+seg_05", + }, + { + expectedPair: []int{2, 3}, + expectedLevel: 13, + controlPath: "seg_03+seg_04+seg_05+seg_06+seg_07", + }, + { + expectedPair: []int{3, 4}, + expectedLevel: 13, + controlPath: "seg_08+seg_09+seg_10+seg_11", + }, + { + expectedPair: []int{3, 4}, + expectedLevel: 13, + controlPath: "seg_08+seg_09+seg_10+seg_11+seg_12+seg_13", + }, + { + expectedPair: []int{4, 5}, + expectedLevel: 13, + controlPath: "seg_14+seg_15+seg_16+seg_17", + }, + { + expectedPair: []int{4, 5}, + expectedLevel: 13, + controlPath: "seg_14+seg_15+seg_16+seg_17+seg_18+seg_19", + }, + { + expectedPair: []int{5, 6}, + expectedLevel: 13, + controlPath: "seg_20+seg_21+seg_22+seg_23", + }, + { + expectedPair: []int{5, 6}, + expectedLevel: 13, + controlPath: "seg_20+seg_21+seg_22+seg_23+seg_24+seg_25", + }, + { + expectedPair: []int{6, 7}, + expectedLevel: 13, + controlPath: "seg_26+seg_27+seg_28+seg_29", + }, + { + expectedPair: []int{6, 7}, + expectedLevel: 13, + controlPath: "seg_26+seg_27+seg_28+seg_29+seg_30+seg_31", + }, + { + expectedPair: []int{6, 7}, + expectedLevel: 13, + controlPath: "seg_26+seg_27+seg_28+seg_29+seg_30+seg_31+seg_32+seg_33", + }, + { + expectedPair: []int{7, 8}, + expectedLevel: 13, + controlPath: "seg_34+seg_35+seg_36+seg_37", + }, + { + expectedPair: []int{8, 9}, + expectedLevel: 13, + controlPath: "seg_38+seg_39+seg_40+seg_41", + }, + { + expectedPair: nil, + expectedLevel: 0, + }, + } + + runCompactionCandidatesTestCases(t, testCases, sg, compactionResizeFactor) + }) + + t.Run("new segment", func(t *testing.T) { + testCases := []testCaseCompactionCandidates{ + { + expectedPair: []int{16, 17}, + expectedLevel: 1, + controlPath: "seg_49+seg_50", + }, + { + expectedPair: []int{15, 16}, + expectedLevel: 2, + controlPath: "seg_48+seg_49+seg_50", + }, + { + expectedPair: []int{14, 15}, + expectedLevel: 3, + controlPath: "seg_47+seg_48+seg_49+seg_50", + }, + { + expectedPair: []int{13, 14}, + expectedLevel: 4, + 
controlPath: "seg_46+seg_47+seg_48+seg_49+seg_50", + }, + { + expectedPair: []int{12, 13}, + expectedLevel: 5, + controlPath: "seg_45+seg_46+seg_47+seg_48+seg_49+seg_50", + }, + { + expectedPair: nil, + expectedLevel: 0, + }, + } + + sg.segments = append(sg.segments, &segment{path: "seg_50", level: 0, size: 20 * MiB}) + runCompactionCandidatesTestCases(t, testCases, sg, compactionResizeFactor) + }) +} + +func TestSegmentGroup_CompactionCandidates_MaxSize600_Resize09(t *testing.T) { + compactionResizeFactor := float32(.9) + sg := &SegmentGroup{ + segments: createSegments(), + maxSegmentSize: 600 * GiB, + } + + t.Run("existing segments", func(t *testing.T) { + testCases := []testCaseCompactionCandidates{ + { + expectedPair: []int{3, 4}, + expectedLevel: 13, + controlPath: "seg_04+seg_05", + }, + { + expectedPair: []int{4, 5}, + expectedLevel: 13, + controlPath: "seg_06+seg_07", + }, + { + expectedPair: []int{5, 6}, + expectedLevel: 13, + controlPath: "seg_08+seg_09", + }, + { + expectedPair: []int{6, 7}, + expectedLevel: 13, + controlPath: "seg_10+seg_11", + }, + { + expectedPair: []int{7, 8}, + expectedLevel: 13, + controlPath: "seg_12+seg_13", + }, + { + expectedPair: []int{8, 9}, + expectedLevel: 13, + controlPath: "seg_14+seg_15", + }, + { + expectedPair: []int{9, 10}, + expectedLevel: 13, + controlPath: "seg_16+seg_17", + }, + { + expectedPair: []int{10, 11}, + expectedLevel: 13, + controlPath: "seg_18+seg_19", + }, + { + expectedPair: []int{11, 12}, + expectedLevel: 13, + controlPath: "seg_20+seg_21", + }, + { + expectedPair: []int{12, 13}, + expectedLevel: 13, + controlPath: "seg_22+seg_23", + }, + { + expectedPair: []int{13, 14}, + expectedLevel: 13, + controlPath: "seg_24+seg_25", + }, + { + expectedPair: []int{14, 15}, + expectedLevel: 13, + controlPath: "seg_26+seg_27", + }, + { + expectedPair: []int{15, 16}, + expectedLevel: 13, + controlPath: "seg_28+seg_29", + }, + { + expectedPair: []int{16, 17}, + expectedLevel: 13, + controlPath: "seg_30+seg_31", + 
}, + { + expectedPair: []int{17, 18}, + expectedLevel: 13, + controlPath: "seg_32+seg_33", + }, + { + expectedPair: []int{18, 19}, + expectedLevel: 13, + controlPath: "seg_34+seg_35", + }, + { + expectedPair: []int{19, 20}, + expectedLevel: 13, + controlPath: "seg_36+seg_37", + }, + { + expectedPair: []int{20, 21}, + expectedLevel: 13, + controlPath: "seg_38+seg_39", + }, + { + expectedPair: []int{21, 22}, + expectedLevel: 13, + controlPath: "seg_40+seg_41", + }, + { + expectedPair: []int{1, 2}, + expectedLevel: 14, + controlPath: "seg_02+seg_03", + }, + { + expectedPair: []int{2, 3}, + expectedLevel: 14, + controlPath: "seg_04+seg_05+seg_06+seg_07", + }, + { + expectedPair: []int{3, 4}, + expectedLevel: 14, + controlPath: "seg_08+seg_09+seg_10+seg_11", + }, + { + expectedPair: []int{4, 5}, + expectedLevel: 14, + controlPath: "seg_12+seg_13+seg_14+seg_15", + }, + { + expectedPair: []int{5, 6}, + expectedLevel: 14, + controlPath: "seg_16+seg_17+seg_18+seg_19", + }, + { + expectedPair: []int{6, 7}, + expectedLevel: 14, + controlPath: "seg_20+seg_21+seg_22+seg_23", + }, + { + expectedPair: []int{7, 8}, + expectedLevel: 14, + controlPath: "seg_24+seg_25+seg_26+seg_27", + }, + { + expectedPair: []int{8, 9}, + expectedLevel: 14, + controlPath: "seg_28+seg_29+seg_30+seg_31", + }, + { + expectedPair: []int{9, 10}, + expectedLevel: 14, + controlPath: "seg_32+seg_33+seg_34+seg_35", + }, + { + expectedPair: []int{10, 11}, + expectedLevel: 14, + controlPath: "seg_36+seg_37+seg_38+seg_39", + }, + { + expectedPair: []int{4, 5}, + expectedLevel: 14, + controlPath: "seg_12+seg_13+seg_14+seg_15+seg_16+seg_17+seg_18+seg_19", + }, + { + expectedPair: nil, + expectedLevel: 0, + }, + } + + runCompactionCandidatesTestCases(t, testCases, sg, compactionResizeFactor) + }) + + t.Run("new segment", func(t *testing.T) { + testCases := []testCaseCompactionCandidates{ + { + expectedPair: []int{18, 19}, + expectedLevel: 1, + controlPath: "seg_49+seg_50", + }, + { + expectedPair: []int{17, 18}, + 
expectedLevel: 2, + controlPath: "seg_48+seg_49+seg_50", + }, + { + expectedPair: []int{16, 17}, + expectedLevel: 3, + controlPath: "seg_47+seg_48+seg_49+seg_50", + }, + { + expectedPair: []int{15, 16}, + expectedLevel: 4, + controlPath: "seg_46+seg_47+seg_48+seg_49+seg_50", + }, + { + expectedPair: []int{14, 15}, + expectedLevel: 5, + controlPath: "seg_45+seg_46+seg_47+seg_48+seg_49+seg_50", + }, + { + expectedPair: nil, + expectedLevel: 0, + }, + } + + sg.segments = append(sg.segments, &segment{path: "seg_50", level: 0, size: 20 * MiB}) + runCompactionCandidatesTestCases(t, testCases, sg, compactionResizeFactor) + }) +} + +func TestSegmentGroup_CompactionCandidates_MaxSize300To600_Resize08(t *testing.T) { + compactionResizeFactor := float32(.8) + sg := &SegmentGroup{ + segments: createSegments(), + maxSegmentSize: 300 * GiB, + } + + t.Run("compact with 300 GiB limit", func(t *testing.T) { + for pair, level := sg.findCompactionCandidates(); pair != nil; pair, level = sg.findCompactionCandidates() { + compactSegments(sg, pair, level, compactionResizeFactor) + } + }) + + t.Run("compact again with 600 GiB limit", func(t *testing.T) { + testCases := []testCaseCompactionCandidates{ + { + expectedPair: []int{3, 4}, + expectedLevel: 13, + controlPath: "seg_04+seg_05+seg_06+seg_07", + }, + { + expectedPair: []int{4, 5}, + expectedLevel: 13, + controlPath: "seg_08+seg_09+seg_10+seg_11+seg_12+seg_13", + }, + { + expectedPair: []int{5, 6}, + expectedLevel: 13, + controlPath: "seg_14+seg_15+seg_16+seg_17+seg_18+seg_19+seg_20", + }, + { + expectedPair: []int{6, 7}, + expectedLevel: 13, + controlPath: "seg_21+seg_22+seg_23+seg_24+seg_25", + }, + { + expectedPair: []int{7, 8}, + expectedLevel: 13, + controlPath: "seg_26+seg_27+seg_28+seg_29+seg_30+seg_31+seg_32+seg_33", + }, + { + expectedPair: []int{8, 9}, + expectedLevel: 13, + controlPath: "seg_34+seg_35+seg_36+seg_37+seg_38", + }, + { + expectedPair: []int{9, 10}, + expectedLevel: 13, + controlPath: 
"seg_39+seg_40+seg_41+seg_42", + }, + { + expectedPair: []int{1, 2}, + expectedLevel: 14, + controlPath: "seg_02+seg_03", + }, + { + expectedPair: nil, + expectedLevel: 0, + }, + } + + sg.maxSegmentSize = 600 * GiB + runCompactionCandidatesTestCases(t, testCases, sg, compactionResizeFactor) + }) +} + +func TestSegmentGroup_CompactionCandidates_Leftover(t *testing.T) { + compactionResizeFactor := float32(1) + sg := &SegmentGroup{ + segments: createSegments(), + compactLeftOverSegments: true, + } + + t.Run("existing segments", func(t *testing.T) { + testCases := []testCaseCompactionCandidates{ + { + expectedPair: []int{3, 4}, + expectedLevel: 13, + controlPath: "seg_04+seg_05", + }, + { + expectedPair: []int{4, 5}, + expectedLevel: 13, + controlPath: "seg_06+seg_07", + }, + { + expectedPair: []int{5, 6}, + expectedLevel: 13, + controlPath: "seg_08+seg_09", + }, + { + expectedPair: []int{6, 7}, + expectedLevel: 13, + controlPath: "seg_10+seg_11", + }, + { + expectedPair: []int{7, 8}, + expectedLevel: 13, + controlPath: "seg_12+seg_13", + }, + { + expectedPair: []int{8, 9}, + expectedLevel: 13, + controlPath: "seg_14+seg_15", + }, + { + expectedPair: []int{9, 10}, + expectedLevel: 13, + controlPath: "seg_16+seg_17", + }, + { + expectedPair: []int{10, 11}, + expectedLevel: 13, + controlPath: "seg_18+seg_19", + }, + { + expectedPair: []int{11, 12}, + expectedLevel: 13, + controlPath: "seg_20+seg_21", + }, + { + expectedPair: []int{12, 13}, + expectedLevel: 13, + controlPath: "seg_22+seg_23", + }, + { + expectedPair: []int{13, 14}, + expectedLevel: 13, + controlPath: "seg_24+seg_25", + }, + { + expectedPair: []int{14, 15}, + expectedLevel: 13, + controlPath: "seg_26+seg_27", + }, + { + expectedPair: []int{15, 16}, + expectedLevel: 13, + controlPath: "seg_28+seg_29", + }, + { + expectedPair: []int{16, 17}, + expectedLevel: 13, + controlPath: "seg_30+seg_31", + }, + { + expectedPair: []int{17, 18}, + expectedLevel: 13, + controlPath: "seg_32+seg_33", + }, + { + 
expectedPair: []int{18, 19}, + expectedLevel: 13, + controlPath: "seg_34+seg_35", + }, + { + expectedPair: []int{19, 20}, + expectedLevel: 13, + controlPath: "seg_36+seg_37", + }, + { + expectedPair: []int{20, 21}, + expectedLevel: 13, + controlPath: "seg_38+seg_39", + }, + { + expectedPair: []int{21, 22}, + expectedLevel: 13, + controlPath: "seg_40+seg_41", + }, + { + expectedPair: []int{1, 2}, + expectedLevel: 14, + controlPath: "seg_02+seg_03", + }, + { + expectedPair: []int{2, 3}, + expectedLevel: 14, + controlPath: "seg_04+seg_05+seg_06+seg_07", + }, + { + expectedPair: []int{3, 4}, + expectedLevel: 14, + controlPath: "seg_08+seg_09+seg_10+seg_11", + }, + { + expectedPair: []int{4, 5}, + expectedLevel: 14, + controlPath: "seg_12+seg_13+seg_14+seg_15", + }, + { + expectedPair: []int{5, 6}, + expectedLevel: 14, + controlPath: "seg_16+seg_17+seg_18+seg_19", + }, + { + expectedPair: []int{6, 7}, + expectedLevel: 14, + controlPath: "seg_20+seg_21+seg_22+seg_23", + }, + { + expectedPair: []int{7, 8}, + expectedLevel: 14, + controlPath: "seg_24+seg_25+seg_26+seg_27", + }, + { + expectedPair: []int{8, 9}, + expectedLevel: 14, + controlPath: "seg_28+seg_29+seg_30+seg_31", + }, + { + expectedPair: []int{9, 10}, + expectedLevel: 14, + controlPath: "seg_32+seg_33+seg_34+seg_35", + }, + { + expectedPair: []int{10, 11}, + expectedLevel: 14, + controlPath: "seg_36+seg_37+seg_38+seg_39", + }, + { + expectedPair: []int{0, 1}, + expectedLevel: 15, + controlPath: "seg_01+seg_02+seg_03", + }, + { + expectedPair: []int{1, 2}, + expectedLevel: 15, + controlPath: "seg_04+seg_05+seg_06+seg_07+seg_08+seg_09+seg_10+seg_11", + }, + { + expectedPair: []int{2, 3}, + expectedLevel: 15, + controlPath: "seg_12+seg_13+seg_14+seg_15+seg_16+seg_17+seg_18+seg_19", + }, + { + expectedPair: []int{3, 4}, + expectedLevel: 15, + controlPath: "seg_20+seg_21+seg_22+seg_23+seg_24+seg_25+seg_26+seg_27", + }, + { + expectedPair: []int{4, 5}, + expectedLevel: 15, + controlPath: 
"seg_28+seg_29+seg_30+seg_31+seg_32+seg_33+seg_34+seg_35", + }, + { + expectedPair: []int{0, 1}, + expectedLevel: 16, + controlPath: "seg_01+seg_02+seg_03+seg_04+seg_05+seg_06+seg_07+seg_08+seg_09+seg_10+seg_11", + }, + { + expectedPair: []int{1, 2}, + expectedLevel: 16, + controlPath: "seg_12+seg_13+seg_14+seg_15+seg_16+seg_17+seg_18+seg_19+seg_20+seg_21+seg_22+seg_23+seg_24+seg_25+seg_26+seg_27", + }, + { + expectedPair: []int{0, 1}, + expectedLevel: 17, + controlPath: "seg_01+seg_02+seg_03+seg_04+seg_05+seg_06+seg_07+seg_08+seg_09+seg_10+seg_11+seg_12+seg_13+seg_14+seg_15+seg_16+seg_17+seg_18+seg_19+seg_20+seg_21+seg_22+seg_23+seg_24+seg_25+seg_26+seg_27", + }, + { + expectedPair: []int{10, 11}, + expectedLevel: 1, + controlPath: "seg_48+seg_49", + }, + { + expectedPair: []int{9, 10}, + expectedLevel: 2, + controlPath: "seg_47+seg_48+seg_49", + }, + { + expectedPair: []int{8, 9}, + expectedLevel: 3, + controlPath: "seg_46+seg_47+seg_48+seg_49", + }, + { + expectedPair: []int{7, 8}, + expectedLevel: 4, + controlPath: "seg_45+seg_46+seg_47+seg_48+seg_49", + }, + { + expectedPair: []int{5, 6}, + expectedLevel: 8, + controlPath: "seg_43+seg_44", + }, + { + expectedPair: []int{2, 3}, + expectedLevel: 14, + controlPath: "seg_36+seg_37+seg_38+seg_39+seg_40+seg_41", + }, + { + expectedPair: []int{1, 2}, + expectedLevel: 15, + controlPath: "seg_28+seg_29+seg_30+seg_31+seg_32+seg_33+seg_34+seg_35+seg_36+seg_37+seg_38+seg_39+seg_40+seg_41", + }, + { + expectedPair: nil, + expectedLevel: 0, + }, + } + + runCompactionCandidatesTestCases(t, testCases, sg, compactionResizeFactor) + }) + + t.Run("new segment", func(t *testing.T) { + testCases := []testCaseCompactionCandidates{ + { + expectedPair: nil, + expectedLevel: 0, + }, + } + + sg.segments = append(sg.segments, &segment{path: "seg_50", level: 0, size: 20 * MiB}) + runCompactionCandidatesTestCases(t, testCases, sg, compactionResizeFactor) + }) +} + +func TestSegmentGroup_CompactionCandidates_MaxSize300_Resize08_Leftover(t 
*testing.T) { + compactionResizeFactor := float32(.8) + sg := &SegmentGroup{ + segments: createSegments(), + maxSegmentSize: 300 * GiB, + compactLeftOverSegments: true, + } + + t.Run("existing segments", func(t *testing.T) { + testCases := []testCaseCompactionCandidates{ + { + expectedPair: []int{4, 5}, + expectedLevel: 12, + controlPath: "seg_05+seg_06", + }, + { + expectedPair: []int{4, 5}, + expectedLevel: 12, + controlPath: "seg_05+seg_06+seg_07", + }, + { + expectedPair: []int{6, 7}, + expectedLevel: 12, + controlPath: "seg_09+seg_10", + }, + { + expectedPair: []int{6, 7}, + expectedLevel: 12, + controlPath: "seg_09+seg_10+seg_11", + }, + { + expectedPair: []int{6, 7}, + expectedLevel: 12, + controlPath: "seg_09+seg_10+seg_11+seg_12", + }, + { + expectedPair: []int{6, 7}, + expectedLevel: 12, + controlPath: "seg_09+seg_10+seg_11+seg_12+seg_13", + }, + { + expectedPair: []int{7, 8}, + expectedLevel: 12, + controlPath: "seg_14+seg_15", + }, + { + expectedPair: []int{7, 8}, + expectedLevel: 12, + controlPath: "seg_14+seg_15+seg_16", + }, + { + expectedPair: []int{7, 8}, + expectedLevel: 12, + controlPath: "seg_14+seg_15+seg_16+seg_17", + }, + { + expectedPair: []int{8, 9}, + expectedLevel: 12, + controlPath: "seg_18+seg_19", + }, + { + expectedPair: []int{8, 9}, + expectedLevel: 12, + controlPath: "seg_18+seg_19+seg_20", + }, + { + expectedPair: []int{9, 10}, + expectedLevel: 12, + controlPath: "seg_21+seg_22", + }, + { + expectedPair: []int{10, 11}, + expectedLevel: 12, + controlPath: "seg_23+seg_24", + }, + { + expectedPair: []int{10, 11}, + expectedLevel: 12, + controlPath: "seg_23+seg_24+seg_25", + }, + { + expectedPair: []int{11, 12}, + expectedLevel: 12, + controlPath: "seg_26+seg_27", + }, + { + expectedPair: []int{11, 12}, + expectedLevel: 12, + controlPath: "seg_26+seg_27+seg_28", + }, + { + expectedPair: []int{11, 12}, + expectedLevel: 12, + controlPath: "seg_26+seg_27+seg_28+seg_29", + }, + { + expectedPair: []int{11, 12}, + expectedLevel: 12, + 
controlPath: "seg_26+seg_27+seg_28+seg_29+seg_30", + }, + { + expectedPair: []int{12, 13}, + expectedLevel: 12, + controlPath: "seg_31+seg_32", + }, + { + expectedPair: []int{12, 13}, + expectedLevel: 12, + controlPath: "seg_31+seg_32+seg_33", + }, + { + expectedPair: []int{13, 14}, + expectedLevel: 12, + controlPath: "seg_34+seg_35", + }, + { + expectedPair: []int{14, 15}, + expectedLevel: 12, + controlPath: "seg_36+seg_37", + }, + { + expectedPair: []int{14, 15}, + expectedLevel: 12, + controlPath: "seg_36+seg_37+seg_38", + }, + { + expectedPair: []int{15, 16}, + expectedLevel: 12, + controlPath: "seg_39+seg_40", + }, + { + expectedPair: []int{15, 16}, + expectedLevel: 12, + controlPath: "seg_39+seg_40+seg_41", + }, + { + expectedPair: []int{22, 23}, + expectedLevel: 1, + controlPath: "seg_48+seg_49", + }, + { + expectedPair: []int{21, 22}, + expectedLevel: 2, + controlPath: "seg_47+seg_48+seg_49", + }, + { + expectedPair: []int{20, 21}, + expectedLevel: 3, + controlPath: "seg_46+seg_47+seg_48+seg_49", + }, + { + expectedPair: []int{19, 20}, + expectedLevel: 4, + controlPath: "seg_45+seg_46+seg_47+seg_48+seg_49", + }, + { + expectedPair: []int{17, 18}, + expectedLevel: 8, + controlPath: "seg_43+seg_44", + }, + { + expectedPair: nil, + expectedLevel: 0, + }, + } + + runCompactionCandidatesTestCases(t, testCases, sg, compactionResizeFactor) + }) + + t.Run("new segment", func(t *testing.T) { + testCases := []testCaseCompactionCandidates{ + { + expectedPair: nil, + expectedLevel: 0, + }, + } + + sg.segments = append(sg.segments, &segment{path: "seg_50", level: 0, size: 20 * MiB}) + runCompactionCandidatesTestCases(t, testCases, sg, compactionResizeFactor) + }) +} + +func TestSegmentGroup_CompactionCandidates_MaxSize400_Resize09_Leftover(t *testing.T) { + compactionResizeFactor := float32(.9) + sg := &SegmentGroup{ + segments: createSegments(), + maxSegmentSize: 400 * GiB, + compactLeftOverSegments: true, + } + + t.Run("existing segments", func(t *testing.T) { + 
testCases := []testCaseCompactionCandidates{ + { + expectedPair: []int{3, 4}, + expectedLevel: 13, + controlPath: "seg_04+seg_05", + }, + { + expectedPair: []int{4, 5}, + expectedLevel: 13, + controlPath: "seg_06+seg_07", + }, + { + expectedPair: []int{5, 6}, + expectedLevel: 13, + controlPath: "seg_08+seg_09", + }, + { + expectedPair: []int{6, 7}, + expectedLevel: 13, + controlPath: "seg_10+seg_11", + }, + { + expectedPair: []int{7, 8}, + expectedLevel: 13, + controlPath: "seg_12+seg_13", + }, + { + expectedPair: []int{8, 9}, + expectedLevel: 13, + controlPath: "seg_14+seg_15", + }, + { + expectedPair: []int{9, 10}, + expectedLevel: 13, + controlPath: "seg_16+seg_17", + }, + { + expectedPair: []int{10, 11}, + expectedLevel: 13, + controlPath: "seg_18+seg_19", + }, + { + expectedPair: []int{11, 12}, + expectedLevel: 13, + controlPath: "seg_20+seg_21", + }, + { + expectedPair: []int{12, 13}, + expectedLevel: 13, + controlPath: "seg_22+seg_23", + }, + { + expectedPair: []int{13, 14}, + expectedLevel: 13, + controlPath: "seg_24+seg_25", + }, + { + expectedPair: []int{14, 15}, + expectedLevel: 13, + controlPath: "seg_26+seg_27", + }, + { + expectedPair: []int{15, 16}, + expectedLevel: 13, + controlPath: "seg_28+seg_29", + }, + { + expectedPair: []int{16, 17}, + expectedLevel: 13, + controlPath: "seg_30+seg_31", + }, + { + expectedPair: []int{17, 18}, + expectedLevel: 13, + controlPath: "seg_32+seg_33", + }, + { + expectedPair: []int{18, 19}, + expectedLevel: 13, + controlPath: "seg_34+seg_35", + }, + { + expectedPair: []int{19, 20}, + expectedLevel: 13, + controlPath: "seg_36+seg_37", + }, + { + expectedPair: []int{20, 21}, + expectedLevel: 13, + controlPath: "seg_38+seg_39", + }, + { + expectedPair: []int{21, 22}, + expectedLevel: 13, + controlPath: "seg_40+seg_41", + }, + { + expectedPair: []int{3, 4}, + expectedLevel: 13, + controlPath: "seg_04+seg_05+seg_06+seg_07", + }, + { + expectedPair: []int{5, 6}, + expectedLevel: 13, + controlPath: 
"seg_10+seg_11+seg_12+seg_13", + }, + { + expectedPair: []int{6, 7}, + expectedLevel: 13, + controlPath: "seg_14+seg_15+seg_16+seg_17", + }, + { + expectedPair: []int{7, 8}, + expectedLevel: 13, + controlPath: "seg_18+seg_19+seg_20+seg_21", + }, + { + expectedPair: []int{8, 9}, + expectedLevel: 13, + controlPath: "seg_22+seg_23+seg_24+seg_25", + }, + { + expectedPair: []int{9, 10}, + expectedLevel: 13, + controlPath: "seg_26+seg_27+seg_28+seg_29", + }, + { + expectedPair: []int{10, 11}, + expectedLevel: 13, + controlPath: "seg_30+seg_31+seg_32+seg_33", + }, + { + expectedPair: []int{11, 12}, + expectedLevel: 13, + controlPath: "seg_34+seg_35+seg_36+seg_37", + }, + { + expectedPair: []int{20, 21}, + expectedLevel: 1, + controlPath: "seg_48+seg_49", + }, + { + expectedPair: []int{19, 20}, + expectedLevel: 2, + controlPath: "seg_47+seg_48+seg_49", + }, + { + expectedPair: []int{18, 19}, + expectedLevel: 3, + controlPath: "seg_46+seg_47+seg_48+seg_49", + }, + { + expectedPair: []int{17, 18}, + expectedLevel: 4, + controlPath: "seg_45+seg_46+seg_47+seg_48+seg_49", + }, + { + expectedPair: []int{15, 16}, + expectedLevel: 8, + controlPath: "seg_43+seg_44", + }, + { + expectedPair: []int{13, 14}, + expectedLevel: 13, + controlPath: "seg_40+seg_41+seg_42", + }, + { + expectedPair: nil, + expectedLevel: 0, + }, + } + + runCompactionCandidatesTestCases(t, testCases, sg, compactionResizeFactor) + }) + + t.Run("new segment", func(t *testing.T) { + testCases := []testCaseCompactionCandidates{ + { + expectedPair: nil, + expectedLevel: 0, + }, + } + + sg.segments = append(sg.segments, &segment{path: "seg_50", level: 0, size: 20 * MiB}) + runCompactionCandidatesTestCases(t, testCases, sg, compactionResizeFactor) + }) +} + +func TestSegmentGroup_CompactionCandidates_MaxSize500_Resize08_Leftover(t *testing.T) { + compactionResizeFactor := float32(.8) + sg := &SegmentGroup{ + segments: createSegments(), + maxSegmentSize: 500 * GiB, + compactLeftOverSegments: true, + } + + 
t.Run("existing segments", func(t *testing.T) { + testCases := []testCaseCompactionCandidates{ + { + expectedPair: []int{3, 4}, + expectedLevel: 13, + controlPath: "seg_04+seg_05", + }, + { + expectedPair: []int{4, 5}, + expectedLevel: 13, + controlPath: "seg_06+seg_07", + }, + { + expectedPair: []int{5, 6}, + expectedLevel: 13, + controlPath: "seg_08+seg_09", + }, + { + expectedPair: []int{6, 7}, + expectedLevel: 13, + controlPath: "seg_10+seg_11", + }, + { + expectedPair: []int{7, 8}, + expectedLevel: 13, + controlPath: "seg_12+seg_13", + }, + { + expectedPair: []int{8, 9}, + expectedLevel: 13, + controlPath: "seg_14+seg_15", + }, + { + expectedPair: []int{9, 10}, + expectedLevel: 13, + controlPath: "seg_16+seg_17", + }, + { + expectedPair: []int{10, 11}, + expectedLevel: 13, + controlPath: "seg_18+seg_19", + }, + { + expectedPair: []int{11, 12}, + expectedLevel: 13, + controlPath: "seg_20+seg_21", + }, + { + expectedPair: []int{12, 13}, + expectedLevel: 13, + controlPath: "seg_22+seg_23", + }, + { + expectedPair: []int{13, 14}, + expectedLevel: 13, + controlPath: "seg_24+seg_25", + }, + { + expectedPair: []int{14, 15}, + expectedLevel: 13, + controlPath: "seg_26+seg_27", + }, + { + expectedPair: []int{15, 16}, + expectedLevel: 13, + controlPath: "seg_28+seg_29", + }, + { + expectedPair: []int{16, 17}, + expectedLevel: 13, + controlPath: "seg_30+seg_31", + }, + { + expectedPair: []int{17, 18}, + expectedLevel: 13, + controlPath: "seg_32+seg_33", + }, + { + expectedPair: []int{18, 19}, + expectedLevel: 13, + controlPath: "seg_34+seg_35", + }, + { + expectedPair: []int{19, 20}, + expectedLevel: 13, + controlPath: "seg_36+seg_37", + }, + { + expectedPair: []int{20, 21}, + expectedLevel: 13, + controlPath: "seg_38+seg_39", + }, + { + expectedPair: []int{21, 22}, + expectedLevel: 13, + controlPath: "seg_40+seg_41", + }, + { + expectedPair: []int{2, 3}, + expectedLevel: 13, + controlPath: "seg_03+seg_04+seg_05", + }, + { + expectedPair: []int{2, 3}, + expectedLevel: 
13, + controlPath: "seg_03+seg_04+seg_05+seg_06+seg_07", + }, + { + expectedPair: []int{3, 4}, + expectedLevel: 13, + controlPath: "seg_08+seg_09+seg_10+seg_11", + }, + { + expectedPair: []int{3, 4}, + expectedLevel: 13, + controlPath: "seg_08+seg_09+seg_10+seg_11+seg_12+seg_13", + }, + { + expectedPair: []int{4, 5}, + expectedLevel: 13, + controlPath: "seg_14+seg_15+seg_16+seg_17", + }, + { + expectedPair: []int{4, 5}, + expectedLevel: 13, + controlPath: "seg_14+seg_15+seg_16+seg_17+seg_18+seg_19", + }, + { + expectedPair: []int{5, 6}, + expectedLevel: 13, + controlPath: "seg_20+seg_21+seg_22+seg_23", + }, + { + expectedPair: []int{5, 6}, + expectedLevel: 13, + controlPath: "seg_20+seg_21+seg_22+seg_23+seg_24+seg_25", + }, + { + expectedPair: []int{6, 7}, + expectedLevel: 13, + controlPath: "seg_26+seg_27+seg_28+seg_29", + }, + { + expectedPair: []int{6, 7}, + expectedLevel: 13, + controlPath: "seg_26+seg_27+seg_28+seg_29+seg_30+seg_31", + }, + { + expectedPair: []int{6, 7}, + expectedLevel: 13, + controlPath: "seg_26+seg_27+seg_28+seg_29+seg_30+seg_31+seg_32+seg_33", + }, + { + expectedPair: []int{7, 8}, + expectedLevel: 13, + controlPath: "seg_34+seg_35+seg_36+seg_37", + }, + { + expectedPair: []int{8, 9}, + expectedLevel: 13, + controlPath: "seg_38+seg_39+seg_40+seg_41", + }, + { + expectedPair: []int{15, 16}, + expectedLevel: 1, + controlPath: "seg_48+seg_49", + }, + { + expectedPair: []int{14, 15}, + expectedLevel: 2, + controlPath: "seg_47+seg_48+seg_49", + }, + { + expectedPair: []int{13, 14}, + expectedLevel: 3, + controlPath: "seg_46+seg_47+seg_48+seg_49", + }, + { + expectedPair: []int{12, 13}, + expectedLevel: 4, + controlPath: "seg_45+seg_46+seg_47+seg_48+seg_49", + }, + { + expectedPair: []int{10, 11}, + expectedLevel: 8, + controlPath: "seg_43+seg_44", + }, + { + expectedPair: nil, + expectedLevel: 0, + }, + } + + runCompactionCandidatesTestCases(t, testCases, sg, compactionResizeFactor) + }) + + t.Run("new segment", func(t *testing.T) { + testCases 
:= []testCaseCompactionCandidates{ + { + expectedPair: nil, + expectedLevel: 0, + }, + } + + sg.segments = append(sg.segments, &segment{path: "seg_50", level: 0, size: 20 * MiB}) + runCompactionCandidatesTestCases(t, testCases, sg, compactionResizeFactor) + }) +} + +func TestSegmentGroup_CompactionCandidates_MaxSize600_Resize09_Leftover(t *testing.T) { + compactionResizeFactor := float32(.9) + sg := &SegmentGroup{ + segments: createSegments(), + maxSegmentSize: 600 * GiB, + compactLeftOverSegments: true, + } + + t.Run("existing segments", func(t *testing.T) { + testCases := []testCaseCompactionCandidates{ + { + expectedPair: []int{3, 4}, + expectedLevel: 13, + controlPath: "seg_04+seg_05", + }, + { + expectedPair: []int{4, 5}, + expectedLevel: 13, + controlPath: "seg_06+seg_07", + }, + { + expectedPair: []int{5, 6}, + expectedLevel: 13, + controlPath: "seg_08+seg_09", + }, + { + expectedPair: []int{6, 7}, + expectedLevel: 13, + controlPath: "seg_10+seg_11", + }, + { + expectedPair: []int{7, 8}, + expectedLevel: 13, + controlPath: "seg_12+seg_13", + }, + { + expectedPair: []int{8, 9}, + expectedLevel: 13, + controlPath: "seg_14+seg_15", + }, + { + expectedPair: []int{9, 10}, + expectedLevel: 13, + controlPath: "seg_16+seg_17", + }, + { + expectedPair: []int{10, 11}, + expectedLevel: 13, + controlPath: "seg_18+seg_19", + }, + { + expectedPair: []int{11, 12}, + expectedLevel: 13, + controlPath: "seg_20+seg_21", + }, + { + expectedPair: []int{12, 13}, + expectedLevel: 13, + controlPath: "seg_22+seg_23", + }, + { + expectedPair: []int{13, 14}, + expectedLevel: 13, + controlPath: "seg_24+seg_25", + }, + { + expectedPair: []int{14, 15}, + expectedLevel: 13, + controlPath: "seg_26+seg_27", + }, + { + expectedPair: []int{15, 16}, + expectedLevel: 13, + controlPath: "seg_28+seg_29", + }, + { + expectedPair: []int{16, 17}, + expectedLevel: 13, + controlPath: "seg_30+seg_31", + }, + { + expectedPair: []int{17, 18}, + expectedLevel: 13, + controlPath: "seg_32+seg_33", + }, + { 
+ expectedPair: []int{18, 19}, + expectedLevel: 13, + controlPath: "seg_34+seg_35", + }, + { + expectedPair: []int{19, 20}, + expectedLevel: 13, + controlPath: "seg_36+seg_37", + }, + { + expectedPair: []int{20, 21}, + expectedLevel: 13, + controlPath: "seg_38+seg_39", + }, + { + expectedPair: []int{21, 22}, + expectedLevel: 13, + controlPath: "seg_40+seg_41", + }, + { + expectedPair: []int{1, 2}, + expectedLevel: 14, + controlPath: "seg_02+seg_03", + }, + { + expectedPair: []int{2, 3}, + expectedLevel: 14, + controlPath: "seg_04+seg_05+seg_06+seg_07", + }, + { + expectedPair: []int{3, 4}, + expectedLevel: 14, + controlPath: "seg_08+seg_09+seg_10+seg_11", + }, + { + expectedPair: []int{4, 5}, + expectedLevel: 14, + controlPath: "seg_12+seg_13+seg_14+seg_15", + }, + { + expectedPair: []int{5, 6}, + expectedLevel: 14, + controlPath: "seg_16+seg_17+seg_18+seg_19", + }, + { + expectedPair: []int{6, 7}, + expectedLevel: 14, + controlPath: "seg_20+seg_21+seg_22+seg_23", + }, + { + expectedPair: []int{7, 8}, + expectedLevel: 14, + controlPath: "seg_24+seg_25+seg_26+seg_27", + }, + { + expectedPair: []int{8, 9}, + expectedLevel: 14, + controlPath: "seg_28+seg_29+seg_30+seg_31", + }, + { + expectedPair: []int{9, 10}, + expectedLevel: 14, + controlPath: "seg_32+seg_33+seg_34+seg_35", + }, + { + expectedPair: []int{10, 11}, + expectedLevel: 14, + controlPath: "seg_36+seg_37+seg_38+seg_39", + }, + { + expectedPair: []int{4, 5}, + expectedLevel: 14, + controlPath: "seg_12+seg_13+seg_14+seg_15+seg_16+seg_17+seg_18+seg_19", + }, + { + expectedPair: []int{17, 18}, + expectedLevel: 1, + controlPath: "seg_48+seg_49", + }, + { + expectedPair: []int{16, 17}, + expectedLevel: 2, + controlPath: "seg_47+seg_48+seg_49", + }, + { + expectedPair: []int{15, 16}, + expectedLevel: 3, + controlPath: "seg_46+seg_47+seg_48+seg_49", + }, + { + expectedPair: []int{14, 15}, + expectedLevel: 4, + controlPath: "seg_45+seg_46+seg_47+seg_48+seg_49", + }, + { + expectedPair: []int{12, 13}, + 
expectedLevel: 8, + controlPath: "seg_43+seg_44", + }, + { + expectedPair: []int{10, 11}, + expectedLevel: 13, + controlPath: "seg_40+seg_41+seg_42", + }, + { + expectedPair: nil, + expectedLevel: 0, + }, + } + + runCompactionCandidatesTestCases(t, testCases, sg, compactionResizeFactor) + }) + + t.Run("new segment", func(t *testing.T) { + testCases := []testCaseCompactionCandidates{ + { + expectedPair: nil, + expectedLevel: 0, + }, + } + + sg.segments = append(sg.segments, &segment{path: "seg_50", level: 0, size: 20 * MiB}) + runCompactionCandidatesTestCases(t, testCases, sg, compactionResizeFactor) + }) +} + +func TestSegmentGroup_CompactionCandidates_MaxSize300To600_Resize08_Leftover(t *testing.T) { + compactionResizeFactor := float32(.8) + sg := &SegmentGroup{ + segments: createSegments(), + maxSegmentSize: 300 * GiB, + compactLeftOverSegments: true, + } + + t.Run("compact with 300 GiB limit", func(t *testing.T) { + for pair, level := sg.findCompactionCandidates(); pair != nil; pair, level = sg.findCompactionCandidates() { + compactSegments(sg, pair, level, compactionResizeFactor) + } + }) + + t.Run("compact again with 600 GiB limit", func(t *testing.T) { + testCases := []testCaseCompactionCandidates{ + { + expectedPair: []int{3, 4}, + expectedLevel: 13, + controlPath: "seg_04+seg_05+seg_06+seg_07", + }, + { + expectedPair: []int{4, 5}, + expectedLevel: 13, + controlPath: "seg_08+seg_09+seg_10+seg_11+seg_12+seg_13", + }, + { + expectedPair: []int{5, 6}, + expectedLevel: 13, + controlPath: "seg_14+seg_15+seg_16+seg_17+seg_18+seg_19+seg_20", + }, + { + expectedPair: []int{6, 7}, + expectedLevel: 13, + controlPath: "seg_21+seg_22+seg_23+seg_24+seg_25", + }, + { + expectedPair: []int{7, 8}, + expectedLevel: 13, + controlPath: "seg_26+seg_27+seg_28+seg_29+seg_30+seg_31+seg_32+seg_33", + }, + { + expectedPair: []int{8, 9}, + expectedLevel: 13, + controlPath: "seg_34+seg_35+seg_36+seg_37+seg_38", + }, + { + expectedPair: []int{9, 10}, + expectedLevel: 13, + 
controlPath: "seg_39+seg_40+seg_41+seg_42", + }, + { + expectedPair: []int{1, 2}, + expectedLevel: 14, + controlPath: "seg_02+seg_03", + }, + { + expectedPair: nil, + expectedLevel: 0, + }, + } + + sg.maxSegmentSize = 600 * GiB + runCompactionCandidatesTestCases(t, testCases, sg, compactionResizeFactor) + }) +} + +type testCaseCompactionCandidates struct { + expectedPair []int + expectedLevel uint16 + controlPath string +} + +func runCompactionCandidatesTestCases(t *testing.T, testCases []testCaseCompactionCandidates, + sg *SegmentGroup, compactionResizeFactor float32, +) { + for _, tc := range testCases { + t.Run(fmt.Sprintf("find candidates %s", tc.controlPath), func(t *testing.T) { + pair, level := sg.findCompactionCandidates() + require.ElementsMatch(t, tc.expectedPair, pair) + require.Equal(t, tc.expectedLevel, level) + + if pair != nil { + t.Run("compact", func(t *testing.T) { + compactSegments(sg, pair, tc.expectedLevel, compactionResizeFactor) + assert.Equal(t, tc.controlPath, sg.segments[pair[0]].getPath()) + }) + } + }) + } +} + +func compactSegments(sg *SegmentGroup, pair []int, newLevel uint16, resizeFactor float32) { + leftId, rightId := pair[0], pair[1] + left, right := sg.segments[leftId], sg.segments[rightId] + + seg := &segment{ + path: left.getPath() + "+" + right.getPath(), + size: int64(float32(left.getSize()+right.getSize()) * resizeFactor), + level: newLevel, + observeMetaWrite: func(n int64) {}, + } + + sg.segments[leftId] = seg + sg.segments = append(sg.segments[:rightId], sg.segments[rightId+1:]...) 
+} + +func createSegments() []Segment { + return []Segment{ + &segment{path: "seg_01", level: 14, size: 836263427894}, + &segment{path: "seg_02", level: 13, size: 374869132170}, + &segment{path: "seg_03", level: 13, size: 208332808374}, + &segment{path: "seg_04", level: 12, size: 239015897301}, + &segment{path: "seg_05", level: 12, size: 106610102545}, + &segment{path: "seg_06", level: 12, size: 23426179335}, + &segment{path: "seg_07", level: 12, size: 87965523667}, + &segment{path: "seg_08", level: 12, size: 191582236181}, + &segment{path: "seg_09", level: 12, size: 210767274757}, + &segment{path: "seg_10", level: 12, size: 59578965712}, + &segment{path: "seg_11", level: 12, size: 64190979390}, + &segment{path: "seg_12", level: 12, size: 82209515753}, + &segment{path: "seg_13", level: 12, size: 75902833663}, + &segment{path: "seg_14", level: 12, size: 118868567716}, + &segment{path: "seg_15", level: 12, size: 127672461922}, + &segment{path: "seg_16", level: 12, size: 98975345366}, + &segment{path: "seg_17", level: 12, size: 68258824385}, + &segment{path: "seg_18", level: 12, size: 100849005187}, + &segment{path: "seg_19", level: 12, size: 102541173132}, + &segment{path: "seg_20", level: 12, size: 95981553544}, + &segment{path: "seg_21", level: 12, size: 159801966562}, + &segment{path: "seg_22", level: 12, size: 124441347108}, + &segment{path: "seg_23", level: 12, size: 134382829443}, + &segment{path: "seg_24", level: 12, size: 120928049419}, + &segment{path: "seg_25", level: 12, size: 96456793734}, + &segment{path: "seg_26", level: 12, size: 83607439705}, + &segment{path: "seg_27", level: 12, size: 96770548809}, + &segment{path: "seg_28", level: 12, size: 75610476308}, + &segment{path: "seg_29", level: 12, size: 90640520486}, + &segment{path: "seg_30", level: 12, size: 70865888540}, + &segment{path: "seg_31", level: 12, size: 210224834736}, + &segment{path: "seg_32", level: 12, size: 73153660353}, + &segment{path: "seg_33", level: 12, size: 76174252244}, + 
&segment{path: "seg_34", level: 12, size: 151728889040}, + &segment{path: "seg_35", level: 12, size: 128444521806}, + &segment{path: "seg_36", level: 12, size: 117679144581}, + &segment{path: "seg_37", level: 12, size: 75389068382}, + &segment{path: "seg_38", level: 12, size: 166442398845}, + &segment{path: "seg_39", level: 12, size: 131302230624}, + &segment{path: "seg_40", level: 12, size: 161545213956}, + &segment{path: "seg_41", level: 12, size: 85106406717}, + &segment{path: "seg_42", level: 12, size: 121845832221}, + &segment{path: "seg_43", level: 8, size: 7567704640}, + &segment{path: "seg_44", level: 7, size: 3025167714}, + &segment{path: "seg_45", level: 4, size: 372239668}, + &segment{path: "seg_46", level: 3, size: 176198587}, + &segment{path: "seg_47", level: 2, size: 92733242}, + &segment{path: "seg_48", level: 1, size: 45556463}, + &segment{path: "seg_49", level: 0, size: 24278171}, + } +} + +func Test_IsSimilarSegmentSizes(t *testing.T) { + type testCase struct { + leftSize, rightSize int64 + expected bool + } + + testCases := []testCase{ + { + leftSize: 10 * KiB, + rightSize: 999 * KiB, + expected: true, + }, + { + leftSize: 2 * KiB, + rightSize: 9 * MiB, + expected: true, + }, + { + leftSize: 88 * KiB, + rightSize: 99 * KiB, + expected: true, + }, + { + leftSize: 2 * MiB, + rightSize: 3 * MiB, + expected: true, + }, + { + leftSize: 1 * KiB, + rightSize: 10 * MiB, + expected: true, + }, + { + leftSize: 1 * KiB, + rightSize: 11 * MiB, + expected: false, + }, + { + leftSize: 1 * MiB, + rightSize: 11 * MiB, + expected: false, + }, + { + leftSize: 2 * MiB, + rightSize: 11 * MiB, + expected: true, + }, + { + leftSize: 2 * MiB, + rightSize: 21 * MiB, + expected: false, + }, + { + leftSize: 15 * MiB, + rightSize: 29 * MiB, + expected: true, + }, + { + leftSize: 9 * MiB, + rightSize: 90 * MiB, + expected: true, + }, + { + leftSize: 9 * MiB, + rightSize: 91 * MiB, + expected: false, + }, + { + leftSize: 10 * MiB, + rightSize: 100 * MiB, + expected: true, + 
}, + { + leftSize: 11 * MiB, + rightSize: 110 * MiB, + expected: false, + }, + { + leftSize: 22 * MiB, + rightSize: 110 * MiB, + expected: true, + }, + { + leftSize: 199 * MiB, + rightSize: 999 * MiB, + expected: false, + }, + { + leftSize: 200 * MiB, + rightSize: 999 * MiB, + expected: true, + }, + { + leftSize: 777 * MiB, + rightSize: 888 * MiB, + expected: true, + }, + { + leftSize: 500 * MiB, + rightSize: 2 * GiB, + expected: false, + }, + { + leftSize: 700 * MiB, + rightSize: 2 * GiB, + expected: true, + }, + { + leftSize: 2 * GiB, + rightSize: 7 * GiB, + expected: false, + }, + { + leftSize: 3 * GiB, + rightSize: 7 * GiB, + expected: true, + }, + { + leftSize: 5 * GiB, + rightSize: 6 * GiB, + expected: true, + }, + { + leftSize: 4 * GiB, + rightSize: 10 * GiB, + expected: true, + }, + { + leftSize: 4 * GiB, + rightSize: 11 * GiB, + expected: false, + }, + { + leftSize: 5 * GiB, + rightSize: 11 * GiB, + expected: false, + }, + { + leftSize: 6 * GiB, + rightSize: 11 * GiB, + expected: true, + }, + { + leftSize: 24 * GiB, + rightSize: 50 * GiB, + expected: false, + }, + { + leftSize: 25 * GiB, + rightSize: 50 * GiB, + expected: true, + }, + { + leftSize: 111 * GiB, + rightSize: 234 * GiB, + expected: false, + }, + { + leftSize: 123 * GiB, + rightSize: 234 * GiB, + expected: true, + }, + { + leftSize: 666 * GiB, + rightSize: 777 * GiB, + expected: true, + }, + } + + for _, tc := range testCases { + t.Run(fmt.Sprintf("%d + %d", tc.leftSize, tc.rightSize), func(t *testing.T) { + if tc.expected { + assert.True(t, isSimilarSegmentSizes(tc.leftSize, tc.rightSize)) + assert.True(t, isSimilarSegmentSizes(tc.rightSize, tc.leftSize)) + } else { + assert.False(t, isSimilarSegmentSizes(tc.leftSize, tc.rightSize)) + assert.False(t, isSimilarSegmentSizes(tc.rightSize, tc.leftSize)) + } + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_group_loading_test.go 
b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_group_loading_test.go new file mode 100644 index 0000000000000000000000000000000000000000..d43cfaa11c4b1ad27c5fa112ac92f5a2a1190b8d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_group_loading_test.go @@ -0,0 +1,322 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "context" + "fmt" + "io" + "os" + "path/filepath" + "sort" + "testing" + + "github.com/sirupsen/logrus" + + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/cyclemanager" +) + +func TestCompactionCleanupBothSegmentsPresent(t *testing.T) { + logger, _ := test.NewNullLogger() + + ctx := context.Background() + + // Tests that various states of the compaction being aborted are handled correctly + // There are 3 files involved: + // 1. The combined segment that is still a tmp file + // 2+3. 
The two source segment files + tests := []struct { + name string + copyLeft bool + copyRight bool + expectErr bool + expectedSegments int + }{ + {name: "only left present", copyLeft: true, copyRight: false, expectErr: true}, + {name: "only right present", copyLeft: false, copyRight: true, expectErr: false, expectedSegments: 1}, + {name: "nothing present", copyLeft: false, copyRight: false, expectErr: false, expectedSegments: 1}, + {name: "both present", copyLeft: true, copyRight: true, expectErr: false, expectedSegments: 2}, + } + + for _, tt := range tests { + for _, addFileInfo := range []bool{true, false} { + dirName := t.TempDir() + tmpDir := t.TempDir() + entriesTmp := createSegmentFiles(t, ctx, logger, dirName, tmpDir, []bool{addFileInfo}) + t.Run(tt.name, func(t *testing.T) { + testDir := t.TempDir() + if tt.copyLeft { + copyFile(t, tmpDir+"/"+entriesTmp[0].Name(), testDir+"/"+entriesTmp[0].Name()) + } + if tt.copyRight { + copyFile(t, tmpDir+"/"+entriesTmp[2].Name(), testDir+"/"+entriesTmp[2].Name()) + } + // always copy the combined file + copyFile(t, tmpDir+"/"+entriesTmp[1].Name(), testDir+"/"+entriesTmp[1].Name()) + + b2, err := NewBucketCreator().NewBucket(ctx, testDir, "", logger, nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), WithUseBloomFilter(false), WithWriteSegmentInfoIntoFileName(addFileInfo), WithCalcCountNetAdditions(true), + ) + if tt.expectErr { + require.Error(t, err) + } else { + require.NoError(t, err) + require.Equal(t, 20, b2.Count()) + entries, err := os.ReadDir(testDir) + require.NoError(t, err) + for _, entry := range entries { + if filepath.Ext(entry.Name()) == ".db" { + require.NotContains(t, entry.Name(), "_") + } + } + require.Len(t, b2.disk.segments, tt.expectedSegments) + for _, segment := range b2.disk.segments { + path := segment.getPath() + file := filepath.Base(path) + require.NotContains(t, file, "_") + } + } + }) + } + } +} + +func TestCompactionCleanupBothSegmentsPresentUpgrade(t 
*testing.T) { + logger, _ := test.NewNullLogger() + + ctx := context.Background() + + // Tests that various states of the compaction being aborted are handled correctly + // There are 3 files involved: + // 1. The combined segment that is still a tmp file + // 2+3. The two source segment files + tests := []struct { + name string + copyLeft bool + copyRight bool + expectErr bool + expectedSegments int + }{ + {name: "only left present", copyLeft: true, copyRight: false, expectErr: true}, + {name: "only right present", copyLeft: false, copyRight: true, expectErr: false, expectedSegments: 1}, + {name: "nothing present", copyLeft: false, copyRight: false, expectErr: false, expectedSegments: 1}, + {name: "both present", copyLeft: true, copyRight: true, expectErr: false, expectedSegments: 2}, + } + + for _, tt := range tests { + fileInfos := []struct { + sourceFileLeft bool + sourceFileRight bool + compactedTmpFile bool + loadingBucket bool + }{ + {sourceFileLeft: true, sourceFileRight: true, compactedTmpFile: false, loadingBucket: false}, + {sourceFileLeft: true, sourceFileRight: true, compactedTmpFile: true, loadingBucket: false}, + {sourceFileLeft: false, sourceFileRight: false, compactedTmpFile: true, loadingBucket: false}, + {sourceFileLeft: false, sourceFileRight: true, compactedTmpFile: true, loadingBucket: false}, + {sourceFileLeft: false, sourceFileRight: true, compactedTmpFile: false, loadingBucket: true}, + {sourceFileLeft: false, sourceFileRight: false, compactedTmpFile: true, loadingBucket: true}, + {sourceFileLeft: false, sourceFileRight: false, compactedTmpFile: false, loadingBucket: true}, + {sourceFileLeft: true, sourceFileRight: false, compactedTmpFile: true, loadingBucket: true}, + } + for _, fileInfo := range fileInfos { + dirName := t.TempDir() + tmpDir := t.TempDir() + entriesTmp := createSegmentFiles(t, ctx, logger, dirName, tmpDir, []bool{fileInfo.sourceFileLeft, fileInfo.sourceFileRight, fileInfo.compactedTmpFile}) + t.Run(tt.name, func(t 
*testing.T) { + testDir := t.TempDir() + if tt.copyLeft { + copyFile(t, tmpDir+"/"+entriesTmp[0].Name(), testDir+"/"+entriesTmp[0].Name()) + } + if tt.copyRight { + copyFile(t, tmpDir+"/"+entriesTmp[2].Name(), testDir+"/"+entriesTmp[2].Name()) + } + // always copy the combined file + copyFile(t, tmpDir+"/"+entriesTmp[1].Name(), testDir+"/"+entriesTmp[1].Name()) + + b2, err := NewBucketCreator().NewBucket(ctx, testDir, "", logger, nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), WithUseBloomFilter(false), WithWriteSegmentInfoIntoFileName(fileInfo.loadingBucket), WithCalcCountNetAdditions(true), + ) + if tt.expectErr { + require.Error(t, err) + } else { + require.NoError(t, err) + require.Equal(t, 20, b2.Count()) + entries, err := os.ReadDir(testDir) + require.NoError(t, err) + for _, entry := range entries { + if filepath.Ext(entry.Name()) == ".db" { + require.NotContains(t, entry.Name(), "_") + } + } + require.Len(t, b2.disk.segments, tt.expectedSegments) + for _, segment := range b2.disk.segments { + path := segment.getPath() + file := filepath.Base(path) + require.NotContains(t, file, "_") + } + } + }) + } + } +} + +func copyFile(t *testing.T, src, dest string) { + t.Helper() + target, err := os.Create(dest) + require.NoError(t, err) + + source, err := os.Open(src) + require.NoError(t, err) + + _, err = io.Copy(target, source) + require.NoError(t, err) + require.NoError(t, source.Sync()) + require.NoError(t, source.Close()) + require.NoError(t, target.Sync()) + require.NoError(t, target.Close()) +} + +func TestWalFilePresent(t *testing.T) { + logger, _ := test.NewNullLogger() + + ctx := context.Background() + dirName := t.TempDir() + b, err := NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), WithUseBloomFilter(false), WithWriteSegmentInfoIntoFileName(true), WithMinWalThreshold(4096), + ) + + // create "incomplete" segment + require.NoError(t, 
err) + require.NoError(t, b.Put([]byte("hello0"), []byte("world0"))) + require.NoError(t, b.Put([]byte("hello1"), []byte("world1"))) + require.NoError(t, b.FlushMemtable()) + + // create wal file with more entries + require.NoError(t, b.Put([]byte("hello0"), []byte("world0"))) + require.NoError(t, b.Put([]byte("hello1"), []byte("world1"))) + require.NoError(t, b.Put([]byte("hello2"), []byte("world2"))) + require.NoError(t, b.Shutdown(ctx)) + + dbFiles, walFiles := countDbAndWalFiles(t, dirName) + require.Equal(t, dbFiles, 1) + require.Equal(t, walFiles, 1) + + // .wal file needs same (base)name as segment file + entries, err := os.ReadDir(dirName) + require.NoError(t, err) + var segmentId string + for _, entry := range entries { + if filepath.Ext(entry.Name()) == ".db" { + segmentId = segmentID(entry.Name()) + } + } + + for _, entry := range entries { + if filepath.Ext(entry.Name()) == ".wal" { + require.NoError(t, os.Rename(dirName+"/"+entry.Name(), dirName+"/"+"segment-"+segmentId+".wal")) + } + } + + // incomplete segment will be deleted and memtable is reconstructed from .wal + b2, err := NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), WithUseBloomFilter(false), WithWriteSegmentInfoIntoFileName(true), WithMinWalThreshold(4096), + ) + require.NoError(t, err) + + val, err := b2.Get([]byte("hello2")) + require.NoError(t, err) + require.Equal(t, string(val), "world2") + + dbFiles, walFiles = countDbAndWalFiles(t, dirName) + require.Equal(t, dbFiles, 0) + require.Equal(t, walFiles, 1) +} + +func createSegmentFiles(t *testing.T, ctx context.Context, logger logrus.FieldLogger, dirName, tmpDir string, addFileInfo []bool) []os.DirEntry { + t.Helper() + if len(addFileInfo) == 1 { + addFileInfo = []bool{addFileInfo[0], addFileInfo[0], addFileInfo[0]} + } + + b, err := NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil, + cyclemanager.NewCallbackGroupNoop(), 
cyclemanager.NewCallbackGroupNoop(), WithUseBloomFilter(false), WithWriteSegmentInfoIntoFileName(addFileInfo[0]), WithMinWalThreshold(4096), + ) + require.NoError(t, err) + for i := 0; i < 10; i++ { + require.NoError(t, b.Put([]byte(fmt.Sprintf("hello%d", i)), []byte(fmt.Sprintf("world%d", i)))) + } + require.NoError(t, b.FlushMemtable()) + dbFiles, walFiles := countDbAndWalFiles(t, dirName) + require.Equal(t, dbFiles, 1) + require.Equal(t, walFiles, 0) + require.NoError(t, b.Shutdown(ctx)) + + b, err = NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), WithUseBloomFilter(false), WithWriteSegmentInfoIntoFileName(addFileInfo[1]), WithMinWalThreshold(4096), + ) + require.NoError(t, err) + + for i := 10; i < 20; i++ { + require.NoError(t, b.Put([]byte(fmt.Sprintf("hello%d", i)), []byte(fmt.Sprintf("world%d", i)))) + } + require.NoError(t, b.Put([]byte("hello1"), []byte("newworld"))) + require.NoError(t, b.FlushMemtable()) + dbFiles, walFiles = countDbAndWalFiles(t, dirName) + require.Equal(t, dbFiles, 2) + require.Equal(t, walFiles, 0) + + // copy segments to safe place + var segments []string + entriesTmp, err := os.ReadDir(dirName) + require.NoError(t, err) + for _, entry := range entriesTmp { + if filepath.Ext(entry.Name()) == ".db" { + copyFile(t, dirName+"/"+entry.Name(), tmpDir+"/"+entry.Name()) + segments = append(segments, segmentID(entry.Name())) + } + } + + once, err := b.disk.compactOnce() + require.NoError(t, err) + require.True(t, once) + dbFiles, walFiles = countDbAndWalFiles(t, dirName) + require.Equal(t, dbFiles, 1) + require.Equal(t, walFiles, 0) + require.NoError(t, b.Shutdown(ctx)) + + // move compacted segment to safe place + entries, err := os.ReadDir(dirName) + require.NoError(t, err) + for _, entry := range entries { + if filepath.Ext(entry.Name()) == ".db" { + ext := ".db.tmp" + if addFileInfo[2] { + ext = ".l1.s0" + ext + } + require.NoError(t, 
os.Rename(dirName+"/"+entry.Name(), tmpDir+"/"+"segment-"+segments[0]+"_"+segments[1]+ext)) + } + } + + // order after sorting is: + // 0: left segment + // 1: combined segment + // 2: right segment + entriesTmp, err = os.ReadDir(tmpDir) + sort.Slice(entriesTmp, func(i, j int) bool { + return entriesTmp[i].Name() < entriesTmp[j].Name() + }) + require.NoError(t, err) + return entriesTmp +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_group_size_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_group_size_test.go new file mode 100644 index 0000000000000000000000000000000000000000..fdd6637af2b2fc980cd1f4e3d898de1b0dcf3660 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_group_size_test.go @@ -0,0 +1,521 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "encoding/binary" + "path/filepath" + "testing" + + "github.com/bits-and-blooms/bloom/v3" + "github.com/stretchr/testify/assert" + + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv/segmentindex" + "github.com/weaviate/weaviate/usecases/byteops" +) + +func TestSegmentGroup_Size(t *testing.T) { + tests := []struct { + name string + segments []Segment + expectedSize int64 + description string + }{ + { + name: "empty segment group", + segments: []Segment{}, + expectedSize: 0, + description: "should return 0 when no segments are present", + }, + { + name: "single segment", + segments: []Segment{ + &segment{size: 1024}, + }, + expectedSize: 1024, + description: "should return the size of a single segment", + }, + { + name: "multiple segments with different sizes", + segments: []Segment{ + &segment{size: 512}, + &segment{size: 1024}, + &segment{size: 2048}, + }, + expectedSize: 3584, // 512 + 1024 + 2048 + description: "should return the sum of all segment sizes", + }, + { + name: "segments with zero size", + segments: []Segment{ + &segment{size: 0}, + &segment{size: 1024}, + &segment{size: 0}, + }, + expectedSize: 1024, + description: "should handle segments with zero size correctly", + }, + { + name: "large sizes", + segments: []Segment{ + &segment{size: 1024 * 1024}, // 1MB + &segment{size: 2048 * 1024}, // 2MB + &segment{size: 4096 * 1024}, // 4MB + }, + expectedSize: 7168 * 1024, // 7MB + description: "should handle large sizes correctly", + }, + { + name: "many small segments", + segments: []Segment{ + &segment{size: 1}, + &segment{size: 2}, + &segment{size: 3}, + &segment{size: 4}, + &segment{size: 5}, + &segment{size: 6}, + &segment{size: 7}, + &segment{size: 8}, + &segment{size: 9}, + &segment{size: 10}, + }, + expectedSize: 55, // sum of 1 to 10 + description: "should handle many small segments correctly", + }, + { + name: "mixed sizes including edge cases", + segments: []Segment{ + 
&segment{size: 0}, + &segment{size: 1}, + &segment{size: 1000000}, + &segment{size: 0}, + &segment{size: 999999}, + }, + expectedSize: 2000000, // 0 + 1 + 1000000 + 0 + 999999 + description: "should handle mixed sizes including zeros and large values", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + sg := &SegmentGroup{ + segments: tt.segments, + } + + result := sg.Size() + + assert.Equal(t, tt.expectedSize, result, tt.description) + }) + } +} + +func TestSegmentGroup_Size_WithEnqueuedSegments(t *testing.T) { + sg := &SegmentGroup{ + segments: []Segment{ + &segment{size: 100}, + &segment{size: 200}, + }, + enqueuedSegments: []Segment{ + &segment{size: 300}, + &segment{size: 400}, + }, + } + + expectedSize := int64(1000) // 100 + 200 + 300 + 400 + + result := sg.Size() + assert.Equal(t, expectedSize, result, "should include both regular and enqueued segments") +} + +func TestBloomFilterSize(t *testing.T) { + // Test to determine the actual size of our test bloom filter + bf := createTestBloomFilter() + bs := bf.BitSet() + bsSize := bs.BinaryStorageSize() + totalSize := bsSize + 2*8 // 2 uint64s + + t.Logf("Bloom filter size: %d bytes (bitset: %d + 2*uint64: %d)", totalSize, bsSize, 2*8) + + // Test what getBloomFilterSize actually returns + actualSize := getBloomFilterSize(bf) + t.Logf("getBloomFilterSize returns: %d bytes", actualSize) + + // Test what happens when we create a segment with this bloom filter + seg := &segment{ + bloomFilter: bf, + } + sg := &SegmentGroup{ + segments: []Segment{seg}, + } + metadataSize := sg.MetadataSize() + t.Logf("SegmentGroup.MetadataSize() returns: %d bytes", metadataSize) +} + +func TestSegmentGroup_MetadataSize(t *testing.T) { + tests := []struct { + name string + segments []Segment + expectedSize int64 + description string + }{ + { + name: "empty segment group", + segments: []Segment{}, + expectedSize: 0, + description: "should return 0 when no segments are present", + }, + { + name: "single 
segment with bloom filter only", + segments: []Segment{ + &segment{ + bloomFilter: createTestBloomFilter(), + }, + }, + expectedSize: 60, // actual bloom filter size with 0.001 false positive rate + description: "should handle segment with bloom filter", + }, + { + name: "single segment with .cna file only", + segments: []Segment{ + &segment{ + calcCountNetAdditions: true, + path: "/tmp/test.dat", + }, + }, + expectedSize: 12, // .cna files are always 12 bytes + description: "should return 12 bytes for .cna file", + }, + { + name: "multiple segments with .cna files", + segments: []Segment{ + &segment{calcCountNetAdditions: true, path: "/tmp/test1.dat"}, + &segment{calcCountNetAdditions: true, path: "/tmp/test2.dat"}, + &segment{calcCountNetAdditions: true, path: "/tmp/test3.dat"}, + }, + expectedSize: 36, // 3 * 12 bytes + description: "should return sum of all .cna file sizes", + }, + { + name: "segment with secondary bloom filters", + segments: []Segment{ + &segment{ + secondaryIndexCount: 2, + secondaryBloomFilters: []*bloom.BloomFilter{createTestBloomFilter(), createTestBloomFilter()}, + }, + }, + expectedSize: 108, // 2 * 54 bytes + description: "should handle secondary bloom filters", + }, + { + name: "mixed segments with various metadata", + segments: []Segment{ + &segment{calcCountNetAdditions: true, path: "/tmp/test1.dat"}, // 12 bytes + &segment{calcCountNetAdditions: true, path: "/tmp/test2.dat"}, // 12 bytes + &segment{secondaryIndexCount: 1, secondaryBloomFilters: []*bloom.BloomFilter{createTestBloomFilter()}}, // 60 bytes + }, + expectedSize: 84, // 12 + 12 + 60 bytes + description: "should handle mixed metadata types correctly", + }, + { + name: "segment with nil bloom filter", + segments: []Segment{ + &segment{ + bloomFilter: nil, + calcCountNetAdditions: true, // .cna file present + path: "/tmp/test.dat", + }, + }, + expectedSize: 12, + description: "should handle nil bloom filter gracefully", + }, + { + name: "segment with nil secondary bloom 
filters", + segments: []Segment{ + &segment{ + secondaryIndexCount: 2, + secondaryBloomFilters: nil, + calcCountNetAdditions: true, // .cna file present + path: "/tmp/test.dat", + }, + }, + expectedSize: 12, + description: "should handle nil secondary bloom filters gracefully", + }, + { + name: "segment with mixed nil and non-nil secondary bloom filters", + segments: []Segment{ + &segment{ + secondaryIndexCount: 3, + secondaryBloomFilters: []*bloom.BloomFilter{nil, createTestBloomFilter(), nil}, + calcCountNetAdditions: true, // .cna file present + path: "/tmp/test.dat", + }, + }, + expectedSize: 60, // 12 + 48 + description: "should handle mixed nil and non-nil secondary bloom filters", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + sg := &SegmentGroup{ + segments: tt.segments, + } + + result := sg.MetadataSize() + + assert.Equal(t, tt.expectedSize, result, tt.description) + }) + } +} + +func TestSegmentGroup_MetadataSize_WithEnqueuedSegments(t *testing.T) { + sg := &SegmentGroup{ + segments: []Segment{ + &segment{calcCountNetAdditions: true, path: "/tmp/test1.dat"}, // 12 bytes + &segment{calcCountNetAdditions: true, path: "/tmp/test2.dat"}, // 12 bytes + }, + enqueuedSegments: []Segment{ + &segment{calcCountNetAdditions: true, path: "/tmp/test3.dat"}, // 12 bytes + &segment{calcCountNetAdditions: true, path: "/tmp/test4.dat"}, // 12 bytes + }, + } + + expectedSize := int64(48) // 4 * 12 bytes + + result := sg.MetadataSize() + assert.Equal(t, expectedSize, result, "should include metadata from both regular and enqueued segments") +} + +// Test helper function to create a mock segment with specific metadata +func createMockSegmentWithMetadata(hasCNA bool, bloomFilterSize int, secondaryBloomFilterSizes []int) *segment { + seg := &segment{ + calcCountNetAdditions: hasCNA, + } + + if hasCNA { + seg.path = "/tmp/test.dat" + } + + if bloomFilterSize > 0 { + // Create a bloom filter with some data + seg.bloomFilter = 
createTestBloomFilter() + } + + if len(secondaryBloomFilterSizes) > 0 { + seg.secondaryIndexCount = uint16(len(secondaryBloomFilterSizes)) + seg.secondaryBloomFilters = make([]*bloom.BloomFilter, len(secondaryBloomFilterSizes)) + for i, size := range secondaryBloomFilterSizes { + if size > 0 { + seg.secondaryBloomFilters[i] = createTestBloomFilter() + } + } + } + + return seg +} + +func TestSegmentGroup_MetadataSize_ComplexScenarios(t *testing.T) { + tests := []struct { + name string + segments []*segment + expectedSize int64 + description string + }{ + { + name: "complex scenario with all metadata types", + segments: []*segment{ + createMockSegmentWithMetadata(true, 100, []int{50, 75}), // 12 + 60 + 48 + 48 = 168 + createMockSegmentWithMetadata(false, 200, []int{}), // 60 + createMockSegmentWithMetadata(true, 0, []int{25, 0, 30}), // 12 + 48 + 0 + 48 = 108 + }, + expectedSize: 324, // 168 + 60 + 96 + description: "should handle complex scenarios with mixed metadata types", + }, + { + name: "segments with only .cna files", + segments: []*segment{ + createMockSegmentWithMetadata(true, 0, []int{}), + createMockSegmentWithMetadata(true, 0, []int{}), + createMockSegmentWithMetadata(true, 0, []int{}), + createMockSegmentWithMetadata(true, 0, []int{}), + createMockSegmentWithMetadata(true, 0, []int{}), + }, + expectedSize: 60, // 5 * 12 bytes + description: "should handle segments with only .cna files", + }, + { + name: "segments with only bloom filters", + segments: []*segment{ + createMockSegmentWithMetadata(false, 150, []int{}), + createMockSegmentWithMetadata(false, 250, []int{}), + createMockSegmentWithMetadata(false, 350, []int{}), + }, + expectedSize: 180, // 3 * 60 bytes + description: "should handle segments with only bloom filters", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Convert []*segment to []Segment for the test + segments := make([]Segment, len(tt.segments)) + for i, seg := range tt.segments { + segments[i] = seg 
+ } + + sg := &SegmentGroup{ + segments: segments, + } + + result := sg.MetadataSize() + + assert.Equal(t, tt.expectedSize, result, tt.description) + }) + } +} + +func TestSegmentGroup_MetadataSize_WithWriteMetadata(t *testing.T) { + tests := []struct { + name string + writeMetadata bool + segments []*segment + expectedSize int64 + description string + }{ + { + name: "writeMetadata disabled - count bloom filters and .cna files", + writeMetadata: false, + segments: []*segment{ + createMockSegmentWithMetadata(true, 48, []int{}), // 48 bytes (bloom filter) + 12 bytes (.cna file) + createMockSegmentWithMetadata(true, 48, []int{}), // 48 bytes (bloom filter) + 12 bytes (.cna file) + }, + expectedSize: 120, // 2 * (48 + 12) bytes + description: "should count bloom filters and .cna files when writeMetadata is disabled", + }, + { + name: "writeMetadata enabled - count metadata files", + writeMetadata: true, + segments: []*segment{ + createMockSegmentWithMetadataFile(t, true, 48, []int{}), // metadata file with bloom filter + count + createMockSegmentWithMetadataFile(t, true, 48, []int{}), // metadata file with bloom filter + count + }, + // Expected size: 138 bytes (includes metadata file header overhead: checksum + version + length indicators) + expectedSize: 138, // Actual metadata file size (includes header overhead) + description: "should count metadata files when writeMetadata is enabled (requires actual files)", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Convert []*segment to []Segment for the test + segments := make([]Segment, len(tt.segments)) + for i, seg := range tt.segments { + segments[i] = seg + } + + sg := &SegmentGroup{ + segments: segments, + writeMetadata: tt.writeMetadata, + } + + result := sg.MetadataSize() + + assert.Equal(t, tt.expectedSize, result, tt.description) + }) + } +} + +// createTestBloomFilter creates a bloom filter with some data for testing +func createTestBloomFilter() *bloom.BloomFilter { + // 
Create a bloom filter with the same parameters as the actual implementation + // The actual implementation uses: bloom.NewWithEstimates(uint(len(keys)), 0.001) + bf := bloom.NewWithEstimates(10, 0.001) + bf.Add([]byte("test")) + return bf +} + +// createMockSegmentWithMetadataFile creates a segment with an actual metadata file for testing writeMetadata scenarios +func createMockSegmentWithMetadataFile(t *testing.T, hasCNA bool, bloomFilterSize int, secondaryBloomFilterSizes []int) *segment { + seg := &segment{ + calcCountNetAdditions: hasCNA, + useBloomFilter: true, + strategy: segmentindex.StrategyReplace, + } + + // Create a temporary directory for the segment + tempDir := t.TempDir() + seg.path = filepath.Join(tempDir, "test.dat") + + if bloomFilterSize > 0 { + // Create a bloom filter with some data + seg.bloomFilter = createTestBloomFilter() + } + + if len(secondaryBloomFilterSizes) > 0 { + seg.secondaryIndexCount = uint16(len(secondaryBloomFilterSizes)) + seg.secondaryBloomFilters = make([]*bloom.BloomFilter, len(secondaryBloomFilterSizes)) + for i, size := range secondaryBloomFilterSizes { + if size > 0 { + seg.secondaryBloomFilters[i] = createTestBloomFilter() + } + } + } + + // Use the actual implementation to write the metadata file + metadataPath := seg.metadataPath() + if metadataPath != "" { + // Create primary bloom filter data directly + var primaryBloom []byte + if seg.bloomFilter != nil { + bfSize := getBloomFilterSize(seg.bloomFilter) + rw := byteops.NewReadWriter(make([]byte, bfSize)) + if _, err := seg.bloomFilter.WriteTo(&rw); err != nil { + t.Fatalf("failed to write primary bloom filter: %v", err) + } + primaryBloom = rw.Buffer + } + + // Create secondary bloom filters data directly + var secondaryBloom [][]byte + if seg.secondaryIndexCount > 0 { + secondaryBloom = make([][]byte, seg.secondaryIndexCount) + for i, bf := range seg.secondaryBloomFilters { + if bf != nil { + bfSize := getBloomFilterSize(bf) + rw := 
byteops.NewReadWriter(make([]byte, bfSize)) + if _, err := bf.WriteTo(&rw); err != nil { + t.Fatalf("failed to write secondary bloom filter %d: %v", i, err) + } + secondaryBloom[i] = rw.Buffer + } + } + } + + // Create CNA data directly + var netAdditions []byte + if hasCNA { + // Create a simple CNA with a test count + cnaData := make([]byte, 8) + binary.LittleEndian.PutUint64(cnaData, 42) // Some test count + netAdditions = cnaData + } + + // Use the actual implementation to write the metadata file + err := seg.writeMetadataToDisk(metadataPath, primaryBloom, secondaryBloom, netAdditions) + if err != nil { + t.Fatalf("failed to write metadata file: %v", err) + } + } + + return seg +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_inverted.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_inverted.go new file mode 100644 index 0000000000000000000000000000000000000000..8a8c93db8cc7223442c155ac544fbab96076837b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_inverted.go @@ -0,0 +1,205 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "bytes" + "encoding/binary" + "encoding/gob" + "fmt" + "math" + "sync" + + "github.com/weaviate/sroar" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv/segmentindex" +) + +type segmentInvertedData struct { + // lock to read tombstones and property lengths + lockInvertedData sync.RWMutex + + tombstones *sroar.Bitmap + tombstonesLoaded bool + + propertyLengths map[uint64]uint32 + propertyLengthsLoaded bool + + avgPropertyLengthsAvg float64 + avgPropertyLengthsCount uint64 +} + +func (s *segment) loadTombstones() (*sroar.Bitmap, error) { + s.invertedData.lockInvertedData.Lock() + defer s.invertedData.lockInvertedData.Unlock() + if s.strategy != segmentindex.StrategyInverted { + return nil, fmt.Errorf("property only supported for inverted strategy") + } + + if s.invertedData.tombstonesLoaded { + return s.invertedData.tombstones, nil + } + + buffer := make([]byte, 8) + if err := s.copyNode(buffer, nodeOffset{s.invertedHeader.TombstoneOffset, s.invertedHeader.TombstoneOffset + 8}); err != nil { + return nil, fmt.Errorf("copy node: %w", err) + } + bitmapSize := binary.LittleEndian.Uint64(buffer) + + if bitmapSize == 0 { + s.invertedData.tombstonesLoaded = true + return s.invertedData.tombstones, nil + } + + buffer = make([]byte, bitmapSize) + if err := s.copyNode(buffer, nodeOffset{s.invertedHeader.TombstoneOffset + 8, s.invertedHeader.TombstoneOffset + 8 + bitmapSize}); err != nil { + return nil, fmt.Errorf("copy node: %w", err) + } + + bitmap := sroar.FromBuffer(buffer) + + s.invertedData.tombstones = bitmap + s.invertedData.tombstonesLoaded = true + return bitmap, nil +} + +func (s *segment) loadPropertyLengths() (map[uint64]uint32, error) { + s.invertedData.lockInvertedData.Lock() + defer s.invertedData.lockInvertedData.Unlock() + if s.strategy != segmentindex.StrategyInverted { + return nil, fmt.Errorf("property only supported for inverted strategy") + } + + if 
s.invertedData.propertyLengthsLoaded { + return s.invertedData.propertyLengths, nil + } + + buffer := make([]byte, 8*3) + + if err := s.copyNode(buffer, nodeOffset{s.invertedHeader.PropertyLengthsOffset, s.invertedHeader.PropertyLengthsOffset + 8*3}); err != nil { + return nil, fmt.Errorf("copy node: %w", err) + } + + s.invertedData.avgPropertyLengthsAvg = math.Float64frombits(binary.LittleEndian.Uint64(buffer)) + s.invertedData.avgPropertyLengthsCount = binary.LittleEndian.Uint64(buffer[8:16]) + propertyLengthsSize := binary.LittleEndian.Uint64(buffer[16:24]) + + if propertyLengthsSize == 0 { + s.invertedData.propertyLengthsLoaded = true + return s.invertedData.propertyLengths, nil + } + + propertyLengthsStart := s.invertedHeader.PropertyLengthsOffset + 16 + 8 + propertyLengthsEnd := propertyLengthsStart + propertyLengthsSize + + buffer = make([]byte, propertyLengthsSize) + + if err := s.copyNode(buffer, nodeOffset{propertyLengthsStart, propertyLengthsEnd}); err != nil { + return nil, fmt.Errorf("copy node: %w", err) + } + e := gob.NewDecoder(bytes.NewReader(buffer)) + + propLengths := map[uint64]uint32{} + err := e.Decode(&propLengths) + if err != nil { + return s.invertedData.propertyLengths, fmt.Errorf("decode property lengths: %w", err) + } + + s.invertedData.propertyLengthsLoaded = true + s.invertedData.propertyLengths = propLengths + return s.invertedData.propertyLengths, nil +} + +// ReadOnlyTombstones returns segment's tombstones +// Returned bitmap must not be mutated +func (s *segment) ReadOnlyTombstones() (*sroar.Bitmap, error) { + if s.strategy != segmentindex.StrategyInverted { + return nil, fmt.Errorf("tombstones only supported for inverted strategy") + } + + s.invertedData.lockInvertedData.RLock() + if s.invertedData.tombstonesLoaded { + defer s.invertedData.lockInvertedData.RUnlock() + return s.invertedData.tombstones, nil + } + s.invertedData.lockInvertedData.RUnlock() + + return s.loadTombstones() +} + +// MergeTombstones merges segment's 
tombstones with other tombstones +// creating new bitmap that replaces the previous one (previous one is not mutated) +// Returned bitmap must not be mutated +func (s *segment) MergeTombstones(other *sroar.Bitmap) (*sroar.Bitmap, error) { + if s.strategy != segmentindex.StrategyInverted { + return nil, fmt.Errorf("tombstones only supported for inverted strategy") + } + + if _, err := s.ReadOnlyTombstones(); err != nil { + return nil, err + } + + s.invertedData.lockInvertedData.Lock() + defer s.invertedData.lockInvertedData.Unlock() + + s.invertedData.tombstones = sroar.Or(s.invertedData.tombstones, other) + return s.invertedData.tombstones, nil +} + +func (s *segment) GetPropertyLengths() (map[uint64]uint32, error) { + if s.strategy != segmentindex.StrategyInverted { + return nil, fmt.Errorf("property length only supported for inverted strategy") + } + + s.invertedData.lockInvertedData.RLock() + loaded := s.invertedData.propertyLengthsLoaded + s.invertedData.lockInvertedData.RUnlock() + + if !loaded { + return s.loadPropertyLengths() + } + + s.invertedData.lockInvertedData.RLock() + defer s.invertedData.lockInvertedData.RUnlock() + + return s.invertedData.propertyLengths, nil +} + +func (s *segment) hasKey(key []byte) bool { + if s.strategy != segmentindex.StrategyMapCollection && s.strategy != segmentindex.StrategyInverted { + return false + } + + if s.useBloomFilter && !s.bloomFilter.Test(key) { + return false + } + + _, err := s.index.Get(key) + return err == nil +} + +func (s *segment) getDocCount(key []byte) uint64 { + if s.strategy != segmentindex.StrategyMapCollection && s.strategy != segmentindex.StrategyInverted { + return 0 + } + + node, err := s.index.Get(key) + if err != nil { + return 0 + } + + buffer := make([]byte, 8) + if err = s.copyNode(buffer, nodeOffset{node.Start, node.Start + 8}); err != nil { + return 0 + } + + return binary.LittleEndian.Uint64(buffer) +} diff --git 
a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_key_and_tombstone_extractor.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_key_and_tombstone_extractor.go new file mode 100644 index 0000000000000000000000000000000000000000..9910c691833164581006e5119233734023ecb446 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_key_and_tombstone_extractor.go @@ -0,0 +1,159 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "encoding/binary" +) + +// bufferedKeyAndTombstoneExtractor is a tool to build up the count stats for +// disk segments (i.e. all the keys in this segment as well as whether they +// contain a tombstone or not). It tries to be relatively memory-efficient +// while doing a whole-segment disk scan. It uses a primitive []byte buffer +// for its output which needs to be allocated just once. It can only read until +// the buffer is full, then it needs to call a callback fn which can do +// something with the data. After the callback function has been called on each +// key, the output buffer is reset. 
If the input segment it not at EOF yet, +// this cycle repeats +type bufferedKeyAndTombstoneExtractor struct { + outputBuffer []byte + outputBufferOffset uint64 + offset uint64 + end uint64 + rawSegment []byte + secondaryIndexCount uint16 + callback keyAndTombstoneCallbackFn + callbackCycle int +} + +type keyAndTombstoneCallbackFn func(key []byte, tombstone bool) + +func newBufferedKeyAndTombstoneExtractor(rawSegment []byte, initialOffset uint64, + end uint64, outputBufferSize uint64, secondaryIndexCount uint16, + callback keyAndTombstoneCallbackFn, +) *bufferedKeyAndTombstoneExtractor { + return &bufferedKeyAndTombstoneExtractor{ + rawSegment: rawSegment, + offset: initialOffset, + end: end, + outputBuffer: make([]byte, outputBufferSize), + outputBufferOffset: 0, + secondaryIndexCount: secondaryIndexCount, + callback: callback, + } +} + +func (e *bufferedKeyAndTombstoneExtractor) do() { + for { + if e.offset >= e.end { + break + } + + // returns false if the output buffer ran full + ok := e.readSingleEntry() + if !ok { + e.flushAndCallback() + } + } + + // one final callback + e.flushAndCallback() +} + +// returns true if the cycle completed, returns false if the cycle did not +// complete because the output buffer was full. In that case, the offsets have +// been reset to the values they had at the beginning of the cycle +func (e *bufferedKeyAndTombstoneExtractor) readSingleEntry() bool { + // if we discover during an iteration that the next entry can't fit in the + // buffer anymore, we must return to the start of this iteration, so that + // the this work can be picked up here once the buffer has been flushed + offsetAtLoopStart := e.offset + outputOffsetAtLoopStart := e.outputBufferOffset + + // the first output size check is static, as we will always read 5 bytes, + // no matter what. 
If they can't even fit, we can abort right away + if !e.outputBufferCanFit(5) { + e.offset = offsetAtLoopStart + e.outputBufferOffset = outputOffsetAtLoopStart + return false + } + + // copy tombstone value into output buffer + e.outputBuffer[e.outputBufferOffset] = e.rawSegment[e.offset] + e.offset++ + e.outputBufferOffset++ + + valueLen := binary.LittleEndian.Uint64(e.rawSegment[e.offset : e.offset+8]) + e.offset += 8 + + // we're not actually interested in the value, so we can skip it entirely + e.offset += valueLen + + primaryKeyLen := binary.LittleEndian.Uint32(e.rawSegment[e.offset : e.offset+4]) + if !e.outputBufferCanFit(uint64(primaryKeyLen) + 4) { + e.offset = offsetAtLoopStart + e.outputBufferOffset = outputOffsetAtLoopStart + return false + } + + // copy the primary key len indicator into the output buffer + copy(e.outputBuffer[e.outputBufferOffset:e.outputBufferOffset+4], + e.rawSegment[e.offset:e.offset+4]) + e.offset += 4 + e.outputBufferOffset += 4 + + // then copy the key itself + copy(e.outputBuffer[e.outputBufferOffset:e.outputBufferOffset+uint64(primaryKeyLen)], e.rawSegment[e.offset:e.offset+uint64(primaryKeyLen)]) + e.offset += uint64(primaryKeyLen) + e.outputBufferOffset += uint64(primaryKeyLen) + + for i := uint16(0); i < e.secondaryIndexCount; i++ { + secKeyLen := binary.LittleEndian.Uint32(e.rawSegment[e.offset : e.offset+4]) + e.offset += 4 + e.offset += uint64(secKeyLen) + } + + return true +} + +func (e *bufferedKeyAndTombstoneExtractor) outputBufferCanFit(size uint64) bool { + return (uint64(len(e.outputBuffer)) - e.outputBufferOffset) >= size +} + +// flushAndCallback calls the callback fn for each key/tombstone pair in the +// buffer, then resets the buffer offset, making it ready to be overwritten in +// the next cycle +func (e *bufferedKeyAndTombstoneExtractor) flushAndCallback() { + end := e.outputBufferOffset + e.outputBufferOffset = 0 + for e.outputBufferOffset < end { + var tombstone bool + if 
e.outputBuffer[e.outputBufferOffset] == 0x01 { + tombstone = true + } + + e.outputBufferOffset++ + + primaryKeyLen := binary.LittleEndian.Uint32(e.outputBuffer[e.outputBufferOffset : e.outputBufferOffset+4]) + + e.outputBufferOffset += 4 + + e.callback(e.outputBuffer[e.outputBufferOffset:e.outputBufferOffset+uint64(primaryKeyLen)], + tombstone) + e.outputBufferOffset += uint64(primaryKeyLen) + } + + // reset outputBufferOffset for next batch + e.outputBufferOffset = 0 + + e.callbackCycle++ +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_metadata.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_metadata.go new file mode 100644 index 0000000000000000000000000000000000000000..4cca05ec3fc75b1c63043c9189f1d8e41a358da3 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_metadata.go @@ -0,0 +1,341 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "os" + "path/filepath" + + "github.com/bits-and-blooms/bloom/v3" + + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv/segmentindex" + "github.com/weaviate/weaviate/usecases/byteops" +) + +const ( + MetadataVersion = 0 + MetadataFileSuffix = ".metadata" +) + +func (s *segment) metadataPath() string { + return s.buildPath("%s.metadata") +} + +func (s *segment) initMetadata(metrics *Metrics, overwrite bool, exists existsOnLowerSegmentsFn, precomputedCNAValue *int, existingFilesList map[string]int64, writeMetadata bool) (bool, error) { + if !s.useBloomFilter && !s.calcCountNetAdditions { + return false, nil + } + s.bloomFilterMetrics = newBloomFilterMetrics(metrics) + + path := s.metadataPath() + + loadFromDisk, err := fileExistsInList(existingFilesList, filepath.Base(path)) + if err != nil { + return false, err + } + if loadFromDisk { + if overwrite { + err := os.Remove(path) + if err != nil { + return false, fmt.Errorf("delete metadata %s: %w", path, err) + } + } else { + err := s.loadMetaFromDisk(path) + if err == nil { + return true, nil // successfully loaded + } + if !errors.Is(err, ErrInvalidChecksum) { + // not a recoverable error + return false, err + } + + // now continue re-calculating + } + } + + if !writeMetadata { + return false, nil + } + // don't write metadata file if other metadata files exist + bloomFilterFileExists, err := fileExistsInList(existingFilesList, filepath.Base(s.bloomFilterPath())) + if err != nil || bloomFilterFileExists { + return false, nil + } + cnaExists, err := fileExistsInList(existingFilesList, filepath.Base(s.countNetPath())) + if err != nil || cnaExists { + return false, nil + } + for i := 0; i < int(s.secondaryIndexCount); i++ { + secondaryBloomFilterFileExists, err := fileExistsInList(existingFilesList, filepath.Base(s.bloomFilterSecondaryPath(i))) + if err != nil || secondaryBloomFilterFileExists { + 
return false, nil + } + } + + s.metaPaths = append(s.metaPaths, path) + + primaryBloom, secondaryBloom, err := s.recalculateBloomFilters() + if err != nil { + return false, err + } + + netAdditions, err := s.recalcCountNetAdditions(exists, precomputedCNAValue) + if err != nil { + return false, err + } + + return true, s.writeMetadataToDisk(path, primaryBloom, secondaryBloom, netAdditions) +} + +func (s *segment) writeMetadataToDisk(path string, primaryBloom []byte, secondaryBloom [][]byte, netAdditions []byte) error { + // Uint32 for checksum + // byte for version + // Uint32 for lengths - primary bloom filters, N secondary bloom filters and CNA + sizeHeader := byteops.Uint32Len + 1 + (2+s.secondaryIndexCount)*byteops.Uint32Len + sizeData := len(primaryBloom) + len(netAdditions) + for _, b := range secondaryBloom { + sizeData += len(b) + } + rw := byteops.NewReadWriter(make([]byte, int(sizeHeader)+sizeData)) + rw.MoveBufferPositionForward(byteops.Uint32Len) // leave space for checksum + + rw.WriteByte(MetadataVersion) + + if err := rw.CopyBytesToBufferWithUint32LengthIndicator(primaryBloom); err != nil { + return err + } + + if err := rw.CopyBytesToBufferWithUint32LengthIndicator(netAdditions); err != nil { + return err + } + for _, b := range secondaryBloom { + if err := rw.CopyBytesToBufferWithUint32LengthIndicator(b); err != nil { + return err + } + } + return writeWithChecksum(rw, path, s.observeMetaWrite) +} + +func (s *segment) recalculateBloomFilters() ([]byte, [][]byte, error) { + if !s.useBloomFilter { + return nil, nil, nil + } + primaryBloom, err := s.recalculatePrimaryBloomFilter() + if err != nil { + return nil, nil, err + } + + secondaryBlooms, err := s.recalculateSecondaryBloomFilter() + if err != nil { + return nil, nil, err + } + + return primaryBloom, secondaryBlooms, nil +} + +func (s *segment) recalculatePrimaryBloomFilter() ([]byte, error) { + keys, err := s.index.AllKeys() + if err != nil { + return nil, err + } + + s.bloomFilter = 
bloom.NewWithEstimates(uint(len(keys)), 0.001) + for _, key := range keys { + s.bloomFilter.Add(key) + } + + bfSize := getBloomFilterSize(s.bloomFilter) + + rw := byteops.NewReadWriter(make([]byte, bfSize)) + + if _, err := s.bloomFilter.WriteTo(&rw); err != nil { + return nil, err + } + + return rw.Buffer, nil +} + +func (s *segment) recalculateSecondaryBloomFilter() ([][]byte, error) { + if s.secondaryIndexCount == 0 { + return nil, nil + } + + s.secondaryBloomFilters = make([]*bloom.BloomFilter, s.secondaryIndexCount) + out := make([][]byte, s.secondaryIndexCount) + for i := range s.secondaryBloomFilters { + keys, err := s.secondaryIndices[i].AllKeys() + if err != nil { + return nil, err + } + + s.secondaryBloomFilters[i] = bloom.NewWithEstimates(uint(len(keys)), 0.001) + for _, key := range keys { + s.secondaryBloomFilters[i].Add(key) + } + bfSize := getBloomFilterSize(s.secondaryBloomFilters[i]) + + rw := byteops.NewReadWriter(make([]byte, bfSize)) + if _, err := s.secondaryBloomFilters[i].WriteTo(&rw); err != nil { + return nil, err + } + + out[i] = rw.Buffer + } + return out, nil +} + +func (s *segment) loadMetaFromDisk(path string) error { + data, err := loadWithChecksum(path, -1, s.metrics.ReadObserver("loadMetadata")) + if err != nil { + return err + } + + rw := byteops.NewReadWriter(data) + version := rw.ReadUint8() + if version != MetadataVersion { + return fmt.Errorf("invalid metadata version: %d", version) + } + primaryBloom := rw.ReadBytesFromBufferWithUint32LengthIndicator() + netAdditions := rw.ReadBytesFromBufferWithUint32LengthIndicator() + secondaryBloom := make([][]byte, s.secondaryIndexCount) + for i := range secondaryBloom { + secondaryBloom[i] = rw.ReadBytesFromBufferWithUint32LengthIndicator() + } + + if err := s.initBloomFiltersFromData(primaryBloom, secondaryBloom); err != nil { + return err + } + + if err := s.initCNAFromData(netAdditions); err != nil { + return err + } + + return nil +} + +func (s *segment) 
initBloomFiltersFromData(primary []byte, secondary [][]byte) error { + if !s.useBloomFilter { + return nil + } + + s.bloomFilter = new(bloom.BloomFilter) + _, err := s.bloomFilter.ReadFrom(bytes.NewReader(primary)) + if err != nil { + return fmt.Errorf("read bloom filter: %w", err) + } + + s.secondaryBloomFilters = make([]*bloom.BloomFilter, s.secondaryIndexCount) + for i := range s.secondaryBloomFilters { + s.secondaryBloomFilters[i] = new(bloom.BloomFilter) + _, err := s.secondaryBloomFilters[i].ReadFrom(bytes.NewReader(secondary[i])) + if err != nil { + return fmt.Errorf("read bloom filter: %w", err) + } + + } + return nil +} + +func (s *segment) initCNAFromData(netAdditions []byte) error { + if !s.calcCountNetAdditions || s.strategy != segmentindex.StrategyReplace { + return nil + } + + s.countNetAdditions = int(binary.LittleEndian.Uint64(netAdditions)) + + return nil +} + +func (s *segment) recalcCountNetAdditions(exists existsOnLowerSegmentsFn, precomputedCNAValue *int) ([]byte, error) { + if !s.calcCountNetAdditions || s.strategy != segmentindex.StrategyReplace { + return nil, nil + } + + if precomputedCNAValue != nil { + s.countNetAdditions = *precomputedCNAValue + } else { + var lastErr error + countNet := 0 + cb := func(key []byte, tombstone bool) { + existedOnPrior, err := exists(key) + if err != nil { + lastErr = err + } + + if tombstone && existedOnPrior { + countNet-- + } + + if !tombstone && !existedOnPrior { + countNet++ + } + } + + extr := newBufferedKeyAndTombstoneExtractor(s.contents, s.dataStartPos, + s.dataEndPos, 10e6, s.secondaryIndexCount, cb) + + extr.do() + + if lastErr != nil { + return nil, lastErr + } + + s.countNetAdditions = countNet + } + + data := make([]byte, 8) + binary.LittleEndian.PutUint64(data, uint64(s.countNetAdditions)) + return data, nil +} + +// ReadObjectCountFromMetadataFile reads a .metadata file and returns the count net additions value +// Returns (count, nil) if successful, (0, error) if the file is invalid or 
corrupted +func ReadObjectCountFromMetadataFile(path string) (int64, error) { + data, err := loadWithChecksum(path, -1, nil) + if err != nil { + return 0, fmt.Errorf("failed to read .metadata file: %w", err) + } + + rw := byteops.NewReadWriter(data) + + // Read version to detect file structure + version := rw.ReadUint8() + + switch version { + case MetadataVersion: + // Version 0: checksum + version + primary bloom + cna + secondary blooms + // Read bloom filter length and skip + bloomLen := rw.ReadUint32() + rw.MoveBufferPositionForward(uint64(bloomLen)) + + // Read CNA length + cnaLen := rw.ReadUint32() + + // Read CNA data + if cnaLen >= 8 { + cnaData := rw.ReadBytesFromBuffer(uint64(cnaLen)) + if len(cnaData) >= 8 { + count := int64(binary.LittleEndian.Uint64(cnaData)) + return count, nil + } + } + + default: + return 0, fmt.Errorf("unsupported metadata version: %d", version) + } + + return 0, fmt.Errorf("invalid net additions data in metadata file") +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_metadata_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_metadata_test.go new file mode 100644 index 0000000000000000000000000000000000000000..f7b54418c9e58ffa3c23330b815263a975475f87 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_metadata_test.go @@ -0,0 +1,213 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "context" + "os" + "path" + "path/filepath" + "testing" + + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/require" + + "github.com/weaviate/weaviate/entities/cyclemanager" + "github.com/weaviate/weaviate/usecases/byteops" +) + +func TestMetadataNoWrites(t *testing.T) { + ctx := context.Background() + + logger, _ := test.NewNullLogger() + + tests := []struct { + name string + writeMetadata bool + bloomFilter bool + cna bool + expectedFiles []string + }{ + {name: "no meta at all1", writeMetadata: false, bloomFilter: false, cna: false, expectedFiles: []string{".db"}}, + {name: "no meta at all1", writeMetadata: true, bloomFilter: false, cna: false, expectedFiles: []string{".db"}}, + {name: "no meta but bloom", writeMetadata: false, bloomFilter: true, cna: false, expectedFiles: []string{".db", ".bloom"}}, + {name: "no meta but bloom+cna", writeMetadata: false, bloomFilter: true, cna: true, expectedFiles: []string{".db", ".bloom", ".cna"}}, + {name: "with meta and bloom+cna", writeMetadata: true, bloomFilter: true, cna: true, expectedFiles: []string{".db", ".metadata"}}, + {name: "with meta and cna", writeMetadata: true, bloomFilter: true, cna: true, expectedFiles: []string{".db", ".metadata"}}, + {name: "with meta and bloom", writeMetadata: true, bloomFilter: true, cna: true, expectedFiles: []string{".db", ".metadata"}}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + dirName := t.TempDir() + + secondaryIndexCount := 2 + b, err := NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), + WithWriteMetadata(tt.writeMetadata), WithUseBloomFilter(tt.bloomFilter), WithCalcCountNetAdditions(tt.cna), WithSecondaryIndices(uint16(secondaryIndexCount))) + require.NoError(t, err) + + require.NoError(t, b.Put([]byte("key"), []byte("value"))) + require.NoError(t, b.FlushMemtable()) + 
fileTypes := countFileTypes(t, dirName) + require.Len(t, fileTypes, len(tt.expectedFiles)) + for _, expectedFile := range tt.expectedFiles { + if expectedFile == ".bloom" { + require.Equal(t, fileTypes[expectedFile], 1+secondaryIndexCount) + } else { + require.Equal(t, fileTypes[expectedFile], 1) + } + } + }) + } +} + +func TestNoWriteIfBloomPresent(t *testing.T) { + ctx := context.Background() + logger, _ := test.NewNullLogger() + dirName := t.TempDir() + + b, err := NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), + WithUseBloomFilter(true)) + require.NoError(t, err) + require.NoError(t, b.Put([]byte("key"), []byte("value"))) + require.NoError(t, b.FlushMemtable()) + require.NoError(t, b.Shutdown(ctx)) + fileTypes := countFileTypes(t, dirName) + require.Len(t, fileTypes, 2) + require.Equal(t, fileTypes[".db"], 1) + require.Equal(t, fileTypes[".bloom"], 1) + + // load with writeMetadata enabled, no metadata files should be written + b2, err := NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), + WithUseBloomFilter(true), WithWriteMetadata(true)) + require.NoError(t, err) + require.NoError(t, b2.Shutdown(ctx)) + + fileTypes = countFileTypes(t, dirName) + require.Len(t, fileTypes, 2) + require.Equal(t, fileTypes[".db"], 1) + require.Equal(t, fileTypes[".bloom"], 1) +} + +func TestCnaNoBloomPresent(t *testing.T) { + ctx := context.Background() + logger, _ := test.NewNullLogger() + dirName := t.TempDir() + + b, err := NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), + WithUseBloomFilter(false), WithWriteMetadata(true), WithCalcCountNetAdditions(true)) + require.NoError(t, err) + require.NoError(t, b.Put([]byte("key"), []byte("value"))) + require.NoError(t, b.FlushMemtable()) + fileTypes := countFileTypes(t, 
dirName) + require.Len(t, fileTypes, 2) + require.Equal(t, fileTypes[".db"], 1) + require.Equal(t, fileTypes[".metadata"], 1) + + require.Equal(t, b.disk.segments[0].getSegment().getCountNetAdditions(), 1) + require.NoError(t, b.Shutdown(ctx)) +} + +func TestSecondaryBloomNoCna(t *testing.T) { + ctx := context.Background() + logger, _ := test.NewNullLogger() + dirName := t.TempDir() + + b, err := NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), + WithUseBloomFilter(true), WithWriteMetadata(true), WithCalcCountNetAdditions(false), WithSecondaryIndices(2)) + require.NoError(t, err) + require.NoError(t, b.Put([]byte("key"), []byte("value"), WithSecondaryKey(0, []byte("key0")), WithSecondaryKey(1, []byte("key1")))) + require.NoError(t, b.FlushMemtable()) + fileTypes := countFileTypes(t, dirName) + require.Len(t, fileTypes, 2) + require.Equal(t, fileTypes[".db"], 1) + require.Equal(t, fileTypes[".metadata"], 1) + + require.True(t, b.disk.segments[0].getSegment().secondaryBloomFilters[0].Test([]byte("key0"))) + require.False(t, b.disk.segments[0].getSegment().secondaryBloomFilters[0].Test([]byte("key1"))) + require.True(t, b.disk.segments[0].getSegment().secondaryBloomFilters[1].Test([]byte("key1"))) + require.False(t, b.disk.segments[0].getSegment().secondaryBloomFilters[1].Test([]byte("key0"))) + require.NoError(t, b.Shutdown(ctx)) +} + +func TestCorruptFile(t *testing.T) { + dirName := t.TempDir() + ctx := context.Background() + logger, _ := test.NewNullLogger() + + b, err := NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), + WithWriteMetadata(true), WithUseBloomFilter(true), WithCalcCountNetAdditions(true), WithSecondaryIndices(uint16(2))) + require.NoError(t, err) + + require.NoError(t, b.Put([]byte("key"), []byte("value"))) + require.NoError(t, b.FlushMemtable()) + require.NoError(t, 
b.Shutdown(ctx)) + + files, err := os.ReadDir(dirName) + require.NoError(t, err) + fname, ok := findFileWithExt(files, ".metadata") + require.True(t, ok) + require.NoError(t, corruptBloomFileByTruncatingIt(path.Join(dirName, fname))) + + // broken file is ignored and correct one is recreated + b2, err := NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), + WithWriteMetadata(true), WithUseBloomFilter(true), WithCalcCountNetAdditions(true), WithSecondaryIndices(uint16(2))) + require.NoError(t, err) + value, err := b2.Get([]byte("key")) + require.NoError(t, err) + require.Equal(t, []byte("value"), value) +} + +func TestReadObjectCountFromMetadataFile(t *testing.T) { + dir := t.TempDir() + metadataPath := filepath.Join(dir, "test.metadata") + + // checksum (4) + version (1) + primary bloom len (4) + cna len (4) + cna data (8) + totalSize := 4 + 1 + 4 + 4 + 8 + + data := make([]byte, totalSize) + rw := byteops.NewReadWriter(data) + + rw.MoveBufferPositionForward(4) // leave space for checksum + rw.WriteByte(0) // version + rw.WriteUint32(0) // primary bloom filter length (0 bytes) + rw.WriteUint32(8) // CNA length (8 bytes) + rw.WriteUint64(42) // CNA data + + // Write with checksum + err := writeWithChecksum(rw, metadataPath, nil) + require.NoError(t, err) + + // Test reading the metadata file + count, err := ReadObjectCountFromMetadataFile(metadataPath) + require.NoError(t, err) + require.Equal(t, int64(42), count) +} + +func countFileTypes(t *testing.T, path string) map[string]int { + t.Helper() + fileTypes := map[string]int{} + + entries, err := os.ReadDir(path) + require.NoError(t, err) + for _, entry := range entries { + fileTypes[filepath.Ext(entry.Name())] += 1 + } + return fileTypes +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_net_count_additions.go 
b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_net_count_additions.go new file mode 100644 index 0000000000000000000000000000000000000000..446f0c9172a8dcf9b12a5f2092f1b9ef12f2c158 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_net_count_additions.go @@ -0,0 +1,151 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "encoding/binary" + "errors" + "fmt" + "os" + "path/filepath" + + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv/segmentindex" + "github.com/weaviate/weaviate/entities/diskio" + "github.com/weaviate/weaviate/usecases/byteops" +) + +// ErrInvalidChecksum indicates that the read file should not be trusted. For +// any pre-computed data this is a recoverable issue, as the data can simply be +// re-computed at read-time. +var ErrInvalidChecksum = errors.New("invalid checksum") + +const CountNetAdditionsFileSuffix = ".cna" + +// existOnLowerSegments is a simple function that can be passed at segment +// initialization time to check if any of the keys are truly new or previously +// seen. This can in turn be used to build up the net count additions. 
The +// reason this is abstract: +type existsOnLowerSegmentsFn func(key []byte) (bool, error) + +func (s *segment) countNetPath() string { + return s.buildPath("%s.cna") +} + +func (s *segment) initCountNetAdditions(exists existsOnLowerSegmentsFn, overwrite bool, precomputedCNAValue *int, existingFilesList map[string]int64) error { + if s.strategy != segmentindex.StrategyReplace { + // replace is the only strategy that supports counting + return nil + } + + path := s.countNetPath() + s.metaPaths = append(s.metaPaths, path) + + loadFromDisk, err := fileExistsInList(existingFilesList, filepath.Base(path)) + if err != nil { + return err + } + if loadFromDisk { + if overwrite { + err := os.Remove(path) + if err != nil { + return fmt.Errorf("delete existing net additions counter %s: %w", path, err) + } + } else { + err = s.loadCountNetFromDisk() + if err == nil { + return nil + } + + if !errors.Is(err, ErrInvalidChecksum) { + // not a recoverable error + return err + } + + // now continue re-calculating + } + } + + if precomputedCNAValue != nil { + s.countNetAdditions = *precomputedCNAValue + } else { + var lastErr error + countNet := 0 + cb := func(key []byte, tombstone bool) { + existedOnPrior, err := exists(key) + if err != nil { + lastErr = err + } + + if tombstone && existedOnPrior { + countNet-- + } + + if !tombstone && !existedOnPrior { + countNet++ + } + } + + extr := newBufferedKeyAndTombstoneExtractor(s.contents, s.dataStartPos, + s.dataEndPos, 10e6, s.secondaryIndexCount, cb) + + extr.do() + + s.countNetAdditions = countNet + + if lastErr != nil { + return lastErr + } + } + + if err := s.storeCountNetOnDisk(); err != nil { + return fmt.Errorf("store count net additions on disk: %w", err) + } + + return nil +} + +func (s *segment) storeCountNetOnDisk() error { + return storeCountNetOnDisk(s.countNetPath(), s.countNetAdditions, s.observeMetaWrite) +} + +func storeCountNetOnDisk(path string, value int, observeWrite diskio.MeteredWriterCallback) error { + rw := 
byteops.NewReadWriter(make([]byte, byteops.Uint64Len+byteops.Uint32Len)) + rw.MoveBufferPositionForward(byteops.Uint32Len) // leave space for checksum + rw.WriteUint64(uint64(value)) + + return writeWithChecksum(rw, path, observeWrite) +} + +func (s *segment) loadCountNetFromDisk() error { + data, err := loadWithChecksum(s.countNetPath(), 12, s.metrics.ReadObserver("netAdditions")) + if err != nil { + return err + } + + s.countNetAdditions = int(binary.LittleEndian.Uint64(data[0:8])) + + return nil +} + +// ReadCountNetAdditionsFile reads a .cna file and returns the count net additions value +// Returns (count, nil) if successful, (0, error) if the file is invalid or corrupted +func ReadCountNetAdditionsFile(path string) (int64, error) { + data, err := loadWithChecksum(path, 12, nil) + if err != nil { + return 0, fmt.Errorf("failed to read .cna file: %w", err) + } + + // Extract count value (first 8 bytes, uint64 little-endian) + count := int64(binary.LittleEndian.Uint64(data[0:8])) + + return count, nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_net_count_additions_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_net_count_additions_test.go new file mode 100644 index 0000000000000000000000000000000000000000..cbe26390b3fd02a3ef0288993a48922bbf18a87f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_net_count_additions_test.go @@ -0,0 +1,330 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "context" + "encoding/binary" + "io" + "os" + "path" + "strings" + "testing" + + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/cyclemanager" +) + +func TestCNA(t *testing.T) { + ctx := context.Background() + tests := bucketTests{ + { + name: "createCNAOnFlush", + f: createCNAOnFlush, + opts: []BucketOption{ + WithStrategy(StrategyReplace), + WithCalcCountNetAdditions(true), + }, + }, + { + name: "createCNAInit", + f: createCNAInit, + opts: []BucketOption{ + WithStrategy(StrategyReplace), + WithCalcCountNetAdditions(true), + }, + }, + { + name: "repairCorruptedCNAOnInit", + f: repairCorruptedCNAOnInit, + opts: []BucketOption{ + WithStrategy(StrategyReplace), + WithCalcCountNetAdditions(true), + }, + }, + } + tests.run(ctx, t) +} + +func createCNAOnFlush(ctx context.Context, t *testing.T, opts []BucketOption) { + dirName := t.TempDir() + + logger, _ := test.NewNullLogger() + + b, err := NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) + require.Nil(t, err) + defer b.Shutdown(ctx) + + require.Nil(t, b.Put([]byte("hello"), []byte("world"))) + require.Nil(t, b.FlushMemtable()) + + files, err := os.ReadDir(dirName) + require.Nil(t, err) + + _, ok := findFileWithExt(files, ".cna") + assert.True(t, ok) +} + +func createCNAInit(ctx context.Context, t *testing.T, opts []BucketOption) { + // this test deletes the initial cna and makes sure it gets recreated after + // the bucket is initialized + dirName := t.TempDir() + + logger, _ := test.NewNullLogger() + + b, err := NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) 
+ require.Nil(t, err) + defer b.Shutdown(ctx) + + require.Nil(t, b.Put([]byte("hello"), []byte("world"))) + require.Nil(t, b.FlushMemtable()) + + files, err := os.ReadDir(dirName) + require.Nil(t, err) + fname, ok := findFileWithExt(files, ".cna") + require.True(t, ok) + + err = os.RemoveAll(path.Join(dirName, fname)) + require.Nil(t, err) + + // just to ensure segments are loaded + cursor := b.Cursor() + cursor.Close() + + files, err = os.ReadDir(dirName) + require.Nil(t, err) + _, ok = findFileWithExt(files, ".cna") + require.False(t, ok, "verify the file is really gone") + + // on Windows we have to shutdown the bucket before opening it again + require.Nil(t, b.Shutdown(ctx)) + + // now create a new bucket and assert that the file is re-created on init + b2, err := NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) + require.Nil(t, err) + defer b2.Shutdown(ctx) + + // just to ensure segments are loaded + cursor = b2.Cursor() + cursor.Close() + + files, err = os.ReadDir(dirName) + require.Nil(t, err) + _, ok = findFileWithExt(files, ".cna") + require.True(t, ok) +} + +func repairCorruptedCNAOnInit(ctx context.Context, t *testing.T, opts []BucketOption) { + // this test deletes the initial cna and makes sure it gets recreated after + // the bucket is initialized + dirName := t.TempDir() + + logger, _ := test.NewNullLogger() + + b, err := NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) 
+ require.Nil(t, err) + defer b.Shutdown(ctx) + + require.Nil(t, b.Put([]byte("hello"), []byte("world"))) + require.Nil(t, b.FlushMemtable()) + + files, err := os.ReadDir(dirName) + require.Nil(t, err) + fname, ok := findFileWithExt(files, ".cna") + require.True(t, ok) + + // now corrupt the file by replacing the count value without adapting the checksum + require.Nil(t, corruptCNAFile(path.Join(dirName, fname), 12345)) + + // on Windows we have to shutdown the bucket before opening it again + require.Nil(t, b.Shutdown(ctx)) + // now create a new bucket and assert that the file is ignored, re-created on + // init, and the count matches + b2, err := NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) + require.Nil(t, err) + defer b2.Shutdown(ctx) + + assert.Equal(t, 1, b2.Count()) +} + +func TestCNA_OFF(t *testing.T) { + ctx := context.Background() + tests := bucketTests{ + { + name: "dontCreateCNA", + f: dontCreateCNA, + opts: []BucketOption{ + WithStrategy(StrategyReplace), + }, + }, + { + name: "dontRecreateCNA", + f: dontRecreateCNA, + opts: []BucketOption{ + WithStrategy(StrategyReplace), + }, + }, + { + name: "dontPrecomputeCNA", + f: dontPrecomputeCNA, + opts: []BucketOption{ + WithStrategy(StrategyReplace), + }, + }, + } + tests.run(ctx, t) +} + +func dontCreateCNA(ctx context.Context, t *testing.T, opts []BucketOption) { + dirName := t.TempDir() + logger, _ := test.NewNullLogger() + + b, err := NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), + opts...) 
+ require.NoError(t, err) + defer b.Shutdown(ctx) + + t.Run("populate", func(t *testing.T) { + require.NoError(t, b.Put([]byte("hello"), []byte("world"))) + require.NoError(t, b.FlushMemtable()) + }) + + t.Run("check files", func(t *testing.T) { + files, err := os.ReadDir(dirName) + require.NoError(t, err) + + _, ok := findFileWithExt(files, ".cna") + assert.False(t, ok) + }) + + t.Run("count", func(t *testing.T) { + assert.Equal(t, 0, b.Count()) + }) +} + +func dontRecreateCNA(ctx context.Context, t *testing.T, opts []BucketOption) { + dirName := t.TempDir() + logger, _ := test.NewNullLogger() + + t.Run("create, populate, shutdown", func(t *testing.T) { + b, err := NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), + opts...) + require.NoError(t, err) + defer b.Shutdown(ctx) + + require.NoError(t, b.Put([]byte("hello"), []byte("world"))) + require.NoError(t, b.FlushMemtable()) + }) + + b2, err := NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), + opts...) + require.NoError(t, err) + defer b2.Shutdown(ctx) + + t.Run("check files", func(t *testing.T) { + files, err := os.ReadDir(dirName) + require.NoError(t, err) + + _, ok := findFileWithExt(files, ".cna") + assert.False(t, ok) + }) + + t.Run("count", func(t *testing.T) { + assert.Equal(t, 0, b2.Count()) + }) +} + +func dontPrecomputeCNA(ctx context.Context, t *testing.T, opts []BucketOption) { + dirName := t.TempDir() + logger, _ := test.NewNullLogger() + + b, err := NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), + opts...) 
+ require.NoError(t, err) + defer b.Shutdown(ctx) + + t.Run("populate, compact", func(t *testing.T) { + require.NoError(t, b.Put([]byte("hello"), []byte("world"))) + require.NoError(t, b.FlushMemtable()) + + require.NoError(t, b.Put([]byte("hello2"), []byte("world2"))) + require.NoError(t, b.FlushMemtable()) + + compacted, err := b.disk.compactOnce() + require.NoError(t, err) + require.True(t, compacted) + }) + + t.Run("check files", func(t *testing.T) { + files, err := os.ReadDir(dirName) + require.NoError(t, err) + + _, ok := findFileWithExt(files, ".cna") + assert.False(t, ok) + }) + + t.Run("count", func(t *testing.T) { + assert.Equal(t, 0, b.Count()) + }) +} + +func findFileWithExt(files []os.DirEntry, ext string) (string, bool) { + for _, file := range files { + fname := file.Name() + if strings.HasSuffix(fname, ext) { + return fname, true + } + + } + return "", false +} + +func corruptCNAFile(fname string, corruptValue uint64) error { + f, err := os.Open(fname) + if err != nil { + return err + } + + data, err := io.ReadAll(f) + if err != nil { + return err + } + + if err := f.Close(); err != nil { + return err + } + + binary.LittleEndian.PutUint64(data[4:12], corruptValue) + + f, err = os.Create(fname) + if err != nil { + return err + } + + _, err = f.Write(data) + if err != nil { + return err + } + + return f.Close() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_precompute_for_new_segment.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_precompute_for_new_segment.go new file mode 100644 index 0000000000000000000000000000000000000000..6809bbe0ba626cb24891efa8ba2559d708c8ddea --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_precompute_for_new_segment.go @@ -0,0 +1,55 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// 
Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import "fmt" + +func (sg *SegmentGroup) initAndPrecomputeNewSegment(path string) (*segment, error) { + // During this entire operation we need to make sure that no compaction + // happens, otherwise we get a race between the existsOnLower func and + // the meta count init. + // + // Normal operations (user CRUD) are fine. + + // We can't simply hold an RLock on the maintenanceLock without coordinating + // with potential Lock() callers. Otherwise if we hold the RLock for minutes + // and someone else calls Lock() we will deadlock. + // + // The only known caller of Lock() is the compaction routine, so we can + // synchronize with it by holding the flushVsCompactLock. + sg.flushVsCompactLock.Lock() + defer sg.flushVsCompactLock.Unlock() + + // It is now safe to hold the RLock on the maintenanceLock because we know + // that the compaction routine will not try to obtain the Lock() until we + // have released the flushVsCompactLock. 
+ segments, release := sg.getAndLockSegments() + defer release() + + segment, err := newSegment(path, sg.logger, + sg.metrics, sg.makeExistsOn(segments), + segmentConfig{ + mmapContents: sg.mmapContents, + useBloomFilter: sg.useBloomFilter, + calcCountNetAdditions: sg.calcCountNetAdditions, + overwriteDerived: true, + enableChecksumValidation: sg.enableChecksumValidation, + MinMMapSize: sg.MinMMapSize, + allocChecker: sg.allocChecker, + writeMetadata: sg.writeMetadata, + }) + if err != nil { + return nil, fmt.Errorf("init and pre-compute new segment %s: %w", path, err) + } + + return segment, nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_reader_benchmark_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_reader_benchmark_test.go new file mode 100644 index 0000000000000000000000000000000000000000..ae025a5e3d8c4cbcb5e6a983039806865f668bdd --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_reader_benchmark_test.go @@ -0,0 +1,53 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "os" + "path/filepath" + "testing" + + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/require" +) + +func BenchmarkSegmentReader(b *testing.B) { + dirName := b.TempDir() + f, err := os.Create(filepath.Join(dirName, "segment1.tmp")) + require.NoError(b, err) + + f.Write(make([]byte, 1024*1024)) // Write 1MB of data + f.Sync() + + reg := prometheus.NewRegistry() + + ioRead := prometheus.NewSummaryVec(prometheus.SummaryOpts{ + Name: "test_file_io_reads_total_bytes", + Help: "Total number of bytes read from disk", + }, []string{"operation"}) + + err = reg.Register(ioRead) + require.NoError(b, err) + + segment := &segment{ + contentFile: f, + size: 1024 * 1024, + metrics: &Metrics{IORead: ioRead}, + } + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, release, _ := segment.bufferedReaderAt(0, "some op") + release() + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_replace_strategy.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_replace_strategy.go new file mode 100644 index 0000000000000000000000000000000000000000..a43121ec23abb147ba408868fbb3db9563c1f336 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_replace_strategy.go @@ -0,0 +1,180 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "encoding/binary" + "errors" + "fmt" + "time" + + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv/segmentindex" + "github.com/weaviate/weaviate/entities/lsmkv" +) + +func (s *segment) get(key []byte) ([]byte, error) { + if s.strategy != segmentindex.StrategyReplace { + return nil, fmt.Errorf("get only possible for strategy %q", StrategyReplace) + } + + before := time.Now() + + if s.useBloomFilter && !s.bloomFilter.Test(key) { + s.bloomFilterMetrics.trueNegative(before) + return nil, lsmkv.NotFound + } + + node, err := s.index.Get(key) + if err != nil { + if errors.Is(err, lsmkv.NotFound) { + if s.useBloomFilter { + s.bloomFilterMetrics.falsePositive(before) + } + return nil, lsmkv.NotFound + } else { + return nil, err + } + } + + defer func() { + if s.useBloomFilter { + s.bloomFilterMetrics.truePositive(before) + } + }() + + // We need to copy the data we read from the segment exactly once in this + // place. This means that future processing can share this memory as much as + // it wants to, as it can now be considered immutable. If we didn't copy in + // this place it would only be safe to hold this data while still under the + // protection of the segmentGroup.maintenanceLock. This lock makes sure that + // no compaction is started during an ongoing read. However, once read, + // further processing is no longer protected by lock. + // If a compaction completes and the old segment is removed, we would be accessing + // invalid memory without the copy, thus leading to a SEGFAULT. 
+ // Similar approach was used to fix SEGFAULT in collection strategy + // https://github.com/weaviate/weaviate/issues/1837 + contentsCopy := make([]byte, node.End-node.Start) + if err = s.copyNode(contentsCopy, nodeOffset{node.Start, node.End}); err != nil { + return nil, err + } + + _, v, err := s.replaceStratParseData(contentsCopy) + if err != nil { + return nil, err + } + + return v, nil +} + +func (s *segment) getBySecondaryIntoMemory(pos int, key []byte, buffer []byte) ([]byte, []byte, []byte, error) { + if s.strategy != segmentindex.StrategyReplace { + return nil, nil, nil, fmt.Errorf("get only possible for strategy %q", StrategyReplace) + } + + if pos >= len(s.secondaryIndices) || s.secondaryIndices[pos] == nil { + return nil, nil, nil, fmt.Errorf("no secondary index at pos %d", pos) + } + + if s.useBloomFilter && !s.secondaryBloomFilters[pos].Test(key) { + return nil, nil, nil, lsmkv.NotFound + } + + node, err := s.secondaryIndices[pos].Get(key) + if err != nil { + return nil, nil, nil, err + } + + // We need to copy the data we read from the segment exactly once in this + // place. This means that future processing can share this memory as much as + // it wants to, as it can now be considered immutable. If we didn't copy in + // this place it would only be safe to hold this data while still under the + // protection of the segmentGroup.maintenanceLock. This lock makes sure that + // no compaction is started during an ongoing read. However, once read, + // further processing is no longer protected by lock. + // If a compaction completes and the old segment is removed, we would be accessing + // invalid memory without the copy, thus leading to a SEGFAULT. 
+ // Similar approach was used to fix SEGFAULT in collection strategy + // https://github.com/weaviate/weaviate/issues/1837 + var contentsCopy []byte + if uint64(cap(buffer)) >= node.End-node.Start { + contentsCopy = buffer[:node.End-node.Start] + } else { + contentsCopy = make([]byte, node.End-node.Start) + } + if err = s.copyNode(contentsCopy, nodeOffset{node.Start, node.End}); err != nil { + return nil, nil, nil, err + } + + primaryKey, currContent, err := s.replaceStratParseData(contentsCopy) + if err != nil { + return nil, nil, nil, err + } + + return primaryKey, currContent, contentsCopy, err +} + +func (s *segment) replaceStratParseData(in []byte) ([]byte, []byte, error) { + if len(in) == 0 { + return nil, nil, lsmkv.NotFound + } + + // byte meaning + // 0 is tombstone + // 1-8 data length as Little Endian uint64 + // 9-length data + + // check the tombstone byte + if in[0] == 0x01 { + if len(in) < 9 { + return nil, nil, lsmkv.Deleted + } + + valueLength := binary.LittleEndian.Uint64(in[1:9]) + + return nil, nil, errorFromTombstonedValue(in[9 : 9+valueLength]) + } + + valueLength := binary.LittleEndian.Uint64(in[1:9]) + + pkLength := binary.LittleEndian.Uint32(in[9+valueLength:]) + + return in[9+valueLength+4 : 9+valueLength+4+uint64(pkLength)], in[9 : 9+valueLength], nil +} + +func (s *segment) exists(key []byte) (bool, error) { + if s.strategy != segmentindex.StrategyReplace { + return false, fmt.Errorf("exists only possible for strategy %q", StrategyReplace) + } + + before := time.Now() + + if s.useBloomFilter && !s.bloomFilter.Test(key) { + s.bloomFilterMetrics.trueNegative(before) + return false, nil + } + + _, err := s.index.Get(key) + + if err == nil { + if s.useBloomFilter { + s.bloomFilterMetrics.truePositive(before) + } + return true, nil + } + if errors.Is(err, lsmkv.NotFound) { + if s.useBloomFilter { + s.bloomFilterMetrics.falsePositive(before) + } + return false, nil + } + return false, err +} diff --git 
a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_roaring_set_strategy.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_roaring_set_strategy.go new file mode 100644 index 0000000000000000000000000000000000000000..7b270ff898bd2f039105eb27ab2053948ebbab98 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_roaring_set_strategy.go @@ -0,0 +1,124 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "github.com/pkg/errors" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv/segmentindex" + "github.com/weaviate/weaviate/adapters/repos/db/roaringset" + "github.com/weaviate/weaviate/entities/concurrency" + "github.com/weaviate/weaviate/entities/lsmkv" +) + +// returned bitmaps are cloned and safe to mutate +func (s *segment) roaringSetGet(key []byte, bitmapBufPool roaringset.BitmapBufPool, +) (l roaringset.BitmapLayer, release func(), err error) { + out := roaringset.BitmapLayer{} + + if err := segmentindex.CheckExpectedStrategy(s.strategy, segmentindex.StrategyRoaringSet); err != nil { + return out, noopRelease, err + } + + if s.useBloomFilter && !s.bloomFilter.Test(key) { + return out, noopRelease, lsmkv.NotFound + } + node, err := s.index.Get(key) + if err != nil { + return out, noopRelease, err + } + + var releaseAdd, releaseDel func() + offset := nodeOffset{node.Start, node.End} + if s.readFromMemory { + sn, err := s.segmentNodeFromBufferMmap(offset) + if err != nil { + return out, noopRelease, err + } + out.Deletions, releaseDel = bitmapBufPool.CloneToBuf(sn.Deletions()) + out.Additions, releaseAdd = bitmapBufPool.CloneToBuf(sn.Additions()) + } else { + sn, release, err := s.segmentNodeFromBufferPread(offset, 
bitmapBufPool) + if err != nil { + return out, noopRelease, err + } + out.Deletions, releaseDel = bitmapBufPool.CloneToBuf(sn.Deletions()) + // reuse buffer of entire segment node. + // node's data might get overwritten by changes of underlying additions bitmap. + // overwrites should be safe, as other data is not used later on + out.Additions, releaseAdd = sn.AdditionsUnlimited(), release + } + + return out, func() { releaseAdd(); releaseDel() }, nil +} + +func (s *segment) roaringSetMergeWith(key []byte, input roaringset.BitmapLayer, bitmapBufPool roaringset.BitmapBufPool, +) error { + if err := segmentindex.CheckExpectedStrategy(s.strategy, segmentindex.StrategyRoaringSet); err != nil { + return err + } + + if s.useBloomFilter && !s.bloomFilter.Test(key) { + return nil + } + node, err := s.index.Get(key) + if err != nil { + if errors.Is(err, lsmkv.NotFound) { + return nil + } + return err + } + + var sn *roaringset.SegmentNode + offset := nodeOffset{node.Start, node.End} + if s.readFromMemory { + sn, err = s.segmentNodeFromBufferMmap(offset) + } else { + var release func() + sn, release, err = s.segmentNodeFromBufferPread(offset, bitmapBufPool) + defer release() + } + if err != nil { + return err + } + + input.Additions. + AndNotConc(sn.Deletions(), concurrency.SROAR_MERGE). 
+ OrConc(sn.Additions(), concurrency.SROAR_MERGE) + return nil +} + +func (s *segment) segmentNodeFromBufferMmap(offset nodeOffset, +) (sn *roaringset.SegmentNode, err error) { + return roaringset.NewSegmentNodeFromBuffer(s.contents[offset.start:offset.end]), nil +} + +func (s *segment) segmentNodeFromBufferPread(offset nodeOffset, bitmapBufPool roaringset.BitmapBufPool, +) (sn *roaringset.SegmentNode, release func(), err error) { + reader, readerRelease, err := s.bufferedReaderAt(offset.start, "roaringSetRead") + if err != nil { + return nil, noopRelease, err + } + defer readerRelease() + + ln := int(offset.end - offset.start) + contents, release := bitmapBufPool.Get(ln) + contents = contents[:ln] + + _, err = reader.Read(contents) + if err != nil { + release() + return nil, noopRelease, err + } + return roaringset.NewSegmentNodeFromBuffer(contents), release, nil +} + +var noopRelease = func() {} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_serialization.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_serialization.go new file mode 100644 index 0000000000000000000000000000000000000000..bb0da13f6904acf46460d23b1b6b189f60d43b6b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_serialization.go @@ -0,0 +1,496 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "encoding/binary" + "fmt" + "io" + + "github.com/pkg/errors" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv/segmentindex" + "github.com/weaviate/weaviate/usecases/byteops" +) + +// a single node of strategy "replace" +type segmentReplaceNode struct { + tombstone bool + value []byte + primaryKey []byte + secondaryIndexCount uint16 + secondaryKeys [][]byte + offset int +} + +func (s *segmentReplaceNode) KeyIndexAndWriteTo(w io.Writer) (segmentindex.Key, error) { + out := segmentindex.Key{} + written := 0 + + buf := make([]byte, 9) + if s.tombstone { + buf[0] = 1 + } else { + buf[0] = 0 + } + + valueLength := uint64(len(s.value)) + binary.LittleEndian.PutUint64(buf[1:9], valueLength) + if _, err := w.Write(buf); err != nil { + return out, err + } + + written += 9 + + n, err := w.Write(s.value) + if err != nil { + return out, errors.Wrapf(err, "write node value") + } + written += n + + keyLength := uint32(len(s.primaryKey)) + binary.LittleEndian.PutUint32(buf[0:4], keyLength) + if _, err := w.Write(buf[0:4]); err != nil { + return out, err + } + written += 4 + + n, err = w.Write(s.primaryKey) + if err != nil { + return out, errors.Wrapf(err, "write node key") + } + written += n + + for j := 0; j < int(s.secondaryIndexCount); j++ { + var secondaryKeyLength uint32 + if j < len(s.secondaryKeys) { + secondaryKeyLength = uint32(len(s.secondaryKeys[j])) + } + + // write the key length in any case + binary.LittleEndian.PutUint32(buf[0:4], secondaryKeyLength) + if _, err := w.Write(buf[0:4]); err != nil { + return out, err + } + written += 4 + + if secondaryKeyLength == 0 { + // we're done here + continue + } + + // only write the key if it exists + n, err = w.Write(s.secondaryKeys[j]) + if err != nil { + return out, errors.Wrapf(err, "write secondary key %d", j) + } + written += n + } + + return segmentindex.Key{ + ValueStart: s.offset, + ValueEnd: s.offset + written, + Key: s.primaryKey, + 
SecondaryKeys: s.secondaryKeys, + }, nil +} + +func ParseReplaceNode(r io.Reader, secondaryIndexCount uint16) (segmentReplaceNode, error) { + out := segmentReplaceNode{} + + // 9 bytes is the most we can ever read uninterrupted, i.e. without a dynamic + // read in between. + tmpBuf := make([]byte, 9) + if n, err := io.ReadFull(r, tmpBuf); err != nil { + return out, errors.Wrap(err, "read tombstone and value length") + } else { + out.offset += n + } + + out.tombstone = tmpBuf[0] == 0x1 + valueLength := binary.LittleEndian.Uint64(tmpBuf[1:9]) + out.value = make([]byte, valueLength) + if n, err := io.ReadFull(r, out.value); err != nil { + return out, errors.Wrap(err, "read value") + } else { + out.offset += n + } + + if n, err := io.ReadFull(r, tmpBuf[0:4]); err != nil { + return out, errors.Wrap(err, "read key length encoding") + } else { + out.offset += n + } + + keyLength := binary.LittleEndian.Uint32(tmpBuf[0:4]) + out.primaryKey = make([]byte, keyLength) + if n, err := io.ReadFull(r, out.primaryKey); err != nil { + return out, errors.Wrap(err, "read key") + } else { + out.offset += n + } + + out.secondaryIndexCount = secondaryIndexCount + if secondaryIndexCount > 0 { + out.secondaryKeys = make([][]byte, secondaryIndexCount) + } + + for j := 0; j < int(secondaryIndexCount); j++ { + if n, err := io.ReadFull(r, tmpBuf[0:4]); err != nil { + return out, errors.Wrap(err, "read secondary key length encoding") + } else { + out.offset += n + } + secKeyLen := binary.LittleEndian.Uint32(tmpBuf[0:4]) + if secKeyLen == 0 { + continue + } + + out.secondaryKeys[j] = make([]byte, secKeyLen) + if n, err := io.ReadFull(r, out.secondaryKeys[j]); err != nil { + return out, errors.Wrap(err, "read secondary key") + } else { + out.offset += n + } + } + + return out, nil +} + +func ParseReplaceNodeIntoPread(r io.Reader, secondaryIndexCount uint16, out *segmentReplaceNode) (err error) { + out.offset = 0 + + if err := binary.Read(r, binary.LittleEndian, &out.tombstone); err != nil { + 
return errors.Wrap(err, "read tombstone") + } + out.offset += 1 + + var valueLength uint64 + if err := binary.Read(r, binary.LittleEndian, &valueLength); err != nil { + return errors.Wrap(err, "read value length encoding") + } + out.offset += 8 + + if int(valueLength) > cap(out.value) { + out.value = make([]byte, valueLength) + } else { + out.value = out.value[:valueLength] + } + + if n, err := io.ReadFull(r, out.value); err != nil { + return errors.Wrap(err, "read value") + } else { + out.offset += n + } + + var keyLength uint32 + if err := binary.Read(r, binary.LittleEndian, &keyLength); err != nil { + return errors.Wrap(err, "read key length encoding") + } + out.offset += 4 + + out.primaryKey = make([]byte, keyLength) + if n, err := io.ReadFull(r, out.primaryKey); err != nil { + return errors.Wrap(err, "read key") + } else { + out.offset += n + } + + if secondaryIndexCount > 0 { + out.secondaryKeys = make([][]byte, secondaryIndexCount) + } + + for j := 0; j < int(secondaryIndexCount); j++ { + var secKeyLen uint32 + if err := binary.Read(r, binary.LittleEndian, &secKeyLen); err != nil { + return errors.Wrap(err, "read secondary key length encoding") + } + out.offset += 4 + + if secKeyLen == 0 { + continue + } + + out.secondaryKeys[j] = make([]byte, secKeyLen) + if n, err := io.ReadFull(r, out.secondaryKeys[j]); err != nil { + return errors.Wrap(err, "read secondary key") + } else { + out.offset += n + } + } + + return nil +} + +func ParseReplaceNodeIntoMMAP(r *byteops.ReadWriter, secondaryIndexCount uint16, out *segmentReplaceNode) error { + out.tombstone = r.ReadUint8() == 0x01 + valueLength := r.ReadUint64() + + if int(valueLength) > cap(out.value) { + out.value = make([]byte, valueLength) + } else { + out.value = out.value[:valueLength] + } + + if _, err := r.CopyBytesFromBuffer(valueLength, out.value); err != nil { + return err + } + + // Note: In a previous version (prior to + // https://github.com/weaviate/weaviate/pull/3660) this was a copy. 
The + // mentioned PR optimizes the Replace Cursor which led to this now being + // shared memory. After internal review, we believe this is safe to do. The + // cursor gives no guarantees about memory after calling .next(). Before + // .next() is called, this should be safe. Nevertheless, we are leaving this + // note in case a future bug appears, as this should make this spot easier to + // find. + out.primaryKey = r.ReadBytesFromBufferWithUint32LengthIndicator() + + if secondaryIndexCount > 0 { + out.secondaryKeys = make([][]byte, secondaryIndexCount) + } + + for j := 0; j < int(secondaryIndexCount); j++ { + // Note: In a previous version (prior to + // https://github.com/weaviate/weaviate/pull/3660) this was a copy. The + // mentioned PR optimizes the Replace Cursor which led to this now being + // shared memory. After internal review, we believe this is safe to do. The + // cursor gives no guarantees about memory after calling .next(). Before + // .next() is called, this should be safe. Nevertheless, we are leaving this + // note in case a future bug appears, as this should make this spot easier to + // find. 
+ out.secondaryKeys[j] = r.ReadBytesFromBufferWithUint32LengthIndicator() + } + + out.offset = int(r.Position) + return nil +} + +// collection strategy does not support secondary keys at this time +type segmentCollectionNode struct { + values []value + primaryKey []byte + offset int +} + +func (s segmentCollectionNode) KeyIndexAndWriteTo(w io.Writer) (segmentindex.Key, error) { + out := segmentindex.Key{} + written := 0 + valueLen := uint64(len(s.values)) + buf := make([]byte, 9) + binary.LittleEndian.PutUint64(buf, valueLen) + if _, err := w.Write(buf[0:8]); err != nil { + return out, errors.Wrapf(err, "write values len for node") + } + written += 8 + + for i, value := range s.values { + if value.tombstone { + buf[0] = 0x01 + } else { + buf[0] = 0x00 + } + + valueLen := uint64(len(value.value)) + binary.LittleEndian.PutUint64(buf[1:9], valueLen) + if _, err := w.Write(buf[0:9]); err != nil { + return out, errors.Wrapf(err, "write len of value %d", i) + } + written += 9 + + n, err := w.Write(value.value) + if err != nil { + return out, errors.Wrapf(err, "write value %d", i) + } + written += n + } + + keyLength := uint32(len(s.primaryKey)) + binary.LittleEndian.PutUint32(buf[0:4], keyLength) + if _, err := w.Write(buf[0:4]); err != nil { + return out, errors.Wrapf(err, "write key length encoding for node") + } + written += 4 + + n, err := w.Write(s.primaryKey) + if err != nil { + return out, errors.Wrapf(err, "write node") + } + written += n + + out = segmentindex.Key{ + ValueStart: s.offset, + ValueEnd: s.offset + written, + Key: s.primaryKey, + } + + return out, nil +} + +// ParseCollectionNode reads from r and parses the collection values into a segmentCollectionNode +// +// When only given an offset, r is constructed as a *bufio.Reader to avoid first reading the +// entire segment (could be GBs). Each consecutive read will be buffered to avoid excessive +// syscalls. +// +// When we already have a finite and manageable []byte (i.e. 
when we have already seeked to an +// lsmkv node and have start+end offset), r should be constructed as a *bytes.Reader, since the +// contents have already been `pread` from the segment contentFile. +func ParseCollectionNode(r io.Reader) (segmentCollectionNode, error) { + out := segmentCollectionNode{} + // 9 bytes is the most we can ever read uninterrupted, i.e. without a dynamic + // read in between. + tmpBuf := make([]byte, 9) + + if n, err := io.ReadFull(r, tmpBuf[0:8]); err != nil { + return out, errors.Wrap(err, "read values len") + } else { + out.offset += n + } + + valuesLen := binary.LittleEndian.Uint64(tmpBuf[0:8]) + out.values = make([]value, valuesLen) + for i := range out.values { + if n, err := io.ReadFull(r, tmpBuf[0:9]); err != nil { + return out, errors.Wrap(err, "read value tombstone and len") + } else { + out.offset += n + } + out.values[i].tombstone = tmpBuf[0] == 0x1 + valueLen := binary.LittleEndian.Uint64(tmpBuf[1:9]) + out.values[i].value = make([]byte, valueLen) + n, err := io.ReadFull(r, out.values[i].value) + if err != nil { + return out, errors.Wrap(err, "read value") + } + out.offset += n + } + + if n, err := io.ReadFull(r, tmpBuf[0:4]); err != nil { + return out, errors.Wrap(err, "read key len") + } else { + out.offset += n + } + keyLen := binary.LittleEndian.Uint32(tmpBuf[0:4]) + out.primaryKey = make([]byte, keyLen) + n, err := io.ReadFull(r, out.primaryKey) + if err != nil { + return out, errors.Wrap(err, "read key") + } + out.offset += n + + return out, nil +} + +// ParseCollectionNodeInto takes the []byte slice and parses it into the +// specified node. It does not perform any copies and the caller must be aware +// that memory may be shared between the two. As a result, the caller must make +// sure that they do not modify "in" while "node" is still in use. A safer +// alternative is to use ParseCollectionNode. 
+// +// The primary intention of this function is to provide a way to reuse buffers +// when the lifetime is controlled tightly, for example in cursors used within +// compactions. Use at your own risk! +// +// If the buffers of the provided node have enough capacity they will be +// reused. Only if the capacity is not enough, will an allocation occur. This +// allocation uses 25% overhead to avoid future allocations for nodes of +// similar size. +// +// As a result calling this method only makes sense if you plan on calling it +// multiple times. Calling it just once on an uninitialized node does not have +// major advantages over calling ParseCollectionNode. +func ParseCollectionNodeInto(r io.Reader, node *segmentCollectionNode) error { + // offset is only the local offset relative to "in". In the end we need to + // update the global offset. + offset := 0 + + buf := make([]byte, 9) + _, err := io.ReadFull(r, buf[0:8]) + if err != nil { + return fmt.Errorf("read values len: %w", err) + } + + valuesLen := binary.LittleEndian.Uint64(buf[0:8]) + offset += 8 + + resizeValuesOfCollectionNode(node, valuesLen) + for i := range node.values { + _, err = io.ReadFull(r, buf) + if err != nil { + return fmt.Errorf("read values len: %w", err) + } + + node.values[i].tombstone = buf[0] == 0x1 + offset += 1 + + valueLen := binary.LittleEndian.Uint64(buf[1:9]) + offset += 8 + + resizeValueOfCollectionNodeAtPos(node, i, valueLen) + + _, err = io.ReadFull(r, node.values[i].value) + if err != nil { + return fmt.Errorf("read node value: %w", err) + } + + offset += int(valueLen) + } + + _, err = io.ReadFull(r, buf[0:4]) + if err != nil { + return fmt.Errorf("read values len: %w", err) + } + keyLen := binary.LittleEndian.Uint32(buf) + offset += 4 + + resizeKeyOfCollectionNode(node, keyLen) + _, err = io.ReadFull(r, node.primaryKey) + if err != nil { + return fmt.Errorf("read primary key: %w", err) + } + offset += int(keyLen) + + node.offset = offset + return nil +} + +func 
resizeValuesOfCollectionNode(node *segmentCollectionNode, size uint64) { + if cap(node.values) >= int(size) { + node.values = node.values[:size] + } else { + // Allocate with 25% overhead to reduce chance of having to do multiple + // allocations sequentially. + node.values = make([]value, size, int(float64(size)*1.25)) + } +} + +func resizeValueOfCollectionNodeAtPos(node *segmentCollectionNode, pos int, + size uint64, +) { + if cap(node.values[pos].value) >= int(size) { + node.values[pos].value = node.values[pos].value[:size] + } else { + // Allocate with 25% overhead to reduce chance of having to do multiple + // allocations sequentially. + node.values[pos].value = make([]byte, size, int(float64(size)*1.25)) + } +} + +func resizeKeyOfCollectionNode(node *segmentCollectionNode, size uint32) { + if cap(node.primaryKey) >= int(size) { + node.primaryKey = node.primaryKey[:size] + } else { + // Allocate with 25% overhead to reduce chance of having to do multiple + // allocations sequentially. + node.primaryKey = make([]byte, size, int(float64(size)*1.25)) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_serialization_benchmarks_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_serialization_benchmarks_test.go new file mode 100644 index 0000000000000000000000000000000000000000..c14a68099a3e5da30f15e20829b8bc9ef036ed4a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_serialization_benchmarks_test.go @@ -0,0 +1,329 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "bytes" + "crypto/rand" + "encoding/binary" + "fmt" + "io" + "os" + "path/filepath" + "runtime" + "testing" + + "github.com/stretchr/testify/require" +) + +func BenchmarkReplaceNodeKeyIndexAndWriteTo(b *testing.B) { + // targetBuf := bytes.NewBuffer(make([]byte, 32*1024*1024)) // large enough to avoid growths during running + targetBuf := bytes.NewBuffer(nil) // large enough to avoid growths during running + + node := segmentReplaceNode{ + tombstone: true, + value: []byte("foo bar"), + primaryKey: []byte("foo bar"), + secondaryIndexCount: 1, + secondaryKeys: [][]byte{[]byte("foo bar")}, + offset: 27, + } + + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + _, err := node.KeyIndexAndWriteTo(targetBuf) + require.Nil(b, err) + } +} + +func BenchmarkCollectionNodeKeyIndexAndWriteTo(b *testing.B) { + targetBuf := bytes.NewBuffer(make([]byte, 32*1024*1024)) // large enough to avoid growths during running + + node := segmentCollectionNode{ + primaryKey: []byte("foo bar"), + offset: 27, + values: []value{ + { + value: []byte("my-value"), + tombstone: true, + }, + { + value: []byte("my-value"), + tombstone: true, + }, + }, + } + + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + node.KeyIndexAndWriteTo(targetBuf) + } +} + +type testCase struct { + name string + valueSize int + keySize int + secondaryKeysCount int + secondaryKeySize int +} + +func BenchmarkFileParseReplaceNode(b *testing.B) { + testCases := []testCase{ + {"PrimaryKey-64-Sec-0", 4096, 64, 0, 0}, + {"PrimaryKey-64-Sec-0", 4096, 128, 0, 0}, + {"PrimaryKey-512-Sec-0", 4096, 512, 0, 0}, + {"PrimaryKey-1024-Sec-0", 4096, 1024, 0, 0}, + {"PrimaryKey-4096-Sec-0", 4096, 4096, 0, 0}, + {"SecondaryKeys-1-128", 4096, 128, 1, 128}, + {"SecondaryKeys-2-128", 4096, 128, 2, 128}, + {"SecondaryKeys-3-128", 4096, 128, 3, 128}, + {"SecondaryKeys-4-128", 4096, 128, 4, 128}, + {"SecondaryKeys-8-128", 4096, 128, 8, 128}, + {"SecondaryKeys-16-128", 4096, 128, 
16, 128}, + {"SecondaryKeys-128-128", 4096, 128, 128, 128}, + } + + out := &segmentReplaceNode{} + tempDir := b.TempDir() + + for _, tc := range testCases { + data, err := generateTestData(b, tc.valueSize, tc.keySize, tc.secondaryKeysCount, tc.secondaryKeySize) + require.NoErrorf(b, err, "error generating test data") + dataLen := len(data) + + tempFile := makeTempFile(b, tempDir, tc, data) + + benchmarkWithGCMetrics(b, fmt.Sprintf("DirectFileAccess-%s-%d", tc.name, dataLen), func(b *testing.B) { + runDirectFileAccess(b, tc, data, tempFile, out) + }) + + benchmarkWithGCMetrics(b, fmt.Sprintf("BufferedFileAccess-%s-%d", tc.name, dataLen), func(b *testing.B) { + runBufferedFileAccess(b, tc, data, tempFile, out) + }) + + benchmarkWithGCMetrics(b, fmt.Sprintf("PreloadBufferAccess-%s-%d", tc.name, dataLen), func(b *testing.B) { + runPreloadBufferAccess(b, tc, data, tempFile, out) + }) + + benchmarkWithGCMetrics(b, fmt.Sprintf("FileBufferingOnly-%s-%d", tc.name, dataLen), func(b *testing.B) { + runFileBufferingOnly(b, data, tempFile) + }) + + benchmarkWithGCMetrics(b, fmt.Sprintf("FileParsingOnly-%s-%d", tc.name, dataLen), func(b *testing.B) { + runFileParsingOnly(b, tc, data, tempFile, out) + }) + } +} + +// benchmarkWithGCMetrics runs a benchmark and reports GC pressure metrics +func benchmarkWithGCMetrics(b *testing.B, name string, benchFn func(b *testing.B)) { + b.Run(name, func(b *testing.B) { + var memStatsBeforeGC, memStatsAfterGC runtime.MemStats + + // Force GC before measurement to get a clean slate + runtime.GC() + runtime.ReadMemStats(&memStatsBeforeGC) + benchFn(b) + runtime.ReadMemStats(&memStatsAfterGC) + + cycles := float64(memStatsAfterGC.NumGC - memStatsBeforeGC.NumGC) + b.ReportMetric(cycles/float64(b.N), "num/op") + + pauseTimeNanos := float64(memStatsAfterGC.PauseTotalNs - memStatsBeforeGC.PauseTotalNs) + b.ReportMetric(pauseTimeNanos/float64(b.N), "ns/op") + }) +} + +// runFileParsingOnly measures the cost of parsing when the file is already in 
memory. +// This isolates parsing performance without any file reading overhead. +func runFileParsingOnly(b *testing.B, tc testCase, data []byte, tempFile string, out *segmentReplaceNode, +) { + fileContents, err := os.ReadFile(tempFile) // Read file before timing. + require.NoErrorf(b, err, "error reading file %s", tempFile) + + reader := bytes.NewReader(fileContents) + b.ResetTimer() + b.SetBytes(int64(len(data))) + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + reader.Reset(fileContents) + err = ParseReplaceNodeIntoPread(reader, uint16(tc.secondaryKeysCount), out) + require.NoErrorf(b, err, "error parsing file %s", tempFile) + } +} + +// runFileBufferingOnly measures the cost of reading the file into a memory buffer (without parsing). +// This isolates the I/O overhead from parsing. +func runFileBufferingOnly(b *testing.B, data []byte, tempFile string) { + var err error + file, cleanup := openFile(b, tempFile) + b.Cleanup(cleanup) + + b.ResetTimer() + b.SetBytes(int64(len(data))) + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + _, err = file.Seek(0, 0) + require.NoError(b, err, "error seeking file: %v", err) + + _, err = io.ReadAll(file) // Includes measuring the cost of reading the file into memory. + require.NoErrorf(b, err, "error reading all: %v", err) + } +} + +// runPreloadBufferAccess measures parsing performance when the file has already been loaded into memory and a shared buffer is used. +// This isolates the parsing cost and avoids measuring I/O overhead. +func runPreloadBufferAccess(b *testing.B, tc testCase, data []byte, tempFile string, out *segmentReplaceNode, +) { + fileContents, err := os.ReadFile(tempFile) // File read before benchmark timing. + require.NoErrorf(b, err, "error reading file: %v", err) + + reader := bytes.NewReader(fileContents) + b.ResetTimer() + b.SetBytes(int64(len(data))) + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + reader.Reset(fileContents) // Resets reader instead of allocating a new one. 
+ err = ParseReplaceNodeIntoPread(reader, uint16(tc.secondaryKeysCount), out) + require.NoErrorf(b, err, "error parsing test data: %v", err) + } +} + +// runBufferedFileAccess measures performance of a buffered approach where the entire file is read into memory first, +// then parsed from a memory buffer. Includes the cost of reading the file into memory. +func runBufferedFileAccess(b *testing.B, tc testCase, data []byte, tempFile string, out *segmentReplaceNode, +) { + file, cleanup := openFile(b, tempFile) + b.Cleanup(cleanup) + + b.ResetTimer() + b.SetBytes(int64(len(data))) + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + _, err := file.Seek(0, 0) + require.NoError(b, err, "error seeking file: %v", err) + + all, err := io.ReadAll(file) // Reading full file into memory. + require.NoErrorf(b, err, "error reading data: %v", err) + + require.NotEmpty(b, all, "file is empty") + + err = ParseReplaceNodeIntoPread(bytes.NewReader(all), uint16(tc.secondaryKeysCount), out) + require.NoErrorf(b, err, "error parsing test data: %v", err) + } +} + +// runDirectFileAccess measures performance of direct file access using multiple `pread` calls. +// Includes the overhead of system calls for reading from disk. 
+func runDirectFileAccess(b *testing.B, tc testCase, data []byte, tempFile string, out *segmentReplaceNode, +) { + var err error + file, cleanup := openFile(b, tempFile) + b.Cleanup(cleanup) + + b.ResetTimer() + b.SetBytes(int64(len(data))) + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + _, err = file.Seek(0, 0) + require.NoErrorf(b, err, "error seeking file: %v", err) + + err = ParseReplaceNodeIntoPread(file, uint16(tc.secondaryKeysCount), out) + require.NoErrorf(b, err, "error parsing test data: %v", err) + } +} + +func randomTombstone() (int, error) { + b := make([]byte, 1) + if _, err := rand.Read(b); err != nil { + return 0, err + } + return int(b[0] & 1), nil +} + +func generateTestData(b *testing.B, valueSize, keySize, secondaryKeysCount, secondaryKeySize int) ([]byte, error) { + b.Helper() + + var buffer bytes.Buffer + var err error + + tombstone, err := randomTombstone() + if err != nil { + return nil, fmt.Errorf("error generating random tombstone: %w", err) + } + + if err = binary.Write(&buffer, binary.LittleEndian, tombstone == 0); err != nil { + return nil, fmt.Errorf("error writing tombstone binary: %w", err) + } + + if err = binary.Write(&buffer, binary.LittleEndian, uint64(valueSize)); err != nil { + return nil, fmt.Errorf("error writing valueSize binary: %w", err) + } + + valueBuffer := make([]byte, valueSize) + if _, err = rand.Read(valueBuffer); err != nil { + return nil, fmt.Errorf("error reading valueSize binary: %w", err) + } + buffer.Write(valueBuffer) + + if err = binary.Write(&buffer, binary.LittleEndian, uint32(keySize)); err != nil { + return nil, fmt.Errorf("error writing keySize binary: %w", err) + } + + keyBuffer := make([]byte, keySize) + if _, err = rand.Read(keyBuffer); err != nil { + return nil, fmt.Errorf("error reading keySize binary: %w", err) + } + buffer.Write(keyBuffer) + + for i := 0; i < secondaryKeysCount; i++ { + if err = binary.Write(&buffer, binary.LittleEndian, uint32(secondaryKeySize)); err != nil { + return nil, 
fmt.Errorf("error writing secondaryKeySize binary: %w", err) + } + + if secondaryKeySize > 0 { + secondaryKeyBuffer := make([]byte, secondaryKeySize) + if _, err := rand.Read(secondaryKeyBuffer); err != nil { + return nil, fmt.Errorf("error reading secondaryKeySize binary: %w", err) + } + buffer.Write(secondaryKeyBuffer) + } + } + + return buffer.Bytes(), nil +} + +func openFile(b *testing.B, tempFile string) (*os.File, func()) { + file, err := os.Open(tempFile) + require.NoErrorf(b, err, "error opening temp file: %v", err) + cleanup := func() { + err := file.Close() + require.NoErrorf(b, err, "error closing temp file: %v", err) + } + return file, cleanup +} + +func makeTempFile(b *testing.B, tempDir string, tc testCase, data []byte, +) string { + tempFile := filepath.Join(tempDir, fmt.Sprintf("%s.dat", tc.name)) + err := os.WriteFile(tempFile, data, 0o644) + require.NoErrorf(b, err, "error writing temp file: %v", err) + return tempFile +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_serialization_inverted.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_serialization_inverted.go new file mode 100644 index 0000000000000000000000000000000000000000..c77f691425077c2ce0c1d687f82b7d5451266514 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_serialization_inverted.go @@ -0,0 +1,449 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "encoding/binary" + "io" + "math" + + "github.com/pkg/errors" + "github.com/weaviate/sroar" + "github.com/weaviate/weaviate/adapters/repos/db/inverted/terms" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv/segmentindex" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv/varenc" +) + +var defaultAveragePropLength = float64(40.0) + +func extractTombstones(nodes []MapPair) (*sroar.Bitmap, []MapPair) { + out := sroar.NewBitmap() + values := make([]MapPair, 0, len(nodes)) + + for _, n := range nodes { + if n.Tombstone { + id := binary.BigEndian.Uint64(n.Key) + out.Set(id) + } else { + values = append(values, n) + } + } + + return out, values +} + +func packedEncode(docIds, termFreqs []uint64, deltaEnc, tfEnc varenc.VarEncEncoder[uint64]) *terms.BlockData { + deltaEnc.Init(len(docIds)) + docIdsPacked := deltaEnc.Encode(docIds) + + tfEnc.Init(len(termFreqs)) + termFreqsPacked := tfEnc.Encode(termFreqs) + + return &terms.BlockData{ + DocIds: docIdsPacked, + Tfs: termFreqsPacked, + } +} + +func packedDecode(values *terms.BlockData, numValues int, deltaEnc, tfEnc varenc.VarEncEncoder[uint64]) ([]uint64, []uint64) { + deltaEnc.Init(numValues) + docIds := deltaEnc.Decode(values.DocIds) + + tfEnc.Init(numValues) + termFreqs := tfEnc.Decode(values.Tfs) + return docIds, termFreqs +} + +func encodeBlockParam(nodes []MapPair, deltaEnc, tfEnc varenc.VarEncEncoder[uint64]) *terms.BlockData { + docIds := make([]uint64, len(nodes)) + termFreqs := make([]uint64, len(nodes)) + + for i, n := range nodes { + docIds[i] = binary.BigEndian.Uint64(n.Key) + termFreqs[i] = uint64(math.Float32frombits(binary.LittleEndian.Uint32(n.Value[0:4]))) + // propLengths[i] = uint64(math.Float32frombits(binary.LittleEndian.Uint32(n.Value[4:8]))) + } + + packed := packedEncode(docIds, termFreqs, deltaEnc, tfEnc) + + return packed +} + +func createBlocks(nodes []MapPair, propLengths map[uint64]uint32, deltaEnc, tfEnc 
varenc.VarEncEncoder[uint64], k1, b, avgPropLen float64) ([]*terms.BlockEntry, []*terms.BlockData, *sroar.Bitmap, map[uint64]uint32) { + tombstones, values := extractTombstones(nodes) + externalPropLengths := len(propLengths) != 0 + + blockCount := (len(values) + (terms.BLOCK_SIZE - 1)) / terms.BLOCK_SIZE + + blockMetadata := make([]*terms.BlockEntry, blockCount) + blockDataEncoded := make([]*terms.BlockData, blockCount) + + offset := uint32(0) + + for i := 0; i < blockCount; i++ { + start := i * terms.BLOCK_SIZE + end := start + terms.BLOCK_SIZE + if end > len(values) { + end = len(values) + } + maxImpact := float64(0) + MaxImpactTf := uint32(0) + MaxImpactPropLength := uint32(0) + + for j := start; j < end; j++ { + tf := float64(math.Float32frombits(binary.LittleEndian.Uint32(values[j].Value[0:4]))) + pl := float64(math.Float32frombits(binary.LittleEndian.Uint32(values[j].Value[4:8]))) + docId := binary.BigEndian.Uint64(values[j].Key) + if externalPropLengths { + pl = float64(propLengths[docId]) + } else { + propLengths[docId] = uint32(pl) + } + + impact := tf / (tf + k1*(1-b+b*(pl/avgPropLen))) + + if impact > maxImpact { + maxImpact = impact + MaxImpactTf = uint32(tf) + MaxImpactPropLength = uint32(pl) + } + } + + maxId := binary.BigEndian.Uint64(values[end-1].Key) + blockDataEncoded[i] = encodeBlockParam(values[start:end], deltaEnc, tfEnc) + + blockMetadata[i] = &terms.BlockEntry{ + MaxId: maxId, + Offset: offset, + MaxImpactTf: MaxImpactTf, + MaxImpactPropLength: MaxImpactPropLength, + } + + offset += uint32(blockDataEncoded[i].Size()) + } + + return blockMetadata, blockDataEncoded, tombstones, propLengths +} + +func encodeBlocks(blockEntries []*terms.BlockEntry, blockDatas []*terms.BlockData, docCount uint64) []byte { + length := 0 + for i := range blockDatas { + length += blockDatas[i].Size() + blockEntries[i].Size() + } + out := make([]byte, length+8+8) + binary.LittleEndian.PutUint64(out, docCount) + offset := 8 + + 
binary.LittleEndian.PutUint64(out[offset:], uint64(length)) + offset += 8 + + for _, blockEntry := range blockEntries { + copy(out[offset:], blockEntry.Encode()) + offset += blockEntry.Size() + } + for _, blockData := range blockDatas { + // write the block data + copy(out[offset:], blockData.Encode()) + offset += blockData.Size() + } + + return out +} + +func createAndEncodeSingleValue(mapPairs []MapPair, propLengths map[uint64]uint32) ([]byte, *sroar.Bitmap) { + tombstones := sroar.NewBitmap() + buffer := make([]byte, 8+12*len(mapPairs)) + offset := 0 + binary.LittleEndian.PutUint64(buffer, uint64(len(mapPairs))) + offset += 8 + for i := 0; i < len(mapPairs); i++ { + if mapPairs[i].Tombstone { + id := binary.BigEndian.Uint64(mapPairs[i].Key) + tombstones.Set(id) + } + copy(buffer[offset:offset+8], mapPairs[i].Key) + copy(buffer[offset+8:offset+12], mapPairs[i].Value) + + offset += 12 + } + return buffer[:offset], tombstones +} + +func createAndEncodeBlocksTest(nodes []MapPair, propLengths map[uint64]uint32, encodeSingleSeparate int, deltaEnc, tfEnc varenc.VarEncEncoder[uint64], k1, b, avgPropLen float64) ([]byte, *sroar.Bitmap) { + if len(nodes) <= encodeSingleSeparate { + return createAndEncodeSingleValue(nodes, propLengths) + } + blockEntries, blockDatas, tombstones, _ := createBlocks(nodes, propLengths, deltaEnc, tfEnc, k1, b, avgPropLen) + return encodeBlocks(blockEntries, blockDatas, uint64(len(nodes))), tombstones +} + +func createAndEncodeBlocksWithLengths(nodes []MapPair, deltaEnc, tfEnc varenc.VarEncEncoder[uint64], k1, b, avgPropLen float64) ([]byte, *sroar.Bitmap) { + propLengths := make(map[uint64]uint32) + return createAndEncodeBlocksTest(nodes, propLengths, terms.ENCODE_AS_FULL_BYTES, deltaEnc, tfEnc, k1, b, avgPropLen) +} + +func createAndEncodeBlocks(nodes []MapPair, propLengths map[uint64]uint32, deltaEnc, tfEnc varenc.VarEncEncoder[uint64], k1, b, avgPropLen float64) ([]byte, *sroar.Bitmap) { + return createAndEncodeBlocksTest(nodes, 
propLengths, terms.ENCODE_AS_FULL_BYTES, deltaEnc, tfEnc, k1, b, avgPropLen) +} + +func decodeBlocks(data []byte) ([]*terms.BlockEntry, []*terms.BlockData, int) { + offset := 0 + docCount := int(binary.LittleEndian.Uint64(data)) + offset += 16 + + // calculate the number of blocks by dividing the number of documents by the block size and rounding up + blockCount := (docCount + (terms.BLOCK_SIZE - 1)) / terms.BLOCK_SIZE + + blockEntries := make([]*terms.BlockEntry, blockCount) + blockDatas := make([]*terms.BlockData, blockCount) + + blockDataInitialOffset := offset + blockCount*(terms.BlockEntry{}.Size()) + + for i := 0; i < blockCount; i++ { + blockEntries[i] = terms.DecodeBlockEntry(data[offset:]) + dataOffset := int(blockEntries[i].Offset) + blockDataInitialOffset + blockDatas[i] = terms.DecodeBlockData(data[dataOffset:]) + offset += blockEntries[i].Size() + } + dataOffset := int(blockEntries[blockCount-1].Offset) + blockDataInitialOffset + blockDatas[blockCount-1].Size() + + return blockEntries, blockDatas, dataOffset +} + +func decodeAndConvertValuesFromBlocks(data []byte) ([]value, int) { + return decodeAndConvertValuesFromBlocksTest(data, terms.ENCODE_AS_FULL_BYTES, &varenc.VarIntDeltaEncoder{}, &varenc.VarIntEncoder{}) +} + +func decodeAndConvertValuesFromBlocksTest(data []byte, encodeSingleSeparate int, deltaEnc, tfEnc varenc.VarEncEncoder[uint64]) ([]value, int) { + collectionSize := binary.LittleEndian.Uint64(data) + + if collectionSize <= uint64(encodeSingleSeparate) { + values := make([]value, 0, collectionSize) + offset := 8 + for i := 0; i < int(collectionSize*16); i += 16 { + val := make([]byte, 16) + copy(val, data[offset:offset+16]) + values = append(values, value{ + value: val, + tombstone: false, + }) + offset += 16 + } + return values, offset + } + blockEntries, blockDatas, offset := decodeBlocks(data) + return convertFromBlocksValue(blockEntries, blockDatas, collectionSize, deltaEnc, tfEnc), offset +} + +func decodeAndConvertFromBlocks(data 
[]byte) ([]MapPair, int) { + return decodeAndConvertFromBlocksTest(data, terms.ENCODE_AS_FULL_BYTES, &varenc.VarIntDeltaEncoder{}, &varenc.VarIntEncoder{}) +} + +func decodeAndConvertFromBlocksTest(data []byte, encodeSingleSeparate int, deltaEnc, tfEnc varenc.VarEncEncoder[uint64]) ([]MapPair, int) { + collectionSize := binary.LittleEndian.Uint64(data) + + if collectionSize <= uint64(encodeSingleSeparate) { + values := make([]MapPair, 0, collectionSize) + offset := 8 + for i := 0; i < int(collectionSize*16); i += 16 { + key := make([]byte, 8) + copy(key, data[offset:offset+8]) + value := make([]byte, 8) + copy(value, data[offset+8:offset+12]) + values = append(values, MapPair{ + Key: key, + Value: value, + }) + offset += 16 + } + return values, offset + } + blockEntries, blockDatas, offset := decodeBlocks(data) + return convertFromBlocks(blockEntries, blockDatas, collectionSize, deltaEnc, tfEnc), offset +} + +func convertFromBlocksValue(blockEntries []*terms.BlockEntry, encodedBlocks []*terms.BlockData, objectCount uint64, deltaEnc, tfEnc varenc.VarEncEncoder[uint64]) []value { + out := make([]value, 0, objectCount) + + for i := range blockEntries { + + blockSize := uint64(terms.BLOCK_SIZE) + if i == len(blockEntries)-1 { + blockSize = objectCount - uint64(terms.BLOCK_SIZE)*uint64(i) + } + blockSizeInt := int(blockSize) + + docIds, tfs := packedDecode(encodedBlocks[i], blockSizeInt, deltaEnc, tfEnc) + + for j := 0; j < blockSizeInt; j++ { + docId := docIds[j] + tf := float32(tfs[j]) + // pl := float32(propLengths[j]) + + val := make([]byte, 16) + binary.BigEndian.PutUint64(val, docId) + binary.LittleEndian.PutUint32(val[8:], math.Float32bits(tf)) + // binary.LittleEndian.PutUint32(value[4:], math.Float32bits(pl)) + + out = append(out, value{ + value: val, + tombstone: false, + }) + } + } + return out +} + +func convertFromBlocks(blockEntries []*terms.BlockEntry, encodedBlocks []*terms.BlockData, objectCount uint64, deltaEnc, tfEnc varenc.VarEncEncoder[uint64]) 
[]MapPair { + out := make([]MapPair, 0, objectCount) + + for i := range blockEntries { + + blockSize := uint64(terms.BLOCK_SIZE) + if i == len(blockEntries)-1 { + blockSize = objectCount - uint64(terms.BLOCK_SIZE)*uint64(i) + } + blockSizeInt := int(blockSize) + + docIds, tfs := packedDecode(encodedBlocks[i], blockSizeInt, deltaEnc, tfEnc) + + for j := 0; j < blockSizeInt; j++ { + docId := docIds[j] + tf := float32(tfs[j]) + // pl := float32(propLengths[j]) + + key := make([]byte, 8) + binary.BigEndian.PutUint64(key, docId) + + value := make([]byte, 8) + binary.LittleEndian.PutUint32(value, math.Float32bits(tf)) + // binary.LittleEndian.PutUint32(value[4:], math.Float32bits(pl)) + + out = append(out, MapPair{ + Key: key, + Value: value, + }) + } + } + return out +} + +func convertFixedLengthFromMemory(data []byte, blockSize int) *terms.BlockDataDecoded { + out := &terms.BlockDataDecoded{ + DocIds: make([]uint64, blockSize), + Tfs: make([]uint64, blockSize), + } + offset := 8 + i := 0 + for offset < len(data) { + out.DocIds[i] = binary.BigEndian.Uint64(data[offset : offset+8]) + out.Tfs[i] = uint64(math.Float32frombits(binary.LittleEndian.Uint32(data[offset+8 : offset+12]))) + offset += 16 + i++ + } + return out +} + +// a single node of strategy "inverted" +type segmentInvertedNode struct { + values []MapPair + primaryKey []byte + offset int + propLengths map[uint64]uint32 +} + +var invPayloadLen = 16 + +func (s segmentInvertedNode) KeyIndexAndWriteTo(w io.Writer, deltaEnc, tfEnc varenc.VarEncEncoder[uint64], k1, b, avgPropLen float64) (segmentindex.Key, error) { + out := segmentindex.Key{} + written := 0 + buf := make([]byte, 8) // uint64 size + + blocksEncoded, _ := createAndEncodeBlocks(s.values, s.propLengths, deltaEnc, tfEnc, k1, b, avgPropLen) + n, err := w.Write(blocksEncoded) + if err != nil { + return out, errors.Wrapf(err, "write values for node") + } + written += n + + keyLength := uint32(len(s.primaryKey)) + binary.LittleEndian.PutUint32(buf[0:4], 
keyLength)
	if _, err := w.Write(buf[0:4]); err != nil {
		return out, errors.Wrapf(err, "write key length encoding for node")
	}
	written += 4

	n, err = w.Write(s.primaryKey)
	if err != nil {
		return out, errors.Wrapf(err, "write node")
	}
	written += n

	out = segmentindex.Key{
		ValueStart: s.offset,
		ValueEnd:   s.offset + written,
		Key:        s.primaryKey,
	}

	return out, nil
}

// ParseInvertedNode reads from r and parses the Inverted values into a segmentCollectionNode
//
// When only given an offset, r is constructed as a *bufio.Reader to avoid first reading the
// entire segment (could be GBs). Each consecutive read will be buffered to avoid excessive
// syscalls.
//
// When we already have a finite and manageable []byte (i.e. when we have already seeked to an
// lsmkv node and have start+end offset), r should be constructed as a *bytes.Reader, since the
// contents have already been `pread` from the segment contentFile.
func ParseInvertedNode(r io.Reader) (segmentCollectionNode, error) {
	out := segmentCollectionNode{}
	// 24-byte fixed prefix: doc count at [0:8), encoded-blocks byte length at
	// [8:16) (the length is only consulted on the block-encoded path below).
	buffer := make([]byte, 24)

	if _, err := io.ReadFull(r, buffer); err != nil {
		return out, errors.Wrap(err, "read values len")
	}
	out.offset = 24
	docCount := binary.LittleEndian.Uint64(buffer[:8])
	allBytes := buffer
	if docCount > uint64(terms.ENCODE_AS_FULL_BYTES) {
		// +4 to also pull in the trailing key-length field
		toRead := binary.LittleEndian.Uint64(buffer[8:16]) + 4
		bufferSize := 24 + toRead
		allBytes = make([]byte, bufferSize)
		copy(allBytes, buffer)
		// BUG FIX: a bare r.Read may legally return fewer bytes than
		// requested without an error (io.Reader contract), which would
		// silently leave allBytes partially filled. io.ReadFull guarantees
		// the buffer is filled or an error is returned.
		if _, err := io.ReadFull(r, allBytes[24:]); err != nil {
			return out, errors.Wrap(err, "read encoded blocks")
		}
		out.offset += int(toRead)
	}

	nodes, _ := decodeAndConvertValuesFromBlocks(allBytes)

	keyLen := binary.LittleEndian.Uint32(allBytes[len(allBytes)-4:])

	key := make([]byte, keyLen)

	if keyLen > 0 {
		// BUG FIX: same as above — use io.ReadFull so a short read cannot
		// silently produce a truncated primary key.
		if _, err := io.ReadFull(r, key); err != nil {
			return out, errors.Wrap(err, "read primary key")
		}
	}

	out.offset += int(keyLen)
	out.primaryKey = key
	out.values = nodes

	return out, nil
}
diff --git
a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_serialization_inverted_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_serialization_inverted_test.go new file mode 100644 index 0000000000000000000000000000000000000000..d92aaa9439fb74df5add1370b6b341a842fe7a9e --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_serialization_inverted_test.go @@ -0,0 +1,59 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +/* +func Test_SerializeAndParseInvertedNode(t *testing.T) { + tfs := []float32{2.0, 3.0, 4.0} + lens := []uint32{1, 2, 3} + + values := make([]value, len(tfs)) + + for i := range tfs { + buf := make([]byte, invPayloadLen) + binary.LittleEndian.PutUint64(buf[0:8], uint64(i)) + binary.LittleEndian.PutUint32(buf[8:12], math.Float32bits(tfs[i])) + binary.LittleEndian.PutUint32(buf[12:16], lens[i]) + + values[i] = value{value: buf} + } + + before := segmentInvertedNode{ + primaryKey: []byte("this-is-my-primary-key"), + values: values, + } + + buf := &bytes.Buffer{} + _, err := before.KeyIndexAndWriteTo(buf) + require.Nil(t, err) + encoded := buf.Bytes() + + expected := segmentCollectionNode{ + primaryKey: []byte("this-is-my-primary-key"), + values: values, + offset: buf.Len(), + } + + t.Run("parse using the _regular_ way", func(t *testing.T) { + after, err := ParseInvertedNode(bytes.NewReader(encoded)) + assert.Nil(t, err) + assert.Equal(t, expected, after) + }) + + t.Run("parse using the reusable way", func(t *testing.T) { + var node segmentCollectionNode + err := ParseInvertedNodeInto(bytes.NewReader(encoded), &node) + assert.Nil(t, err) + assert.Equal(t, expected, node) + }) +} +*/ diff --git 
a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_serialization_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_serialization_test.go new file mode 100644 index 0000000000000000000000000000000000000000..643d35b40fe2fda72e5b45cc95aea232c1db31c4 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segment_serialization_test.go @@ -0,0 +1,61 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_SerializeAndParseCollectionNode(t *testing.T) { + before := segmentCollectionNode{ + primaryKey: []byte("this-is-my-primary-key"), + values: []value{{ + value: []byte("the-first-value"), + }, { + value: []byte("the-second-value-with-a-tombstone"), + tombstone: true, + }}, + } + + buf := &bytes.Buffer{} + _, err := before.KeyIndexAndWriteTo(buf) + require.Nil(t, err) + encoded := buf.Bytes() + + expected := segmentCollectionNode{ + primaryKey: []byte("this-is-my-primary-key"), + values: []value{{ + value: []byte("the-first-value"), + }, { + value: []byte("the-second-value-with-a-tombstone"), + tombstone: true, + }}, + offset: buf.Len(), + } + + t.Run("parse using the _regular_ way", func(t *testing.T) { + after, err := ParseCollectionNode(bytes.NewReader(encoded)) + assert.Nil(t, err) + assert.Equal(t, expected, after) + }) + + t.Run("parse using the reusable way", func(t *testing.T) { + var node segmentCollectionNode + err := ParseCollectionNodeInto(bytes.NewReader(encoded), &node) + assert.Nil(t, err) + assert.Equal(t, expected, node) + }) +} diff --git 
a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segmentindex/balanced_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segmentindex/balanced_test.go new file mode 100644 index 0000000000000000000000000000000000000000..ff5264b6ded7cc8ab055096e62889aceb24908a4 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segmentindex/balanced_test.go @@ -0,0 +1,67 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package segmentindex + +import ( + "crypto/rand" + "fmt" + "math" + "math/big" + "testing" + + "github.com/stretchr/testify/assert" +) + +func mustRandUint64() uint64 { + randInt, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt64)) + if err != nil { + panic(fmt.Sprintf("mustRandUint64 error: %v", err)) + } + return randInt.Uint64() +} + +func TestBuildBalancedTree(t *testing.T) { + size := 2000 + idealHeight := int(math.Ceil(math.Log2(float64(size)))) + fmt.Printf("ideal height would be %d\n", idealHeight) + + nodes := make([]Node, size) + var tree Tree + + t.Run("generate random data", func(t *testing.T) { + for i := range nodes { + nodes[i].Key = make([]byte, 8) + rand.Read(nodes[i].Key) + + nodes[i].Start = mustRandUint64() + nodes[i].End = mustRandUint64() + } + }) + + t.Run("insert", func(t *testing.T) { + tree = NewBalanced(nodes) + }) + + t.Run("check height", func(t *testing.T) { + assert.Equal(t, idealHeight, tree.Height()) + }) + + t.Run("check values", func(t *testing.T) { + for _, control := range nodes { + k, s, e := tree.Get(control.Key) + + assert.Equal(t, control.Key, k) + assert.Equal(t, control.Start, s) + assert.Equal(t, control.End, e) + } + }) +} diff --git 
a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segmentindex/disk_tree.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segmentindex/disk_tree.go new file mode 100644 index 0000000000000000000000000000000000000000..483ca8446081a1949b92cd6bdd076187554f4e94 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segmentindex/disk_tree.go @@ -0,0 +1,205 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package segmentindex + +import ( + "bytes" + "errors" + "fmt" + "io" + + "github.com/weaviate/weaviate/entities/lsmkv" + "github.com/weaviate/weaviate/usecases/byteops" +) + +// DiskTree is a read-only wrapper around a marshalled index search tree, which +// can be used for reading, but cannot change the underlying structure. It is +// thus perfectly suited as an index for an (immutable) LSM disk segment, but +// pretty much useless for anything else +type DiskTree struct { + data []byte +} + +type dtNode struct { + key []byte + startPos uint64 + endPos uint64 + leftChild int64 + rightChild int64 +} + +func NewDiskTree(data []byte) *DiskTree { + return &DiskTree{ + data: data, + } +} + +func (t *DiskTree) Get(key []byte) (Node, error) { + if len(t.data) == 0 { + return Node{}, lsmkv.NotFound + } + var out Node + rw := byteops.NewReadWriter(t.data) + + // jump to the buffer until the node with _key_ is found or return a NotFound error. + // This function avoids allocations by reusing the same buffer for all keys and avoids memory reads by only + // extracting the necessary pieces of information while skipping the rest + NodeKeyBuffer := make([]byte, len(key)) + for { + // detect if there is no node with the wanted key. 
+ if rw.Position+4 > uint64(len(t.data)) || rw.Position+4 < 4 { + return out, lsmkv.NotFound + } + + keyLen := rw.ReadUint32() + if int(keyLen) > len(NodeKeyBuffer) { + NodeKeyBuffer = make([]byte, int(keyLen)) + } else if int(keyLen) < len(NodeKeyBuffer) { + NodeKeyBuffer = NodeKeyBuffer[:keyLen] + } + _, err := rw.CopyBytesFromBuffer(uint64(keyLen), NodeKeyBuffer) + if err != nil { + return out, fmt.Errorf("copy node key: %w", err) + } + + keyEqual := bytes.Compare(key, NodeKeyBuffer) + if keyEqual == 0 { + out.Key = NodeKeyBuffer + out.Start = rw.ReadUint64() + out.End = rw.ReadUint64() + return out, nil + } else if keyEqual < 0 { + rw.MoveBufferPositionForward(2 * 8) // jump over start+end position + rw.Position = rw.ReadUint64() // left child + } else { + rw.MoveBufferPositionForward(3 * 8) // jump over start+end position and left child + rw.Position = rw.ReadUint64() // right child + } + } +} + +func (t *DiskTree) readNodeAt(offset int64) (dtNode, error) { + retNode, _, err := t.readNode(t.data[offset:]) + return retNode, err +} + +func (t *DiskTree) readNode(in []byte) (dtNode, int, error) { + var out dtNode + // in buffer needs at least 36 bytes of data: + // 4bytes for key length, 32bytes for position and children + if len(in) < 36 { + return out, 0, io.EOF + } + + rw := byteops.NewReadWriter(in) + + keyLen := uint64(rw.ReadUint32()) + copiedBytes, err := rw.CopyBytesFromBuffer(keyLen, nil) + if err != nil { + return out, int(rw.Position), fmt.Errorf("copy node key: %w", err) + } + out.key = copiedBytes + + out.startPos = rw.ReadUint64() + out.endPos = rw.ReadUint64() + out.leftChild = int64(rw.ReadUint64()) + out.rightChild = int64(rw.ReadUint64()) + return out, int(rw.Position), nil +} + +func (t *DiskTree) Seek(key []byte) (Node, error) { + if len(t.data) == 0 { + return Node{}, lsmkv.NotFound + } + + return t.seekAt(0, key, true) +} + +func (t *DiskTree) Next(key []byte) (Node, error) { + if len(t.data) == 0 { + return Node{}, lsmkv.NotFound + } + + 
return t.seekAt(0, key, false) +} + +func (t *DiskTree) seekAt(offset int64, key []byte, includingKey bool) (Node, error) { + node, err := t.readNodeAt(offset) + if err != nil { + return Node{}, err + } + + self := Node{ + Key: node.key, + Start: node.startPos, + End: node.endPos, + } + + if includingKey && bytes.Equal(key, node.key) { + return self, nil + } + + if bytes.Compare(key, node.key) < 0 { + if node.leftChild < 0 { + return self, nil + } + + left, err := t.seekAt(node.leftChild, key, includingKey) + if err == nil { + return left, nil + } + + if errors.Is(err, lsmkv.NotFound) { + return self, nil + } + + return Node{}, err + } else { + if node.rightChild < 0 { + return Node{}, lsmkv.NotFound + } + + return t.seekAt(node.rightChild, key, includingKey) + } +} + +// AllKeys is a relatively expensive operation as it basically does a full disk +// read of the index. It is meant for one of operations, such as initializing a +// segment where we need access to all keys, e.g. to build a bloom filter. This +// should not run at query time. +// +// The binary tree is traversed in Level-Order so keys have no meaningful +// order. Do not use this method if an In-Order traversal is required, but only +// for use cases who don't require a specific order, such as building a +// bloom filter. 
+func (t *DiskTree) AllKeys() ([][]byte, error) { + var out [][]byte + bufferPos := 0 + for { + node, readLength, err := t.readNode(t.data[bufferPos:]) + bufferPos += readLength + if errors.Is(err, io.EOF) { + break + } + if err != nil { + return nil, err + } + + out = append(out, node.key) + } + + return out, nil +} + +func (t *DiskTree) Size() int { + return len(t.data) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segmentindex/header.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segmentindex/header.go new file mode 100644 index 0000000000000000000000000000000000000000..cb93da49ba38040a6b071d0b14a23ebed6a90629 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segmentindex/header.go @@ -0,0 +1,134 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package segmentindex + +import ( + "bufio" + "bytes" + "encoding/binary" + "fmt" + "io" + + "github.com/weaviate/weaviate/usecases/byteops" +) + +const ( + // HeaderSize describes the general offset in a segment until the data + // starts, it is composed of 2 bytes for level, 2 bytes for version, + // 2 bytes for secondary index count, 2 bytes for strategy, 8 bytes + // for the pointer to the index part + HeaderSize = 16 + + // ChecksumSize describes the length of the segment file checksum. + // This is currently based on the CRC32 hashing algorithm. 
+ ChecksumSize = 4 +) + +type Header struct { + Level uint16 + Version uint16 + SecondaryIndices uint16 + Strategy Strategy + IndexStart uint64 +} + +func (h *Header) WriteTo(w io.Writer) (int64, error) { + data := make([]byte, HeaderSize) + rw := byteops.NewReadWriter(data) + rw.WriteUint16(h.Level) + rw.WriteUint16(h.Version) + rw.WriteUint16(h.SecondaryIndices) + rw.WriteUint16(uint16(h.Strategy)) + rw.WriteUint64(h.IndexStart) + + write, err := w.Write(data) + if err != nil { + return 0, err + } + if write != HeaderSize { + return 0, fmt.Errorf("expected to write %d bytes, got %d", HeaderSize, write) + } + + return int64(HeaderSize), nil +} + +func (h *Header) PrimaryIndex(source []byte) ([]byte, error) { + if h.SecondaryIndices == 0 { + return source[h.IndexStart:], nil + } + + offsets, err := h.parseSecondaryIndexOffsets( + source[h.IndexStart:h.secondaryIndexOffsetsEnd()]) + if err != nil { + return nil, err + } + + // the beginning of the first secondary is also the end of the primary + end := offsets[0] + return source[h.secondaryIndexOffsetsEnd():end], nil +} + +func (h *Header) secondaryIndexOffsetsEnd() uint64 { + return h.IndexStart + (uint64(h.SecondaryIndices) * 8) +} + +func (h *Header) parseSecondaryIndexOffsets(source []byte) ([]uint64, error) { + r := bufio.NewReader(bytes.NewReader(source)) + + offsets := make([]uint64, h.SecondaryIndices) + if err := binary.Read(r, binary.LittleEndian, &offsets); err != nil { + return nil, err + } + + return offsets, nil +} + +func (h *Header) SecondaryIndex(source []byte, indexID uint16) ([]byte, error) { + if indexID >= h.SecondaryIndices { + return nil, fmt.Errorf("retrieve index %d with len %d", + indexID, h.SecondaryIndices) + } + + offsets, err := h.parseSecondaryIndexOffsets( + source[h.IndexStart:h.secondaryIndexOffsetsEnd()]) + if err != nil { + return nil, err + } + + start := offsets[indexID] + if indexID == h.SecondaryIndices-1 { + // this is the last index, return until EOF + return source[start:], 
nil + } + + end := offsets[indexID+1] + return source[start:end], nil +} + +func ParseHeader(data []byte) (*Header, error) { + if len(data) != HeaderSize { + return nil, fmt.Errorf("expected %d bytes, got %d", HeaderSize, len(data)) + } + rw := byteops.NewReadWriter(data) + out := &Header{} + out.Level = rw.ReadUint16() + out.Version = rw.ReadUint16() + out.SecondaryIndices = rw.ReadUint16() + out.Strategy = Strategy(rw.ReadUint16()) + out.IndexStart = rw.ReadUint64() + + if out.Version > CurrentSegmentVersion { + return nil, fmt.Errorf("unsupported version %d", out.Version) + } + + return out, nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segmentindex/header_inverted.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segmentindex/header_inverted.go new file mode 100644 index 0000000000000000000000000000000000000000..d39b412f56e5264f9c51d349ef61b308382c368f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segmentindex/header_inverted.go @@ -0,0 +1,131 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package segmentindex + +import ( + "encoding/binary" + "io" + + "github.com/weaviate/weaviate/adapters/repos/db/inverted/terms" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv/varenc" +) + +var ( + SegmentInvertedDefaultHeaderSize = 27 + SegmentInvertedDefaultBlockSize = terms.BLOCK_SIZE + SegmentInvertedDefaultFieldCount = 2 +) + +const HeaderInvertedSize = 29 // 27 + 2 bytes for data field count + +type HeaderInverted struct { + KeysOffset uint64 + TombstoneOffset uint64 + PropertyLengthsOffset uint64 + Version uint8 + BlockSize uint8 + DataFieldCount uint8 + DataFields []varenc.VarEncDataType +} + +func LoadHeaderInverted(headerBytes []byte) (*HeaderInverted, error) { + header := &HeaderInverted{} + + header.KeysOffset = binary.LittleEndian.Uint64(headerBytes[0:8]) + header.TombstoneOffset = binary.LittleEndian.Uint64(headerBytes[8:16]) + header.PropertyLengthsOffset = binary.LittleEndian.Uint64(headerBytes[16:24]) + header.Version = headerBytes[24] + header.BlockSize = headerBytes[25] + header.DataFieldCount = headerBytes[26] + + header.DataFields = make([]varenc.VarEncDataType, header.DataFieldCount) + for i, b := range headerBytes[27:] { + header.DataFields[i] = varenc.VarEncDataType(b) + } + + return header, nil +} + +func (h *HeaderInverted) WriteTo(w io.Writer) (int64, error) { + if err := binary.Write(w, binary.LittleEndian, h.KeysOffset); err != nil { + return 0, err + } + + if err := binary.Write(w, binary.LittleEndian, h.TombstoneOffset); err != nil { + return 0, err + } + + if err := binary.Write(w, binary.LittleEndian, h.PropertyLengthsOffset); err != nil { + return 0, err + } + + if err := binary.Write(w, binary.LittleEndian, h.Version); err != nil { + return 0, err + } + + if err := binary.Write(w, binary.LittleEndian, h.BlockSize); err != nil { + return 0, err + } + + if err := binary.Write(w, binary.LittleEndian, h.DataFieldCount); err != nil { + return 0, err + } + + for _, df := range 
h.DataFields { + if err := binary.Write(w, binary.LittleEndian, df); err != nil { + return 0, err + } + } + + return int64(SegmentInvertedDefaultHeaderSize + len(h.DataFields)), nil +} + +func ParseHeaderInverted(r io.Reader) (*HeaderInverted, error) { + out := &HeaderInverted{} + + if err := binary.Read(r, binary.LittleEndian, &out.KeysOffset); err != nil { + return nil, err + } + + if err := binary.Read(r, binary.LittleEndian, &out.TombstoneOffset); err != nil { + return nil, err + } + + if err := binary.Read(r, binary.LittleEndian, &out.PropertyLengthsOffset); err != nil { + return nil, err + } + + if err := binary.Read(r, binary.LittleEndian, &out.Version); err != nil { + return nil, err + } + + if err := binary.Read(r, binary.LittleEndian, &out.BlockSize); err != nil { + return nil, err + } + + if err := binary.Read(r, binary.LittleEndian, &out.DataFieldCount); err != nil { + return nil, err + } + + out.DataFields = make([]varenc.VarEncDataType, out.DataFieldCount) + + for i := 0; i < int(out.DataFieldCount); i++ { + var b byte + if err := binary.Read(r, binary.LittleEndian, &b); err != nil { + return nil, err + } + + out.DataFields[i] = varenc.VarEncDataType(b) + } + + return out, nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segmentindex/header_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segmentindex/header_test.go new file mode 100644 index 0000000000000000000000000000000000000000..f3e199ef28f13feef515eab72cfd44c037dd3436 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segmentindex/header_test.go @@ -0,0 +1,46 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package segmentindex + +import ( + "os" + "testing" + + "github.com/stretchr/testify/require" +) + +func BenchmarkParseHeader(b *testing.B) { + data := []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + require.Len(b, data, HeaderSize) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := ParseHeader(data) + require.NoError(b, err) + } +} + +func BenchmarkWriteHeader(b *testing.B) { + header := Header{ + Version: 1, + Level: 1, + SecondaryIndices: 35, + Strategy: StrategyReplace, + IndexStart: 234, + } + path := b.TempDir() + b.ResetTimer() + for i := 0; i < b.N; i++ { + f, err := os.Create(path + "/test.tmp") + require.NoError(b, err) + header.WriteTo(f) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segmentindex/header_version.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segmentindex/header_version.go new file mode 100644 index 0000000000000000000000000000000000000000..0248987df70049fb6aac7a50ff4b9f6c4bb35ebf --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segmentindex/header_version.go @@ -0,0 +1,29 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package segmentindex + +const ( + // SegmentV1 is the current latest version, and introduced support + // for integrity checks with checksums added to the segment files. + SegmentV1 = uint16(1) + + // CurrentSegmentVersion is used to ensure that the parsed header + // version does not exceed the highest valid version. 
+ CurrentSegmentVersion = SegmentV1 +) + +func ChooseHeaderVersion(checksumsEnabled bool) uint16 { + if !checksumsEnabled { + return 0 + } + return CurrentSegmentVersion +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segmentindex/indexes.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segmentindex/indexes.go new file mode 100644 index 0000000000000000000000000000000000000000..f46f9bb2fc17a946e752ad9b32538bd3738c76a0 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segmentindex/indexes.go @@ -0,0 +1,328 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package segmentindex + +import ( + "bufio" + "encoding/binary" + "fmt" + "io" + "os" + "path/filepath" + + "github.com/prometheus/client_golang/prometheus" + "github.com/weaviate/weaviate/entities/diskio" + "github.com/weaviate/weaviate/usecases/byteops" + "github.com/weaviate/weaviate/usecases/memwatch" + + "github.com/pkg/errors" +) + +type Indexes struct { + Keys []Key + SecondaryIndexCount uint16 + ScratchSpacePath string + ObserveWrite prometheus.Observer + AllocChecker memwatch.AllocChecker +} + +const WriteToMemoryMaxSize = 10 * 1024 * 1024 + +func (s *Indexes) WriteTo(w io.Writer, expectedSize uint64) (int64, error) { + if s.SecondaryIndexCount == 0 { + // In case there are no secondary indices present, we can write the primary index directly to the writer without + // all the extra steps + return s.buildAndMarshalPrimary(w, s.Keys) + } + + // the expectedSize is only used to decide if we should use the more efficient (but memory-intensive) in-Memory code path + // or the one with scratch files which is less efficient (more write operations) but can handle any size + if expectedSize < 
WriteToMemoryMaxSize && (s.AllocChecker == nil || s.AllocChecker.CheckAlloc(int64(expectedSize)) == nil) { + return s.writeToMemory(w) + } else { + return s.writeToScratchFiles(w) + } +} + +func (s *Indexes) writeToMemory(w io.Writer) (int64, error) { + var currentOffset uint64 = HeaderSize + if len(s.Keys) > 0 { + currentOffset = uint64(s.Keys[len(s.Keys)-1].ValueEnd) + } + secondaryIndexCountSize := uint64(s.SecondaryIndexCount) * byteops.Uint64Len + currentOffset += secondaryIndexCountSize + + primaryIndex := s.buildPrimary(s.Keys) + currentOffset += uint64(primaryIndex.Size()) + + offsetSecondaryStart := currentOffset + secondaryIndexSize := uint64(0) + var secondaryTrees []*Tree + if s.SecondaryIndexCount > 0 { + secondaryTrees = make([]*Tree, s.SecondaryIndexCount) + for pos := range secondaryTrees { + secondary, err := s.buildSecondary(s.Keys, pos) + if err != nil { + return 0, err + } + secondaryTrees[pos] = &secondary + secondaryIndexSize += uint64(secondary.Size()) + } + } + + buf := make([]byte, uint64(primaryIndex.Size())+secondaryIndexCountSize+secondaryIndexSize) + rw := byteops.NewReadWriter(buf) + + for _, secondary := range secondaryTrees { + rw.WriteUint64(offsetSecondaryStart) + offsetSecondaryStart += uint64(secondary.Size()) + } + + _, err := primaryIndex.MarshalBinaryInto(&rw) + if err != nil { + return 0, err + } + + for _, secondary := range secondaryTrees { + _, err := secondary.MarshalBinaryInto(&rw) + if err != nil { + return 0, err + } + } + written, err := w.Write(rw.Buffer) + if err != nil { + return 0, err + } + + return int64(written), nil +} + +func (s *Indexes) buildSecondary(keys []Key, pos int) (Tree, error) { + keyNodes := make([]Node, len(keys)) + i := 0 + for _, key := range keys { + if pos >= len(key.SecondaryKeys) { + // a secondary key is not guaranteed to be present. 
For example, a delete + // operation could pe performed using only the primary key + continue + } + + keyNodes[i] = Node{ + Key: key.SecondaryKeys[pos], + Start: uint64(key.ValueStart), + End: uint64(key.ValueEnd), + } + i++ + } + + keyNodes = keyNodes[:i] + + index := NewBalanced(keyNodes) + return index, nil +} + +// assumes sorted keys and does NOT sort them again +func (s *Indexes) buildPrimary(keys []Key) Tree { + keyNodes := make([]Node, len(keys)) + for i, key := range keys { + keyNodes[i] = Node{ + Key: key.Key, + Start: uint64(key.ValueStart), + End: uint64(key.ValueEnd), + } + } + index := NewBalanced(keyNodes) + + return index +} + +// WriteTo writes the indices (primary and secondary) to the writer +// +// The segment files look like this: +// - header (already written) +// - secondary indexes offset (if present) +// - primary index +// - secondary indexes (if present) +// +// We first write the primary index to a scratch file to know the positions of the secondary indices. Only then we know +// the offsets of the secondary indices. +func (s *Indexes) writeToScratchFiles(w io.Writer) (int64, error) { + var currentOffset uint64 = HeaderSize + if len(s.Keys) > 0 { + currentOffset = uint64(s.Keys[len(s.Keys)-1].ValueEnd) + } + var written int64 + + if _, err := os.Stat(s.ScratchSpacePath); err == nil { + // exists, we need to delete + // This could be the case if Weaviate shut down unexpectedly (i.e. crashed) + // while a compaction was running. We can safely discard the contents of + // the scratch space. 
+ + if err := os.RemoveAll(s.ScratchSpacePath); err != nil { + return written, errors.Wrap(err, "clean up previous scratch space") + } + } else if os.IsNotExist(err) { + // does not exist yet, nothing to - will be created in the next step + } else { + return written, errors.Wrap(err, "check for scratch space directory") + } + + if err := os.Mkdir(s.ScratchSpacePath, 0o777); err != nil { + return written, errors.Wrap(err, "create scratch space") + } + + primaryFileName := filepath.Join(s.ScratchSpacePath, "primary") + primaryFD, err := os.Create(primaryFileName) + if err != nil { + return written, err + } + primaryFDBuffered := bufio.NewWriter(diskio.NewMeteredWriter(primaryFD, func(written int64) { + s.ObserveWrite.Observe(float64(written)) + })) + + n, err := s.buildAndMarshalPrimary(primaryFDBuffered, s.Keys) + if err != nil { + return written, err + } + + if err := primaryFDBuffered.Flush(); err != nil { + return written, err + } + + if _, err := primaryFD.Seek(0, io.SeekStart); err != nil { + return written, fmt.Errorf("seek to start of primary scratch space: %w", err) + } + + // pretend that primary index was already written, then also account for the + // additional offset pointers (one for each secondary index) + currentOffset = currentOffset + uint64(n) + + uint64(s.SecondaryIndexCount)*8 + + secondaryFileName := filepath.Join(s.ScratchSpacePath, "secondary") + secondaryFD, err := os.Create(secondaryFileName) + if err != nil { + return written, err + } + + secondaryFDBuffered := bufio.NewWriter(diskio.NewMeteredWriter(secondaryFD, func(written int64) { + s.ObserveWrite.Observe(float64(written)) + })) + + if s.SecondaryIndexCount > 0 { + offsets := make([]uint64, s.SecondaryIndexCount) + for pos := range offsets { + n, err := s.buildAndMarshalSecondary(secondaryFDBuffered, pos, s.Keys) + if err != nil { + return written, err + } else { + written += int64(n) + } + + offsets[pos] = currentOffset + currentOffset = offsets[pos] + uint64(n) + } + + if err := 
binary.Write(w, binary.LittleEndian, &offsets); err != nil { + return written, err + } + + written += int64(len(offsets)) * 8 + } + + if err := secondaryFDBuffered.Flush(); err != nil { + return written, err + } + + if _, err := secondaryFD.Seek(0, io.SeekStart); err != nil { + return written, fmt.Errorf("seek to start of secondary scratch space: %w", err) + } + + if n, err := io.Copy(w, primaryFD); err != nil { + return written, err + } else { + written += int64(n) + } + + if n, err := io.Copy(w, secondaryFD); err != nil { + return written, err + } else { + written += int64(n) + } + + if err := primaryFD.Close(); err != nil { + return written, err + } + + if err := secondaryFD.Close(); err != nil { + return written, err + } + + if err := os.RemoveAll(s.ScratchSpacePath); err != nil { + return written, err + } + + return written, nil +} + +// pos indicates the position of a secondary index, assumes unsorted keys and +// sorts them +func (s *Indexes) buildAndMarshalSecondary(w io.Writer, pos int, + keys []Key, +) (int64, error) { + keyNodes := make([]Node, len(keys)) + i := 0 + for _, key := range keys { + if pos >= len(key.SecondaryKeys) { + // a secondary key is not guaranteed to be present. 
For example, a delete + // operation could pe performed using only the primary key + continue + } + + keyNodes[i] = Node{ + Key: key.SecondaryKeys[pos], + Start: uint64(key.ValueStart), + End: uint64(key.ValueEnd), + } + i++ + } + + keyNodes = keyNodes[:i] + + index := NewBalanced(keyNodes) + n, err := index.MarshalBinaryInto(w) + if err != nil { + return 0, err + } + + return n, nil +} + +// assumes sorted keys and does NOT sort them again +func (s *Indexes) buildAndMarshalPrimary(w io.Writer, keys []Key) (int64, error) { + keyNodes := make([]Node, len(keys)) + for i, key := range keys { + keyNodes[i] = Node{ + Key: key.Key, + Start: uint64(key.ValueStart), + End: uint64(key.ValueEnd), + } + } + index := NewBalanced(keyNodes) + + n, err := index.MarshalBinaryInto(w) + if err != nil { + return -1, err + } + + return n, nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segmentindex/indexes_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segmentindex/indexes_test.go new file mode 100644 index 0000000000000000000000000000000000000000..c08cc2f9d9beb55550cc9b15f21c35e092fe966f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segmentindex/indexes_test.go @@ -0,0 +1,73 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package segmentindex + +import ( + "bufio" + "fmt" + "math" + "os" + "testing" + + "github.com/prometheus/client_golang/prometheus" + "github.com/weaviate/weaviate/usecases/monitoring" + + "github.com/stretchr/testify/require" +) + +func BenchmarkIndexesWriteTo(b *testing.B) { + path := b.TempDir() + + index := Indexes{ + SecondaryIndexCount: 10, + ScratchSpacePath: path + "/scratch", + ObserveWrite: monitoring.GetMetrics().FileIOWrites.With(prometheus.Labels{ + "strategy": "test", + "operation": "writeIndices", + }), + } + start := HeaderSize + for i := 0; i < 10; i++ { + key := Key{Key: []byte(fmt.Sprintf("primary%d", i))} + secondaryLength := 0 + for j := 0; j < 10; j++ { + secondary := []byte(fmt.Sprintf("secondary%d", j)) + key.SecondaryKeys = append(key.SecondaryKeys, secondary) + secondaryLength += len(secondary) + } + key.ValueStart = start + key.ValueEnd = start + len(key.Key)*8 + secondaryLength*8 + index.Keys = append(index.Keys, key) + } + + b.ResetTimer() + + for _, size := range []uint64{4096, math.MaxUint64} { + b.Run(fmt.Sprintf("size=%d", size), func(b *testing.B) { + path := b.TempDir() + + for i := 0; i < b.N; i++ { + f, err := os.Create(path + fmt.Sprintf("/test%d", i)) + require.NoError(b, err) + + w := bufio.NewWriter(f) + + _, err = index.WriteTo(w, size) + require.NoError(b, err) + + require.NoError(b, w.Flush()) + require.NoError(b, f.Sync()) + require.NoError(b, f.Close()) + } + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segmentindex/key_index.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segmentindex/key_index.go new file mode 100644 index 0000000000000000000000000000000000000000..232191c6320437831fb54d405b8e2df85853618d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segmentindex/key_index.go @@ -0,0 +1,23 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// 
\ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package segmentindex + +// Key is a helper struct that can be used to build the key nodes for the +// segment index. It contains the primary key and an arbitrary number of +// secondary keys, as well as valueStart and valueEnd indicator. Those are used +// to find the correct payload for each key. +type Key struct { + Key []byte + SecondaryKeys [][]byte + ValueStart int + ValueEnd int +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segmentindex/quantile_keys.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segmentindex/quantile_keys.go new file mode 100644 index 0000000000000000000000000000000000000000..eb1b4693c8c1e594e43de4d4f74a7b7265ddf6bd --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segmentindex/quantile_keys.go @@ -0,0 +1,104 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package segmentindex + +import ( + "math" + + "github.com/weaviate/weaviate/usecases/byteops" +) + +// QuantileKeys returns a list of keys that roughly represent the quantiles of +// the tree. This can be very useful to bootstrap parallel cursors over the +// segment that are more or less evenly distributed. +// +// This method uses the natural shape of the tree to determine the +// distribution of the keys. This is a performance-accuracy trade-off. It does +// not guarantee perfect distribution, but it is fairly cheap to obtain as most +// runs will only need to go a few levels deep – even on massive trees. 
+// +// The number of keys returned is not guaranteed to be exactly q, in most cases +// returns more keys. This is because in a real-life application you would +// likely aggregate across multiple segments. Similarly keys are not returned +// in any specific order, as the assumption is that post-processing will be +// done when keys are aggregated across multiple segments. +// +// The two guarantees you get are: +// +// 1. If there are at least q keys in the tree, you will get at least q keys, +// most likely more +// 2. If there are less than q keys in the tree, you will get all keys. +func (t *DiskTree) QuantileKeys(q int) [][]byte { + if q <= 0 { + return nil + } + + // we will overfetch a bit because we will have q keys at level n, but in + // addition we can use all keys discovered on the way to get to level n. This + // will help us to get a more even distribution of keys – especially when + // multiple trees need to merged down the line (e.g. because there are many + // segements). + depth := int(math.Ceil(math.Log2(float64(q)))) + bfs := parallelBFS{ + dt: t, + maxDepth: depth, + keysDiscovered: make([][]byte, 0, depth), + } + + return bfs.run() +} + +type parallelBFS struct { + dt *DiskTree + maxDepth int + keysDiscovered [][]byte +} + +func (bfs *parallelBFS) run() [][]byte { + bfs.parse(0, 0) + + return bfs.keysDiscovered +} + +func (bfs *parallelBFS) parse(offset uint64, level int) { + if offset+4 > uint64(len(bfs.dt.data)) || offset+4 < 4 { + // exit condition + return + } + rw := byteops.NewReadWriter(bfs.dt.data) + rw.Position = offset + keyLen := rw.ReadUint32() + nodeKeyBuffer := make([]byte, int(keyLen)) + _, err := rw.CopyBytesFromBuffer(uint64(keyLen), nodeKeyBuffer) + if err != nil { + // no special handling other than skipping this node. If the key could not + // be read correctly, we have much bigger problems worrying about quantile + // keys for cursor efficiency. This error is handled during normal .Get() + // operations. 
It is not worth changing the signature of quantile keys just + // to return this one error. We could also consider explicitly panic'ing + // here, so this error does not get lost. + return + } + + bfs.keysDiscovered = append(bfs.keysDiscovered, nodeKeyBuffer) + + if level+1 > bfs.maxDepth { + return + } + + rw.MoveBufferPositionForward(2 * 8) // jump over start+end position + leftChildPos := rw.ReadUint64() // left child + rightChildPos := rw.ReadUint64() // left child + + bfs.parse(leftChildPos, level+1) + bfs.parse(rightChildPos, level+1) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segmentindex/quantile_keys_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segmentindex/quantile_keys_test.go new file mode 100644 index 0000000000000000000000000000000000000000..2fcf3172f6984501b292ab9effa04b8b344b0635 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segmentindex/quantile_keys_test.go @@ -0,0 +1,146 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package segmentindex + +import ( + "bytes" + "encoding/binary" + "sort" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func FuzzQuantileKeys(f *testing.F) { + type test struct { + name string + objects int + inputQuantiles int + expectedMinimumOutput int + } + + tests := []test{ + { + name: "many entries, few quantiles", + objects: 1000, + inputQuantiles: 10, + expectedMinimumOutput: 10, + }, + { + name: "single entry, no quantiles", + objects: 1, + inputQuantiles: 0, + expectedMinimumOutput: 0, + }, + { + name: "negative quanitles", + objects: 50, + inputQuantiles: -100, + expectedMinimumOutput: 0, + }, + { + name: "single entry, single quantile", + objects: 1, + inputQuantiles: 1, + expectedMinimumOutput: 1, + }, + { + name: "single entry, many quantiles", + objects: 1, + inputQuantiles: 100, + expectedMinimumOutput: 1, + }, + { + name: "few entries, many quantiles", + objects: 17, + inputQuantiles: 100, + expectedMinimumOutput: 17, + }, + { + name: "same number of entries and quantiles", + objects: 31, + inputQuantiles: 31, + expectedMinimumOutput: 31, + }, + { + name: "no entries", + objects: 0, + inputQuantiles: 31, + expectedMinimumOutput: 0, + }, + } + + for _, test := range tests { + f.Add(test.objects, test.inputQuantiles) + } + + f.Fuzz(func(t *testing.T, objects int, inputQuantiles int) { + if objects < 0 || objects > 1000 { + return + } + + if inputQuantiles < 0 || inputQuantiles > 1000 { + return + } + + minimumOutput := inputQuantiles + if objects < inputQuantiles { + minimumOutput = objects + } + + dt := buildSampleDiskTree(t, objects) + keys := dt.QuantileKeys(inputQuantiles) + + require.GreaterOrEqual(t, len(keys), minimumOutput) + }) +} + +func TestQuantileKeysDistribution(t *testing.T) { + dt := buildSampleDiskTree(t, 1000) + keys := dt.QuantileKeys(8) + sort.Slice(keys, func(a, b int) bool { + return bytes.Compare(keys[a], keys[b]) < 0 + }) + + 
asNumbers := make([]uint64, 0, len(keys)) + for _, key := range keys { + asNumbers = append(asNumbers, binary.BigEndian.Uint64(key)) + } + + idealStepSize := float64(1000) / float64(len(asNumbers)+1) + for i, n := range asNumbers { + actualStepSize := float64(n) / float64(i+1) + assert.InEpsilon(t, idealStepSize, actualStepSize, 0.1) + } +} + +func buildSampleDiskTree(t *testing.T, n int) *DiskTree { + nodes := make([]Node, 0, n) + for i := 0; i < n; i++ { + key := make([]byte, 8) + binary.BigEndian.PutUint64(key, uint64(i)) + // the index positions do not matter for this test + start, end := uint64(0), uint64(0) + nodes = append(nodes, Node{Key: key, Start: start, End: end}) + } + + sort.Slice(nodes, func(a, b int) bool { + return bytes.Compare(nodes[a].Key, nodes[b].Key) < 0 + }) + + balanced := NewBalanced(nodes) + dt, err := balanced.MarshalBinary() + require.Nil(t, err) + + return NewDiskTree(dt) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segmentindex/segment_file.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segmentindex/segment_file.go new file mode 100644 index 0000000000000000000000000000000000000000..13e1cd7f28ec1dcfd7022a2f6a8eaf8d970ee3d6 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segmentindex/segment_file.go @@ -0,0 +1,360 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package segmentindex + +import ( + "bufio" + "bytes" + "fmt" + "io" + "time" + + "github.com/weaviate/weaviate/usecases/integrity" + "github.com/weaviate/weaviate/usecases/monitoring" +) + +type SegmentWriter interface { + Write(p []byte) (n int, err error) + Flush() error +} + +// SegmentFile facilitates the writing/reading of an LSM bucket segment file. 
+// +// These contents include the CRC32 checksum which is calculated based on the: +// - segment data +// - segment indexes +// - segment header +// +// The checksum is calculated using those components in that exact ordering. +// This is because during compactions, the header is not actually known until +// the compaction process is complete. So to accommodate this, all segment +// checksum calculations are made using the header last. +// +// Usage: +// +// To write a segment file, initialization and API are as follows: +// ``` +// sf := NewSegmentFile(WithBufferedWriter()) +// sf.WriterHeader() +// .WriteTo(sf.BodyWriter()) +// sf.WriteChecksum() +// ``` +// +// To validate a segment file checksum, initialization and API are as follows: +// ``` +// sf := NewSegmentFile(WithReader()) +// sf.ValidateChecksum() +// ``` +type SegmentFile struct { + header *Header + headerInverted *HeaderInverted + writer SegmentWriter + reader *bufio.Reader + checksumWriter integrity.ChecksumWriter + checksumReader integrity.ChecksumReader + // flag to indicate if the segment file is empty. + // this is necessary, because in the case of + // compactions, we don't want to re-write the header + // when it is later re-written + writtenTo bool + checksumsDisabled bool +} + +type SegmentFileOption func(*SegmentFile) + +// WithBufferedWriter sets the desired segment file writer +// This will typically wrap the segment *os.File +func WithBufferedWriter(writer SegmentWriter) SegmentFileOption { + return func(segmentFile *SegmentFile) { + segmentFile.writer = writer + segmentFile.checksumWriter = integrity.NewCRC32Writer(writer) + } +} + +// WithReader sets the desired segment file reader. +// This will typically be the segment *os.File. 
+func WithReader(reader io.Reader) SegmentFileOption { + return func(segmentFile *SegmentFile) { + segmentFile.reader = bufio.NewReader(reader) + segmentFile.checksumReader = integrity.NewCRC32Reader(reader) + } +} + +// WithReader sets the desired segment file reader. +// This will typically be the segment *os.File. +func WithReaderCustomBufferSize(reader io.Reader, size int) SegmentFileOption { + return func(segmentFile *SegmentFile) { + segmentFile.reader = bufio.NewReaderSize(reader, size) + segmentFile.checksumReader = integrity.NewCRC32Reader(reader) + } +} + +// WithChecksumsDisabled configures the segment file +// to be written without checksums +func WithChecksumsDisabled(disable bool) SegmentFileOption { + return func(segmentFile *SegmentFile) { + segmentFile.checksumsDisabled = disable + } +} + +// NewSegmentFile creates a new instance of SegmentFile. +// Be sure to include a writer or reader option depending on your needs. +func NewSegmentFile(opts ...SegmentFileOption) *SegmentFile { + s := &SegmentFile{ + checksumsDisabled: true, + } + for _, opt := range opts { + opt(s) + } + return s +} + +// BodyWriter exposes the underlying writer which calculates the hash inline. +// This method is used when writing the body of the segment, the user data +// itself. +// +// Because there are many segment node types, and each exposes its own `WriteTo` +// (or similar) method, it would be cumbersome to support each node type, in the +// way we support WriteHeader and WriteIndexes. So this method exists to hook +// into each segment node's `WriteTo` instead. +// +// This method uses the written data to further calculate the checksum. +func (f *SegmentFile) BodyWriter() io.Writer { + f.writtenTo = true + + if f.checksumsDisabled { + return f.writer + } + return f.checksumWriter +} + +// SetHeader sets the header in the SegmentFile without writing anything. This should be used if the header was already +// written by another reader. 
+func (f *SegmentFile) SetHeader(header *Header) { + f.header = header +} + +// SetHeaderInverted sets the inverted header in the SegmentFile without writing anything. This should be used if the +// inverted header was already written by another reader. +func (f *SegmentFile) SetHeaderInverted(headerInverted *HeaderInverted) { + f.headerInverted = headerInverted +} + +// WriteHeader writes the header struct to the underlying writer. +// This method resets the internal hash, so that the header can be written +// to the checksum last. For more details see SegmentFile. +func (f *SegmentFile) WriteHeader(header *Header) (int64, error) { + if f.writer == nil { + return 0, fmt.Errorf(" SegmentFile not initialized with a reader, " + + "try adding one with segmentindex.WithBufferedWriter(*bufio.Writer)") + } + + if f.checksumsDisabled { + if f.writtenTo { + return 0, nil + } + return header.WriteTo(f.writer) + } + + f.header = header + // If this is a memtable flush, we want to write the header up front. + // If this is a compaction, the dummy header already exists, and will + // be overwritten through a different writer. In that case, all we care + // about is saving the header pointer, so we can add it to the hash when + // WriteChecksum is called. + if !f.writtenTo { + n, err := header.WriteTo(f.checksumWriter) + if err != nil { + return n, fmt.Errorf("write segment file header: %w", err) + } + // We save the header, and only write it to the checksum at the end + f.checksumWriter.Reset() + f.writtenTo = true + return n, nil + } + + return 0, nil +} + +// WriteHeaderInverted does the same as WriteHeader, but for the extra header on inverted indexes. 
+func (f *SegmentFile) WriteHeaderInverted(headerInverted *HeaderInverted) (int64, error) { + if f.writer == nil { + return 0, fmt.Errorf(" SegmentFile not initialized with a reader, " + + "try adding one with segmentindex.WithBufferedWriter(*bufio.Writer)") + } + + if f.checksumsDisabled { + if f.writtenTo { + return 0, nil + } + return headerInverted.WriteTo(f.writer) + } + + f.headerInverted = headerInverted + // If this is a memtable flush, we want to write the header up front. + // If this is a compaction, the dummy header already exists, and will + // be overwritten through a different writer. In that case, all we care + // about is saving the header pointer, so we can add it to the hash when + // WriteChecksum is called. + if !f.writtenTo { + n, err := headerInverted.WriteTo(f.checksumWriter) + if err != nil { + return n, fmt.Errorf("write segment file header: %w", err) + } + // We save the header, and only write it to the checksum at the end + f.checksumWriter.Reset() + f.writtenTo = true + return n, nil + } + + return 0, nil +} + +// WriteIndexes writes the indexes struct to the underlying writer. +// This method uses the written data to further calculate the checksum. +func (f *SegmentFile) WriteIndexes(indexes *Indexes, expectedSize int64) (int64, error) { + if f.writer == nil { + return 0, fmt.Errorf(" SegmentFile not initialized with a reader, " + + "try adding one with segmentindex.WithBufferedWriter(*bufio.Writer)") + } + + if f.checksumsDisabled { + return indexes.WriteTo(f.writer, uint64(expectedSize)) + } + + n, err := indexes.WriteTo(f.checksumWriter, uint64(expectedSize)) + if err != nil { + return n, fmt.Errorf("write segment file indexes: %w", err) + } + f.writtenTo = true + return n, nil +} + +// WriteChecksum writes checksum itself to the segment file. +// As mentioned elsewhere in SegmentFile, the header is added to the checksum last. 
+// This method finally adds the header to the hash, and then writes the resulting +// checksum to the segment file. +func (f *SegmentFile) WriteChecksum() (int64, error) { + if f.writer == nil { + return 0, fmt.Errorf(" SegmentFile not initialized with a reader, " + + "try adding one with segmentindex.WithBufferedWriter(*bufio.Writer)") + } + + var n int + var err error + + if !f.checksumsDisabled { + if err = f.addHeaderToChecksum(); err != nil { + return 0, err + } + + n, err = f.writer.Write(f.checksumWriter.Hash()) + if err != nil { + return int64(n), fmt.Errorf("write segment file checksum: %w", err) + } + } + + if err = f.writer.Flush(); err != nil { + return 0, fmt.Errorf("flush segmentfile: %w", err) + } + + return int64(n), nil +} + +// ValidateChecksum determines if a segment's content matches its checksum +func (f *SegmentFile) ValidateChecksum(size, headerSize int64) error { + start := time.Now() + read := 0 + + defer func() { + duration := time.Since(start) + m := monitoring.GetMetrics() + // Record the duration of the checksum validation + if m != nil { + m.ChecksumValidationDuration.Observe(float64(duration) / float64(time.Second)) + m.ChecksumBytesRead.Observe(float64(read)) + } + }() + if f.reader == nil { + return fmt.Errorf(" SegmentFile not initialized with a reader, " + + "try adding one with segmentindex.WithReader(io.Reader)") + } + + f.checksumReader = integrity.NewCRC32Reader(f.reader) + + header := make([]byte, headerSize) + n, err := f.reader.Read(header[:]) + if err != nil { + return fmt.Errorf("read segment file header: %w", err) + } + read += n + + var ( + buffer = make([]byte, 4096) // Buffer for chunked reads + dataSize = size - headerSize - ChecksumSize + remaining = dataSize + ) + + for remaining > 0 { + toRead := int64(len(buffer)) + if remaining < toRead { + toRead = remaining + } + + n, err := f.checksumReader.Read(buffer[:toRead]) + if err != nil { + return fmt.Errorf("read segment file: %w", err) + } + + remaining -= int64(n) 
+ read += n + } + + var checksumBytes [ChecksumSize]byte + n, err = io.ReadFull(f.reader, checksumBytes[:]) + if err != nil { + return fmt.Errorf("read segment file checksum: %w", err) + } + read += n + + f.reader.Reset(bytes.NewReader(header[:])) + n, err = f.checksumReader.Read(make([]byte, headerSize)) + if err != nil { + return fmt.Errorf("add header to checksum: %w", err) + } + read += n + + computedChecksum := f.checksumReader.Hash() + if !bytes.Equal(computedChecksum, checksumBytes[:]) { + return fmt.Errorf("invalid checksum") + } + + return nil +} + +func (f *SegmentFile) addHeaderToChecksum() error { + b := bytes.NewBuffer(nil) + if _, err := f.header.WriteTo(b); err != nil { + return fmt.Errorf( + "serialize segment header to write to checksum hash: %w", err) + } + if f.headerInverted != nil { + if _, err := f.headerInverted.WriteTo(b); err != nil { + return fmt.Errorf( + "serialize segment inverted header to write to checksum hash: %w", err) + } + } + if _, err := f.checksumWriter.HashWrite(b.Bytes()); err != nil { + return fmt.Errorf("write segment header to checksum hash: %w", err) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segmentindex/segment_file_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segmentindex/segment_file_test.go new file mode 100644 index 0000000000000000000000000000000000000000..ae6b232eb585d58862b20b516efb4ecbb8f49565 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segmentindex/segment_file_test.go @@ -0,0 +1,104 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package segmentindex + +import ( + "fmt" + "os" + "path" + "testing" + + "github.com/stretchr/testify/require" +) + +// Note: checksums are also being tested in the compaction_integration_test.go file, that tests multiple segments and strategies +// to ensure that the ValidateChecksum function works correctly for all segment types. + +// contentsWithChecksum is a precomputed segment file from the property__id bucket. +// The data object which was used to generate this segment file is the following: +// +// { +// "id": "3722dfe8-b26c-4d05-95ee-41045673b43d", +// "class": "Paragraph", +// "properties": { +// "contents": "para1" +// } +// } +// +// These contents include the CRC32 checksum which was calculated based on the: +// - segment data +// - segment indexes +// - segment header +// +// The checksum is calculated using those components in that exact ordering. +// This is because during compactions, the header is not actually known until +// the compaction process is complete. So to accommodate this, all segment +// checksum calculations are made using the header last. 
+var contentsWithChecksum = []byte{ + 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x51, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, 0x33, 0x37, 0x32, + 0x32, 0x64, 0x66, 0x65, 0x38, 0x2d, 0x62, 0x32, 0x36, 0x63, 0x2d, 0x34, + 0x64, 0x30, 0x35, 0x2d, 0x39, 0x35, 0x65, 0x65, 0x2d, 0x34, 0x31, 0x30, + 0x34, 0x35, 0x36, 0x37, 0x33, 0x62, 0x34, 0x33, 0x64, 0x24, 0x00, 0x00, + 0x00, 0x33, 0x37, 0x32, 0x32, 0x64, 0x66, 0x65, 0x38, 0x2d, 0x62, 0x32, + 0x36, 0x63, 0x2d, 0x34, 0x64, 0x30, 0x35, 0x2d, 0x39, 0x35, 0x65, 0x65, + 0x2d, 0x34, 0x31, 0x30, 0x34, 0x35, 0x36, 0x37, 0x33, 0x62, 0x34, 0x33, + 0x64, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x51, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x53, 0x4c, 0x67, + 0x5a, +} + +func prepareSegment(t *testing.T) (*os.File, int64) { + dir := t.TempDir() + fname := path.Join(dir, "tmp.db") + + { + f, err := os.Create(fname) + require.Nil(t, err) + _, err = f.Write(contentsWithChecksum) + require.Nil(t, err) + f.Close() + } + + f, err := os.Open(fname) + require.Nil(t, err) + + fileInfo, err := f.Stat() + if err != nil { + fmt.Printf("Error getting file info: %v\n", err) + return nil, 0 + } + return f, fileInfo.Size() +} + +func TestSegmentFile_ValidateChecksum(t *testing.T) { + f, fileSize := prepareSegment(t) + defer f.Close() + segmentFile := NewSegmentFile(WithReader(f)) + err := segmentFile.ValidateChecksum(fileSize, HeaderSize) + require.Nil(t, err) +} + +// This test checks that the ValidateChecksum function works correctly when the reader buffer is close to the size of the final read of the checksum. +// In a previous implementation, this caused an error because the reader would not read the checksum bytes correctly by not using io.ReadFull. 
+// Setting a custom buffer size is simpler than creating a segment file with a size that is close to the checksum size. +// Interesting note, for this segment, the test would fail in the old implementation if the buffer size is set to 77, 78, 154, 155 or 156 bytes. +func TestSegmentFile_ValidateChecksumMultipleOfBufferReader(t *testing.T) { + f, fileSize := prepareSegment(t) + defer f.Close() + segmentFile := NewSegmentFile(WithReaderCustomBufferSize(f, 77)) + err := segmentFile.ValidateChecksum(fileSize, HeaderSize) + require.Nil(t, err) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segmentindex/strategies.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segmentindex/strategies.go new file mode 100644 index 0000000000000000000000000000000000000000..dd44510d640d3bb4caf450febd00f1b6a0f302f3 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segmentindex/strategies.go @@ -0,0 +1,79 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package segmentindex + +import "fmt" + +type Strategy uint16 + +const ( + StrategyReplace Strategy = iota + StrategySetCollection + StrategyMapCollection + StrategyRoaringSet + StrategyRoaringSetRange + StrategyInverted +) + +// consistent labels with adapters/repos/db/lsmkv/strategies.go +func (s Strategy) String() string { + switch s { + case StrategyReplace: + return "replace" + case StrategySetCollection: + return "setcollection" + case StrategyMapCollection: + return "mapcollection" + case StrategyRoaringSet: + return "roaringset" + case StrategyRoaringSetRange: + return "roaringsetrange" + default: + return "n/a" + } +} + +func IsExpectedStrategy(strategy Strategy, expectedStrategies ...Strategy) bool { + if len(expectedStrategies) == 0 { + expectedStrategies = []Strategy{ + StrategyReplace, + StrategySetCollection, + StrategyMapCollection, + StrategyRoaringSet, + StrategyRoaringSetRange, + StrategyInverted, + } + } + + for _, s := range expectedStrategies { + if s == strategy { + return true + } + } + return false +} + +func CheckExpectedStrategy(strategy Strategy, expectedStrategies ...Strategy) error { + if IsExpectedStrategy(strategy, expectedStrategies...) 
{ + return nil + } + if len(expectedStrategies) == 1 { + return fmt.Errorf("strategy %v expected, got %v", expectedStrategies[0], strategy) + } + return fmt.Errorf("one of strategies %v expected, got %v", expectedStrategies, strategy) +} + +func MustBeExpectedStrategy(strategy Strategy, expectedStrategies ...Strategy) { + if err := CheckExpectedStrategy(strategy, expectedStrategies...); err != nil { + panic(err) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segmentindex/tree.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segmentindex/tree.go new file mode 100644 index 0000000000000000000000000000000000000000..1e5ad842967456ff1301df7dc13c39bd4d141ccb --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segmentindex/tree.go @@ -0,0 +1,328 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package segmentindex + +import ( + "bytes" + "encoding/binary" + "fmt" + "io" + "math" + "sort" + + "github.com/pkg/errors" +) + +type Nodes []Node + +func (n Nodes) Len() int { return len(n) } +func (n Nodes) Swap(i, j int) { n[i], n[j] = n[j], n[i] } +func (n Nodes) Less(i, j int) bool { return bytes.Compare(n[i].Key, n[j].Key) < 0 } + +type Tree struct { + nodes []*Node +} + +type Node struct { + Key []byte + Start uint64 + End uint64 +} + +func NewTree(capacity int) Tree { + return Tree{ + nodes: make([]*Node, 0, capacity), + } +} + +func NewBalanced(nodes Nodes) Tree { + t := Tree{nodes: make([]*Node, len(nodes))} + + if len(nodes) > 0 { + // sort the slice just once + sort.Sort(nodes) + t.buildBalanced(nodes, 0, 0, len(nodes)-1) + } + + return t +} + +func (t *Tree) buildBalanced(nodes []Node, targetPos, leftBound, rightBound int) { + t.grow(targetPos) + + if leftBound > rightBound { + return + } + + mid := (leftBound + rightBound) / 2 + t.nodes[targetPos] = &nodes[mid] + + t.buildBalanced(nodes, t.left(targetPos), leftBound, mid-1) + t.buildBalanced(nodes, t.right(targetPos), mid+1, rightBound) +} + +func (t *Tree) Insert(key []byte, start, end uint64) { + newNode := Node{ + Key: key, + Start: start, + End: end, + } + + if len(t.nodes) == 0 { + t.nodes = append(t.nodes, &newNode) + return + } + + t.insertAt(0, newNode) +} + +func (t *Tree) insertAt(nodeID int, newNode Node) { + if !t.exists(nodeID) { + // we are at the target and can insert now + t.grow(nodeID) + t.nodes[nodeID] = &newNode + return + } + + if bytes.Equal(newNode.Key, t.nodes[nodeID].Key) { + // this key already exists, which is an unexpected situation for an index + // key + panic(fmt.Sprintf("duplicate key %s", newNode.Key)) + } + + if bytes.Compare(newNode.Key, t.nodes[nodeID].Key) < 0 { + t.insertAt(t.left(nodeID), newNode) + } else { + t.insertAt(t.right(nodeID), newNode) + } +} + +func (t *Tree) Get(key []byte) ([]byte, uint64, uint64) { + if 
len(t.nodes) == 0 { + return nil, 0, 0 + } + + return t.getAt(0, key) +} + +func (t *Tree) getAt(nodeID int, key []byte) ([]byte, uint64, uint64) { + if !t.exists(nodeID) { + return nil, 0, 0 + } + + node := t.nodes[nodeID] + if bytes.Equal(node.Key, key) { + return node.Key, node.Start, node.End + } + + if bytes.Compare(key, node.Key) < 0 { + return t.getAt(t.left(nodeID), key) + } else { + return t.getAt(t.right(nodeID), key) + } +} + +func (t Tree) left(i int) int { + return 2*i + 1 +} + +func (t Tree) right(i int) int { + return 2*i + 2 +} + +func (t *Tree) exists(i int) bool { + if i >= len(t.nodes) { + return false + } + + return t.nodes[i] != nil +} + +// size calculates the exact size of this node on disk which is helpful to +// figure out the personal offset +func (n *Node) size() int { + if n == nil { + return 0 + } + size := 0 + size += 4 // uint32 for key length + size += len(n.Key) + size += 8 // uint64 startPos + size += 8 // uint64 endPos + size += 8 // int64 pointer left child + size += 8 // int64 pointer right child + return size +} + +func (t *Tree) grow(i int) { + if i < len(t.nodes) { + return + } + + oldSize := len(t.nodes) + newSize := oldSize + for newSize <= i { + newSize += oldSize + } + + newNodes := make([]*Node, newSize) + copy(newNodes, t.nodes) + for i := range t.nodes { + t.nodes[i] = nil + } + + t.nodes = newNodes +} + +func (t *Tree) MarshalBinary() ([]byte, error) { + offsets, size := t.calculateDiskOffsets() + + buf := bytes.NewBuffer(nil) + + for i, node := range t.nodes { + if node == nil { + continue + } + + var leftOffset int64 + var rightOffset int64 + + if t.exists(t.left(i)) { + leftOffset = int64(offsets[t.left(i)]) + } else { + leftOffset = -1 + } + + if t.exists(t.right(i)) { + rightOffset = int64(offsets[t.right(i)]) + } else { + rightOffset = -1 + } + + if len(node.Key) > math.MaxUint32 { + return nil, errors.Errorf("max key size is %d", math.MaxUint32) + } + + keyLen := uint32(len(node.Key)) + if err := 
binary.Write(buf, binary.LittleEndian, keyLen); err != nil { + return nil, err + } + if _, err := buf.Write(node.Key); err != nil { + return nil, err + } + if err := binary.Write(buf, binary.LittleEndian, node.Start); err != nil { + return nil, err + } + if err := binary.Write(buf, binary.LittleEndian, node.End); err != nil { + return nil, err + } + if err := binary.Write(buf, binary.LittleEndian, leftOffset); err != nil { + return nil, err + } + if err := binary.Write(buf, binary.LittleEndian, rightOffset); err != nil { + return nil, err + } + } + bytes := buf.Bytes() + if size != len(bytes) { + return nil, errors.Errorf("corrupt: wrote %d bytes with target %d", len(bytes), size) + } + + return bytes, nil +} + +func (t *Tree) MarshalBinaryInto(w io.Writer) (int64, error) { + offsets, size := t.calculateDiskOffsets() + + // create buf just once and reuse for each iteration, each iteration + // overwrites every single byte of the buffer, so no initializing or + // resetting after a round is required. 
+ buf := make([]byte, 36) // 1x uint32 + 4x uint64 + + for i, node := range t.nodes { + if node == nil { + continue + } + + var leftOffset int64 + var rightOffset int64 + + if t.exists(t.left(i)) { + leftOffset = int64(offsets[t.left(i)]) + } else { + leftOffset = -1 + } + + if t.exists(t.right(i)) { + rightOffset = int64(offsets[t.right(i)]) + } else { + rightOffset = -1 + } + + if len(node.Key) > math.MaxUint32 { + return 0, errors.Errorf("max key size is %d", math.MaxUint32) + } + + keyLen := uint32(len(node.Key)) + binary.LittleEndian.PutUint32(buf[0:4], keyLen) + binary.LittleEndian.PutUint64(buf[4:12], node.Start) + binary.LittleEndian.PutUint64(buf[12:20], node.End) + binary.LittleEndian.PutUint64(buf[20:28], uint64(leftOffset)) + binary.LittleEndian.PutUint64(buf[28:36], uint64(rightOffset)) + + if _, err := w.Write(buf[:4]); err != nil { + return 0, err + } + if _, err := w.Write(node.Key); err != nil { + return 0, err + } + if _, err := w.Write(buf[4:36]); err != nil { + return 0, err + } + } + + return int64(size), nil +} + +// returns individual offsets and total size, nil nodes are skipped +func (t *Tree) calculateDiskOffsets() ([]int, int) { + current := 0 + out := make([]int, len(t.nodes)) + for i, node := range t.nodes { + out[i] = current + size := node.size() + current += size + } + + return out, current +} + +func (t *Tree) Height() int { + var highestElem int + for i := len(t.nodes) - 1; i >= 0; i-- { + if t.nodes[i] != nil { + highestElem = i + break + } + } + + return int(math.Ceil(math.Log2(float64(highestElem)))) +} + +func (t *Tree) Size() int { + size := 0 + for _, node := range t.nodes { + size += node.size() + } + + return size +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segmentindex/tree_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segmentindex/tree_test.go new file mode 100644 index 0000000000000000000000000000000000000000..88826f19047c01ed45843f804a1891bfcc42acde --- /dev/null 
+++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/segmentindex/tree_test.go @@ -0,0 +1,212 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package segmentindex + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/lsmkv" +) + +func TestTree(t *testing.T) { + type elem struct { + key []byte + start uint64 + end uint64 + } + + tree := NewTree(4) + + elements := []elem{ + { + key: []byte("foobar"), + start: 17, + end: 18, + }, + { + key: []byte("abc"), + start: 4, + end: 5, + }, + { + key: []byte("zzz"), + start: 34, + end: 35, + }, + { + key: []byte("aaa"), + start: 1, + end: 2, + }, + { + // makes the tree slightly imbalanced to the right, which in turn assures + // that we have a nil node in between + key: []byte("zzzz"), + start: 100, + end: 102, + }, + } + + t.Run("inserting", func(t *testing.T) { + for _, elem := range elements { + tree.Insert(elem.key, elem.start, elem.end) + } + }) + + t.Run("exact get", func(t *testing.T) { + key, start, end := tree.Get([]byte("foobar")) + assert.Equal(t, []byte("foobar"), key) + assert.Equal(t, uint64(17), start) + assert.Equal(t, uint64(18), end) + + key, start, end = tree.Get([]byte("abc")) + assert.Equal(t, []byte("abc"), key) + assert.Equal(t, uint64(4), start) + assert.Equal(t, uint64(5), end) + + key, start, end = tree.Get([]byte("zzz")) + assert.Equal(t, []byte("zzz"), key) + assert.Equal(t, uint64(34), start) + assert.Equal(t, uint64(35), end) + + key, start, end = tree.Get([]byte("aaa")) + assert.Equal(t, []byte("aaa"), key) + assert.Equal(t, uint64(1), start) + assert.Equal(t, uint64(2), end) + + key, start, end = tree.Get([]byte("zzzz")) + assert.Equal(t, 
[]byte("zzzz"), key) + assert.Equal(t, uint64(100), start) + assert.Equal(t, uint64(102), end) + }) + + t.Run("marshalling and then reading the byte representation", func(t *testing.T) { + bytes, err := tree.MarshalBinary() + require.Nil(t, err) + + dTree := NewDiskTree(bytes) + + t.Run("get", func(t *testing.T) { + n, err := dTree.Get([]byte("foobar")) + assert.Nil(t, err) + assert.Equal(t, []byte("foobar"), n.Key) + assert.Equal(t, uint64(17), n.Start) + assert.Equal(t, uint64(18), n.End) + + n, err = dTree.Get([]byte("abc")) + assert.Nil(t, err) + assert.Equal(t, []byte("abc"), n.Key) + assert.Equal(t, uint64(4), n.Start) + assert.Equal(t, uint64(5), n.End) + + n, err = dTree.Get([]byte("zzz")) + assert.Nil(t, err) + assert.Equal(t, []byte("zzz"), n.Key) + assert.Equal(t, uint64(34), n.Start) + assert.Equal(t, uint64(35), n.End) + + n, err = dTree.Get([]byte("aaa")) + assert.Nil(t, err) + assert.Equal(t, []byte("aaa"), n.Key) + assert.Equal(t, uint64(1), n.Start) + assert.Equal(t, uint64(2), n.End) + + n, err = dTree.Get([]byte("zzzz")) + assert.Nil(t, err) + assert.Equal(t, []byte("zzzz"), n.Key) + assert.Equal(t, uint64(100), n.Start) + assert.Equal(t, uint64(102), n.End) + }) + + t.Run("seek", func(t *testing.T) { + n, err := dTree.Seek([]byte("foobar")) + assert.Nil(t, err) + assert.Equal(t, []byte("foobar"), n.Key) + assert.Equal(t, uint64(17), n.Start) + assert.Equal(t, uint64(18), n.End) + + n, err = dTree.Seek([]byte("f")) + assert.Nil(t, err) + assert.Equal(t, []byte("foobar"), n.Key) + assert.Equal(t, uint64(17), n.Start) + assert.Equal(t, uint64(18), n.End) + + n, err = dTree.Seek([]byte("abc")) + assert.Nil(t, err) + assert.Equal(t, []byte("abc"), n.Key) + assert.Equal(t, uint64(4), n.Start) + assert.Equal(t, uint64(5), n.End) + + n, err = dTree.Seek([]byte("ab")) + assert.Nil(t, err) + assert.Equal(t, []byte("abc"), n.Key) + assert.Equal(t, uint64(4), n.Start) + assert.Equal(t, uint64(5), n.End) + + n, err = dTree.Seek([]byte("zzz")) + assert.Nil(t, 
err) + assert.Equal(t, []byte("zzz"), n.Key) + assert.Equal(t, uint64(34), n.Start) + assert.Equal(t, uint64(35), n.End) + + n, err = dTree.Seek([]byte("z")) + assert.Nil(t, err) + assert.Equal(t, []byte("zzz"), n.Key) + assert.Equal(t, uint64(34), n.Start) + assert.Equal(t, uint64(35), n.End) + + n, err = dTree.Seek([]byte("aaa")) + assert.Nil(t, err) + assert.Equal(t, []byte("aaa"), n.Key) + assert.Equal(t, uint64(1), n.Start) + assert.Equal(t, uint64(2), n.End) + + n, err = dTree.Seek([]byte("a")) + assert.Nil(t, err) + assert.Equal(t, []byte("aaa"), n.Key) + assert.Equal(t, uint64(1), n.Start) + assert.Equal(t, uint64(2), n.End) + + n, err = dTree.Seek([]byte("zzzz")) + assert.Nil(t, err) + assert.Equal(t, []byte("zzzz"), n.Key) + assert.Equal(t, uint64(100), n.Start) + assert.Equal(t, uint64(102), n.End) + + n, err = dTree.Seek([]byte("zzza")) + assert.Nil(t, err) + assert.Equal(t, []byte("zzzz"), n.Key) + assert.Equal(t, uint64(100), n.Start) + assert.Equal(t, uint64(102), n.End) + + n, err = dTree.Seek([]byte("zzzzz")) + assert.Equal(t, lsmkv.NotFound, err) + }) + + t.Run("get all keys (for building bloom filters at segment init time)", func(t *testing.T) { + expected := [][]byte{ + []byte("aaa"), + []byte("abc"), + []byte("foobar"), + []byte("zzz"), + []byte("zzzz"), + } + + keys, err := dTree.AllKeys() + + require.Nil(t, err) + assert.ElementsMatch(t, expected, keys) + }) + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/store.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/store.go new file mode 100644 index 0000000000000000000000000000000000000000..ae9014b694fdd5e6be0a66686a0e4897a2912dc9 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/store.go @@ -0,0 +1,610 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 
// Weaviate B.V. All rights reserved.
//
// CONTACT: hello@weaviate.io
//

package lsmkv

import (
	"context"
	"fmt"
	"os"
	"path"
	"path/filepath"
	"runtime"
	"runtime/debug"
	"strings"
	"sync"

	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"

	"github.com/weaviate/weaviate/adapters/repos/db/helpers"
	"github.com/weaviate/weaviate/entities/cyclemanager"
	"github.com/weaviate/weaviate/entities/errorcompounder"
	enterrors "github.com/weaviate/weaviate/entities/errors"
	entsentry "github.com/weaviate/weaviate/entities/sentry"
	"github.com/weaviate/weaviate/entities/storagestate"
	wsync "github.com/weaviate/weaviate/entities/sync"
)

// ErrAlreadyClosed is the sentinel wrapped (via %w) into every error returned
// by store operations attempted after Shutdown has marked the store closed.
var ErrAlreadyClosed = errors.New("store already closed")

// Store groups multiple buckets together, it "owns" one folder on the file
// system
type Store struct {
	// dir is the folder owned by this store; rootDir is the enclosing root
	// used for logging and for computing relative cycle-callback ids.
	dir     string
	rootDir string

	// Prevent concurrent manipulations to the bucketsByNameMap, most notably
	// when initializing buckets in parallel
	bucketAccessLock sync.RWMutex
	bucketsByName    map[string]*Bucket

	logger  logrus.FieldLogger
	metrics *Metrics

	cycleCallbacks *storeCycleCallbacks
	bcreator       BucketCreator
	// Prevent concurrent manipulations to the same Bucket, specially if there is
	// action on the bucket in the meantime.
	bucketsLocks *wsync.KeyLocker

	// closeLock guards closed; read-locked by most operations so Shutdown
	// (which write-locks) cannot interleave with them.
	closeLock sync.RWMutex
	closed    bool
}

// New initializes a new [Store] based on the root dir. If state is present on
// disk, it is loaded, if the folder is empty a new store is initialized in
// there.
func New(dir, rootDir string, logger logrus.FieldLogger, metrics *Metrics,
	shardCompactionCallbacks, shardCompactionAuxCallbacks,
	shardFlushCallbacks cyclemanager.CycleCallbackGroup,
) (*Store, error) {
	s := &Store{
		dir:           dir,
		rootDir:       rootDir,
		bucketsByName: map[string]*Bucket{},
		bucketsLocks:  wsync.NewKeyLocker(),
		bcreator:      NewBucketCreator(),
		logger:        logger,
		metrics:       metrics,
	}
	s.initCycleCallbacks(shardCompactionCallbacks, shardCompactionAuxCallbacks, shardFlushCallbacks)

	// init() only creates the store directory; buckets are registered later
	// via CreateOrLoadBucket / CreateBucket.
	return s, s.init()
}

// Bucket returns the bucket registered under name, or nil if no such bucket
// has been created or loaded yet.
func (s *Store) Bucket(name string) *Bucket {
	s.bucketAccessLock.RLock()
	defer s.bucketAccessLock.RUnlock()

	return s.bucketsByName[name]
}

// UpdateBucketsStatus propagates targetStatus (e.g. READONLY) to every
// registered bucket. Returns ErrAlreadyClosed (wrapped) after Shutdown.
func (s *Store) UpdateBucketsStatus(targetStatus storagestate.Status) error {
	s.closeLock.RLock()
	defer s.closeLock.RUnlock()

	if s.closed {
		return fmt.Errorf("%w: updating buckets state in store %q", ErrAlreadyClosed, s.dir)
	}

	// UpdateBucketsStatus is a write operation on the bucket itself, but from
	// the perspective of our bucket access map this is a read-only operation,
	// hence an RLock()
	s.bucketAccessLock.RLock()
	defer s.bucketAccessLock.RUnlock()

	for _, b := range s.bucketsByName {
		if b != nil {
			b.UpdateStatus(targetStatus)
		}
	}

	return nil
}

// init makes sure the store's folder exists on disk.
func (s *Store) init() error {
	if err := os.MkdirAll(s.dir, 0o700); err != nil {
		return err
	}
	return nil
}

// bucketDir returns the on-disk folder for a bucket of the given name,
// always a direct child of the store's own dir.
func (s *Store) bucketDir(bucketName string) string {
	return path.Join(s.dir, bucketName)
}

// CreateOrLoadBucket registers a bucket with the given name. If state on disk
// exists for this bucket it is loaded, otherwise created. Pass [BucketOptions]
// to configure the strategy of a bucket. The strategy defaults to "replace".
// For example, to load or create a map-type bucket, do:
//
//	ctx := context.Background()
//	err := store.CreateOrLoadBucket(ctx, "my_bucket_name", WithStrategy(StrategyReplace))
//	if err != nil { /* handle error */ }
//
//	// you can now access the bucket using store.Bucket()
//	b := store.Bucket("my_bucket_name")
func (s *Store) CreateOrLoadBucket(ctx context.Context, bucketName string,
	opts ...BucketOption,
) (err error) {
	// A panic during bucket init must not take the process down: recover,
	// report to sentry, and surface it to the caller via the named return.
	defer func() {
		p := recover()
		if p == nil {
			// happy path
			return
		}

		entsentry.Recover(p)

		err = fmt.Errorf("unexpected error loading bucket %q at path %q: %v",
			bucketName, s.rootDir, p)
		// logger is already annotated to identify the store (e.g. collection +
		// shard), we only need to annotate it with the exact path of this
		// bucket.
		s.logger.
			WithFields(logrus.Fields{
				"action":   "lsm_create_or_load_bucket",
				"root_dir": s.rootDir,
				"dir":      s.dir,
				"bucket":   bucketName,
			}).
			WithError(err).Errorf("unexpected error loading shard")
		debug.PrintStack()
	}()

	s.closeLock.RLock()
	defer s.closeLock.RUnlock()

	if s.closed {
		return fmt.Errorf("%w: adding a bucket %q to store %q", ErrAlreadyClosed, bucketName, s.dir)
	}

	// Per-name lock: two concurrent calls for the same bucket serialize,
	// different buckets can still load in parallel.
	s.bucketsLocks.Lock(bucketName)
	defer s.bucketsLocks.Unlock(bucketName)

	if b := s.Bucket(bucketName); b != nil {
		return nil
	}

	// The objects bucket gets the auxiliary compaction callback group when
	// one is configured; all other buckets share the regular group.
	compactionCallbacks := s.cycleCallbacks.compactionCallbacks
	if bucketName == helpers.ObjectsBucketLSM && s.cycleCallbacks.compactionAuxCallbacks != nil {
		compactionCallbacks = s.cycleCallbacks.compactionAuxCallbacks
	}

	// bucket can be concurrently loaded with another buckets but
	// the same bucket will be loaded only once
	b, err := s.bcreator.NewBucket(ctx, s.bucketDir(bucketName), s.rootDir, s.logger, s.metrics,
		compactionCallbacks, s.cycleCallbacks.flushCallbacks, opts...)
	if err != nil {
		return err
	}

	s.setBucket(bucketName, b)

	return nil
}

// setBucket registers b under name, taking the write lock on the bucket map.
func (s *Store) setBucket(name string, b *Bucket) {
	s.bucketAccessLock.Lock()
	defer s.bucketAccessLock.Unlock()

	s.bucketsByName[name] = b
}

// Shutdown marks the store closed and shuts down every bucket in parallel
// (bounded by GOMAXPROCS). Subsequent operations return ErrAlreadyClosed.
func (s *Store) Shutdown(ctx context.Context) error {
	s.closeLock.Lock()
	defer s.closeLock.Unlock()

	if s.closed {
		return fmt.Errorf("%w: closing store %q", ErrAlreadyClosed, s.dir)
	}

	s.closed = true

	s.bucketAccessLock.Lock()
	defer s.bucketAccessLock.Unlock()

	// shutdown must be called on every bucket
	eg := enterrors.NewErrorGroupWrapper(s.logger)
	eg.SetLimit(runtime.GOMAXPROCS(0))

	for name, bucket := range s.bucketsByName {
		name := name
		bucket := bucket

		eg.Go(func() error {
			if err := bucket.Shutdown(ctx); err != nil {
				return errors.Wrapf(err, "shutdown bucket %q of store %q", name, s.dir)
			}
			return nil
		})
	}

	return eg.Wait()
}

// ShutdownBucket shuts down a single bucket and removes it from the map.
// NOTE(review): unlike the other operations it does not check s.closed while
// holding closeLock.RLock — confirm whether shutting down an individual
// bucket on an already-closed store is intended to succeed.
func (s *Store) ShutdownBucket(ctx context.Context, bucketName string) error {
	s.closeLock.RLock()
	defer s.closeLock.RUnlock()

	s.bucketAccessLock.Lock()
	defer s.bucketAccessLock.Unlock()

	bucket, ok := s.bucketsByName[bucketName]
	if !ok {
		return fmt.Errorf("shutdown bucket %q of store %q: bucket not found", bucketName, s.dir)
	}
	if err := bucket.Shutdown(ctx); err != nil {
		return errors.Wrapf(err, "shutdown bucket %q of store %q", bucketName, s.dir)
	}
	delete(s.bucketsByName, bucketName)

	return nil
}

// WriteWALs flushes the write-ahead log of every bucket sequentially,
// aborting on the first failure.
func (s *Store) WriteWALs() error {
	s.closeLock.RLock()
	defer s.closeLock.RUnlock()

	if s.closed {
		return fmt.Errorf("%w: writing wals of store %q", ErrAlreadyClosed, s.dir)
	}

	s.bucketAccessLock.RLock()
	defer s.bucketAccessLock.RUnlock()

	for name, bucket := range s.bucketsByName {
		if err := bucket.WriteWAL(); err != nil {
			return errors.Wrapf(err, "bucket %q", name)
		}
	}

	return nil
}

// bucketJobStatus is used to safely track the status of
// a job applied to each of a store's buckets when run
// in parallel
type bucketJobStatus struct {
	sync.Mutex
	buckets map[*Bucket]error
}

// newBucketJobStatus returns an empty, ready-to-use status tracker.
func newBucketJobStatus() *bucketJobStatus {
	return &bucketJobStatus{
		buckets: make(map[*Bucket]error),
	}
}

// jobFunc is a per-bucket job returning an arbitrary result.
type jobFunc func(context.Context, *Bucket) (interface{}, error)

// rollbackFunc undoes a job's effect on a single bucket after a failure.
type rollbackFunc func(context.Context, *Bucket) error

// ListFiles returns the paths (relative to basePath) of all files belonging
// to this store: migration files plus every bucket's files.
func (s *Store) ListFiles(ctx context.Context, basePath string) ([]string, error) {
	listFiles := func(ctx context.Context, b *Bucket) (interface{}, error) {
		// NOTE(review): this shadows the outer basePath with the *relative*
		// bucket path before passing it on — works, but easy to misread.
		basePath, err := filepath.Rel(basePath, b.GetDir())
		if err != nil {
			return nil, fmt.Errorf("bucket relative path: %w", err)
		}
		return b.ListFiles(ctx, basePath)
	}

	result, err := s.runJobOnBuckets(ctx, listFiles, nil)
	if err != nil {
		return nil, err
	}

	migrationFiles, err := s.listMigrationFiles(basePath)
	if err != nil {
		return nil, err
	}

	files := migrationFiles
	for _, res := range result {
		files = append(files, res.([]string)...)
	}

	return files, nil
}

// listMigrationFiles walks <store dir>/.migrations and returns each regular
// file's path relative to basePath. A missing .migrations folder is treated
// as empty: the walk callback ignores its error argument and returns nil
// when d is nil (the case WalkDir produces for an unreadable/absent root).
func (s *Store) listMigrationFiles(basePath string) ([]string, error) {
	migrationRoot := filepath.Join(s.dir, ".migrations")

	var files []string
	err := filepath.WalkDir(migrationRoot, func(path string, d os.DirEntry, _ error) error {
		if d == nil || d.IsDir() {
			return nil
		}

		relPath, err := filepath.Rel(basePath, path)
		if err != nil {
			return err
		}
		files = append(files, relPath)
		return nil
	})
	if err != nil {
		return nil, errors.Errorf("failed to list files for migrations: %s", err)
	}
	return files, nil
}

// runJobOnBuckets applies a jobFunc to each bucket in the store in parallel.
// The jobFunc allows for the job to return an arbitrary value.
// Additionally, a rollbackFunc may be provided which will be run on the target
// bucket in the event of an unsuccessful job.
+func (s *Store) runJobOnBuckets(ctx context.Context, + jobFunc jobFunc, rollbackFunc rollbackFunc, +) ([]interface{}, error) { + s.bucketAccessLock.Lock() + var ( + status = newBucketJobStatus() + resultQueue = make(chan interface{}, len(s.bucketsByName)) + wg = sync.WaitGroup{} + ) + + for _, bucket := range s.bucketsByName { + wg.Add(1) + b := bucket + f := func() { + status.Lock() + defer status.Unlock() + res, err := jobFunc(ctx, b) + resultQueue <- res + status.buckets[b] = err + wg.Done() + } + enterrors.GoWrapper(f, s.logger) + } + s.bucketAccessLock.Unlock() + wg.Wait() + close(resultQueue) + + var errs errorcompounder.ErrorCompounder + for _, err := range status.buckets { + errs.Add(err) + } + + if errs.Len() != 0 { + // if any of the bucket jobs failed, and a + // rollbackFunc has been provided, attempt + // to roll back. if this fails, the err is + // added to the compounder + for b, jobErr := range status.buckets { + if jobErr != nil && rollbackFunc != nil { + if rollbackErr := rollbackFunc(ctx, b); rollbackErr != nil { + errs.AddWrap(rollbackErr, "bucket job rollback") + } + } + } + + return nil, errs.ToError() + } + + var finalResult []interface{} + for res := range resultQueue { + finalResult = append(finalResult, res) + } + + return finalResult, nil +} + +func (s *Store) GetBucketsByName() map[string]*Bucket { + s.bucketAccessLock.RLock() + defer s.bucketAccessLock.RUnlock() + + newMap := map[string]*Bucket{} + for name, bucket := range s.bucketsByName { + newMap[name] = bucket + } + + return newMap +} + +// Creates bucket, first removing any files if already exist +// Bucket can not be registered in bucketsByName before removal +func (s *Store) CreateBucket(ctx context.Context, bucketName string, + opts ...BucketOption, +) error { + s.closeLock.RLock() + defer s.closeLock.RUnlock() + + if s.closed { + return fmt.Errorf("%w: adding a bucket %q to store %q", ErrAlreadyClosed, bucketName, s.dir) + } + + s.bucketsLocks.Lock(bucketName) + defer 
s.bucketsLocks.Unlock(bucketName) + + if b := s.Bucket(bucketName); b != nil { + return fmt.Errorf("bucket %s exists and is already in use", bucketName) + } + + bucketDir := s.bucketDir(bucketName) + if err := os.RemoveAll(bucketDir); err != nil { + return errors.Wrapf(err, "failed removing bucket %s files", bucketName) + } + + compactionCallbacks := s.cycleCallbacks.compactionCallbacks + if bucketName == helpers.ObjectsBucketLSM && s.cycleCallbacks.compactionAuxCallbacks != nil { + compactionCallbacks = s.cycleCallbacks.compactionAuxCallbacks + } + + b, err := s.bcreator.NewBucket(ctx, bucketDir, s.rootDir, s.logger, s.metrics, + compactionCallbacks, s.cycleCallbacks.flushCallbacks, opts...) + if err != nil { + return err + } + + s.setBucket(bucketName, b) + + return nil +} + +func (s *Store) replaceBucket(ctx context.Context, replacementBucket *Bucket, replacementBucketName string, bucket *Bucket, bucketName string) (string, string, string, string, error) { + replacementBucket.disk.maintenanceLock.Lock() + defer replacementBucket.disk.maintenanceLock.Unlock() + + currBucketDir := bucket.dir + newBucketDir := bucket.dir + "___del" + currReplacementBucketDir := replacementBucket.dir + newReplacementBucketDir := currBucketDir + + if err := bucket.Shutdown(ctx); err != nil { + return "", "", "", "", errors.Wrapf(err, "failed shutting down bucket old '%s'", bucketName) + } + + s.logger.WithField("action", "lsm_replace_bucket"). + WithField("bucket", bucketName). + WithField("replacement_bucket", replacementBucketName). + WithField("dir", s.dir). 
+ Info("replacing bucket") + + replacementBucket.flushLock.Lock() + defer replacementBucket.flushLock.Unlock() + if err := os.Rename(currBucketDir, newBucketDir); err != nil { + return "", "", "", "", errors.Wrapf(err, "failed moving orig bucket dir '%s'", currBucketDir) + } + if err := os.Rename(currReplacementBucketDir, newReplacementBucketDir); err != nil { + return "", "", "", "", errors.Wrapf(err, "failed moving replacement bucket dir '%s'", currReplacementBucketDir) + } + + return currBucketDir, newBucketDir, currReplacementBucketDir, newReplacementBucketDir, nil +} + +// Replaces 1st bucket with 2nd one. Both buckets have to registered in bucketsByName. +// 2nd bucket swaps the 1st one in bucketsByName using 1st one's name, 2nd one's name is deleted. +// Dir path of 2nd bucket is changed to dir of 1st bucket as well as all other related paths of +// bucket resources (segment group, memtables, commit log). +// Dir path of 1st bucket is temporarily suffixed with "___del", later on bucket is shutdown and +// its files deleted. 
+// 2nd bucket becomes 1st bucket +func (s *Store) ReplaceBuckets(ctx context.Context, bucketName, replacementBucketName string) error { + s.closeLock.RLock() + defer s.closeLock.RUnlock() + + if s.closed { + return fmt.Errorf("%w: replacing bucket %q for %q in store %q", ErrAlreadyClosed, bucketName, replacementBucketName, s.dir) + } + + s.bucketAccessLock.Lock() + defer s.bucketAccessLock.Unlock() + + bucket := s.bucketsByName[bucketName] + if bucket == nil { + return fmt.Errorf("bucket '%s' not found", bucketName) + } + + replacementBucket := s.bucketsByName[replacementBucketName] + if replacementBucket == nil { + return fmt.Errorf("replacement bucket '%s' not found", replacementBucketName) + } + s.bucketsByName[bucketName] = replacementBucket + delete(s.bucketsByName, replacementBucketName) + + var currBucketDir, newBucketDir, currReplacementBucketDir, newReplacementBucketDir string + var err error + currBucketDir, newBucketDir, currReplacementBucketDir, newReplacementBucketDir, err = s.replaceBucket(ctx, replacementBucket, replacementBucketName, bucket, bucketName) + if err != nil { + return errors.Wrapf(err, "failed renaming bucket '%s' to '%s'", bucketName, replacementBucketName) + } + + replacementBucket.flushLock.Lock() + defer replacementBucket.flushLock.Unlock() + + if replacementBucket.flushing != nil { + return fmt.Errorf("bucket '%s' can not be renamed before flushing", replacementBucketName) + } + + replacementBucket.dir = newReplacementBucketDir + + err = replacementBucket.setNewActiveMemtable() + if err != nil { + return fmt.Errorf("switch active memtable: %w", err) + } + + s.updateBucketDir(bucket, currBucketDir, newBucketDir) + s.updateBucketDir(replacementBucket, currReplacementBucketDir, newReplacementBucketDir) + + if err := os.RemoveAll(newBucketDir); err != nil { + return errors.Wrapf(err, "failed removing dir '%s'", newBucketDir) + } + + return nil +} + +func (s *Store) RenameBucket(ctx context.Context, bucketName, newBucketName string) 
error { + s.closeLock.RLock() + defer s.closeLock.RUnlock() + + if s.closed { + return fmt.Errorf("%w: renaming bucket %q for %q in store %q", ErrAlreadyClosed, bucketName, newBucketName, s.dir) + } + + s.bucketAccessLock.Lock() + defer s.bucketAccessLock.Unlock() + + currBucket := s.bucketsByName[bucketName] + if currBucket == nil { + return fmt.Errorf("bucket '%s' not found", bucketName) + } + newBucket := s.bucketsByName[newBucketName] + if newBucket != nil { + return fmt.Errorf("bucket '%s' already exists", newBucketName) + } + + if !currBucket.isReadOnly() { + return fmt.Errorf("bucket '%s' must be in %s mode to be renamed", bucketName, storagestate.StatusReadOnly) + } + + currBucketDir := currBucket.dir + newBucketDir := s.bucketDir(newBucketName) + + currBucket.flushLock.Lock() + defer currBucket.flushLock.Unlock() + + if currBucket.flushing != nil { + return fmt.Errorf("bucket '%s' can not be renamed before flushing", bucketName) + } + + currBucket.dir = newBucketDir + + err := currBucket.setNewActiveMemtable() + if err != nil { + return fmt.Errorf("switch active memtable: %w", err) + } + + s.bucketsByName[newBucketName] = currBucket + delete(s.bucketsByName, bucketName) + + if err := os.Rename(currBucketDir, newBucketDir); err != nil { + return errors.Wrapf(err, "failed renaming bucket dir '%s' to '%s'", currBucketDir, newBucketDir) + } + + s.updateBucketDir(currBucket, currBucketDir, newBucketDir) + + return nil +} + +func (s *Store) updateBucketDir(bucket *Bucket, bucketDir, newBucketDir string) { + updatePath := func(src string) string { + return strings.Replace(src, bucketDir, newBucketDir, 1) + } + + segments, release := bucket.disk.getAndLockSegments() + defer release() + + bucket.disk.dir = newBucketDir + for _, segment := range segments { + segment.setPath(updatePath(segment.getPath())) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/store_backup.go 
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
//
// CONTACT: hello@weaviate.io
//

package lsmkv

import (
	"context"

	"github.com/pkg/errors"
)

// PauseCompaction waits for all ongoing compactions to finish,
// then makes sure that no new compaction can be started.
//
// This is a preparatory stage for creating backups.
//
// A timeout should be specified for the input context as some
// compactions are long-running, in which case it may be better
// to fail the backup attempt and retry later, than to block
// indefinitely.
//
// NOTE(review): if deactivating the auxiliary callbacks fails, the primary
// compaction callbacks remain deactivated; callers appear expected to call
// ResumeCompaction (which reactivates both) on error — confirm.
func (s *Store) PauseCompaction(ctx context.Context) error {
	if err := s.cycleCallbacks.compactionCallbacksCtrl.Deactivate(ctx); err != nil {
		return errors.Wrap(err, "long-running compaction in progress")
	}
	if err := s.cycleCallbacks.compactionAuxCallbacksCtrl.Deactivate(ctx); err != nil {
		return errors.Wrap(err, "long-running auxiliary compaction in progress")
	}

	s.bucketAccessLock.RLock()
	defer s.bucketAccessLock.RUnlock()

	// TODO common_cycle_manager maybe not necessary, or to be replaced with store pause stats
	for _, b := range s.bucketsByName {
		b.doStartPauseTimer()
	}

	return nil
}

// ResumeCompaction starts the compaction cycle again after PauseCompaction.
// As implemented it always returns nil (Activate has no error return and ctx
// is unused); the error return is kept for interface symmetry.
func (s *Store) ResumeCompaction(ctx context.Context) error {
	s.cycleCallbacks.compactionAuxCallbacksCtrl.Activate()
	s.cycleCallbacks.compactionCallbacksCtrl.Activate()

	s.bucketAccessLock.RLock()
	defer s.bucketAccessLock.RUnlock()

	// TODO common_cycle_manager maybe not necessary, or to be replaced with store pause stats
	for _, b := range s.bucketsByName {
		b.doStopPauseTimer()
	}

	return nil
}

// FlushMemtables flushes any active memtable and returns only once the
// memtable has been fully flushed and a stable state on disk has been
// reached. Flush callbacks are deactivated for the duration and reactivated
// on return.
//
// This is a preparatory stage for creating backups.
//
// A timeout should be specified for the input context as some
// flushes are long-running, in which case it may be better
// to fail the backup attempt and retry later, than to block
// indefinitely.
func (s *Store) FlushMemtables(ctx context.Context) error {
	if err := s.cycleCallbacks.flushCallbacksCtrl.Deactivate(ctx); err != nil {
		return errors.Wrap(err, "long-running memtable flush in progress")
	}
	defer s.cycleCallbacks.flushCallbacksCtrl.Activate()

	flushMemtable := func(ctx context.Context, b *Bucket) (interface{}, error) {
		return nil, b.FlushMemtable()
	}
	_, err := s.runJobOnBuckets(ctx, flushMemtable, nil)
	return err
}

// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/cyclemanager" + "github.com/weaviate/weaviate/entities/errorcompounder" + "github.com/weaviate/weaviate/entities/storagestate" +) + +func TestStoreBackup(t *testing.T) { + ctx := context.Background() + tests := bucketTests{ + { + name: "pauseCompaction", + f: pauseCompaction, + }, + { + name: "resumeCompaction", + f: resumeCompaction, + }, + { + name: "flushMemtable", + f: flushMemtable, + }, + } + tests.run(ctx, t) +} + +func pauseCompaction(ctx context.Context, t *testing.T, opts []BucketOption) { + logger, _ := test.NewNullLogger() + + t.Run("assert that context timeout works for long compactions", func(t *testing.T) { + for _, buckets := range [][]string{ + {"test_bucket"}, + {"test_bucket1", "test_bucket2"}, + {"test_bucket1", "test_bucket2", "test_bucket3", "test_bucket4", "test_bucket5"}, + } { + t.Run(fmt.Sprintf("with %d buckets", len(buckets)), func(t *testing.T) { + dirName := t.TempDir() + + shardCompactionCallbacks := cyclemanager.NewCallbackGroup("classCompactionNonObjects", logger, 1) + shardCompactionAuxCallbacks := cyclemanager.NewCallbackGroup("classCompactionObjects", logger, 1) + shardFlushCallbacks := cyclemanager.NewCallbackGroupNoop() + + store, err := New(dirName, dirName, logger, nil, + shardCompactionCallbacks, shardCompactionAuxCallbacks, shardFlushCallbacks) + require.Nil(t, err) + + for _, bucket := range buckets { + err = store.CreateOrLoadBucket(ctx, bucket, opts...) 
+ require.Nil(t, err) + } + + expiredCtx, cancel := context.WithDeadline(ctx, time.Now()) + defer cancel() + + err = store.PauseCompaction(expiredCtx) + require.NotNil(t, err) + assert.Equal(t, "long-running compaction in progress:"+ + " deactivating callback 'store/compaction-non-objects/.' of 'classCompactionNonObjects' failed:"+ + " context deadline exceeded", err.Error()) + + err = store.Shutdown(ctx) + require.Nil(t, err) + }) + } + }) + + t.Run("assert compaction is successfully paused", func(t *testing.T) { + for _, buckets := range [][]string{ + {"test_bucket"}, + {"test_bucket1", "test_bucket2"}, + {"test_bucket1", "test_bucket2", "test_bucket3", "test_bucket4", "test_bucket5"}, + } { + t.Run(fmt.Sprintf("with %d buckets", len(buckets)), func(t *testing.T) { + dirName := t.TempDir() + + shardCompactionCallbacks := cyclemanager.NewCallbackGroup("classCompactionNonObjects", logger, 1) + shardCompactionAuxCallbacks := cyclemanager.NewCallbackGroup("classCompactionObjects", logger, 1) + shardFlushCallbacks := cyclemanager.NewCallbackGroupNoop() + + store, err := New(dirName, dirName, logger, nil, + shardCompactionCallbacks, shardCompactionAuxCallbacks, shardFlushCallbacks) + require.Nil(t, err) + + for _, bucket := range buckets { + err = store.CreateOrLoadBucket(ctx, bucket, opts...) 
+ require.Nil(t, err) + + t.Run("insert contents into bucket", func(t *testing.T) { + bucket := store.Bucket(bucket) + for i := 0; i < 10; i++ { + err := bucket.Put([]byte(fmt.Sprint(i)), []byte(fmt.Sprint(i))) + require.Nil(t, err) + } + }) + } + + expirableCtx, cancel := context.WithTimeout(ctx, 3*time.Second) + defer cancel() + + err = store.PauseCompaction(expirableCtx) + assert.Nil(t, err) + + err = store.Shutdown(context.Background()) + require.Nil(t, err) + }) + } + }) +} + +func resumeCompaction(ctx context.Context, t *testing.T, opts []BucketOption) { + logger, _ := test.NewNullLogger() + + t.Run("assert compaction restarts after pausing", func(t *testing.T) { + for _, buckets := range [][]string{ + {"test_bucket"}, + {"test_bucket1", "test_bucket2"}, + {"test_bucket1", "test_bucket2", "test_bucket3", "test_bucket4", "test_bucket5"}, + } { + t.Run(fmt.Sprintf("with %d buckets", len(buckets)), func(t *testing.T) { + dirName := t.TempDir() + + shardCompactionCallbacks := cyclemanager.NewCallbackGroup("classCompactionNonObjects", logger, 1) + shardCompactionAuxCallbacks := cyclemanager.NewCallbackGroup("classCompactionObjects", logger, 1) + shardFlushCallbacks := cyclemanager.NewCallbackGroupNoop() + + store, err := New(dirName, dirName, logger, nil, + shardCompactionCallbacks, shardCompactionAuxCallbacks, shardFlushCallbacks) + require.Nil(t, err) + + for _, bucket := range buckets { + err = store.CreateOrLoadBucket(ctx, bucket, opts...) 
+ require.Nil(t, err) + + t.Run("insert contents into bucket", func(t *testing.T) { + bucket := store.Bucket(bucket) + for i := 0; i < 10; i++ { + err := bucket.Put([]byte(fmt.Sprint(i)), []byte(fmt.Sprint(i))) + require.Nil(t, err) + } + }) + } + + expirableCtx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + + err = store.PauseCompaction(expirableCtx) + require.Nil(t, err) + + err = store.ResumeCompaction(expirableCtx) + require.Nil(t, err) + + assert.True(t, store.cycleCallbacks.compactionCallbacksCtrl.IsActive()) + assert.True(t, store.cycleCallbacks.compactionAuxCallbacksCtrl.IsActive()) + + err = store.Shutdown(ctx) + require.Nil(t, err) + }) + } + }) +} + +func flushMemtable(ctx context.Context, t *testing.T, opts []BucketOption) { + logger, _ := test.NewNullLogger() + + t.Run("assert that context timeout works for long flushes", func(t *testing.T) { + for _, buckets := range [][]string{ + {"test_bucket"}, + {"test_bucket1", "test_bucket2"}, + {"test_bucket1", "test_bucket2", "test_bucket3", "test_bucket4", "test_bucket5"}, + } { + t.Run(fmt.Sprintf("with %d buckets", len(buckets)), func(t *testing.T) { + dirName := t.TempDir() + + shardCompactionCallbacks := cyclemanager.NewCallbackGroupNoop() + shardCompactionAuxCallbacks := cyclemanager.NewCallbackGroupNoop() + shardFlushCallbacks := cyclemanager.NewCallbackGroup("classFlush", logger, 1) + + store, err := New(dirName, dirName, logger, nil, + shardCompactionCallbacks, shardCompactionAuxCallbacks, shardFlushCallbacks) + require.Nil(t, err) + + for _, bucket := range buckets { + err = store.CreateOrLoadBucket(ctx, bucket, opts...) + require.Nil(t, err) + } + + expiredCtx, cancel := context.WithDeadline(ctx, time.Now()) + defer cancel() + + err = store.FlushMemtables(expiredCtx) + require.NotNil(t, err) + assert.Equal(t, "long-running memtable flush in progress:"+ + " deactivating callback 'store/flush/.' 
of 'classFlush' failed:"+ + " context deadline exceeded", err.Error()) + + err = store.Shutdown(ctx) + require.Nil(t, err) + }) + } + }) + + t.Run("assert that flushes run successfully", func(t *testing.T) { + for _, buckets := range [][]string{ + {"test_bucket"}, + {"test_bucket1", "test_bucket2"}, + {"test_bucket1", "test_bucket2", "test_bucket3", "test_bucket4", "test_bucket5"}, + } { + t.Run(fmt.Sprintf("with %d buckets", len(buckets)), func(t *testing.T) { + dirName := t.TempDir() + + shardCompactionCallbacks := cyclemanager.NewCallbackGroupNoop() + shardCompactionAuxCallbacks := cyclemanager.NewCallbackGroupNoop() + shardFlushCallbacks := cyclemanager.NewCallbackGroup("classFlush", logger, 1) + + store, err := New(dirName, dirName, logger, nil, + shardCompactionCallbacks, shardCompactionAuxCallbacks, shardFlushCallbacks) + require.Nil(t, err) + + err = store.CreateOrLoadBucket(ctx, "test_bucket", opts...) + require.Nil(t, err) + + for _, bucket := range buckets { + err = store.CreateOrLoadBucket(ctx, bucket, opts...) 
+ require.Nil(t, err) + + t.Run("insert contents into bucket", func(t *testing.T) { + bucket := store.Bucket(bucket) + for i := 0; i < 10; i++ { + err := bucket.Put([]byte(fmt.Sprint(i)), []byte(fmt.Sprint(i))) + require.Nil(t, err) + } + }) + } + + expirableCtx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + + err = store.FlushMemtables(expirableCtx) + assert.Nil(t, err) + + err = store.Shutdown(ctx) + require.Nil(t, err) + }) + } + }) + + t.Run("assert that readonly bucket fails to flush", func(t *testing.T) { + singleErr := errors.Wrap(storagestate.ErrStatusReadOnly, "flush memtable") + expectedErr := func(bucketsCount int) error { + ec := errorcompounder.New() + for i := 0; i < bucketsCount; i++ { + ec.Add(singleErr) + } + return ec.ToError() + } + + for _, buckets := range [][]string{ + {"test_bucket"}, + {"test_bucket1", "test_bucket2"}, + {"test_bucket1", "test_bucket2", "test_bucket3", "test_bucket4", "test_bucket5"}, + } { + t.Run(fmt.Sprintf("with %d buckets", len(buckets)), func(t *testing.T) { + dirName := t.TempDir() + + shardCompactionCallbacks := cyclemanager.NewCallbackGroupNoop() + shardCompactionAuxCallbacks := cyclemanager.NewCallbackGroupNoop() + shardFlushCallbacks := cyclemanager.NewCallbackGroup("classFlush", logger, 1) + + store, err := New(dirName, dirName, logger, nil, + shardCompactionCallbacks, shardCompactionAuxCallbacks, shardFlushCallbacks) + require.Nil(t, err) + + for _, bucket := range buckets { + err = store.CreateOrLoadBucket(ctx, bucket, opts...) 
+ require.Nil(t, err) + } + + err = store.UpdateBucketsStatus(storagestate.StatusReadOnly) + require.NoError(t, err) + + expirableCtx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + + err = store.FlushMemtables(expirableCtx) + require.NotNil(t, err) + assert.EqualError(t, expectedErr(len(buckets)), err.Error()) + + err = store.Shutdown(ctx) + require.Nil(t, err) + }) + } + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/store_cyclecallbacks.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/store_cyclecallbacks.go new file mode 100644 index 0000000000000000000000000000000000000000..f03f9dcfaba4b49cdf4479ed85881290d8aff5d4 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/store_cyclecallbacks.go @@ -0,0 +1,80 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "path/filepath" + "strings" + + "github.com/weaviate/weaviate/entities/cyclemanager" +) + +type storeCycleCallbacks struct { + compactionCallbacks cyclemanager.CycleCallbackGroup + compactionCallbacksCtrl cyclemanager.CycleCallbackCtrl + compactionAuxCallbacks cyclemanager.CycleCallbackGroup + compactionAuxCallbacksCtrl cyclemanager.CycleCallbackCtrl + + flushCallbacks cyclemanager.CycleCallbackGroup + flushCallbacksCtrl cyclemanager.CycleCallbackCtrl +} + +func (s *Store) initCycleCallbacks(shardCompactionCallbacks, shardCompactionAuxCallbacks, + shardFlushCallbacks cyclemanager.CycleCallbackGroup, +) { + id := func(elems ...string) string { + path, err := filepath.Rel(s.dir, s.rootDir) + if err != nil { + path = s.dir + } + elems = append([]string{"store"}, elems...) 
+ elems = append(elems, path) + return strings.Join(elems, "/") + } + + var compactionCallbacks cyclemanager.CycleCallbackGroup + var compactionCallbacksCtrl cyclemanager.CycleCallbackCtrl + var compactionAuxCallbacks cyclemanager.CycleCallbackGroup + var compactionAuxCallbacksCtrl cyclemanager.CycleCallbackCtrl + + if shardCompactionAuxCallbacks == nil { + compactionId := id("compaction") + compactionCallbacks = cyclemanager.NewCallbackGroup(compactionId, s.logger, 1) + compactionCallbacksCtrl = shardCompactionCallbacks.Register( + compactionId, compactionCallbacks.CycleCallback) + compactionAuxCallbacksCtrl = cyclemanager.NewCallbackCtrlNoop() + } else { + compactionId := id("compaction-non-objects") + compactionCallbacks = cyclemanager.NewCallbackGroup(compactionId, s.logger, 1) + compactionCallbacksCtrl = shardCompactionCallbacks.Register( + compactionId, compactionCallbacks.CycleCallback) + compactionAuxId := id("compaction-objects") + compactionAuxCallbacks = cyclemanager.NewCallbackGroup(compactionAuxId, s.logger, 1) + compactionAuxCallbacksCtrl = shardCompactionAuxCallbacks.Register( + compactionAuxId, compactionAuxCallbacks.CycleCallback) + } + + flushId := id("flush") + flushCallbacks := cyclemanager.NewCallbackGroup(flushId, s.logger, 1) + flushCallbacksCtrl := shardFlushCallbacks.Register( + flushId, flushCallbacks.CycleCallback) + + s.cycleCallbacks = &storeCycleCallbacks{ + compactionCallbacks: compactionCallbacks, + compactionCallbacksCtrl: compactionCallbacksCtrl, + compactionAuxCallbacks: compactionAuxCallbacks, + compactionAuxCallbacksCtrl: compactionAuxCallbacksCtrl, + + flushCallbacks: flushCallbacks, + flushCallbacksCtrl: flushCallbacksCtrl, + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/store_integration_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/store_integration_test.go new file mode 100644 index 0000000000000000000000000000000000000000..c3eeb08bdd76cb61760adc058a1cd6ad9a4e8f14 
--- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/store_integration_test.go @@ -0,0 +1,103 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest +// +build integrationTest + +package lsmkv + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/cyclemanager" +) + +func TestStoreLifecycle(t *testing.T) { + ctx := testCtx() + tests := bucketIntegrationTests{ + { + name: "testStoreLifecycle", + f: testStoreLifecycle, + opts: []BucketOption{ + WithStrategy(StrategyReplace), + }, + }, + } + tests.run(ctx, t) +} + +func testStoreLifecycle(ctx context.Context, t *testing.T, opts []BucketOption) { + dirName := t.TempDir() + logger := nullLogger() + + t.Run("cycle 1", func(t *testing.T) { + store, err := New(dirName, dirName, logger, nil, + cyclemanager.NewCallbackGroupNoop(), + cyclemanager.NewCallbackGroupNoop(), + cyclemanager.NewCallbackGroupNoop()) + require.Nil(t, err) + + err = store.CreateOrLoadBucket(testCtx(), "bucket1", opts...) + require.Nil(t, err) + + b1 := store.Bucket("bucket1") + require.NotNil(t, b1) + + err = b1.Put([]byte("name"), []byte("Jane Doe")) + require.Nil(t, err) + + err = store.CreateOrLoadBucket(testCtx(), "bucket2", opts...) 
+ require.Nil(t, err) + + b2 := store.Bucket("bucket2") + require.NotNil(t, b2) + + err = b2.Put([]byte("foo"), []byte("bar")) + require.Nil(t, err) + + err = store.Shutdown(context.Background()) + require.Nil(t, err) + }) + + t.Run("cycle 2", func(t *testing.T) { + store, err := New(dirName, dirName, logger, nil, + cyclemanager.NewCallbackGroupNoop(), + cyclemanager.NewCallbackGroupNoop(), + cyclemanager.NewCallbackGroupNoop()) + require.Nil(t, err) + + err = store.CreateOrLoadBucket(testCtx(), "bucket1", opts...) + require.Nil(t, err) + + b1 := store.Bucket("bucket1") + require.NotNil(t, b1) + + err = store.CreateOrLoadBucket(testCtx(), "bucket2", opts...) + require.Nil(t, err) + + b2 := store.Bucket("bucket2") + require.NotNil(t, b2) + + res, err := b1.Get([]byte("name")) + require.Nil(t, err) + assert.Equal(t, []byte("Jane Doe"), res) + + res, err = b2.Get([]byte("foo")) + require.Nil(t, err) + assert.Equal(t, []byte("bar"), res) + + err = store.Shutdown(context.Background()) + require.Nil(t, err) + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/store_reindex.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/store_reindex.go new file mode 100644 index 0000000000000000000000000000000000000000..fd59d1cb2598d4657c0d1c030bd965eafb648814 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/store_reindex.go @@ -0,0 +1,52 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "context" + "fmt" + + "github.com/weaviate/weaviate/adapters/repos/db/helpers" +) + +// PauseObjectBucketCompaction pauses the compaction cycle for the objects bucket. 
+// This is so that the BMW migration can run without interference from the +// compaction process, as they both use the same locks. +func (s *Store) PauseObjectBucketCompaction(ctx context.Context) error { + s.bucketAccessLock.RLock() + defer s.bucketAccessLock.RUnlock() + + b := s.Bucket(helpers.ObjectsBucketLSM) + + b.disk.compactionCallbackCtrl.Deactivate(ctx) + b.doStartPauseTimer() + return nil +} + +// ResumeObjectBucketCompaction resumes the compaction cycle for the objects bucket. +func (s *Store) ResumeObjectBucketCompaction(ctx context.Context) error { + s.bucketAccessLock.RLock() + defer s.bucketAccessLock.RUnlock() + + b := s.Bucket(helpers.ObjectsBucketLSM) + if b == nil { + return fmt.Errorf("no bucket named 'objects' found in store %s", s.dir) + } + + if err := b.disk.compactionCallbackCtrl.Activate(); err != nil { + return err + } + + b.doStopPauseTimer() + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/store_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/store_test.go new file mode 100644 index 0000000000000000000000000000000000000000..8db159233d15a0c4e3788f6209b21e9b8415a365 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/store_test.go @@ -0,0 +1,112 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "context" + "os" + "sync" + "testing" + + "github.com/sirupsen/logrus/hooks/test" + mock "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/cyclemanager" +) + +func TestCreateOrLoadBucketConcurrency(t *testing.T) { + t.Parallel() + + dirName := t.TempDir() + defer os.RemoveAll(dirName) + logger, _ := test.NewNullLogger() + + store, err := New(dirName, dirName, logger, nil, + cyclemanager.NewCallbackGroup("classCompactionObjects", logger, 1), + cyclemanager.NewCallbackGroup("classCompactionNonObjects", logger, 1), + cyclemanager.NewCallbackGroupNoop()) + require.Nil(t, err) + + mockBucketCreator := NewMockBucketCreator(t) + mockBucketCreator.On("NewBucket", + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + ).Return(&Bucket{}, nil) + store.bcreator = mockBucketCreator + + defer func() { + // this test create in total 2 new bucket so NewBucket + // shall be called only twice and the other go routine shall get it + // from memory + mockBucketCreator.AssertNumberOfCalls(t, "NewBucket", 2) + mockBucketCreator.AssertExpectations(t) + }() + tcs := []string{"bucket1", "bucket1", "bucket1", "bucket2"} + wg := sync.WaitGroup{} + ctx := context.Background() + wg.Add(len(tcs)) + + for _, bucket := range tcs { + go func(bucket string) { + defer wg.Done() + require.Nil(t, store.CreateOrLoadBucket(ctx, bucket)) + }(bucket) + } + wg.Wait() +} + +func TestCreateBucketConcurrency(t *testing.T) { + t.Parallel() + + dirName := t.TempDir() + defer os.RemoveAll(dirName) + logger, _ := test.NewNullLogger() + + store, err := New(dirName, dirName, logger, nil, + cyclemanager.NewCallbackGroup("classCompactionObjects", logger, 1), + cyclemanager.NewCallbackGroup("classCompactionNonObjects", logger, 1), + cyclemanager.NewCallbackGroupNoop()) + require.Nil(t, err) + + mockBucketCreator := 
NewMockBucketCreator(t) + mockBucketCreator.On("NewBucket", + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + ).Return(&Bucket{}, nil) + store.bcreator = mockBucketCreator + + tcs := []string{"bucket1", "bucket1", "bucket1"} + wg := sync.WaitGroup{} + ctx := context.Background() + wg.Add(len(tcs)) + + for _, tc := range tcs { + tc := tc + go func() { + defer wg.Done() + store.CreateBucket(ctx, tc) + }() + } + wg.Wait() + mockBucketCreator.AssertNumberOfCalls(t, "NewBucket", 1) + mockBucketCreator.AssertExpectations(t) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/strategies.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/strategies.go new file mode 100644 index 0000000000000000000000000000000000000000..2b4fe832b10c29408093c2ba60626a99b99c81c5 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/strategies.go @@ -0,0 +1,98 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "fmt" + + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv/segmentindex" +) + +const ( + // StrategyReplace allows for idem-potent PUT where the latest takes presence + StrategyReplace = "replace" + StrategySetCollection = "setcollection" + StrategyMapCollection = "mapcollection" + StrategyRoaringSet = "roaringset" + StrategyRoaringSetRange = "roaringsetrange" + StrategyInverted = "inverted" +) + +func SegmentStrategyFromString(in string) segmentindex.Strategy { + switch in { + case StrategyReplace: + return segmentindex.StrategyReplace + case StrategySetCollection: + return segmentindex.StrategySetCollection + case StrategyMapCollection: + return segmentindex.StrategyMapCollection + case StrategyRoaringSet: + return segmentindex.StrategyRoaringSet + case StrategyRoaringSetRange: + return segmentindex.StrategyRoaringSetRange + case StrategyInverted: + return segmentindex.StrategyInverted + default: + panic("unsupported strategy") + } +} + +func IsExpectedStrategy(strategy string, expectedStrategies ...string) bool { + if len(expectedStrategies) == 0 { + expectedStrategies = []string{ + StrategyReplace, + StrategySetCollection, + StrategyMapCollection, + StrategyRoaringSet, + StrategyRoaringSetRange, + StrategyInverted, + } + } + + for _, s := range expectedStrategies { + if s == strategy { + return true + } + } + return false +} + +func CheckExpectedStrategy(strategy string, expectedStrategies ...string) error { + if IsExpectedStrategy(strategy, expectedStrategies...) 
{ + return nil + } + if len(expectedStrategies) == 1 { + return fmt.Errorf("strategy %q expected, got %q", expectedStrategies[0], strategy) + } + return fmt.Errorf("one of strategies %v expected, got %q", expectedStrategies, strategy) +} + +func MustBeExpectedStrategy(strategy string, expectedStrategies ...string) { + if err := CheckExpectedStrategy(strategy, expectedStrategies...); err != nil { + panic(err) + } +} + +func CheckStrategyRoaringSet(strategy string) error { + return CheckExpectedStrategy(strategy, StrategyRoaringSet) +} + +func CheckStrategyRoaringSetRange(strategy string) error { + return CheckExpectedStrategy(strategy, StrategyRoaringSetRange) +} + +func DefaultSearchableStrategy(useInvertedSearchable bool) string { + if useInvertedSearchable { + return StrategyInverted + } + return StrategyMapCollection +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/strategies_map.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/strategies_map.go new file mode 100644 index 0000000000000000000000000000000000000000..fe468f325d1592647da8f1d4e3616d2a2a6f5d04 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/strategies_map.go @@ -0,0 +1,477 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "bytes" + "encoding/binary" + "math" + + "github.com/weaviate/weaviate/usecases/byteops" + + "github.com/pkg/errors" +) + +type mapDecoder struct{} + +func newMapDecoder() *mapDecoder { + return &mapDecoder{} +} + +func (m *mapDecoder) Do(in []value, acceptDuplicates bool) ([]MapPair, error) { + // if acceptDuplicates { + // return m.doSimplified(in) + // } + + seenKeys := map[string]uint{} + kvs := make([]MapPair, len(in)) + + // unmarshalling := time.Duration(0) + + // beforeFirst := time.Now() + for i, pair := range in { + kv := MapPair{} + // beforeUnmarshal := time.Now() + err := kv.FromBytes(pair.value, pair.tombstone) + if err != nil { + return nil, err + } + // unmarshalling += time.Since(beforeUnmarshal) + kv.Tombstone = pair.tombstone + kvs[i] = kv + count := seenKeys[string(kv.Key)] + seenKeys[string(kv.Key)] = count + 1 + } + // fmt.Printf("first decoder loop took %s\n", time.Since(beforeFirst)) + // fmt.Printf("unmarshalling in first loop took %s\n", unmarshalling) + + // beforeSecond := time.Now() + out := make([]MapPair, len(in)) + i := 0 + for _, pair := range kvs { + count := seenKeys[string(pair.Key)] + if count != 1 { + seenKeys[string(pair.Key)] = count - 1 + continue + + } + + if pair.Tombstone { + continue + } + + out[i] = pair + i++ + } + // fmt.Printf("second decoder loop took %s\n", time.Since(beforeSecond)) + + return out[:i], nil +} + +type tombstone struct { + pos int + key []byte +} + +func (m *mapDecoder) doSimplified(in []value) ([]MapPair, error) { + out := make([]MapPair, len(in)) + + var tombstones []tombstone + + i := 0 + for _, raw := range in { + if raw.tombstone { + mp := MapPair{} + mp.FromBytes(raw.value, true) + tombstones = append(tombstones, tombstone{pos: i, key: mp.Key}) + continue + } + + out[i].FromBytes(raw.value, raw.tombstone) + i++ + } + + out = out[:i] + + if len(tombstones) > 0 { + out = m.removeTombstonesFromResults(out, tombstones) + } + + 
return out, nil +} + +func (m *mapDecoder) removeTombstonesFromResults(candidates []MapPair, + tombstones []tombstone, +) []MapPair { + after := make([]MapPair, len(candidates)) + newPos := 0 + for origPos, candidate := range candidates { + + skip := false + for _, tombstone := range tombstones { + if tombstone.pos > origPos && bytes.Equal(tombstone.key, candidate.Key) { + skip = true + } + } + + if skip { + continue + } + + after[newPos] = candidate + newPos++ + } + + return after[:newPos] +} + +// DoPartial keeps "unused" tombstones +func (m *mapDecoder) DoPartial(in []value) ([]MapPair, error) { + seenKeys := map[string]uint{} + kvs := make([]MapPair, len(in)) + + for i, pair := range in { + kv := MapPair{} + err := kv.FromBytes(pair.value, pair.tombstone) + if err != nil { + return nil, err + } + kv.Tombstone = pair.tombstone + kvs[i] = kv + count := seenKeys[string(kv.Key)] + seenKeys[string(kv.Key)] = count + 1 + } + + out := make([]MapPair, len(in)) + i := 0 + for _, pair := range kvs { + count := seenKeys[string(pair.Key)] + if count != 1 { + seenKeys[string(pair.Key)] = count - 1 + continue + + } + + out[i] = pair + i++ + } + + return out[:i], nil +} + +type MapPair struct { + Key []byte + Value []byte + Tombstone bool +} + +// Size() returns the exact size in bytes that will be used when Bytes() is +// called +func (kv MapPair) Size() int { + // each field uses a uint16 (2 bytes) length indicator + return 2 + len(kv.Key) + 2 + len(kv.Value) +} + +func (kv MapPair) EncodeBytes(buf []byte) error { + if len(buf) != kv.Size() { + return errors.Errorf("buffer has size %d, but MapPair has size %d", + len(buf), kv.Size()) + } + + // make sure the 2 byte length indicators will never overflow: + if len(kv.Key) >= math.MaxUint16 { + return errors.Errorf("mapCollection key must be smaller than %d", + math.MaxUint16) + } + keyLen := uint16(len(kv.Key)) + + if len(kv.Value) >= math.MaxUint16 { + return errors.Errorf("mapCollection value must be smaller than %d", + 
math.MaxUint16) + } + valueLen := uint16(len(kv.Value)) + + offset := 0 + binary.LittleEndian.PutUint16(buf[offset:offset+2], keyLen) + offset += 2 + copy(buf[offset:], kv.Key) + offset += len(kv.Key) + + binary.LittleEndian.PutUint16(buf[offset:offset+2], valueLen) + offset += 2 + copy(buf[offset:], kv.Value) + + return nil +} + +func (kv MapPair) EncodeBytesInverted(buf []byte) error { + if len(buf) != invPayloadLen { + return errors.Errorf("buffer has size %d, but MapPair has size %d", + len(buf), invPayloadLen) + } + + // make sure the 2 byte length indicators will never overflow: + if len(kv.Key) >= math.MaxUint16 { + return errors.Errorf("mapCollection key must be smaller than %d", + math.MaxUint16) + } + + if len(kv.Value) >= math.MaxUint16 { + return errors.Errorf("mapCollection value must be smaller than %d", + math.MaxUint16) + } + offset := 0 + copy(buf[offset:], kv.Key) + offset += len(kv.Key) + + copy(buf[offset:], kv.Value) + + return nil +} + +func (kv MapPair) Bytes() ([]byte, error) { + // make sure the 2 byte length indicators will never overflow: + if len(kv.Key) >= math.MaxUint16 { + return nil, errors.Errorf("mapCollection key must be smaller than %d", + math.MaxUint16) + } + keyLen := uint16(len(kv.Key)) + + if len(kv.Value) >= math.MaxUint16 { + return nil, errors.Errorf("mapCollection value must be smaller than %d", + math.MaxUint16) + } + valueLen := uint16(len(kv.Value)) + + data := make([]byte, byteops.Uint16Len+keyLen+byteops.Uint16Len+valueLen) + rw := byteops.NewReadWriter(data) + rw.WriteUint16(keyLen) + + if err := rw.CopyBytesToBuffer(kv.Key); err != nil { + return nil, err + } + rw.WriteUint16(valueLen) + if err := rw.CopyBytesToBuffer(kv.Value); err != nil { + return nil, err + } + + return data, nil +} + +func (kv *MapPair) BytesInverted() ([]byte, error) { + if len(kv.Key) != 8 { + return nil, errors.Errorf("inverted mapCollection key must be 8 bytes, got %d", len(kv.Key)) + } + + if len(kv.Value) != 8 { + return nil, 
errors.Errorf("inverted mapCollection value must be 8 bytes, got %d", len(kv.Value)) + } + + out := bytes.NewBuffer(nil) + + if _, err := out.Write(kv.Key); err != nil { + return nil, errors.Wrap(err, "write map key") + } + + if _, err := out.Write(kv.Value); err != nil { + return nil, errors.Wrap(err, "write map value") + } + + return out.Bytes(), nil +} + +func (kv *MapPair) FromBytes(in []byte, keyOnly bool) error { + var read uint16 + + // NOTE: A previous implementation was using copy statements in here to avoid + // sharing the memory. The general idea of that is good (protect against the + // mmaped memory being removed from a completed compaction), however this is + // the wrong place. By the time we are in this method, we can no longer + // control the memory safety of the "in" argument. Thus, such a copy must + // happen at a much earlier scope when a lock is held that protects against + // removing the segment. Such an implementation can now be found in + // segment_collection_strategy.go as part of the *segment.getCollection + // method. As a result all memory used here can now be considered read-only + // and is safe to be used indefinitely. 
+ + keyLen := binary.LittleEndian.Uint16(in[:2]) + read += 2 // uint16 -> 2 bytes + + kv.Key = in[read : read+keyLen] + read += keyLen + + if keyOnly { + return nil + } + + valueLen := binary.LittleEndian.Uint16(in[read : read+2]) + read += 2 + + kv.Value = in[read : read+valueLen] + read += valueLen + + if read != uint16(len(in)) { + return errors.Errorf("inconsistent map pair: read %d out of %d bytes", + read, len(in)) + } + + return nil +} + +func (kv *MapPair) FromBytesReusable(in []byte, keyOnly bool) error { + var read uint16 + + keyLen := binary.LittleEndian.Uint16(in[:2]) + read += 2 // uint16 -> 2 bytes + + if int(keyLen) > cap(kv.Key) { + kv.Key = make([]byte, keyLen) + } else { + kv.Key = kv.Key[:keyLen] + } + copy(kv.Key, in[read:read+keyLen]) + read += keyLen + + if keyOnly { + return nil + } + + valueLen := binary.LittleEndian.Uint16(in[read : read+2]) + read += 2 + + if int(valueLen) > cap(kv.Value) { + kv.Value = make([]byte, valueLen) + } else { + kv.Value = kv.Value[:valueLen] + } + copy(kv.Value, in[read:read+valueLen]) + read += valueLen + + if read != uint16(len(in)) { + return errors.Errorf("inconsistent map pair: read %d out of %d bytes", + read, len(in)) + } + + return nil +} + +func (kv *MapPair) FromBytesInverted(in []byte, keyOnly bool) error { + var read uint16 + + // NOTE: A previous implementation was using copy statements in here to avoid + // sharing the memory. The general idea of that is good (protect against the + // mmaped memory being removed from a completed compaction), however this is + // the wrong place. By the time we are in this method, we can no longer + // control the memory safety of the "in" argument. Thus, such a copy must + // happen at a much earlier scope when a lock is held that protects against + // removing the segment. Such an implementation can now be found in + // segment_collection_strategy.go as part of the *segment.getCollection + // method. 
As a result all memory used here can now be considered read-only + // and is safe to be used indefinitely. + + if len(in) < 8 { + return errors.Errorf("inverted map pair must be at least 8 bytes, got %d", len(in)) + } + + kv.Key = in[read : read+8] + read += 8 + + if keyOnly { + return nil + } + + if len(in) < 16 { + return errors.Errorf("inverted map pair with value must be at least 16 bytes, got %d", len(in)) + } + + kv.Value = in[read : read+8] + return nil +} + +type mapEncoder struct { + pairBuf []value +} + +func newMapEncoder() *mapEncoder { + return &mapEncoder{} +} + +func (m *mapEncoder) Do(kv MapPair) ([]value, error) { + v, err := kv.Bytes() + if err != nil { + return nil, err + } + + out := make([]value, 1) + out[0] = value{ + tombstone: kv.Tombstone, + value: v, + } + + return out, nil +} + +func (m *mapEncoder) DoMulti(kvs []MapPair) ([]value, error) { + out := make([]value, len(kvs)) + + for i, kv := range kvs { + v := make([]byte, kv.Size()) + err := kv.EncodeBytes(v) + if err != nil { + return nil, err + } + + out[i] = value{ + tombstone: kv.Tombstone, + value: v, + } + } + + return out, nil +} + +// DoMultiReusable reuses a MapPair buffer that it exposes to the caller on +// this request. Warning: The caller must make sure that they no longer access +// the return value once they call this method a second time, otherwise they +// risk overwriting a previous result. The intended usage for example in a loop +// where each loop copies the results, for example using a bufio.Writer. 
+func (m *mapEncoder) DoMultiReusable(kvs []MapPair) ([]value, error) { + m.resizeBuffer(len(kvs)) + + for i, kv := range kvs { + m.resizeValueAtBuffer(i, kv.Size()) + err := kv.EncodeBytes(m.pairBuf[i].value) + if err != nil { + return nil, err + } + + m.pairBuf[i].tombstone = kv.Tombstone + } + + return m.pairBuf, nil +} + +func (m *mapEncoder) resizeBuffer(size int) { + if cap(m.pairBuf) >= size { + m.pairBuf = m.pairBuf[:size] + } else { + m.pairBuf = make([]value, size, int(float64(size)*1.25)) + } +} + +func (m *mapEncoder) resizeValueAtBuffer(pos, size int) { + if cap(m.pairBuf[pos].value) >= size { + m.pairBuf[pos].value = m.pairBuf[pos].value[:size] + } else { + m.pairBuf[pos].value = make([]byte, size, int(float64(size)*1.25)) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/strategies_map_benchmark_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/strategies_map_benchmark_test.go new file mode 100644 index 0000000000000000000000000000000000000000..cddb4eb978aa7d2ea0d709f910124b8e885e49b5 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/strategies_map_benchmark_test.go @@ -0,0 +1,162 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "crypto/rand" + "fmt" + randInsecure "math/rand" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func BenchmarkBytes(b *testing.B) { + for _, val := range []int{10, 100, 1000, 10000, 24 * 1024} { + b.Run(fmt.Sprintf("%d", val), func(b *testing.B) { + kv := MapPair{ + Key: []byte("my-key-1"), + Value: make([]byte, val), + } + for i := 0; i < len(kv.Value); i++ { + kv.Value[i] = byte(randInsecure.Intn(100)) + } + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + _, err := kv.Bytes() + require.NoError(b, err) + } + }) + } +} + +func BenchmarkMapDecoderDoPartial_SingleKey(b *testing.B) { + before := []MapPair{{ + Key: []byte("my-key-1"), + Value: []byte("my-value-1"), + }} + + encoded, err := newMapEncoder().DoMulti(before) + require.Nil(b, err) + + md := newMapDecoder() + + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + md.DoPartial(encoded) + } +} + +func BenchmarkMapPairFromBytes(b *testing.B) { + before := MapPair{ + Key: []byte("my-key-1"), + Value: make([]byte, 24*1024), + } + + rand.Read(before.Value) + + encoded, err := before.Bytes() + require.Nil(b, err) + + b.ReportAllocs() + + target := MapPair{} + + for i := 0; i < b.N; i++ { + target.FromBytes(encoded, false) + } +} + +func BenchmarkMapPairFromBytesReusable_Fits(b *testing.B) { + before := MapPair{ + Key: []byte("my-key-1"), + Value: make([]byte, 24*1024), + } + + rand.Read(before.Value) + + encoded, err := before.Bytes() + require.Nil(b, err) + + target := MapPair{ + Key: make([]byte, 8), + Value: make([]byte, 24*1024), + } + + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + err := target.FromBytesReusable(encoded, false) + require.Nil(b, err) + } + + assert.Equal(b, before.Key, target.Key) + assert.Equal(b, before.Value, target.Value) +} + +func BenchmarkMapPairFromBytesReusable_BuffersTooLarge(b *testing.B) { + before := MapPair{ + Key: []byte("my-key-1"), + 
Value: make([]byte, 24*1024), + } + + rand.Read(before.Value) + + encoded, err := before.Bytes() + require.Nil(b, err) + + target := MapPair{ + Key: make([]byte, 100), + Value: make([]byte, 100*1024), + } + + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + err := target.FromBytesReusable(encoded, false) + require.Nil(b, err) + } + + assert.Equal(b, before.Key, target.Key) + assert.Equal(b, before.Value, target.Value) +} + +func BenchmarkMapPairFromBytesReusable_BuffersTooSmall(b *testing.B) { + before := MapPair{ + Key: []byte("my-key-1"), + Value: make([]byte, 24*1024), + } + + rand.Read(before.Value) + + encoded, err := before.Bytes() + require.Nil(b, err) + + target := MapPair{ + Key: make([]byte, 1), + Value: make([]byte, 1*1024), + } + + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + err := target.FromBytesReusable(encoded, false) + require.Nil(b, err) + } + + assert.Equal(b, before.Key, target.Key) + assert.Equal(b, before.Value, target.Value) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/strategies_map_integration_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/strategies_map_integration_test.go new file mode 100644 index 0000000000000000000000000000000000000000..b36a3e6eb8fb774237315155c22a39bc8360297d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/strategies_map_integration_test.go @@ -0,0 +1,1263 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest +// +build integrationTest + +package lsmkv + +import ( + "context" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/cyclemanager" +) + +func TestMapCollectionStrategy(t *testing.T) { + ctx := testCtx() + tests := bucketIntegrationTests{ + { + name: "mapInsertAndAppend", + f: mapInsertAndAppend, + opts: []BucketOption{ + WithStrategy(StrategyMapCollection), + }, + }, + { + name: "mapInsertAndDelete", + f: mapInsertAndDelete, + opts: []BucketOption{ + WithStrategy(StrategyMapCollection), + }, + }, + { + name: "mapCursors", + f: mapCursors, + opts: []BucketOption{ + WithStrategy(StrategyMapCollection), + }, + }, + } + tests.run(ctx, t) +} + +func mapInsertAndAppend(ctx context.Context, t *testing.T, opts []BucketOption) { + dirName := t.TempDir() + + t.Run("memtable-only", func(t *testing.T) { + b, err := NewBucketCreator().NewBucket(ctx, dirName, "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) 
+ require.Nil(t, err) + + defer b.Shutdown(ctx) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + rowKey1 := []byte("test1-key-1") + rowKey2 := []byte("test1-key-2") + + t.Run("set original values and verify", func(t *testing.T) { + row1Map := []MapPair{ + { + Key: []byte("row1-key1"), + Value: []byte("row1-key1-value1"), + }, { + Key: []byte("row1-key2"), + Value: []byte("row1-key2-value1"), + }, + } + + row2Map := []MapPair{ + { + Key: []byte("row2-key1"), + Value: []byte("row2-key1-value1"), + }, { + Key: []byte("row2-key2"), + Value: []byte("row2-key2-value1"), + }, + } + + for _, pair := range row1Map { + err = b.MapSet(rowKey1, pair) + require.Nil(t, err) + } + + for _, pair := range row2Map { + err = b.MapSet(rowKey2, pair) + require.Nil(t, err) + } + + res, err := b.MapList(ctx, rowKey1) + require.Nil(t, err) + assert.Equal(t, row1Map, res) + res, err = b.MapList(ctx, rowKey2) + require.Nil(t, err) + assert.Equal(t, res, row2Map) + }) + + t.Run("replace an existing map key", func(t *testing.T) { + err = b.MapSet(rowKey1, MapPair{ + Key: []byte("row1-key1"), // existing key + Value: []byte("row1-key1-value2"), // updated value + }) + require.Nil(t, err) + + row1Updated := []MapPair{ + { + Key: []byte("row1-key1"), + Value: []byte("row1-key1-value2"), // <--- updated, rest unchanged + }, { + Key: []byte("row1-key2"), + Value: []byte("row1-key2-value1"), + }, + } + + row2Unchanged := []MapPair{ + { + Key: []byte("row2-key1"), + Value: []byte("row2-key1-value1"), + }, { + Key: []byte("row2-key2"), + Value: []byte("row2-key2-value1"), + }, + } + + res, err := b.MapList(ctx, rowKey1) + require.Nil(t, err) + // NOTE: We are accepting that the order is changed here. Given the name + // "MapCollection" there should be no expectations regarding the order, + // but we have yet to validate if this fits with all of the intended use + // cases. 
+ assert.ElementsMatch(t, row1Updated, res) + res, err = b.MapList(ctx, rowKey2) + require.Nil(t, err) + assert.Equal(t, res, row2Unchanged) + }) + }) + + t.Run("with a single flush between updates", func(t *testing.T) { + b, err := NewBucketCreator().NewBucket(ctx, dirName, "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) + require.Nil(t, err) + + defer b.Shutdown(ctx) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + rowKey1 := []byte("test2-key-1") + rowKey2 := []byte("test2-key-2") + + t.Run("set original values and verify", func(t *testing.T) { + row1Map := []MapPair{ + { + Key: []byte("row1-key1"), + Value: []byte("row1-key1-value1"), + }, { + Key: []byte("row1-key2"), + Value: []byte("row1-key2-value1"), + }, + } + + row2Map := []MapPair{ + { + Key: []byte("row2-key1"), + Value: []byte("row2-key1-value1"), + }, { + Key: []byte("row2-key2"), + Value: []byte("row2-key2-value1"), + }, + } + + for _, pair := range row1Map { + err = b.MapSet(rowKey1, pair) + require.Nil(t, err) + } + + for _, pair := range row2Map { + err = b.MapSet(rowKey2, pair) + require.Nil(t, err) + } + + res, err := b.MapList(ctx, rowKey1) + require.Nil(t, err) + assert.Equal(t, row1Map, res) + res, err = b.MapList(ctx, rowKey2) + require.Nil(t, err) + assert.Equal(t, res, row2Map) + }) + + t.Run("flush to disk", func(t *testing.T) { + require.Nil(t, b.FlushAndSwitch()) + }) + + t.Run("replace an existing map key", func(t *testing.T) { + err = b.MapSet(rowKey1, MapPair{ + Key: []byte("row1-key1"), // existing key + Value: []byte("row1-key1-value2"), // updated value + }) + require.Nil(t, err) + + row1Updated := []MapPair{ + { + Key: []byte("row1-key1"), + Value: []byte("row1-key1-value2"), // <--- updated, rest unchanged + }, { + Key: []byte("row1-key2"), + Value: []byte("row1-key2-value1"), + }, + } + + row2Unchanged := []MapPair{ + { + Key: []byte("row2-key1"), + Value: 
[]byte("row2-key1-value1"), + }, { + Key: []byte("row2-key2"), + Value: []byte("row2-key2-value1"), + }, + } + + res, err := b.MapList(ctx, rowKey1) + require.Nil(t, err) + // NOTE: We are accepting that the order is changed here. Given the name + // "MapCollection" there should be no expectations regarding the order, + // but we have yet to validate if this fits with all of the intended use + // cases. + assert.ElementsMatch(t, row1Updated, res) + res, err = b.MapList(ctx, rowKey2) + require.Nil(t, err) + assert.Equal(t, res, row2Unchanged) + }) + }) + + t.Run("with flushes after initial and update", func(t *testing.T) { + b, err := NewBucketCreator().NewBucket(ctx, dirName, "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) + require.Nil(t, err) + + defer b.Shutdown(ctx) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + rowKey1 := []byte("test3-key-1") + rowKey2 := []byte("test3-key-2") + + t.Run("set original values and verify", func(t *testing.T) { + row1Map := []MapPair{ + { + Key: []byte("row1-key1"), + Value: []byte("row1-key1-value1"), + }, { + Key: []byte("row1-key2"), + Value: []byte("row1-key2-value1"), + }, + } + + row2Map := []MapPair{ + { + Key: []byte("row2-key1"), + Value: []byte("row2-key1-value1"), + }, { + Key: []byte("row2-key2"), + Value: []byte("row2-key2-value1"), + }, + } + + for _, pair := range row1Map { + err = b.MapSet(rowKey1, pair) + require.Nil(t, err) + } + + for _, pair := range row2Map { + err = b.MapSet(rowKey2, pair) + require.Nil(t, err) + } + + res, err := b.MapList(ctx, rowKey1) + require.Nil(t, err) + assert.Equal(t, row1Map, res) + res, err = b.MapList(ctx, rowKey2) + require.Nil(t, err) + assert.Equal(t, res, row2Map) + }) + + t.Run("flush to disk", func(t *testing.T) { + require.Nil(t, b.FlushAndSwitch()) + }) + + t.Run("replace an existing map key", func(t *testing.T) { + err = b.MapSet(rowKey1, MapPair{ + Key: 
[]byte("row1-key1"), // existing key + Value: []byte("row1-key1-value2"), // updated value + }) + require.Nil(t, err) + + // Flush again! + require.Nil(t, b.FlushAndSwitch()) + + row1Updated := []MapPair{ + { + Key: []byte("row1-key1"), + Value: []byte("row1-key1-value2"), // <--- updated, rest unchanged + }, { + Key: []byte("row1-key2"), + Value: []byte("row1-key2-value1"), + }, + } + + row2Unchanged := []MapPair{ + { + Key: []byte("row2-key1"), + Value: []byte("row2-key1-value1"), + }, { + Key: []byte("row2-key2"), + Value: []byte("row2-key2-value1"), + }, + } + + res, err := b.MapList(ctx, rowKey1) + require.Nil(t, err) + // NOTE: We are accepting that the order is changed here. Given the name + // "MapCollection" there should be no expectations regarding the order, + // but we have yet to validate if this fits with all of the intended use + // cases. + assert.ElementsMatch(t, row1Updated, res) + res, err = b.MapList(ctx, rowKey2) + require.Nil(t, err) + assert.Equal(t, res, row2Unchanged) + }) + }) + + t.Run("update in memtable, then do an orderly shutdown, and re-init", func(t *testing.T) { + b, err := NewBucketCreator().NewBucket(ctx, dirName, "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) 
+ require.Nil(t, err) + + defer b.Shutdown(ctx) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + rowKey1 := []byte("test4-key-1") + rowKey2 := []byte("test4-key-2") + + t.Run("set original values and verify", func(t *testing.T) { + row1Map := []MapPair{ + { + Key: []byte("row1-key1"), + Value: []byte("row1-key1-value1"), + }, { + Key: []byte("row1-key2"), + Value: []byte("row1-key2-value1"), + }, + } + + row2Map := []MapPair{ + { + Key: []byte("row2-key1"), + Value: []byte("row2-key1-value1"), + }, { + Key: []byte("row2-key2"), + Value: []byte("row2-key2-value1"), + }, + } + + for _, pair := range row1Map { + err = b.MapSet(rowKey1, pair) + require.Nil(t, err) + } + + for _, pair := range row2Map { + err = b.MapSet(rowKey2, pair) + require.Nil(t, err) + } + + res, err := b.MapList(ctx, rowKey1) + require.Nil(t, err) + assert.Equal(t, row1Map, res) + res, err = b.MapList(ctx, rowKey2) + require.Nil(t, err) + assert.Equal(t, res, row2Map) + }) + + t.Run("replace an existing map key", func(t *testing.T) { + err = b.MapSet(rowKey1, MapPair{ + Key: []byte("row1-key1"), // existing key + Value: []byte("row1-key1-value2"), // updated value + }) + require.Nil(t, err) + }) + + t.Run("orderly shutdown", func(t *testing.T) { + b.Shutdown(context.Background()) + }) + + t.Run("init another bucket on the same files", func(t *testing.T) { + b2, err := NewBucketCreator().NewBucket(ctx, dirName, "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) 
+ require.Nil(t, err) + + row1Updated := []MapPair{ + { + Key: []byte("row1-key1"), + Value: []byte("row1-key1-value2"), // <--- updated, rest unchanged + }, { + Key: []byte("row1-key2"), + Value: []byte("row1-key2-value1"), + }, + } + + row2Unchanged := []MapPair{ + { + Key: []byte("row2-key1"), + Value: []byte("row2-key1-value1"), + }, { + Key: []byte("row2-key2"), + Value: []byte("row2-key2-value1"), + }, + } + + res, err := b2.MapList(ctx, rowKey1) + require.Nil(t, err) + // NOTE: We are accepting that the order is changed here. Given the name + // "MapCollection" there should be no expectations regarding the order, + // but we have yet to validate if this fits with all of the intended use + // cases. + assert.ElementsMatch(t, row1Updated, res) + res, err = b2.MapList(ctx, rowKey2) + require.Nil(t, err) + assert.Equal(t, res, row2Unchanged) + }) + }) +} + +func mapInsertAndDelete(ctx context.Context, t *testing.T, opts []BucketOption) { + dirName := t.TempDir() + + t.Run("memtable-only", func(t *testing.T) { + b, err := NewBucketCreator().NewBucket(ctx, dirName, "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) 
+ require.Nil(t, err) + + defer b.Shutdown(ctx) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + rowKey1 := []byte("test1-key-1") + rowKey2 := []byte("test1-key-2") + + t.Run("set original values and verify", func(t *testing.T) { + row1Map := []MapPair{ + { + Key: []byte("row1-key1"), + Value: []byte("row1-key1-value1"), + }, { + Key: []byte("row1-key2"), + Value: []byte("row1-key2-value1"), + }, + } + + row2Map := []MapPair{ + { + Key: []byte("row2-key1"), + Value: []byte("row2-key1-value1"), + }, { + Key: []byte("row2-key2"), + Value: []byte("row2-key2-value1"), + }, + } + + for _, pair := range row1Map { + err = b.MapSet(rowKey1, pair) + require.Nil(t, err) + } + + for _, pair := range row2Map { + err = b.MapSet(rowKey2, pair) + require.Nil(t, err) + } + + res, err := b.MapList(ctx, rowKey1) + require.Nil(t, err) + assert.Equal(t, row1Map, res) + res, err = b.MapList(ctx, rowKey2) + require.Nil(t, err) + assert.Equal(t, res, row2Map) + }) + + t.Run("delete some keys, re-add one of them", func(t *testing.T) { + err := b.MapDeleteKey(rowKey1, []byte("row1-key1")) + require.Nil(t, err) + err = b.MapDeleteKey(rowKey2, []byte("row2-key2")) + require.Nil(t, err) + err = b.MapSet(rowKey2, MapPair{ + Key: []byte("row2-key2"), + Value: []byte("row2-key2-reinserted"), + }) + require.Nil(t, err) + }) + + t.Run("validate the results", func(t *testing.T) { + row1Updated := []MapPair{ + // key 1 was deleted + { + Key: []byte("row1-key2"), + Value: []byte("row1-key2-value1"), + }, + } + + row2Updated := []MapPair{ + { + Key: []byte("row2-key1"), + Value: []byte("row2-key1-value1"), + }, { + Key: []byte("row2-key2"), + Value: []byte("row2-key2-reinserted"), + }, + } + + // NOTE: We are accepting that the order is changed here. Given the name + // "MapCollection" there should be no expectations regarding the order, + // but we have yet to validate if this fits with all of the intended use + // cases. 
+ res, err := b.MapList(ctx, rowKey1) + require.Nil(t, err) + assert.ElementsMatch(t, row1Updated, res) + res, err = b.MapList(ctx, rowKey2) + require.Nil(t, err) + assert.ElementsMatch(t, row2Updated, res) + }) + }) + + t.Run("with flushes between updates", func(t *testing.T) { + b, err := NewBucketCreator().NewBucket(ctx, dirName, "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) + require.Nil(t, err) + + defer b.Shutdown(ctx) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + rowKey1 := []byte("test1-key-1") + rowKey2 := []byte("test1-key-2") + + t.Run("set original values and verify", func(t *testing.T) { + row1Map := []MapPair{ + { + Key: []byte("row1-key1"), + Value: []byte("row1-key1-value1"), + }, { + Key: []byte("row1-key2"), + Value: []byte("row1-key2-value1"), + }, + } + + row2Map := []MapPair{ + { + Key: []byte("row2-key1"), + Value: []byte("row2-key1-value1"), + }, { + Key: []byte("row2-key2"), + Value: []byte("row2-key2-value1"), + }, + } + + for _, pair := range row1Map { + err = b.MapSet(rowKey1, pair) + require.Nil(t, err) + } + + for _, pair := range row2Map { + err = b.MapSet(rowKey2, pair) + require.Nil(t, err) + } + + res, err := b.MapList(ctx, rowKey1) + require.Nil(t, err) + assert.Equal(t, row1Map, res) + res, err = b.MapList(ctx, rowKey2) + require.Nil(t, err) + assert.Equal(t, res, row2Map) + }) + + t.Run("flush to disk", func(t *testing.T) { + require.Nil(t, b.FlushAndSwitch()) + }) + + t.Run("delete some keys, re-add one of them", func(t *testing.T) { + err := b.MapDeleteKey(rowKey1, []byte("row1-key1")) + require.Nil(t, err) + err = b.MapDeleteKey(rowKey2, []byte("row2-key2")) + require.Nil(t, err) + err = b.MapSet(rowKey2, MapPair{ + Key: []byte("row2-key2"), + Value: []byte("row2-key2-reinserted"), + }) + require.Nil(t, err) + }) + + t.Run("flush to disk", func(t *testing.T) { + require.Nil(t, b.FlushAndSwitch()) + }) + + 
t.Run("validate the results", func(t *testing.T) { + row1Updated := []MapPair{ + // key 1 was deleted + { + Key: []byte("row1-key2"), + Value: []byte("row1-key2-value1"), + }, + } + + row2Updated := []MapPair{ + { + Key: []byte("row2-key1"), + Value: []byte("row2-key1-value1"), + }, { + Key: []byte("row2-key2"), + Value: []byte("row2-key2-reinserted"), + }, + } + + // NOTE: We are accepting that the order is changed here. Given the name + // "MapCollection" there should be no expectations regarding the order, + // but we have yet to validate if this fits with all of the intended use + // cases. + res, err := b.MapList(ctx, rowKey1) + require.Nil(t, err) + assert.ElementsMatch(t, row1Updated, res) + res, err = b.MapList(ctx, rowKey2) + require.Nil(t, err) + assert.ElementsMatch(t, row2Updated, res) + }) + }) + + t.Run("with memtable only, then an orderly shutdown and restart", func(t *testing.T) { + b, err := NewBucketCreator().NewBucket(ctx, dirName, "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) 
+ require.Nil(t, err) + + defer b.Shutdown(ctx) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + rowKey1 := []byte("test1-key-1") + rowKey2 := []byte("test1-key-2") + + t.Run("set original values and verify", func(t *testing.T) { + row1Map := []MapPair{ + { + Key: []byte("row1-key1"), + Value: []byte("row1-key1-value1"), + }, { + Key: []byte("row1-key2"), + Value: []byte("row1-key2-value1"), + }, + } + + row2Map := []MapPair{ + { + Key: []byte("row2-key1"), + Value: []byte("row2-key1-value1"), + }, { + Key: []byte("row2-key2"), + Value: []byte("row2-key2-value1"), + }, + } + + for _, pair := range row1Map { + err = b.MapSet(rowKey1, pair) + require.Nil(t, err) + } + + for _, pair := range row2Map { + err = b.MapSet(rowKey2, pair) + require.Nil(t, err) + } + + res, err := b.MapList(ctx, rowKey1) + require.Nil(t, err) + assert.Equal(t, row1Map, res) + res, err = b.MapList(ctx, rowKey2) + require.Nil(t, err) + assert.Equal(t, res, row2Map) + }) + + t.Run("delete some keys, re-add one of them", func(t *testing.T) { + err := b.MapDeleteKey(rowKey1, []byte("row1-key1")) + require.Nil(t, err) + err = b.MapDeleteKey(rowKey2, []byte("row2-key2")) + require.Nil(t, err) + err = b.MapSet(rowKey2, MapPair{ + Key: []byte("row2-key2"), + Value: []byte("row2-key2-reinserted"), + }) + require.Nil(t, err) + }) + + t.Run("orderly shutdown", func(t *testing.T) { + b.Shutdown(context.Background()) + }) + + t.Run("init another bucket on the same files", func(t *testing.T) { + b2, err := NewBucketCreator().NewBucket(ctx, dirName, "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) 
+ require.Nil(t, err) + + row1Updated := []MapPair{ + // key 1 was deleted + { + Key: []byte("row1-key2"), + Value: []byte("row1-key2-value1"), + }, + } + + row2Updated := []MapPair{ + { + Key: []byte("row2-key1"), + Value: []byte("row2-key1-value1"), + }, { + Key: []byte("row2-key2"), + Value: []byte("row2-key2-reinserted"), + }, + } + + // NOTE: We are accepting that the order is changed here. Given the name + // "MapCollection" there should be no expectations regarding the order, + // but we have yet to validate if this fits with all of the intended use + // cases. + res, err := b2.MapList(ctx, rowKey1) + require.Nil(t, err) + assert.ElementsMatch(t, row1Updated, res) + res, err = b2.MapList(ctx, rowKey2) + require.Nil(t, err) + assert.ElementsMatch(t, row2Updated, res) + }) + }) +} + +func mapCursors(ctx context.Context, t *testing.T, opts []BucketOption) { + t.Run("memtable-only", func(t *testing.T) { + r := getRandomSeed() + dirName := t.TempDir() + + b, err := NewBucketCreator().NewBucket(ctx, dirName, "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) 
+ require.Nil(t, err) + + defer b.Shutdown(ctx) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + t.Run("set original values", func(t *testing.T) { + pairs := 20 + valuesPerPair := 3 + keys := make([][]byte, pairs) + values := make([][]MapPair, pairs) + + for i := range keys { + keys[i] = []byte(fmt.Sprintf("row-%03d", i)) + values[i] = make([]MapPair, valuesPerPair) + for j := range values[i] { + values[i][j] = MapPair{ + Key: []byte(fmt.Sprintf("row-%03d-key-%d", i, j)), + Value: []byte(fmt.Sprintf("row-%03d-value-%d", i, j)), + } + } + } + + // shuffle to make sure the BST isn't accidentally in order + r.Shuffle(len(keys), func(i, j int) { + keys[i], keys[j] = keys[j], keys[i] + values[i], values[j] = values[j], values[i] + }) + + for i := range keys { + mapPairs := values[i] + for j := range mapPairs { + err = b.MapSet(keys[i], mapPairs[j]) + require.Nil(t, err) + } + } + }) + + t.Run("seek from somewhere in the middle", func(t *testing.T) { + expectedKeys := [][]byte{ + []byte("row-016"), + []byte("row-017"), + []byte("row-018"), + []byte("row-019"), + } + expectedValues := [][]MapPair{ + { + {Key: []byte("row-016-key-0"), Value: []byte("row-016-value-0")}, + {Key: []byte("row-016-key-1"), Value: []byte("row-016-value-1")}, + {Key: []byte("row-016-key-2"), Value: []byte("row-016-value-2")}, + }, + { + {Key: []byte("row-017-key-0"), Value: []byte("row-017-value-0")}, + {Key: []byte("row-017-key-1"), Value: []byte("row-017-value-1")}, + {Key: []byte("row-017-key-2"), Value: []byte("row-017-value-2")}, + }, + { + {Key: []byte("row-018-key-0"), Value: []byte("row-018-value-0")}, + {Key: []byte("row-018-key-1"), Value: []byte("row-018-value-1")}, + {Key: []byte("row-018-key-2"), Value: []byte("row-018-value-2")}, + }, + { + {Key: []byte("row-019-key-0"), Value: []byte("row-019-value-0")}, + {Key: []byte("row-019-key-1"), Value: []byte("row-019-value-1")}, + {Key: []byte("row-019-key-2"), Value: 
[]byte("row-019-value-2")}, + }, + } + + var retrievedKeys [][]byte + var retrievedValues [][]MapPair + c := b.MapCursor() + defer c.Close() + for k, v := c.Seek(ctx, []byte("row-016")); k != nil; k, v = c.Next(ctx) { + retrievedKeys = append(retrievedKeys, k) + retrievedValues = append(retrievedValues, v) + } + + assert.Equal(t, expectedKeys, retrievedKeys) + + require.Equal(t, len(expectedValues), len(retrievedValues)) + for i := range expectedValues { + assert.ElementsMatch(t, expectedValues[i], retrievedValues[i]) + } + }) + + t.Run("start from beginning", func(t *testing.T) { + expectedKeys := [][]byte{ + []byte("row-000"), + []byte("row-001"), + []byte("row-002"), + } + expectedValues := [][]MapPair{ + { + {Key: []byte("row-000-key-0"), Value: []byte("row-000-value-0")}, + {Key: []byte("row-000-key-1"), Value: []byte("row-000-value-1")}, + {Key: []byte("row-000-key-2"), Value: []byte("row-000-value-2")}, + }, + { + {Key: []byte("row-001-key-0"), Value: []byte("row-001-value-0")}, + {Key: []byte("row-001-key-1"), Value: []byte("row-001-value-1")}, + {Key: []byte("row-001-key-2"), Value: []byte("row-001-value-2")}, + }, + { + {Key: []byte("row-002-key-0"), Value: []byte("row-002-value-0")}, + {Key: []byte("row-002-key-1"), Value: []byte("row-002-value-1")}, + {Key: []byte("row-002-key-2"), Value: []byte("row-002-value-2")}, + }, + } + + var retrievedKeys [][]byte + var retrievedValues [][]MapPair + c := b.MapCursor() + defer c.Close() + retrieved := 0 + for k, v := c.First(ctx); k != nil && retrieved < 3; k, v = c.Next(ctx) { + retrieved++ + retrievedKeys = append(retrievedKeys, k) + retrievedValues = append(retrievedValues, v) + } + + assert.Equal(t, expectedKeys, retrievedKeys) + + require.Equal(t, len(expectedValues), len(retrievedValues)) + for i := range expectedValues { + assert.ElementsMatch(t, expectedValues[i], retrievedValues[i]) + } + }) + + t.Run("delete/replace an existing map key/value pair", func(t *testing.T) { + row := []byte("row-002") + pair 
:= MapPair{ + Key: []byte("row-002-key-1"), // existing key + Value: []byte("row-002-value-1-updated"), // updated value + } + + require.Nil(t, b.MapSet(row, pair)) + + row = []byte("row-001") + key := []byte("row-001-key-1") + + require.Nil(t, b.MapDeleteKey(row, key)) + }) + + t.Run("verify update is contained", func(t *testing.T) { + expectedKeys := [][]byte{ + []byte("row-001"), + []byte("row-002"), + } + expectedValues := [][]MapPair{ + { + {Key: []byte("row-001-key-0"), Value: []byte("row-001-value-0")}, + // key-1 was deleted + {Key: []byte("row-001-key-2"), Value: []byte("row-001-value-2")}, + }, + { + {Key: []byte("row-002-key-0"), Value: []byte("row-002-value-0")}, + {Key: []byte("row-002-key-1"), Value: []byte("row-002-value-1-updated")}, + {Key: []byte("row-002-key-2"), Value: []byte("row-002-value-2")}, + }, + } + + var retrievedKeys [][]byte + var retrievedValues [][]MapPair + c := b.MapCursor() + defer c.Close() + retrieved := 0 + for k, v := c.Seek(ctx, []byte("row-001")); k != nil && retrieved < 2; k, v = c.Next(ctx) { + retrieved++ + retrievedKeys = append(retrievedKeys, k) + retrievedValues = append(retrievedValues, v) + } + + assert.Equal(t, expectedKeys, retrievedKeys) + + require.Equal(t, len(expectedValues), len(retrievedValues)) + for i := range expectedValues { + assert.ElementsMatch(t, expectedValues[i], retrievedValues[i]) + } + }) + }) + + t.Run("with flushes", func(t *testing.T) { + r := getRandomSeed() + dirName := t.TempDir() + + b, err := NewBucketCreator().NewBucket(ctx, dirName, "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) 
+ require.Nil(t, err) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + t.Run("first third (%3==0)", func(t *testing.T) { + pairs := 20 + valuesPerPair := 3 + var keys [][]byte + var values [][]MapPair + + for i := 0; i < pairs; i++ { + if i%3 != 0 { + continue + } + + keys = append(keys, []byte(fmt.Sprintf("row-%03d", i))) + curValues := make([]MapPair, valuesPerPair) + for j := range curValues { + curValues[j] = MapPair{ + Key: []byte(fmt.Sprintf("row-%03d-key-%d", i, j)), + Value: []byte(fmt.Sprintf("row-%03d-value-%d", i, j)), + } + } + + values = append(values, curValues) + } + + // shuffle to make sure the BST isn't accidentally in order + r.Shuffle(len(keys), func(i, j int) { + keys[i], keys[j] = keys[j], keys[i] + values[i], values[j] = values[j], values[i] + }) + + for i := range keys { + mapPairs := values[i] + for j := range mapPairs { + err = b.MapSet(keys[i], mapPairs[j]) + require.Nil(t, err) + } + } + }) + + t.Run("flush to disk", func(t *testing.T) { + require.Nil(t, b.FlushAndSwitch()) + }) + + t.Run("second third (%3==1)", func(t *testing.T) { + pairs := 20 + valuesPerPair := 3 + var keys [][]byte + var values [][]MapPair + + for i := 0; i < pairs; i++ { + if i%3 != 1 { + continue + } + + keys = append(keys, []byte(fmt.Sprintf("row-%03d", i))) + curValues := make([]MapPair, valuesPerPair) + for j := range curValues { + curValues[j] = MapPair{ + Key: []byte(fmt.Sprintf("row-%03d-key-%d", i, j)), + Value: []byte(fmt.Sprintf("row-%03d-value-%d", i, j)), + } + } + + values = append(values, curValues) + } + + // shuffle to make sure the BST isn't accidentally in order + r.Shuffle(len(keys), func(i, j int) { + keys[i], keys[j] = keys[j], keys[i] + values[i], values[j] = values[j], values[i] + }) + + for i := range keys { + mapPairs := values[i] + for j := range mapPairs { + err = b.MapSet(keys[i], mapPairs[j]) + require.Nil(t, err) + } + } + }) + + t.Run("flush to disk", func(t *testing.T) { + 
require.Nil(t, b.FlushAndSwitch()) + }) + + t.Run("third third (%3==2) memtable only", func(t *testing.T) { + pairs := 20 + valuesPerPair := 3 + var keys [][]byte + var values [][]MapPair + + for i := 0; i < pairs; i++ { + if i%3 != 2 { + continue + } + + keys = append(keys, []byte(fmt.Sprintf("row-%03d", i))) + curValues := make([]MapPair, valuesPerPair) + for j := range curValues { + curValues[j] = MapPair{ + Key: []byte(fmt.Sprintf("row-%03d-key-%d", i, j)), + Value: []byte(fmt.Sprintf("row-%03d-value-%d", i, j)), + } + } + + values = append(values, curValues) + } + + // shuffle to make sure the BST isn't accidentally in order + r.Shuffle(len(keys), func(i, j int) { + keys[i], keys[j] = keys[j], keys[i] + values[i], values[j] = values[j], values[i] + }) + + for i := range keys { + mapPairs := values[i] + for j := range mapPairs { + err = b.MapSet(keys[i], mapPairs[j]) + require.Nil(t, err) + } + } + + // no flush for this one, so this segment stays in the memtable + }) + + t.Run("seek from somewhere in the middle", func(t *testing.T) { + expectedKeys := [][]byte{ + []byte("row-016"), + []byte("row-017"), + []byte("row-018"), + []byte("row-019"), + } + expectedValues := [][]MapPair{ + { + {Key: []byte("row-016-key-0"), Value: []byte("row-016-value-0")}, + {Key: []byte("row-016-key-1"), Value: []byte("row-016-value-1")}, + {Key: []byte("row-016-key-2"), Value: []byte("row-016-value-2")}, + }, + { + {Key: []byte("row-017-key-0"), Value: []byte("row-017-value-0")}, + {Key: []byte("row-017-key-1"), Value: []byte("row-017-value-1")}, + {Key: []byte("row-017-key-2"), Value: []byte("row-017-value-2")}, + }, + { + {Key: []byte("row-018-key-0"), Value: []byte("row-018-value-0")}, + {Key: []byte("row-018-key-1"), Value: []byte("row-018-value-1")}, + {Key: []byte("row-018-key-2"), Value: []byte("row-018-value-2")}, + }, + { + {Key: []byte("row-019-key-0"), Value: []byte("row-019-value-0")}, + {Key: []byte("row-019-key-1"), Value: []byte("row-019-value-1")}, + {Key: 
[]byte("row-019-key-2"), Value: []byte("row-019-value-2")}, + }, + } + + var retrievedKeys [][]byte + var retrievedValues [][]MapPair + c := b.MapCursor() + defer c.Close() + for k, v := c.Seek(ctx, []byte("row-016")); k != nil; k, v = c.Next(ctx) { + retrievedKeys = append(retrievedKeys, k) + retrievedValues = append(retrievedValues, v) + } + + assert.Equal(t, expectedKeys, retrievedKeys) + + require.Equal(t, len(expectedValues), len(retrievedValues)) + for i := range expectedValues { + assert.ElementsMatch(t, expectedValues[i], retrievedValues[i]) + } + }) + + t.Run("start from beginning", func(t *testing.T) { + expectedKeys := [][]byte{ + []byte("row-000"), + []byte("row-001"), + []byte("row-002"), + } + expectedValues := [][]MapPair{ + { + {Key: []byte("row-000-key-0"), Value: []byte("row-000-value-0")}, + {Key: []byte("row-000-key-1"), Value: []byte("row-000-value-1")}, + {Key: []byte("row-000-key-2"), Value: []byte("row-000-value-2")}, + }, + { + {Key: []byte("row-001-key-0"), Value: []byte("row-001-value-0")}, + {Key: []byte("row-001-key-1"), Value: []byte("row-001-value-1")}, + {Key: []byte("row-001-key-2"), Value: []byte("row-001-value-2")}, + }, + { + {Key: []byte("row-002-key-0"), Value: []byte("row-002-value-0")}, + {Key: []byte("row-002-key-1"), Value: []byte("row-002-value-1")}, + {Key: []byte("row-002-key-2"), Value: []byte("row-002-value-2")}, + }, + } + + var retrievedKeys [][]byte + var retrievedValues [][]MapPair + c := b.MapCursor() + defer c.Close() + retrieved := 0 + for k, v := c.First(ctx); k != nil && retrieved < 3; k, v = c.Next(ctx) { + retrieved++ + retrievedKeys = append(retrievedKeys, k) + retrievedValues = append(retrievedValues, v) + } + + assert.Equal(t, expectedKeys, retrievedKeys) + + require.Equal(t, len(expectedValues), len(retrievedValues)) + for i := range expectedValues { + assert.ElementsMatch(t, expectedValues[i], retrievedValues[i]) + } + }) + + t.Run("delete/replace an existing map key/value pair", func(t *testing.T) { + 
row := []byte("row-002") + pair := MapPair{ + Key: []byte("row-002-key-1"), // existing key + Value: []byte("row-002-value-1-updated"), // updated value + } + + require.Nil(t, b.MapSet(row, pair)) + + row = []byte("row-001") + key := []byte("row-001-key-1") + + require.Nil(t, b.MapDeleteKey(row, key)) + }) + + t.Run("verify update is contained", func(t *testing.T) { + expectedKeys := [][]byte{ + []byte("row-001"), + []byte("row-002"), + } + expectedValues := [][]MapPair{ + { + {Key: []byte("row-001-key-0"), Value: []byte("row-001-value-0")}, + // key-1 was deleted + {Key: []byte("row-001-key-2"), Value: []byte("row-001-value-2")}, + }, + { + {Key: []byte("row-002-key-0"), Value: []byte("row-002-value-0")}, + {Key: []byte("row-002-key-1"), Value: []byte("row-002-value-1-updated")}, + {Key: []byte("row-002-key-2"), Value: []byte("row-002-value-2")}, + }, + } + + var retrievedKeys [][]byte + var retrievedValues [][]MapPair + c := b.MapCursor() + defer c.Close() + retrieved := 0 + for k, v := c.Seek(ctx, []byte("row-001")); k != nil && retrieved < 2; k, v = c.Next(ctx) { + retrieved++ + retrievedKeys = append(retrievedKeys, k) + retrievedValues = append(retrievedValues, v) + } + + assert.Equal(t, expectedKeys, retrievedKeys) + + require.Equal(t, len(expectedValues), len(retrievedValues)) + for i := range expectedValues { + assert.ElementsMatch(t, expectedValues[i], retrievedValues[i]) + } + }) + + t.Run("one final flush to disk", func(t *testing.T) { + require.Nil(t, b.FlushAndSwitch()) + }) + + t.Run("verify update is contained - after flushing the update", func(t *testing.T) { + expectedKeys := [][]byte{ + []byte("row-001"), + []byte("row-002"), + } + expectedValues := [][]MapPair{ + { + {Key: []byte("row-001-key-0"), Value: []byte("row-001-value-0")}, + // key-1 was deleted + {Key: []byte("row-001-key-2"), Value: []byte("row-001-value-2")}, + }, + { + {Key: []byte("row-002-key-0"), Value: []byte("row-002-value-0")}, + {Key: []byte("row-002-key-1"), Value: 
[]byte("row-002-value-1-updated")}, + {Key: []byte("row-002-key-2"), Value: []byte("row-002-value-2")}, + }, + } + + var retrievedKeys [][]byte + var retrievedValues [][]MapPair + c := b.MapCursor() + defer c.Close() + retrieved := 0 + for k, v := c.Seek(ctx, []byte("row-001")); k != nil && retrieved < 2; k, v = c.Next(ctx) { + retrieved++ + retrievedKeys = append(retrievedKeys, k) + retrievedValues = append(retrievedValues, v) + } + + assert.Equal(t, expectedKeys, retrievedKeys) + + require.Equal(t, len(expectedValues), len(retrievedValues)) + for i := range expectedValues { + assert.ElementsMatch(t, expectedValues[i], retrievedValues[i]) + } + }) + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/strategies_map_sorted_merger.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/strategies_map_sorted_merger.go new file mode 100644 index 0000000000000000000000000000000000000000..9383c1f77c1b4db085e6707bf9e9260c0366e023 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/strategies_map_sorted_merger.go @@ -0,0 +1,216 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "bytes" + "context" + + "github.com/pkg/errors" +) + +type sortedMapMerger struct { + input [][]MapPair + output []MapPair + offsets []int +} + +func newSortedMapMerger() *sortedMapMerger { + return &sortedMapMerger{} +} + +func (s *sortedMapMerger) do(ctx context.Context, segments [][]MapPair) ([]MapPair, error) { + if err := s.init(segments); err != nil { + return nil, errors.Wrap(err, "init sorted map decoder") + } + + i := 0 + for { + if i%100 == 0 && ctx.Err() != nil { + return nil, ctx.Err() + } + + match, ok := s.findSegmentWithLowestKey() + if !ok { + break + } + + if match.Tombstone { + // the latest version of this key was a tombstone, so we can ignore it + continue + } + + s.output[i] = match + i++ + } + + return s.output[:i], nil +} + +// same as .do() but does not remove the tombstone if the most latest version +// of a key is a tombstone. It can thus also be used in compactions +func (s *sortedMapMerger) doKeepTombstones(segments [][]MapPair) ([]MapPair, error) { + if err := s.init(segments); err != nil { + return nil, errors.Wrap(err, "init sorted map decoder") + } + + i := 0 + for { + match, ok := s.findSegmentWithLowestKey() + if !ok { + break + } + + s.output[i] = match + i++ + } + + return s.output[:i], nil +} + +// same as .doKeepTombstone() but requires initialization from the outside and +// can thus reuse state from previous rounds without having to allocate again. 
+// must be pre-faced by a call of reset() +func (s *sortedMapMerger) doKeepTombstonesReusable() ([]MapPair, error) { + i := 0 + for { + match, ok := s.findSegmentWithLowestKey() + if !ok { + break + } + + s.output[i] = match + i++ + } + + return s.output[:i], nil +} + +// init is automatically called by .do() or .doKeepTombstones() +func (s *sortedMapMerger) init(segments [][]MapPair) error { + s.input = segments + + // all offset pointers initialized at 0 which is where we want to start + s.offsets = make([]int, len(segments)) + + // The maximum output is the sum of all the input segments if there are only + // unique keys and zero tombstones. If there are duplicate keys (i.e. + // updates) or tombstones, we will slice off some elements of the output + // later, but this way we can be sure each index will always be initialized + // correctly + maxOutput := 0 + for _, seg := range segments { + maxOutput += len(seg) + } + s.output = make([]MapPair, maxOutput) + + return nil +} + +// reset can be manually called if sharing allocated state is desired, such as +// with .doKeepTombstonesReusable() +func (s *sortedMapMerger) reset(segments [][]MapPair) error { + s.input = segments + + if cap(s.offsets) >= len(segments) { + s.offsets = s.offsets[:len(segments)] + + // it existed before so we need to reset all offsets to 0 + for i := range s.offsets { + s.offsets[i] = 0 + } + } else { + s.offsets = make([]int, len(segments), int(float64(len(segments))*1.25)) + } + + // The maximum output is the sum of all the input segments if there are only + // unique keys and zero tombstones. If there are duplicate keys (i.e. 
+ // updates) or tombstones, we will slice off some elements of the output + // later, but this way we can be sure each index will always be initialized + // correctly + maxOutput := 0 + for _, seg := range segments { + maxOutput += len(seg) + } + + if cap(s.output) >= maxOutput { + s.output = s.output[:maxOutput] + // no need to reset any values as all of them will be overridden anyway + } else { + s.output = make([]MapPair, maxOutput, int(float64(maxOutput)*1.25)) + } + + return nil +} + +func (s *sortedMapMerger) findSegmentWithLowestKey() (MapPair, bool) { + bestSeg := -1 + bestKey := []byte(nil) + + for segmentID := 0; segmentID < len(s.input); segmentID++ { + // check if a segment is already exhausted, then skip + if s.offsets[segmentID] >= len(s.input[segmentID]) { + continue + } + + currKey := s.input[segmentID][s.offsets[segmentID]].Key + if bestSeg == -1 { + // first time we're running, no need to compare, just set to current + bestSeg = segmentID + bestKey = currKey + continue + } + + cmp := bytes.Compare(currKey, bestKey) + if cmp > 0 { + // the segment we are currently looking at has a higher key than our + // current best so we can completely ignore it + continue + } + + if cmp < 0 { + // the segment we are currently looking at is a better match than the + // previous, this means, we have found a new favorite, but the previous + // best will still be valid in a future round + bestSeg = segmentID + bestKey = currKey + } + + if cmp == 0 { + // this the most interesting case: we are looking at a duplicate key. In + // this case the rightmost ("latest") segment takes precedence, however, + // we must make sure that the previous match gets discarded, otherwise we + // will find it again in the next round. 
+ // + // We can simply increase the offset before updating the bestSeg pointer, + // which means we will never look at this element again + s.offsets[bestSeg]++ + + // now that the old element is discarded, we can update our pointers + bestSeg = segmentID + bestKey = currKey + } + } + + if bestSeg == -1 { + // we didn't find anything, looks like we have exhausted all segments + return MapPair{}, false + } + + // we can now be sure that bestSeg,bestKey is the latest version of the + // lowest key, there is only one job left to do: increase the offset, so we + // never find this segment again + bestMatch := s.input[bestSeg][s.offsets[bestSeg]] + s.offsets[bestSeg]++ + + return bestMatch, true +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/strategies_map_sorted_merger_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/strategies_map_sorted_merger_test.go new file mode 100644 index 0000000000000000000000000000000000000000..79547921a8eb5f085ed73c48dca05678840a35cb --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/strategies_map_sorted_merger_test.go @@ -0,0 +1,372 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
// Test_SortedMapMerger_RemoveTombstones exercises sortedMapMerger.do, which
// merges sorted segments and drops any key whose latest version is a
// tombstone.
func Test_SortedMapMerger_RemoveTombstones(t *testing.T) {
	t.Run("single entry, no tombstones", func(t *testing.T) {
		m := newSortedMapMerger()
		input1 := []MapPair{
			{
				Key:   []byte("hello"),
				Value: []byte("world"),
			},
		}

		input := [][]MapPair{input1}

		actual, err := m.do(context.Background(), input)
		require.Nil(t, err)

		expected := []MapPair{
			{
				Key:   []byte("hello"),
				Value: []byte("world"),
			},
		}
		assert.Equal(t, expected, actual)
	})

	t.Run("single entry, single tombstone for unrelated key", func(t *testing.T) {
		m := newSortedMapMerger()
		input1 := []MapPair{
			{
				Key:   []byte("hello"),
				Value: []byte("world"),
			},
			{
				Key:       []byte("unrelated"),
				Tombstone: true,
			},
		}

		input := [][]MapPair{input1}

		actual, err := m.do(context.Background(), input)
		require.Nil(t, err)

		// the tombstone only removes its own key; "hello" must survive
		expected := []MapPair{
			{
				Key:   []byte("hello"),
				Value: []byte("world"),
			},
		}
		assert.Equal(t, expected, actual)
	})

	t.Run("single entry with tombstone over two segments", func(t *testing.T) {
		m := newSortedMapMerger()
		input := [][]MapPair{
			{
				{
					Key:   []byte("hello"),
					Value: []byte("world"),
				},
			},
			{
				{
					Key:       []byte("hello"),
					Tombstone: true,
				},
			},
		}

		actual, err := m.do(context.Background(), input)
		require.Nil(t, err)

		// the newer segment's tombstone erases the key entirely
		expected := []MapPair{}
		assert.Equal(t, expected, actual)
	})

	t.Run("multiple segments including updates", func(t *testing.T) {
		m := newSortedMapMerger()
		// rightmost segment is the most recent, so later segments override
		// earlier ones on key collisions
		input := [][]MapPair{
			{
				{
					Key:   []byte("a"),
					Value: []byte("a1"),
				},
				{
					Key:   []byte("c"),
					Value: []byte("c1"),
				},
				{
					Key:   []byte("e"),
					Value: []byte("e1"),
				},
			},
			{
				{
					Key:   []byte("a"),
					Value: []byte("a2"),
				},
				{
					Key:   []byte("b"),
					Value: []byte("b2"),
				},
				{
					Key:   []byte("c"),
					Value: []byte("c2"),
				},
			},
			{
				{
					Key:   []byte("b"),
					Value: []byte("b3"),
				},
			},
		}

		actual, err := m.do(context.Background(), input)
		require.Nil(t, err)

		expected := []MapPair{
			{
				Key:   []byte("a"),
				Value: []byte("a2"),
			},
			{
				Key:   []byte("b"),
				Value: []byte("b3"),
			},
			{
				Key:   []byte("c"),
				Value: []byte("c2"),
			},
			{
				Key:   []byte("e"),
				Value: []byte("e1"),
			},
		}
		assert.Equal(t, expected, actual)
	})

	t.Run("multiple segments including deletes and re-adds", func(t *testing.T) {
		m := newSortedMapMerger()
		input := [][]MapPair{
			{
				{
					Key:   []byte("a"),
					Value: []byte("a1"),
				},
				{
					Key:   []byte("c"),
					Value: []byte("c1"),
				},
				{
					Key:   []byte("e"),
					Value: []byte("e1"),
				},
			},
			{
				{
					Key:   []byte("a"),
					Value: []byte("a2"),
				},
				{
					Key:       []byte("b"),
					Tombstone: true,
				},
				{
					Key:   []byte("c"),
					Value: []byte("c2"),
				},
			},
			{
				{
					Key:   []byte("b"),
					Value: []byte("b3"),
				},
				{
					Key:       []byte("e"),
					Tombstone: true,
				},
			},
		}

		actual, err := m.do(context.Background(), input)
		require.Nil(t, err)

		// b was deleted then re-added (kept); e's latest version is a
		// tombstone (dropped)
		expected := []MapPair{
			{
				Key:   []byte("a"),
				Value: []byte("a2"),
			},
			{
				Key:   []byte("b"),
				Value: []byte("b3"),
			},
			{
				Key:   []byte("c"),
				Value: []byte("c2"),
			},
		}
		assert.Equal(t, expected, actual)
	})
}

// Test_SortedMapMerger_KeepTombstones exercises doKeepTombstones and its
// reusable variant, which retain an entry whose latest version is a
// tombstone (as needed for compactions). A single merger instance is shared
// across subtests to verify reset() clears dirty state.
func Test_SortedMapMerger_KeepTombstones(t *testing.T) {
	m := newSortedMapMerger()

	t.Run("multiple segments including updates, deletes in 2nd segment", func(t *testing.T) {
		input := [][]MapPair{
			{
				{
					Key:   []byte("a"),
					Value: []byte("a1"),
				},
				{
					Key:   []byte("c"),
					Value: []byte("c1"),
				},
				{
					Key:   []byte("e"),
					Value: []byte("e1"),
				},
			},
			{
				{
					Key:   []byte("a"),
					Value: []byte("a2"),
				},
				{
					Key:   []byte("b"),
					Value: []byte("b2"),
				},
				{
					Key:       []byte("c"),
					Tombstone: true,
				},
			},
			{
				{
					Key:   []byte("b"),
					Value: []byte("b3"),
				},
			},
		}

		// c's tombstone is the latest version and must be kept verbatim
		expected := []MapPair{
			{
				Key:   []byte("a"),
				Value: []byte("a2"),
			},
			{
				Key:   []byte("b"),
				Value: []byte("b3"),
			},
			{
				Key:       []byte("c"),
				Tombstone: true,
			},
			{
				Key:   []byte("e"),
				Value: []byte("e1"),
			},
		}

		t.Run("without reusable functionality - fresh state", func(t *testing.T) {
			actual, err := m.doKeepTombstones(input)
			require.Nil(t, err)

			assert.Equal(t, expected, actual)
		})

		t.Run("with reusable functionality - fresh state", func(t *testing.T) {
			m.reset(input)
			actual, err := m.doKeepTombstonesReusable()
			require.Nil(t, err)

			assert.Equal(t, expected, actual)
		})
	})

	t.Run("inverse order, deletes in 1st segment", func(t *testing.T) {
		// same data, but with segment order reversed: now the first subtest's
		// "oldest" segment is the most recent, so its values win
		input := [][]MapPair{
			{
				{
					Key:   []byte("b"),
					Value: []byte("b3"),
				},
			},
			{
				{
					Key:   []byte("a"),
					Value: []byte("a2"),
				},
				{
					Key:   []byte("b"),
					Value: []byte("b2"),
				},
				{
					Key:       []byte("c"),
					Tombstone: true,
				},
			},
			{
				{
					Key:   []byte("a"),
					Value: []byte("a1"),
				},
				{
					Key:   []byte("c"),
					Value: []byte("c1"),
				},
				{
					Key:   []byte("e"),
					Value: []byte("e1"),
				},
			},
		}

		expected := []MapPair{
			{
				Key:   []byte("a"),
				Value: []byte("a1"),
			},
			{
				Key:   []byte("b"),
				Value: []byte("b2"),
			},
			{
				Key:   []byte("c"),
				Value: []byte("c1"),
			},
			{
				Key:   []byte("e"),
				Value: []byte("e1"),
			},
		}

		t.Run("without reusable functionality - fresh state", func(t *testing.T) {
			actual, err := m.doKeepTombstones(input)
			require.Nil(t, err)

			assert.Equal(t, expected, actual)
		})

		// m was already used above, so this verifies reset() on dirty state
		t.Run("with reusable functionality - dirty state", func(t *testing.T) {
			m.reset(input)
			actual, err := m.doKeepTombstonesReusable()
			require.Nil(t, err)

			assert.Equal(t, expected, actual)
		})
	})
}
// TestMapEncoderDecoderJourney round-trips MapPairs through the map
// encoder/decoder: pairs are encoded one at a time, then decoded as a batch,
// which replaces duplicates and removes tombstoned keys.
func TestMapEncoderDecoderJourney(t *testing.T) {
	// this test first encodes the map pairs, then decodes them and replace
	// duplicates, remove tombstones, etc.
	type test struct {
		name string
		in   []MapPair
		out  []MapPair
	}

	tests := []test{
		{
			name: "single pair",
			in: []MapPair{
				{
					Key:   []byte("foo"),
					Value: []byte("bar"),
				},
			},
			out: []MapPair{
				{
					Key:   []byte("foo"),
					Value: []byte("bar"),
				},
			},
		},
		{
			name: "single pair, updated value",
			in: []MapPair{
				{
					Key:   []byte("foo"),
					Value: []byte("bar"),
				},
				{
					Key:   []byte("foo"),
					Value: []byte("bar2"),
				},
			},
			out: []MapPair{
				{
					Key:   []byte("foo"),
					Value: []byte("bar2"),
				},
			},
		},
		{
			name: "single pair, tombstone added",
			in: []MapPair{
				{
					Key:   []byte("foo"),
					Value: []byte("bar"),
				},
				{
					Key:       []byte("foo"),
					Tombstone: true,
				},
			},
			out: []MapPair{},
		},
		{
			name: "single pair, tombstone added, same value added again",
			in: []MapPair{
				{
					Key:   []byte("foo"),
					Value: []byte("bar"),
				},
				{
					Key:       []byte("foo"),
					Tombstone: true,
				},
				{
					Key:   []byte("foo"),
					Value: []byte("bar2"),
				},
			},
			out: []MapPair{
				{
					Key:   []byte("foo"),
					Value: []byte("bar2"),
				},
			},
		},
		{
			name: "multiple values, combination of updates and tombstones",
			in: []MapPair{
				{
					Key:   []byte("foo"),
					Value: []byte("never-updated"),
				},
				{
					Key:   []byte("foo1"),
					Value: []byte("bar1"),
				},
				{
					Key:   []byte("foo2"),
					Value: []byte("bar2"),
				},
				{
					Key:   []byte("foo2"),
					Value: []byte("bar2.2"),
				},
				{
					Key:       []byte("foo1"),
					Tombstone: true,
				},
				{
					Key:   []byte("foo2"),
					Value: []byte("bar2.3"),
				},
				{
					Key:   []byte("foo1"),
					Value: []byte("bar1.2"),
				},
			},
			out: []MapPair{
				{
					Key:   []byte("foo"),
					Value: []byte("never-updated"),
				},
				{
					Key:   []byte("foo1"),
					Value: []byte("bar1.2"),
				},
				{
					Key:   []byte("foo2"),
					Value: []byte("bar2.3"),
				},
			},
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			// encode each pair individually, as the memtable would
			encoded := make([]value, len(test.in))
			for i, kv := range test.in {
				enc, err := newMapEncoder().Do(kv)
				require.Nil(t, err)
				encoded[i] = enc[0]
			}
			res, err := newMapDecoder().Do(encoded, false)
			require.Nil(t, err)
			// NOTE: we are accepting that the order can be lost on updates
			assert.ElementsMatch(t, test.out, res)
		})
	}
}

// TestDecoderRemoveTombstones verifies mapDecoder.doSimplified: later entries
// override earlier ones for the same key and a trailing tombstone removes the
// key from the result.
func TestDecoderRemoveTombstones(t *testing.T) {
	t.Run("single entry, no tombstones", func(t *testing.T) {
		m := newMapDecoder()
		input := mustEncode([]MapPair{
			{
				Key:   []byte("hello"),
				Value: []byte("world"),
			},
		})

		actual, err := m.doSimplified(input)
		require.Nil(t, err)

		expected := []MapPair{
			{
				Key:   []byte("hello"),
				Value: []byte("world"),
			},
		}
		assert.Equal(t, expected, actual)
	})

	t.Run("single entry, single tombstone", func(t *testing.T) {
		m := newMapDecoder()
		input := mustEncode([]MapPair{
			{
				Key:   []byte("hello"),
				Value: []byte("world"),
			},
			{
				Key:       []byte("hello"),
				Tombstone: true,
			},
		})

		actual, err := m.doSimplified(input)
		require.Nil(t, err)

		expected := []MapPair{}
		assert.Equal(t, expected, actual)
	})

	// NOTE(review): despite the "then read" name, this case re-adds the value
	// after the tombstone — presumably "re-add" was meant; verify intent
	t.Run("single entry, single tombstone, then read", func(t *testing.T) {
		m := newMapDecoder()
		input := mustEncode([]MapPair{
			{
				Key:   []byte("hello"),
				Value: []byte("world"),
			},
			{
				Key:       []byte("hello"),
				Tombstone: true,
			},
			{
				Key:   []byte("hello"),
				Value: []byte("world"),
			},
		})

		actual, err := m.doSimplified(input)
		require.Nil(t, err)

		expected := []MapPair{
			{
				Key:   []byte("hello"),
				Value: []byte("world"),
			},
		}
		assert.Equal(t, expected, actual)
	})

	t.Run("three entries, two tombstones at the end", func(t *testing.T) {
		m := newMapDecoder()
		input := mustEncode([]MapPair{
			{
				Key:   []byte("hello"),
				Value: []byte("world"),
			},
			{
				Key:   []byte("bonjour"),
				Value: []byte("world"),
			},
			{
				Key:   []byte("guten tag"),
				Value: []byte("world"),
			},
			{
				Key:       []byte("hello"),
				Tombstone: true,
			},
			{
				Key:       []byte("bonjour"),
				Tombstone: true,
			},
		})

		actual, err := m.doSimplified(input)
		require.Nil(t, err)

		// only the key that was never tombstoned remains
		expected := []MapPair{
			{
				Key:   []byte("guten tag"),
				Value: []byte("world"),
			},
		}
		assert.Equal(t, expected, actual)
	})

	t.Run("three entries, two tombstones at the end, then recreate the first", func(t *testing.T) {
		m := newMapDecoder()
		input := mustEncode([]MapPair{
			{
				Key:   []byte("hello"),
				Value: []byte("world"),
			},
			{
				Key:   []byte("bonjour"),
				Value: []byte("world"),
			},
			{
				Key:   []byte("guten tag"),
				Value: []byte("world"),
			},
			{
				Key:       []byte("hello"),
				Tombstone: true,
			},
			{
				Key:       []byte("bonjour"),
				Tombstone: true,
			},
			{
				Key:   []byte("bonjour"),
				Value: []byte("world"),
			},
			{
				Key:   []byte("hello"),
				Value: []byte("world"),
			},
		})

		actual, err := m.doSimplified(input)
		require.Nil(t, err)

		// re-added keys come back; result order reflects the re-add order
		expected := []MapPair{
			{
				Key:   []byte("guten tag"),
				Value: []byte("world"),
			},
			{
				Key:   []byte("bonjour"),
				Value: []byte("world"),
			},
			{
				Key:   []byte("hello"),
				Value: []byte("world"),
			},
		}
		assert.Equal(t, expected, actual)
	})
}

// mustEncode encodes the given pairs with the map encoder and panics on
// error — test-fixture helper only.
func mustEncode(kvs []MapPair) []value {
	res, err := newMapEncoder().DoMulti(kvs)
	if err != nil {
		panic(err)
	}

	return res
}

// Test_MapPair_EncodingBytes checks that EncodeBytes writing into a
// pre-sized buffer produces exactly the same bytes as the allocating Bytes()
// method.
func Test_MapPair_EncodingBytes(t *testing.T) {
	kv := MapPair{
		Key:   []byte("hello-world-key1"),
		Value: []byte("this is the value ;-)"),
	}

	control, err := kv.Bytes()
	assert.Nil(t, err)

	encoded := make([]byte, kv.Size())
	err = kv.EncodeBytes(encoded)
	assert.Nil(t, err)

	assert.Equal(t, control, encoded)
}
// replaceInsertAndUpdate exercises the replace strategy's insert/update
// behavior across the bucket lifecycle: memtable-only, with flushes between
// writes, and across an orderly shutdown + re-init from disk. It also
// verifies that updates are not double-counted by Count/CountAsync.
func replaceInsertAndUpdate(ctx context.Context, t *testing.T, opts []BucketOption) {
	t.Run("memtable-only", func(t *testing.T) {
		b, err := NewBucketCreator().NewBucket(ctx, t.TempDir(), "", nullLogger(), nil,
			cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...)
		require.Nil(t, err)

		defer b.Shutdown(ctx)

		// so big it effectively never triggers as part of this test
		b.SetMemtableThreshold(1e9)

		t.Run("set original values and verify", func(t *testing.T) {
			key1 := []byte("key-1")
			key2 := []byte("key-2")
			key3 := []byte("key-3")
			orig1 := []byte("original value for key1")
			orig2 := []byte("original value for key2")
			orig3 := []byte("original value for key3")

			err = b.Put(key1, orig1)
			require.Nil(t, err)
			err = b.Put(key2, orig2)
			require.Nil(t, err)
			err = b.Put(key3, orig3)
			require.Nil(t, err)

			// CountAsync stays 0 here — presumably it only reflects flushed
			// segments, not the memtable; confirmed by later subtests
			assert.Equal(t, 3, b.Count())
			assert.Equal(t, 0, b.CountAsync())

			res, err := b.Get(key1)
			require.Nil(t, err)
			assert.Equal(t, res, orig1)
			res, err = b.Get(key2)
			require.Nil(t, err)
			assert.Equal(t, res, orig2)
			res, err = b.Get(key3)
			require.Nil(t, err)
			assert.Equal(t, res, orig3)
		})

		t.Run("replace some, keep one", func(t *testing.T) {
			key1 := []byte("key-1")
			key2 := []byte("key-2")
			key3 := []byte("key-3")
			orig1 := []byte("original value for key1")
			replaced2 := []byte("updated value for key2")
			replaced3 := []byte("updated value for key3")

			err = b.Put(key2, replaced2)
			require.Nil(t, err)
			err = b.Put(key3, replaced3)
			require.Nil(t, err)

			// updates must not increase the count
			assert.Equal(t, 3, b.Count())
			assert.Equal(t, 0, b.CountAsync())

			res, err := b.Get(key1)
			require.Nil(t, err)
			assert.Equal(t, res, orig1)
			res, err = b.Get(key2)
			require.Nil(t, err)
			assert.Equal(t, res, replaced2)
			res, err = b.Get(key3)
			require.Nil(t, err)
			assert.Equal(t, res, replaced3)
		})
	})

	t.Run("with single flush in between updates", func(t *testing.T) {
		b, err := NewBucketCreator().NewBucket(ctx, t.TempDir(), "", nullLogger(), nil,
			cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...)
		require.Nil(t, err)

		defer b.Shutdown(ctx)

		// so big it effectively never triggers as part of this test
		b.SetMemtableThreshold(1e9)

		t.Run("set original values and verify", func(t *testing.T) {
			key1 := []byte("key-1")
			key2 := []byte("key-2")
			key3 := []byte("key-3")
			orig1 := []byte("original value for key1")
			orig2 := []byte("original value for key2")
			orig3 := []byte("original value for key3")

			err = b.Put(key1, orig1)
			require.Nil(t, err)
			err = b.Put(key2, orig2)
			require.Nil(t, err)
			err = b.Put(key3, orig3)
			require.Nil(t, err)

			res, err := b.Get(key1)
			require.Nil(t, err)
			assert.Equal(t, res, orig1)
			res, err = b.Get(key2)
			require.Nil(t, err)
			assert.Equal(t, res, orig2)
			res, err = b.Get(key3)
			require.Nil(t, err)
			assert.Equal(t, res, orig3)
		})

		t.Run("flush memtable to disk", func(t *testing.T) {
			require.Nil(t, b.FlushAndSwitch())
		})

		t.Run("count only objects on disk segment", func(t *testing.T) {
			assert.Equal(t, 3, b.Count())
			assert.Equal(t, 3, b.CountAsync())
		})

		t.Run("replace some, keep one", func(t *testing.T) {
			key1 := []byte("key-1")
			key2 := []byte("key-2")
			key3 := []byte("key-3")
			orig1 := []byte("original value for key1")
			replaced2 := []byte("updated value for key2")
			replaced3 := []byte("updated value for key3")

			err = b.Put(key2, replaced2)
			require.Nil(t, err)
			err = b.Put(key3, replaced3)
			require.Nil(t, err)

			// make sure that the updates aren't counted as additions
			assert.Equal(t, 3, b.Count())

			// happens to be the same value, but that's just a coincidence, async
			// ignores the memtable
			assert.Equal(t, 3, b.CountAsync())

			res, err := b.Get(key1)
			require.Nil(t, err)
			assert.Equal(t, orig1, res)
			res, err = b.Get(key2)
			require.Nil(t, err)
			assert.Equal(t, replaced2, res)
			res, err = b.Get(key3)
			require.Nil(t, err)
			assert.Equal(t, replaced3, res)
		})
	})

	t.Run("with a flush after the initial write and after the update", func(t *testing.T) {
		b, err := NewBucketCreator().NewBucket(ctx, t.TempDir(), "", nullLogger(), nil,
			cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...)
		require.Nil(t, err)

		defer b.Shutdown(ctx)

		// so big it effectively never triggers as part of this test
		b.SetMemtableThreshold(1e9)

		t.Run("set original values and verify", func(t *testing.T) {
			key1 := []byte("key-1")
			key2 := []byte("key-2")
			key3 := []byte("key-3")
			orig1 := []byte("original value for key1")
			orig2 := []byte("original value for key2")
			orig3 := []byte("original value for key3")

			err = b.Put(key1, orig1)
			require.Nil(t, err)
			err = b.Put(key2, orig2)
			require.Nil(t, err)
			err = b.Put(key3, orig3)
			require.Nil(t, err)

			res, err := b.Get(key1)
			require.Nil(t, err)
			assert.Equal(t, res, orig1)
			res, err = b.Get(key2)
			require.Nil(t, err)
			assert.Equal(t, res, orig2)
			res, err = b.Get(key3)
			require.Nil(t, err)
			assert.Equal(t, res, orig3)
		})

		t.Run("flush memtable to disk", func(t *testing.T) {
			require.Nil(t, b.FlushAndSwitch())
		})

		t.Run("replace some, keep one", func(t *testing.T) {
			key1 := []byte("key-1")
			key2 := []byte("key-2")
			key3 := []byte("key-3")
			orig1 := []byte("original value for key1")
			replaced2 := []byte("updated value for key2")
			replaced3 := []byte("updated value for key3")

			err = b.Put(key2, replaced2)
			require.Nil(t, err)
			err = b.Put(key3, replaced3)
			require.Nil(t, err)

			// Flush before verifying!
			require.Nil(t, b.FlushAndSwitch())

			res, err := b.Get(key1)
			require.Nil(t, err)
			assert.Equal(t, res, orig1)
			res, err = b.Get(key2)
			require.Nil(t, err)
			assert.Equal(t, res, replaced2)
			res, err = b.Get(key3)
			require.Nil(t, err)
			assert.Equal(t, res, replaced3)
		})

		t.Run("count objects over several segments", func(t *testing.T) {
			assert.Equal(t, 3, b.Count())
			assert.Equal(t, 3, b.CountAsync())
		})
	})

	t.Run("update in memtable, then do an orderly shutdown, and re-init", func(t *testing.T) {
		// dirName is reused below to open a second bucket on the same files
		dirName := t.TempDir()
		b, err := NewBucketCreator().NewBucket(ctx, dirName, "", nullLogger(), nil,
			cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...)
		require.Nil(t, err)

		defer b.Shutdown(ctx)

		// so big it effectively never triggers as part of this test
		b.SetMemtableThreshold(1e9)

		t.Run("set original values and verify", func(t *testing.T) {
			key1 := []byte("key-1")
			key2 := []byte("key-2")
			key3 := []byte("key-3")
			orig1 := []byte("original value for key1")
			orig2 := []byte("original value for key2")
			orig3 := []byte("original value for key3")

			err = b.Put(key1, orig1)
			require.Nil(t, err)
			err = b.Put(key2, orig2)
			require.Nil(t, err)
			err = b.Put(key3, orig3)
			require.Nil(t, err)
		})

		t.Run("replace some, keep one", func(t *testing.T) {
			key1 := []byte("key-1")
			key2 := []byte("key-2")
			key3 := []byte("key-3")
			orig1 := []byte("original value for key1")
			replaced2 := []byte("updated value for key2")
			replaced3 := []byte("updated value for key3")

			err = b.Put(key2, replaced2)
			require.Nil(t, err)
			err = b.Put(key3, replaced3)
			require.Nil(t, err)

			res, err := b.Get(key1)
			require.Nil(t, err)
			assert.Equal(t, res, orig1)
			res, err = b.Get(key2)
			require.Nil(t, err)
			assert.Equal(t, res, replaced2)
			res, err = b.Get(key3)
			require.Nil(t, err)
			assert.Equal(t, res, replaced3)
		})

		t.Run("orderly shutdown", func(t *testing.T) {
			b.Shutdown(context.Background())
		})

		t.Run("init another bucket on the same files", func(t *testing.T) {
			b2, err := NewBucketCreator().NewBucket(ctx, dirName, "", nullLogger(), nil,
				cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...)
			require.Nil(t, err)

			key1 := []byte("key-1")
			key2 := []byte("key-2")
			key3 := []byte("key-3")
			orig1 := []byte("original value for key1")
			replaced2 := []byte("updated value for key2")
			replaced3 := []byte("updated value for key3")

			res, err := b2.Get(key1)
			require.Nil(t, err)
			assert.Equal(t, res, orig1)
			res, err = b2.Get(key2)
			require.Nil(t, err)
			assert.Equal(t, res, replaced2)
			res, err = b2.Get(key3)
			require.Nil(t, err)
			assert.Equal(t, res, replaced3)

			// count objects over several segments after disk read
			assert.Equal(t, 3, b2.Count())
			assert.Equal(t, 3, b2.CountAsync())
		})
	})
}
+ require.Nil(t, err) + + defer b.Shutdown(ctx) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + t.Run("set original values and verify", func(t *testing.T) { + key1 := []byte("key-1") + key2 := []byte("key-2") + key3 := []byte("key-3") + secondaryKey1 := []byte("secondary-key-1") + secondaryKey2 := []byte("secondary-key-2") + secondaryKey3 := []byte("secondary-key-3") + orig1 := []byte("original value for key1") + orig2 := []byte("original value for key2") + orig3 := []byte("original value for key3") + + err = b.Put(key1, orig1, WithSecondaryKey(0, secondaryKey1)) + require.Nil(t, err) + err = b.Put(key2, orig2, WithSecondaryKey(0, secondaryKey2)) + require.Nil(t, err) + err = b.Put(key3, orig3, WithSecondaryKey(0, secondaryKey3)) + require.Nil(t, err) + + res, err := b.GetBySecondary(0, secondaryKey1) + require.Nil(t, err) + assert.Equal(t, res, orig1) + res, err = b.GetBySecondary(0, secondaryKey2) + require.Nil(t, err) + assert.Equal(t, res, orig2) + res, err = b.GetBySecondary(0, secondaryKey3) + require.Nil(t, err) + assert.Equal(t, res, orig3) + }) + + t.Run("replace some values, keep one - secondary keys not changed", func(t *testing.T) { + key2 := []byte("key-2") + key3 := []byte("key-3") + secondaryKey1 := []byte("secondary-key-1") + secondaryKey2 := []byte("secondary-key-2") + secondaryKey3 := []byte("secondary-key-3") + orig1 := []byte("original value for key1") + replaced2 := []byte("updated value for key2") + replaced3 := []byte("updated value for key3") + + err = b.Put(key2, replaced2, WithSecondaryKey(0, secondaryKey2)) + require.Nil(t, err) + err = b.Put(key3, replaced3, WithSecondaryKey(0, secondaryKey3)) + require.Nil(t, err) + + res, err := b.GetBySecondary(0, secondaryKey1) + require.Nil(t, err) + assert.Equal(t, res, orig1) + res, err = b.GetBySecondary(0, secondaryKey2) + require.Nil(t, err) + assert.Equal(t, res, replaced2) + res, err = b.GetBySecondary(0, secondaryKey3) + require.Nil(t, err) 
+ assert.Equal(t, res, replaced3) + }) + + t.Run("replace the secondary keys on an update", func(t *testing.T) { + key2 := []byte("key-2") + key3 := []byte("key-3") + secondaryKey1 := []byte("secondary-key-1") + secondaryKey2 := []byte("secondary-key-2-updated") + secondaryKey3 := []byte("secondary-key-3-updated") + orig1 := []byte("original value for key1") + replaced2 := []byte("twice updated value for key2") + replaced3 := []byte("twice updated value for key3") + + err = b.Put(key2, replaced2, WithSecondaryKey(0, secondaryKey2)) + require.Nil(t, err) + err = b.Put(key3, replaced3, WithSecondaryKey(0, secondaryKey3)) + require.Nil(t, err) + + // verify you can find by updated secondary keys + res, err := b.GetBySecondary(0, secondaryKey1) + require.Nil(t, err) + assert.Equal(t, res, orig1) + res, err = b.GetBySecondary(0, secondaryKey2) + require.Nil(t, err) + assert.Equal(t, res, replaced2) + res, err = b.GetBySecondary(0, secondaryKey3) + require.Nil(t, err) + assert.Equal(t, res, replaced3) + }) + }) + + t.Run("with single flush in between updates", func(t *testing.T) { + b, err := NewBucketCreator().NewBucket(ctx, t.TempDir(), "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) 
+ require.Nil(t, err) + + defer b.Shutdown(ctx) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + t.Run("set original values", func(t *testing.T) { + key1 := []byte("key-1") + key2 := []byte("key-2") + key3 := []byte("key-3") + secondaryKey1 := []byte("secondary-key-1") + secondaryKey2 := []byte("secondary-key-2") + secondaryKey3 := []byte("secondary-key-3") + orig1 := []byte("original value for key1") + orig2 := []byte("original value for key2") + orig3 := []byte("original value for key3") + + err = b.Put(key1, orig1, WithSecondaryKey(0, secondaryKey1)) + require.Nil(t, err) + err = b.Put(key2, orig2, WithSecondaryKey(0, secondaryKey2)) + require.Nil(t, err) + err = b.Put(key3, orig3, WithSecondaryKey(0, secondaryKey3)) + require.Nil(t, err) + }) + + t.Run("flush memtable to disk", func(t *testing.T) { + require.Nil(t, b.FlushAndSwitch()) + }) + + t.Run("replace the secondary keys on an update", func(t *testing.T) { + key2 := []byte("key-2") + key3 := []byte("key-3") + secondaryKey1 := []byte("secondary-key-1") + secondaryKey2 := []byte("secondary-key-2-updated") + secondaryKey3 := []byte("secondary-key-3-updated") + orig1 := []byte("original value for key1") + replaced2 := []byte("twice updated value for key2") + replaced3 := []byte("twice updated value for key3") + + err = b.Put(key2, replaced2, WithSecondaryKey(0, secondaryKey2)) + require.Nil(t, err) + err = b.Put(key3, replaced3, WithSecondaryKey(0, secondaryKey3)) + require.Nil(t, err) + + // verify you can find by updated secondary keys + res, err := b.GetBySecondary(0, secondaryKey1) + require.Nil(t, err) + assert.Equal(t, res, orig1) + res, err = b.GetBySecondary(0, secondaryKey2) + require.Nil(t, err) + assert.Equal(t, res, replaced2) + res, err = b.GetBySecondary(0, secondaryKey3) + require.Nil(t, err) + assert.Equal(t, res, replaced3) + }) + }) + + t.Run("with a flush after initial write and update", func(t *testing.T) { + b, err := 
NewBucketCreator().NewBucket(ctx, t.TempDir(), "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) + require.Nil(t, err) + + defer b.Shutdown(ctx) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + t.Run("set original values", func(t *testing.T) { + key1 := []byte("key-1") + key2 := []byte("key-2") + key3 := []byte("key-3") + secondaryKey1 := []byte("secondary-key-1") + secondaryKey2 := []byte("secondary-key-2") + secondaryKey3 := []byte("secondary-key-3") + orig1 := []byte("original value for key1") + orig2 := []byte("original value for key2") + orig3 := []byte("original value for key3") + + err = b.Put(key1, orig1, WithSecondaryKey(0, secondaryKey1)) + require.Nil(t, err) + err = b.Put(key2, orig2, WithSecondaryKey(0, secondaryKey2)) + require.Nil(t, err) + err = b.Put(key3, orig3, WithSecondaryKey(0, secondaryKey3)) + require.Nil(t, err) + }) + + t.Run("flush memtable to disk", func(t *testing.T) { + require.Nil(t, b.FlushAndSwitch()) + }) + + t.Run("replace the secondary keys on an update", func(t *testing.T) { + key2 := []byte("key-2") + key3 := []byte("key-3") + secondaryKey2 := []byte("secondary-key-2-updated") + secondaryKey3 := []byte("secondary-key-3-updated") + replaced2 := []byte("twice updated value for key2") + replaced3 := []byte("twice updated value for key3") + + err = b.Put(key2, replaced2, WithSecondaryKey(0, secondaryKey2)) + require.Nil(t, err) + err = b.Put(key3, replaced3, WithSecondaryKey(0, secondaryKey3)) + require.Nil(t, err) + }) + + t.Run("flush memtable to disk", func(t *testing.T) { + require.Nil(t, b.FlushAndSwitch()) + }) + + t.Run("verify again", func(t *testing.T) { + secondaryKey1 := []byte("secondary-key-1") + secondaryKey2 := []byte("secondary-key-2-updated") + secondaryKey3 := []byte("secondary-key-3-updated") + orig1 := []byte("original value for key1") + replaced2 := []byte("twice updated value for key2") + replaced3 := 
[]byte("twice updated value for key3") + + // verify you can find by updated secondary keys + res, err := b.GetBySecondary(0, secondaryKey1) + require.Nil(t, err) + assert.Equal(t, res, orig1) + res, err = b.GetBySecondary(0, secondaryKey2) + require.Nil(t, err) + assert.Equal(t, res, replaced2) + res, err = b.GetBySecondary(0, secondaryKey3) + require.Nil(t, err) + assert.Equal(t, res, replaced3) + }) + }) + + t.Run("update in memtable then do an orderly shutdown and reinit", func(t *testing.T) { + dirName := t.TempDir() + b, err := NewBucketCreator().NewBucket(ctx, dirName, "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) + require.Nil(t, err) + + defer b.Shutdown(ctx) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + t.Run("set original values", func(t *testing.T) { + key1 := []byte("key-1") + key2 := []byte("key-2") + key3 := []byte("key-3") + secondaryKey1 := []byte("secondary-key-1") + secondaryKey2 := []byte("secondary-key-2") + secondaryKey3 := []byte("secondary-key-3") + orig1 := []byte("original value for key1") + orig2 := []byte("original value for key2") + orig3 := []byte("original value for key3") + + err = b.Put(key1, orig1, WithSecondaryKey(0, secondaryKey1)) + require.Nil(t, err) + err = b.Put(key2, orig2, WithSecondaryKey(0, secondaryKey2)) + require.Nil(t, err) + err = b.Put(key3, orig3, WithSecondaryKey(0, secondaryKey3)) + require.Nil(t, err) + }) + + t.Run("replace the secondary keys on an update", func(t *testing.T) { + key2 := []byte("key-2") + key3 := []byte("key-3") + secondaryKey2 := []byte("secondary-key-2-updated") + secondaryKey3 := []byte("secondary-key-3-updated") + replaced2 := []byte("twice updated value for key2") + replaced3 := []byte("twice updated value for key3") + + err = b.Put(key2, replaced2, WithSecondaryKey(0, secondaryKey2)) + require.Nil(t, err) + err = b.Put(key3, replaced3, WithSecondaryKey(0, secondaryKey3)) + 
require.Nil(t, err) + }) + + t.Run("flush memtable to disk", func(t *testing.T) { + require.Nil(t, b.Shutdown(context.Background())) + }) + + t.Run("init a new one and verify", func(t *testing.T) { + b2, err := NewBucketCreator().NewBucket(ctx, dirName, "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) + require.Nil(t, err) + + secondaryKey1 := []byte("secondary-key-1") + secondaryKey2 := []byte("secondary-key-2-updated") + secondaryKey3 := []byte("secondary-key-3-updated") + orig1 := []byte("original value for key1") + replaced2 := []byte("twice updated value for key2") + replaced3 := []byte("twice updated value for key3") + + // verify you can find by updated secondary keys + res, err := b2.GetBySecondary(0, secondaryKey1) + require.Nil(t, err) + assert.Equal(t, res, orig1) + res, err = b2.GetBySecondary(0, secondaryKey2) + require.Nil(t, err) + assert.Equal(t, res, replaced2) + res, err = b2.GetBySecondary(0, secondaryKey3) + require.Nil(t, err) + assert.Equal(t, res, replaced3) + }) + }) +} + +func replaceInsertAndDelete(ctx context.Context, t *testing.T, opts []BucketOption) { + t.Run("memtable-only", func(t *testing.T) { + b, err := NewBucketCreator().NewBucket(ctx, t.TempDir(), "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) 
+ require.Nil(t, err) + + defer b.Shutdown(ctx) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + t.Run("set original values", func(t *testing.T) { + key1 := []byte("key-1") + key2 := []byte("key-2") + key3 := []byte("key-3") + orig1 := []byte("original value for key1") + orig2 := []byte("original value for key2") + orig3 := []byte("original value for key3") + + err = b.Put(key1, orig1) + require.Nil(t, err) + err = b.Put(key2, orig2) + require.Nil(t, err) + err = b.Put(key3, orig3) + require.Nil(t, err) + }) + + t.Run("delete some, keep one", func(t *testing.T) { + key1 := []byte("key-1") + key2 := []byte("key-2") + key3 := []byte("key-3") + orig1 := []byte("original value for key1") + + err = b.Delete(key2) + require.Nil(t, err) + err = b.Delete(key3) + require.Nil(t, err) + + res, err := b.Get(key1) + require.Nil(t, err) + assert.Equal(t, res, orig1) + res, err = b.Get(key2) + require.Nil(t, err) + assert.Nil(t, res) + res, err = b.Get(key3) + require.Nil(t, err) + assert.Nil(t, res) + }) + + t.Run("count objects", func(t *testing.T) { + assert.Equal(t, 1, b.Count()) + // all happenin in the memtable so far, async does not know of any + // objects yet + assert.Equal(t, 0, b.CountAsync()) + }) + }) + + t.Run("with single flush in between updates", func(t *testing.T) { + b, err := NewBucketCreator().NewBucket(ctx, t.TempDir(), "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) 
+ require.Nil(t, err) + + defer b.Shutdown(ctx) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + t.Run("set original values", func(t *testing.T) { + key1 := []byte("key-1") + key2 := []byte("key-2") + key3 := []byte("key-3") + orig1 := []byte("original value for key1") + orig2 := []byte("original value for key2") + orig3 := []byte("original value for key3") + + err = b.Put(key1, orig1) + require.Nil(t, err) + err = b.Put(key2, orig2) + require.Nil(t, err) + err = b.Put(key3, orig3) + require.Nil(t, err) + }) + + t.Run("flush to disk", func(t *testing.T) { + require.Nil(t, b.FlushAndSwitch()) + }) + + t.Run("delete some, keep one", func(t *testing.T) { + key1 := []byte("key-1") + key2 := []byte("key-2") + key3 := []byte("key-3") + orig1 := []byte("original value for key1") + + err = b.Delete(key2) + require.Nil(t, err) + err = b.Delete(key3) + require.Nil(t, err) + + res, err := b.Get(key1) + require.Nil(t, err) + assert.Equal(t, res, orig1) + res, err = b.Get(key2) + require.Nil(t, err) + assert.Nil(t, res) + res, err = b.Get(key3) + require.Nil(t, err) + assert.Nil(t, res) + }) + + t.Run("count objects", func(t *testing.T) { + assert.Equal(t, 1, b.Count()) + // async still looks at the objects in the segment, ignores deletes in + // the memtable + assert.Equal(t, 3, b.CountAsync()) + }) + }) + + t.Run("with flushes after initial write and delete", func(t *testing.T) { + b, err := NewBucketCreator().NewBucket(ctx, t.TempDir(), "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) 
+ require.Nil(t, err) + + defer b.Shutdown(ctx) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + t.Run("set original values", func(t *testing.T) { + key1 := []byte("key-1") + key2 := []byte("key-2") + key3 := []byte("key-3") + orig1 := []byte("original value for key1") + orig2 := []byte("original value for key2") + orig3 := []byte("original value for key3") + + err = b.Put(key1, orig1) + require.Nil(t, err) + err = b.Put(key2, orig2) + require.Nil(t, err) + err = b.Put(key3, orig3) + require.Nil(t, err) + }) + + t.Run("flush to disk", func(t *testing.T) { + require.Nil(t, b.FlushAndSwitch()) + }) + + t.Run("delete some, keep one", func(t *testing.T) { + key1 := []byte("key-1") + key2 := []byte("key-2") + key3 := []byte("key-3") + orig1 := []byte("original value for key1") + + err = b.Delete(key2) + require.Nil(t, err) + err = b.Delete(key3) + require.Nil(t, err) + + // Flush again! + require.Nil(t, b.FlushAndSwitch()) + + res, err := b.Get(key1) + require.Nil(t, err) + assert.Equal(t, res, orig1) + res, err = b.Get(key2) + require.Nil(t, err) + assert.Nil(t, res) + res, err = b.Get(key3) + require.Nil(t, err) + assert.Nil(t, res) + }) + + t.Run("count objects", func(t *testing.T) { + assert.Equal(t, 1, b.Count()) + assert.Equal(t, 1, b.CountAsync()) + }) + }) +} + +func replaceCursors(ctx context.Context, t *testing.T, opts []BucketOption) { + t.Run("memtable-only", func(t *testing.T) { + r := getRandomSeed() + dirName := t.TempDir() + + b, err := NewBucketCreator().NewBucket(ctx, dirName, "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) 
+ require.Nil(t, err) + + defer b.Shutdown(ctx) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + t.Run("set original values", func(t *testing.T) { + pairs := 20 + keys := make([][]byte, pairs) + values := make([][]byte, pairs) + + for i := range keys { + keys[i] = []byte(fmt.Sprintf("key-%03d", i)) + values[i] = []byte(fmt.Sprintf("value-%03d", i)) + } + + // shuffle to make sure the BST isn't accidentally in order + r.Shuffle(len(keys), func(i, j int) { + keys[i], keys[j] = keys[j], keys[i] + values[i], values[j] = values[j], values[i] + }) + + for i := range keys { + err = b.Put(keys[i], values[i]) + require.Nil(t, err) + } + }) + + t.Run("seek from somewhere in the middle", func(t *testing.T) { + expectedKeys := [][]byte{ + []byte("key-016"), + []byte("key-017"), + []byte("key-018"), + []byte("key-019"), + } + expectedValues := [][]byte{ + []byte("value-016"), + []byte("value-017"), + []byte("value-018"), + []byte("value-019"), + } + + var retrievedKeys [][]byte + var retrievedValues [][]byte + c := b.Cursor() + defer c.Close() + for k, v := c.Seek([]byte("key-016")); k != nil; k, v = c.Next() { + retrievedKeys = copyAndAppend(retrievedKeys, k) + retrievedValues = copyAndAppend(retrievedValues, v) + } + + assert.Equal(t, expectedKeys, retrievedKeys) + assert.Equal(t, expectedValues, retrievedValues) + }) + + t.Run("start from the beginning", func(t *testing.T) { + expectedKeys := [][]byte{ + []byte("key-000"), + []byte("key-001"), + []byte("key-002"), + } + expectedValues := [][]byte{ + []byte("value-000"), + []byte("value-001"), + []byte("value-002"), + } + + var retrievedKeys [][]byte + var retrievedValues [][]byte + c := b.Cursor() + defer c.Close() + retrieved := 0 + for k, v := c.First(); k != nil && retrieved < 3; k, v = c.Next() { + retrieved++ + retrievedKeys = copyAndAppend(retrievedKeys, k) + retrievedValues = copyAndAppend(retrievedValues, v) + } + + assert.Equal(t, expectedKeys, retrievedKeys) + 
assert.Equal(t, expectedValues, retrievedValues) + }) + + t.Run("replace a key", func(t *testing.T) { + key := []byte("key-002") + value := []byte("value-002-updated") + + err = b.Put(key, value) + require.Nil(t, err) + + expectedKeys := [][]byte{ + []byte("key-001"), + []byte("key-002"), + } + expectedValues := [][]byte{ + []byte("value-001"), + []byte("value-002-updated"), + } + + var retrievedKeys [][]byte + var retrievedValues [][]byte + c := b.Cursor() + defer c.Close() + retrieved := 0 + for k, v := c.Seek([]byte("key-001")); k != nil && retrieved < 2; k, v = c.Next() { + retrieved++ + retrievedKeys = copyAndAppend(retrievedKeys, k) + retrievedValues = copyAndAppend(retrievedValues, v) + } + + assert.Equal(t, expectedKeys, retrievedKeys) + assert.Equal(t, expectedValues, retrievedValues) + }) + + t.Run("delete a key", func(t *testing.T) { + key := []byte("key-002") + + err = b.Delete(key) + require.Nil(t, err) + + t.Run("seek to a specific key", func(t *testing.T) { + expectedKeys := [][]byte{ + []byte("key-001"), + []byte("key-003"), + } + expectedValues := [][]byte{ + []byte("value-001"), + []byte("value-003"), + } + var retrievedKeys [][]byte + var retrievedValues [][]byte + c := b.Cursor() + defer c.Close() + retrieved := 0 + for k, v := c.Seek([]byte("key-001")); k != nil && retrieved < 2; k, v = c.Next() { + retrieved++ + retrievedKeys = copyAndAppend(retrievedKeys, k) + retrievedValues = copyAndAppend(retrievedValues, v) + } + + assert.Equal(t, expectedKeys, retrievedKeys) + assert.Equal(t, expectedValues, retrievedValues) + }) + + t.Run("seek to first key", func(t *testing.T) { + expectedKeys := [][]byte{ + []byte("key-000"), + []byte("key-001"), + []byte("key-003"), + } + expectedValues := [][]byte{ + []byte("value-000"), + []byte("value-001"), + []byte("value-003"), + } + + var retrievedKeys [][]byte + var retrievedValues [][]byte + c := b.Cursor() + defer c.Close() + retrieved := 0 + for k, v := c.First(); k != nil && retrieved < 3; k, v = c.Next() 
{ + retrieved++ + retrievedKeys = copyAndAppend(retrievedKeys, k) + retrievedValues = copyAndAppend(retrievedValues, v) + } + + assert.Equal(t, expectedKeys, retrievedKeys) + assert.Equal(t, expectedValues, retrievedValues) + }) + }) + + t.Run("delete the first key", func(t *testing.T) { + key := []byte("key-000") + + err = b.Delete(key) + require.Nil(t, err) + + t.Run("seek to a specific key", func(t *testing.T) { + expectedKeys := [][]byte{ + []byte("key-001"), + []byte("key-003"), + } + expectedValues := [][]byte{ + []byte("value-001"), + []byte("value-003"), + } + var retrievedKeys [][]byte + var retrievedValues [][]byte + c := b.Cursor() + defer c.Close() + retrieved := 0 + for k, v := c.Seek([]byte("key-000")); k != nil && retrieved < 2; k, v = c.Next() { + retrieved++ + retrievedKeys = copyAndAppend(retrievedKeys, k) + retrievedValues = copyAndAppend(retrievedValues, v) + } + + assert.Equal(t, expectedKeys, retrievedKeys) + assert.Equal(t, expectedValues, retrievedValues) + }) + + t.Run("seek to first key", func(t *testing.T) { + expectedKeys := [][]byte{ + []byte("key-001"), + []byte("key-003"), + } + expectedValues := [][]byte{ + []byte("value-001"), + []byte("value-003"), + } + + var retrievedKeys [][]byte + var retrievedValues [][]byte + c := b.Cursor() + defer c.Close() + retrieved := 0 + for k, v := c.First(); k != nil && retrieved < 2; k, v = c.Next() { + retrieved++ + retrievedKeys = copyAndAppend(retrievedKeys, k) + retrievedValues = copyAndAppend(retrievedValues, v) + } + + assert.Equal(t, expectedKeys, retrievedKeys) + assert.Equal(t, expectedValues, retrievedValues) + }) + }) + }) + + t.Run("with a single flush", func(t *testing.T) { + r := getRandomSeed() + dirName := t.TempDir() + + b, err := NewBucketCreator().NewBucket(ctx, dirName, "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) 
+ require.Nil(t, err) + + defer b.Shutdown(ctx) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + t.Run("set original values", func(t *testing.T) { + pairs := 20 + keys := make([][]byte, pairs) + values := make([][]byte, pairs) + + for i := range keys { + keys[i] = []byte(fmt.Sprintf("key-%03d", i)) + values[i] = []byte(fmt.Sprintf("value-%03d", i)) + } + + // shuffle to make sure the BST isn't accidentally in order + r.Shuffle(len(keys), func(i, j int) { + keys[i], keys[j] = keys[j], keys[i] + values[i], values[j] = values[j], values[i] + }) + + for i := range keys { + err = b.Put(keys[i], values[i]) + require.Nil(t, err) + } + }) + + t.Run("flush to disk", func(t *testing.T) { + require.Nil(t, b.FlushAndSwitch()) + }) + + t.Run("seek from somewhere in the middle", func(t *testing.T) { + expectedKeys := [][]byte{ + []byte("key-016"), + []byte("key-017"), + []byte("key-018"), + []byte("key-019"), + } + expectedValues := [][]byte{ + []byte("value-016"), + []byte("value-017"), + []byte("value-018"), + []byte("value-019"), + } + + var retrievedKeys [][]byte + var retrievedValues [][]byte + c := b.Cursor() + defer c.Close() + for k, v := c.Seek([]byte("key-016")); k != nil; k, v = c.Next() { + retrievedKeys = copyAndAppend(retrievedKeys, k) + retrievedValues = copyAndAppend(retrievedValues, v) + } + + assert.Equal(t, expectedKeys, retrievedKeys) + assert.Equal(t, expectedValues, retrievedValues) + }) + + t.Run("start from the beginning", func(t *testing.T) { + expectedKeys := [][]byte{ + []byte("key-000"), + []byte("key-001"), + []byte("key-002"), + } + expectedValues := [][]byte{ + []byte("value-000"), + []byte("value-001"), + []byte("value-002"), + } + + var retrievedKeys [][]byte + var retrievedValues [][]byte + c := b.Cursor() + defer c.Close() + retrieved := 0 + for k, v := c.First(); k != nil && retrieved < 3; k, v = c.Next() { + retrieved++ + retrievedKeys = copyAndAppend(retrievedKeys, k) + retrievedValues = 
copyAndAppend(retrievedValues, v) + } + + assert.Equal(t, expectedKeys, retrievedKeys) + assert.Equal(t, expectedValues, retrievedValues) + }) + }) + + t.Run("mixing several disk segments and memtable - with updates", func(t *testing.T) { + r := getRandomSeed() + dirName := t.TempDir() + + b, err := NewBucketCreator().NewBucket(ctx, dirName, "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) + require.Nil(t, err) + + defer b.Shutdown(ctx) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + t.Run("first third (%3==0)", func(t *testing.T) { + pairs := 20 + var keys [][]byte + var values [][]byte + + for i := 0; i < pairs; i++ { + if i%3 == 0 { + keys = copyAndAppend(keys, []byte(fmt.Sprintf("key-%03d", i))) + values = copyAndAppend(values, []byte(fmt.Sprintf("value-%03d", i))) + } + } + + // shuffle to make sure the BST isn't accidentally in order + r.Shuffle(len(keys), func(i, j int) { + keys[i], keys[j] = keys[j], keys[i] + values[i], values[j] = values[j], values[i] + }) + + for i := range keys { + err = b.Put(keys[i], values[i]) + require.Nil(t, err) + } + }) + + t.Run("flush to disk", func(t *testing.T) { + require.Nil(t, b.FlushAndSwitch()) + }) + + t.Run("second third (%3==1)", func(t *testing.T) { + pairs := 20 + var keys [][]byte + var values [][]byte + + for i := 0; i < pairs; i++ { + if i%3 == 1 { + keys = copyAndAppend(keys, []byte(fmt.Sprintf("key-%03d", i))) + values = copyAndAppend(values, []byte(fmt.Sprintf("value-%03d", i))) + } + } + + // shuffle to make sure the BST isn't accidentally in order + r.Shuffle(len(keys), func(i, j int) { + keys[i], keys[j] = keys[j], keys[i] + values[i], values[j] = values[j], values[i] + }) + + for i := range keys { + err = b.Put(keys[i], values[i]) + require.Nil(t, err) + } + }) + + t.Run("update something that was already written in segment 1", func(t *testing.T) { + require.Nil(t, b.Put([]byte("key-000"), 
[]byte("updated-value-000"))) + require.Nil(t, b.Delete([]byte("key-003"))) + }) + + t.Run("flush to disk", func(t *testing.T) { + require.Nil(t, b.FlushAndSwitch()) + }) + + t.Run("third third (%3==2) memtable only", func(t *testing.T) { + pairs := 20 + var keys [][]byte + var values [][]byte + + for i := 0; i < pairs; i++ { + if i%3 == 2 { + keys = copyAndAppend(keys, []byte(fmt.Sprintf("key-%03d", i))) + values = copyAndAppend(values, []byte(fmt.Sprintf("value-%03d", i))) + } + } + + // shuffle to make sure the BST isn't accidentally in order + r.Shuffle(len(keys), func(i, j int) { + keys[i], keys[j] = keys[j], keys[i] + values[i], values[j] = values[j], values[i] + }) + + for i := range keys { + err = b.Put(keys[i], values[i]) + require.Nil(t, err) + } + + // no flush for this one, so this segment stays in the memtable + }) + + t.Run("update something that was already written previously", func(t *testing.T) { + require.Nil(t, b.Put([]byte("key-000"), []byte("twice-updated-value-000"))) + require.Nil(t, b.Put([]byte("key-001"), []byte("once-updated-value-001"))) + require.Nil(t, b.Put([]byte("key-019"), []byte("once-updated-value-019"))) + require.Nil(t, b.Delete([]byte("key-018"))) + }) + + t.Run("seek from somewhere in the middle", func(t *testing.T) { + expectedKeys := [][]byte{ + []byte("key-016"), + []byte("key-017"), + // key-018 deleted + []byte("key-019"), + } + expectedValues := [][]byte{ + []byte("value-016"), + []byte("value-017"), + []byte("once-updated-value-019"), + } + + var retrievedKeys [][]byte + var retrievedValues [][]byte + c := b.Cursor() + defer c.Close() + for k, v := c.Seek([]byte("key-016")); k != nil; k, v = c.Next() { + retrievedKeys = copyAndAppend(retrievedKeys, k) + retrievedValues = copyAndAppend(retrievedValues, v) + } + + assert.Equal(t, expectedKeys, retrievedKeys) + assert.Equal(t, expectedValues, retrievedValues) + }) + + t.Run("start from the beginning", func(t *testing.T) { + expectedKeys := [][]byte{ + []byte("key-000"), + 
[]byte("key-001"), + []byte("key-002"), + // key-003 was deleted + []byte("key-004"), + } + expectedValues := [][]byte{ + []byte("twice-updated-value-000"), + []byte("once-updated-value-001"), + []byte("value-002"), + []byte("value-004"), + } + + var retrievedKeys [][]byte + var retrievedValues [][]byte + c := b.Cursor() + defer c.Close() + retrieved := 0 + for k, v := c.First(); k != nil && retrieved < 4; k, v = c.Next() { + retrieved++ + retrievedKeys = copyAndAppend(retrievedKeys, k) + retrievedValues = copyAndAppend(retrievedValues, v) + } + + assert.Equal(t, expectedKeys, retrievedKeys) + assert.Equal(t, expectedValues, retrievedValues) + }) + + t.Run("re-add the deleted keys", func(t *testing.T) { + require.Nil(t, b.Put([]byte("key-003"), []byte("readded-003"))) + require.Nil(t, b.Put([]byte("key-018"), []byte("readded-018"))) + // tombstones are now only in memtable + }) + + t.Run("seek from somewhere in the middle", func(t *testing.T) { + expectedKeys := [][]byte{ + []byte("key-016"), + []byte("key-017"), + []byte("key-018"), + []byte("key-019"), + } + expectedValues := [][]byte{ + []byte("value-016"), + []byte("value-017"), + []byte("readded-018"), + []byte("once-updated-value-019"), + } + + var retrievedKeys [][]byte + var retrievedValues [][]byte + c := b.Cursor() + defer c.Close() + for k, v := c.Seek([]byte("key-016")); k != nil; k, v = c.Next() { + retrievedKeys = copyAndAppend(retrievedKeys, k) + retrievedValues = copyAndAppend(retrievedValues, v) + } + + assert.Equal(t, expectedKeys, retrievedKeys) + assert.Equal(t, expectedValues, retrievedValues) + }) + + t.Run("start from the beginning", func(t *testing.T) { + expectedKeys := [][]byte{ + []byte("key-000"), + []byte("key-001"), + []byte("key-002"), + []byte("key-003"), + } + expectedValues := [][]byte{ + []byte("twice-updated-value-000"), + []byte("once-updated-value-001"), + []byte("value-002"), + []byte("readded-003"), + } + + var retrievedKeys [][]byte + var retrievedValues [][]byte + c := 
b.Cursor() + defer c.Close() + retrieved := 0 + for k, v := c.First(); k != nil && retrieved < 4; k, v = c.Next() { + retrieved++ + retrievedKeys = copyAndAppend(retrievedKeys, k) + retrievedValues = copyAndAppend(retrievedValues, v) + } + + assert.Equal(t, expectedKeys, retrievedKeys) + assert.Equal(t, expectedValues, retrievedValues) + }) + + t.Run("perform a final flush to disk", func(t *testing.T) { + require.Nil(t, b.FlushAndSwitch()) + }) + + t.Run("seek from somewhere in the middle", func(t *testing.T) { + expectedKeys := [][]byte{ + []byte("key-016"), + []byte("key-017"), + []byte("key-018"), + []byte("key-019"), + } + expectedValues := [][]byte{ + []byte("value-016"), + []byte("value-017"), + []byte("readded-018"), + []byte("once-updated-value-019"), + } + + var retrievedKeys [][]byte + var retrievedValues [][]byte + c := b.Cursor() + defer c.Close() + for k, v := c.Seek([]byte("key-016")); k != nil; k, v = c.Next() { + retrievedKeys = copyAndAppend(retrievedKeys, k) + retrievedValues = copyAndAppend(retrievedValues, v) + } + + assert.Equal(t, expectedKeys, retrievedKeys) + assert.Equal(t, expectedValues, retrievedValues) + }) + + t.Run("start from the beginning", func(t *testing.T) { + expectedKeys := [][]byte{ + []byte("key-000"), + []byte("key-001"), + []byte("key-002"), + []byte("key-003"), + } + expectedValues := [][]byte{ + []byte("twice-updated-value-000"), + []byte("once-updated-value-001"), + []byte("value-002"), + []byte("readded-003"), + } + + var retrievedKeys [][]byte + var retrievedValues [][]byte + c := b.Cursor() + defer c.Close() + retrieved := 0 + for k, v := c.First(); k != nil && retrieved < 4; k, v = c.Next() { + retrieved++ + retrievedKeys = copyAndAppend(retrievedKeys, k) + retrievedValues = copyAndAppend(retrievedValues, v) + } + + assert.Equal(t, expectedKeys, retrievedKeys) + assert.Equal(t, expectedValues, retrievedValues) + }) + }) + + // This test is inspired by unusual behavior encountered as part of the + // evaluation of 
gh-1569 where a delete could sometimes lead to no data after + // a restart which was caused by the disk segment cursor's .first() method + // not returning the correct key. Thus we'd have a null-key with a tombstone + // which would override whatever is the real "first" key, since null is + // always smaller + t.Run("with deletes as latest in some segments", func(t *testing.T) { + dirName := t.TempDir() + + b, err := NewBucketCreator().NewBucket(ctx, dirName, "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) + require.Nil(t, err) + + defer b.Shutdown(ctx) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + t.Run("add new datapoint", func(t *testing.T) { + err := b.Put([]byte("key-1"), []byte("value-1")) + require.Nil(t, err) + }) + + t.Run("add datapoint and flush", func(t *testing.T) { + err := b.Put([]byte("key-8"), []byte("value-8")) + require.Nil(t, err) + + require.Nil(t, b.FlushAndSwitch()) + }) + + t.Run("delete datapoint and flush", func(t *testing.T) { + err := b.Delete([]byte("key-8")) + // note that we are deleting the key with the 'higher' key, so a missing + // key on the delete would definitely be mismatched. 
If we had instead + // the deleted the first key, the incorrect tombstone would have been + // correct by coincidence + require.Nil(t, err) + + require.Nil(t, b.FlushAndSwitch()) + }) + + t.Run("verify", func(t *testing.T) { + expectedKeys := [][]byte{ + []byte("key-1"), + } + expectedValues := [][]byte{ + []byte("value-1"), + } + + var retrievedKeys [][]byte + var retrievedValues [][]byte + c := b.Cursor() + defer c.Close() + retrieved := 0 + for k, v := c.First(); k != nil && retrieved < 4; k, v = c.Next() { + retrieved++ + retrievedKeys = copyAndAppend(retrievedKeys, k) + retrievedValues = copyAndAppend(retrievedValues, v) + } + + assert.Equal(t, expectedKeys, retrievedKeys) + assert.Equal(t, expectedValues, retrievedValues) + }) + }) +} + +func copyAndAppend(list [][]byte, elem []byte) [][]byte { + elemCopy := make([]byte, len(elem)) + copy(elemCopy, elem) + return append(list, elemCopy) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/strategies_roaringset_integration_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/strategies_roaringset_integration_test.go new file mode 100644 index 0000000000000000000000000000000000000000..e404f897e956c8d7e6b1b0472345ff84c56e0757 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/strategies_roaringset_integration_test.go @@ -0,0 +1,214 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest +// +build integrationTest + +package lsmkv + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/repos/db/roaringset" + "github.com/weaviate/weaviate/entities/cyclemanager" +) + +func TestRoaringSetStrategy(t *testing.T) { + ctx := testCtx() + tests := bucketIntegrationTests{ + { + name: "roaringsetInsertAndSetAdd", + f: roaringsetInsertAndSetAdd, + opts: []BucketOption{ + WithStrategy(StrategyRoaringSet), + WithBitmapBufPool(roaringset.NewBitmapBufPoolNoop()), + }, + }, + } + tests.run(ctx, t) +} + +func roaringsetInsertAndSetAdd(ctx context.Context, t *testing.T, opts []BucketOption) { + dirName := t.TempDir() + + t.Run("memtable-only", func(t *testing.T) { + b, err := NewBucketCreator().NewBucket(ctx, dirName, "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) 
+ require.Nil(t, err) + + defer b.Shutdown(ctx) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + key1 := []byte("test1-key-1") + key2 := []byte("test1-key-2") + key3 := []byte("test1-key-3") + + t.Run("set original values and verify", func(t *testing.T) { + orig1 := []uint64{1, 2} + orig2 := []uint64{3, 4} + orig3 := []uint64{5, 6} + + err = b.RoaringSetAddList(key1, orig1) + require.Nil(t, err) + err = b.RoaringSetAddList(key2, orig2) + require.Nil(t, err) + err = b.RoaringSetAddList(key3, orig3) + require.Nil(t, err) + + res, release, err := b.RoaringSetGet(key1) + require.NoError(t, err) + defer release() + for _, testVal := range orig1 { + assert.True(t, res.Contains(testVal)) + } + + res, release, err = b.RoaringSetGet(key2) + require.NoError(t, err) + defer release() + for _, testVal := range orig2 { + assert.True(t, res.Contains(testVal)) + } + + res, release, err = b.RoaringSetGet(key3) + require.NoError(t, err) + defer release() + for _, testVal := range orig3 { + assert.True(t, res.Contains(testVal)) + } + }) + + t.Run("extend some, delete some, keep some", func(t *testing.T) { + additions2 := []uint64{5} + removal3 := uint64(5) + + err = b.RoaringSetAddList(key2, additions2) + require.Nil(t, err) + err = b.RoaringSetRemoveOne(key3, removal3) + require.Nil(t, err) + + res, release, err := b.RoaringSetGet(key1) + require.NoError(t, err) + defer release() + for _, testVal := range []uint64{1, 2} { // unchanged values + assert.True(t, res.Contains(testVal)) + } + + res, release, err = b.RoaringSetGet(key2) + require.NoError(t, err) + defer release() + for _, testVal := range []uint64{3, 4, 5} { // extended with 5 + assert.True(t, res.Contains(testVal)) + } + + res, release, err = b.RoaringSetGet(key3) + require.NoError(t, err) + defer release() + for _, testVal := range []uint64{6} { // fewer remain + assert.True(t, res.Contains(testVal)) + } + for _, testVal := range []uint64{5} { // no longer contained + 
assert.False(t, res.Contains(testVal)) + } + }) + }) + + t.Run("with a single flush in between updates", func(t *testing.T) { + b, err := NewBucketCreator().NewBucket(ctx, dirName, "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) + require.Nil(t, err) + + defer b.Shutdown(ctx) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + key1 := []byte("test1-key-1") + key2 := []byte("test1-key-2") + key3 := []byte("test1-key-3") + + t.Run("set original values and verify", func(t *testing.T) { + orig1 := []uint64{1, 2} + orig2 := []uint64{3, 4} + orig3 := []uint64{5, 6} + + err = b.RoaringSetAddList(key1, orig1) + require.Nil(t, err) + err = b.RoaringSetAddList(key2, orig2) + require.Nil(t, err) + err = b.RoaringSetAddList(key3, orig3) + require.Nil(t, err) + + res, release, err := b.RoaringSetGet(key1) + require.NoError(t, err) + defer release() + for _, testVal := range orig1 { + assert.True(t, res.Contains(testVal)) + } + + res, release, err = b.RoaringSetGet(key2) + require.NoError(t, err) + defer release() + for _, testVal := range orig2 { + assert.True(t, res.Contains(testVal)) + } + + res, release, err = b.RoaringSetGet(key3) + require.NoError(t, err) + defer release() + for _, testVal := range orig3 { + assert.True(t, res.Contains(testVal)) + } + }) + + t.Run("flush to disk", func(t *testing.T) { + require.Nil(t, b.FlushAndSwitch()) + }) + + t.Run("extend some, delete some, keep some", func(t *testing.T) { + additions2 := []uint64{5} + removal3 := uint64(5) + + err = b.RoaringSetAddList(key2, additions2) + require.Nil(t, err) + err = b.RoaringSetRemoveOne(key3, removal3) + require.Nil(t, err) + + res, release, err := b.RoaringSetGet(key1) + require.NoError(t, err) + defer release() + for _, testVal := range []uint64{1, 2} { // unchanged values + assert.True(t, res.Contains(testVal)) + } + + res, release, err = b.RoaringSetGet(key2) + require.NoError(t, err) + 
defer release() + for _, testVal := range []uint64{3, 4, 5} { // extended with 5 + assert.True(t, res.Contains(testVal)) + } + + res, release, err = b.RoaringSetGet(key3) + require.NoError(t, err) + defer release() + for _, testVal := range []uint64{6} { // fewer remain + assert.True(t, res.Contains(testVal)) + } + for _, testVal := range []uint64{5} { // no longer contained + assert.False(t, res.Contains(testVal)) + } + }) + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/strategies_roaringsetrange_integration_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/strategies_roaringsetrange_integration_test.go new file mode 100644 index 0000000000000000000000000000000000000000..af2cf9b255eaa3e2301cbd1f44530078e377216c --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/strategies_roaringsetrange_integration_test.go @@ -0,0 +1,189 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest +// +build integrationTest + +package lsmkv + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/cyclemanager" + "github.com/weaviate/weaviate/entities/filters" +) + +func TestRoaringSetRangeStrategy(t *testing.T) { + ctx := testCtx() + tests := bucketIntegrationTests{ + { + name: "roaringsetrangeInsertAndSetAdd", + f: roaringsetrangeInsertAndSetAdd, + opts: []BucketOption{ + WithStrategy(StrategyRoaringSetRange), + }, + }, + } + tests.run(ctx, t) +} + +func roaringsetrangeInsertAndSetAdd(ctx context.Context, t *testing.T, opts []BucketOption) { + t.Run("memtable-only", func(t *testing.T) { + dirName := t.TempDir() + b, err := NewBucketCreator().NewBucket(ctx, dirName, "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) + require.Nil(t, err) + + defer b.Shutdown(ctx) + + key1 := uint64(1) + key2 := uint64(2) + key3 := uint64(3) + + t.Run("set original values and verify", func(t *testing.T) { + orig1 := []uint64{1, 2} + orig2 := []uint64{3, 4} + orig3 := []uint64{5, 6} + + err = b.RoaringSetRangeAdd(key1, orig1...) + require.Nil(t, err) + err = b.RoaringSetRangeAdd(key2, orig2...) + require.Nil(t, err) + err = b.RoaringSetRangeAdd(key3, orig3...) 
+ require.Nil(t, err) + + reader := b.ReaderRoaringSetRange() + defer reader.Close() + + bm1, release1, err := reader.Read(testCtx(), key1, filters.OperatorEqual) + require.NoError(t, err) + defer release1() + assert.ElementsMatch(t, orig1, bm1.ToArray()) + + bm2, release2, err := reader.Read(testCtx(), key2, filters.OperatorEqual) + require.NoError(t, err) + defer release2() + assert.ElementsMatch(t, orig2, bm2.ToArray()) + + bm3, release3, err := reader.Read(testCtx(), key3, filters.OperatorEqual) + require.NoError(t, err) + defer release3() + assert.ElementsMatch(t, orig3, bm3.ToArray()) + }) + + t.Run("extend some, delete some, keep some", func(t *testing.T) { + deletions1 := []uint64{1} + additions2 := []uint64{5, 7} + + err = b.RoaringSetRangeRemove(key1, deletions1...) + require.NoError(t, err) + err = b.RoaringSetRangeAdd(key2, additions2...) // implicit removal from key3 (5) + require.NoError(t, err) + + reader := b.ReaderRoaringSetRange() + defer reader.Close() + + bm1, release1, err := reader.Read(testCtx(), key1, filters.OperatorEqual) + require.NoError(t, err) + defer release1() + assert.ElementsMatch(t, []uint64{2}, bm1.ToArray()) // unchanged values + + bm2, release2, err := reader.Read(testCtx(), key2, filters.OperatorEqual) + require.NoError(t, err) + defer release2() + assert.ElementsMatch(t, []uint64{3, 4, 5, 7}, bm2.ToArray()) // extended with 5 + + bm3, release3, err := reader.Read(testCtx(), key3, filters.OperatorEqual) + require.NoError(t, err) + defer release3() + assert.ElementsMatch(t, []uint64{6}, bm3.ToArray()) // fewer remain + }) + }) + + t.Run("with a single flush in between updates", func(t *testing.T) { + dirName := t.TempDir() + b, err := NewBucketCreator().NewBucket(ctx, dirName, "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) 
+ require.Nil(t, err) + + defer b.Shutdown(ctx) + + key1 := uint64(1) + key2 := uint64(2) + key3 := uint64(3) + + t.Run("set original values and verify", func(t *testing.T) { + orig1 := []uint64{1, 2} + orig2 := []uint64{3, 4} + orig3 := []uint64{5, 6} + + err = b.RoaringSetRangeAdd(key1, orig1...) + require.Nil(t, err) + err = b.RoaringSetRangeAdd(key2, orig2...) + require.Nil(t, err) + err = b.RoaringSetRangeAdd(key3, orig3...) + require.Nil(t, err) + + reader := b.ReaderRoaringSetRange() + defer reader.Close() + + bm1, release1, err := reader.Read(testCtx(), key1, filters.OperatorEqual) + require.NoError(t, err) + defer release1() + assert.ElementsMatch(t, orig1, bm1.ToArray()) + + bm2, release2, err := reader.Read(testCtx(), key2, filters.OperatorEqual) + require.NoError(t, err) + defer release2() + assert.ElementsMatch(t, orig2, bm2.ToArray()) + + bm3, release3, err := reader.Read(testCtx(), key3, filters.OperatorEqual) + require.NoError(t, err) + defer release3() + assert.ElementsMatch(t, orig3, bm3.ToArray()) + }) + + t.Run("flush to disk", func(t *testing.T) { + require.Nil(t, b.FlushAndSwitch()) + }) + + t.Run("extend some, delete some, keep some", func(t *testing.T) { + deletions1 := []uint64{1} + additions2 := []uint64{5, 7} + + err = b.RoaringSetRangeRemove(key1, deletions1...) + require.NoError(t, err) + err = b.RoaringSetRangeAdd(key2, additions2...) 
// implicit removal from key3 (5) + require.NoError(t, err) + + reader := b.ReaderRoaringSetRange() + defer reader.Close() + + bm1, release1, err := reader.Read(testCtx(), key1, filters.OperatorEqual) + require.NoError(t, err) + defer release1() + assert.ElementsMatch(t, []uint64{2}, bm1.ToArray()) // unchanged values + + bm2, release2, err := reader.Read(testCtx(), key2, filters.OperatorEqual) + require.NoError(t, err) + defer release2() + assert.ElementsMatch(t, []uint64{3, 4, 5, 7}, bm2.ToArray()) // extended with 5 + + bm3, release3, err := reader.Read(testCtx(), key3, filters.OperatorEqual) + require.NoError(t, err) + defer release3() + assert.ElementsMatch(t, []uint64{6}, bm3.ToArray()) // fewer remain + }) + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/strategies_set.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/strategies_set.go new file mode 100644 index 0000000000000000000000000000000000000000..dfefd64022e7bfd439ee9b7a381f8cbe9e355ae4 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/strategies_set.go @@ -0,0 +1,143 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +type setDecoder struct{} + +func newSetDecoder() *setDecoder { + return &setDecoder{} +} + +func (s *setDecoder) Do(in []value) [][]byte { + // check if there are tombstones, if not, we can simply take the list without + // further processing + var tombstones int + for _, value := range in { + if value.tombstone { + tombstones++ + } + } + + if tombstones == 0 { + return s.doWithoutTombstones(in) + } + + // there are tombstones, we need to remove them + // TODO: The logic below can be improved since don't care about the "latest" + // write on a set, as all writes are per definition identical. Any write that + // is not followed by a tombstone is fine + count := make(map[string]uint, len(in)) + for _, value := range in { + count[string(value.value)] = count[string(value.value)] + 1 + } + out := make([][]byte, len(in)) + + i := 0 + for _, value := range in { + if count[string(value.value)] != 1 { + count[string(value.value)] = count[string(value.value)] - 1 + continue + } + + if value.tombstone { + continue + } + + out[i] = value.value + i++ + } + + return out[:i] +} + +func (s *setDecoder) doWithoutTombstones(in []value) [][]byte { + out := make([][]byte, len(in)) + for i := range in { + out[i] = in[i].value + } + + // take an arbitrary cutoff for when it is worth to remove duplicates. The + // assumption is that on larger lists, duplicates are more likely to be + // tolerated, for example, because the point is to build an allow list for a + // secondary index where a duplicate does not matter. If the amount is + // smaller than the cutoff this is more likely to be relevant to a user. 
+ // + // As the list gets longer, removing duplicates gets a lot more expensive, + // hence it makes sense to skip the de-duplication, if we can be reasonably + // sure that it does not matter + if len(out) <= 1000 { + return s.deduplicateResults(out) + } + + return out +} + +func (s *setDecoder) deduplicateResults(in [][]byte) [][]byte { + out := make([][]byte, len(in)) + + seen := map[string]struct{}{} + + i := 0 + for _, elem := range in { + if _, ok := seen[string(elem)]; ok { + continue + } + + out[i] = elem + seen[string(elem)] = struct{}{} + i++ + } + + return out[:i] +} + +// DoPartial keeps any extra tombstones, but does not keep tombstones which +// were "consumed" +func (s *setDecoder) DoPartial(in []value) []value { + count := map[string]uint{} + for _, value := range in { + count[string(value.value)] = count[string(value.value)] + 1 + } + + out := make([]value, len(in)) + + i := 0 + for _, value := range in { + if count[string(value.value)] != 1 { + count[string(value.value)] = count[string(value.value)] - 1 + continue + } + + out[i] = value + i++ + } + + return out[:i] +} + +type setEncoder struct{} + +func newSetEncoder() *setEncoder { + return &setEncoder{} +} + +func (s *setEncoder) Do(in [][]byte) []value { + out := make([]value, len(in)) + for i, v := range in { + out[i] = value{ + tombstone: false, + value: v, + } + } + + return out +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/strategies_set_integration_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/strategies_set_integration_test.go new file mode 100644 index 0000000000000000000000000000000000000000..7953273bb223e30d0b22af25dce5396a2d4f62d3 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/strategies_set_integration_test.go @@ -0,0 +1,1102 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ 
|_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest +// +build integrationTest + +package lsmkv + +import ( + "context" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/cyclemanager" +) + +func TestSetCollectionStrategy(t *testing.T) { + ctx := testCtx() + tests := bucketIntegrationTests{ + { + name: "collectionInsertAndSetAdd", + f: collectionInsertAndSetAdd, + opts: []BucketOption{ + WithStrategy(StrategySetCollection), + }, + }, + { + name: "collectionInsertAndSetAddInsertAndDelete", + f: collectionInsertAndSetAddInsertAndDelete, + opts: []BucketOption{ + WithStrategy(StrategySetCollection), + }, + }, + { + name: "collectionCursors", + f: collectionCursors, + opts: []BucketOption{ + WithStrategy(StrategySetCollection), + }, + }, + } + tests.run(ctx, t) +} + +func collectionInsertAndSetAdd(ctx context.Context, t *testing.T, opts []BucketOption) { + dirName := t.TempDir() + + t.Run("memtable-only", func(t *testing.T) { + b, err := NewBucketCreator().NewBucket(ctx, dirName, "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) 
+ require.Nil(t, err) + + defer b.Shutdown(ctx) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + key1 := []byte("test1-key-1") + key2 := []byte("test1-key-2") + key3 := []byte("test1-key-3") + + t.Run("set original values and verify", func(t *testing.T) { + orig1 := [][]byte{[]byte("value 1.1"), []byte("value 1.2")} + orig2 := [][]byte{[]byte("value 2.1"), []byte("value 2.2")} + orig3 := [][]byte{[]byte("value 3.1"), []byte("value 3.2")} + + err = b.SetAdd(key1, orig1) + require.Nil(t, err) + err = b.SetAdd(key2, orig2) + require.Nil(t, err) + err = b.SetAdd(key3, orig3) + require.Nil(t, err) + + res, err := b.SetList(key1) + require.Nil(t, err) + assert.Equal(t, res, orig1) + res, err = b.SetList(key2) + require.Nil(t, err) + assert.Equal(t, res, orig2) + res, err = b.SetList(key3) + require.Nil(t, err) + assert.Equal(t, res, orig3) + }) + + t.Run("replace some, keep one", func(t *testing.T) { + orig1 := [][]byte{[]byte("value 1.1"), []byte("value 1.2")} + orig2 := [][]byte{[]byte("value 2.1"), []byte("value 2.2")} + orig3 := [][]byte{[]byte("value 3.1"), []byte("value 3.2")} + append2 := [][]byte{[]byte("value 2.3")} + append3 := [][]byte{[]byte("value 3.3")} + + err = b.SetAdd(key2, append2) + require.Nil(t, err) + err = b.SetAdd(key3, append3) + require.Nil(t, err) + + res, err := b.SetList(key1) + require.Nil(t, err) + assert.Equal(t, orig1, res) + res, err = b.SetList(key2) + require.Nil(t, err) + assert.Equal(t, append(orig2, append2...), res) + res, err = b.SetList(key3) + require.Nil(t, err) + assert.Equal(t, append(orig3, append3...), res) + }) + }) + + t.Run("with a single flush between updates", func(t *testing.T) { + b, err := NewBucketCreator().NewBucket(ctx, dirName, "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) 
+ require.Nil(t, err) + + defer b.Shutdown(ctx) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + key1 := []byte("test2-key-1") + key2 := []byte("test2-key-2") + key3 := []byte("test2-key-3") + + t.Run("set original values and verify", func(t *testing.T) { + orig1 := [][]byte{[]byte("value 1.1"), []byte("value 1.2")} + orig2 := [][]byte{[]byte("value 2.1"), []byte("value 2.2")} + orig3 := [][]byte{[]byte("value 3.1"), []byte("value 3.2")} + + err = b.SetAdd(key1, orig1) + require.Nil(t, err) + err = b.SetAdd(key2, orig2) + require.Nil(t, err) + err = b.SetAdd(key3, orig3) + require.Nil(t, err) + + res, err := b.SetList(key1) + require.Nil(t, err) + assert.Equal(t, res, orig1) + res, err = b.SetList(key2) + require.Nil(t, err) + assert.Equal(t, res, orig2) + res, err = b.SetList(key3) + require.Nil(t, err) + assert.Equal(t, res, orig3) + }) + + t.Run("flush to disk", func(t *testing.T) { + require.Nil(t, b.FlushAndSwitch()) + }) + + t.Run("replace some, keep one", func(t *testing.T) { + orig1 := [][]byte{[]byte("value 1.1"), []byte("value 1.2")} + orig2 := [][]byte{[]byte("value 2.1"), []byte("value 2.2")} + orig3 := [][]byte{[]byte("value 3.1"), []byte("value 3.2")} + append2 := [][]byte{[]byte("value 2.3")} + append3 := [][]byte{[]byte("value 3.3")} + + err = b.SetAdd(key2, append2) + require.Nil(t, err) + err = b.SetAdd(key3, append3) + require.Nil(t, err) + + res, err := b.SetList(key1) + require.Nil(t, err) + assert.Equal(t, orig1, res) + res, err = b.SetList(key2) + require.Nil(t, err) + assert.Equal(t, append(orig2, append2...), res) + res, err = b.SetList(key3) + require.Nil(t, err) + assert.Equal(t, append(orig3, append3...), res) + }) + }) + + t.Run("with flushes after initial and update", func(t *testing.T) { + b, err := NewBucketCreator().NewBucket(ctx, dirName, "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) 
+ require.Nil(t, err) + + defer b.Shutdown(ctx) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + key1 := []byte("test-3-key-1") + key2 := []byte("test-3-key-2") + key3 := []byte("test-3-key-3") + + t.Run("set original values and verify", func(t *testing.T) { + orig1 := [][]byte{[]byte("value 1.1"), []byte("value 1.2")} + orig2 := [][]byte{[]byte("value 2.1"), []byte("value 2.2")} + orig3 := [][]byte{[]byte("value 3.1"), []byte("value 3.2")} + + err = b.SetAdd(key1, orig1) + require.Nil(t, err) + err = b.SetAdd(key2, orig2) + require.Nil(t, err) + err = b.SetAdd(key3, orig3) + require.Nil(t, err) + + res, err := b.SetList(key1) + require.Nil(t, err) + assert.Equal(t, res, orig1) + res, err = b.SetList(key2) + require.Nil(t, err) + assert.Equal(t, res, orig2) + res, err = b.SetList(key3) + require.Nil(t, err) + assert.Equal(t, res, orig3) + }) + + t.Run("flush to disk", func(t *testing.T) { + require.Nil(t, b.FlushAndSwitch()) + }) + + t.Run("replace some, keep one", func(t *testing.T) { + orig1 := [][]byte{[]byte("value 1.1"), []byte("value 1.2")} + orig2 := [][]byte{[]byte("value 2.1"), []byte("value 2.2")} + orig3 := [][]byte{[]byte("value 3.1"), []byte("value 3.2")} + append2 := [][]byte{[]byte("value 2.3")} + append3 := [][]byte{[]byte("value 3.3")} + + err = b.SetAdd(key2, append2) + require.Nil(t, err) + err = b.SetAdd(key3, append3) + require.Nil(t, err) + + // Flush again! 
+ require.Nil(t, b.FlushAndSwitch()) + + res, err := b.SetList(key1) + require.Nil(t, err) + assert.Equal(t, orig1, res) + res, err = b.SetList(key2) + require.Nil(t, err) + assert.Equal(t, append(orig2, append2...), res) + res, err = b.SetList(key3) + require.Nil(t, err) + assert.Equal(t, append(orig3, append3...), res) + }) + }) + + t.Run("update in memtable, then do an orderly shutdown, and re-init", func(t *testing.T) { + b, err := NewBucketCreator().NewBucket(ctx, dirName, "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) + require.Nil(t, err) + + defer b.Shutdown(ctx) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + key1 := []byte("test4-key-1") + key2 := []byte("test4-key-2") + key3 := []byte("test4-key-3") + + t.Run("set original values and verify", func(t *testing.T) { + orig1 := [][]byte{[]byte("value 1.1"), []byte("value 1.2")} + orig2 := [][]byte{[]byte("value 2.1"), []byte("value 2.2")} + orig3 := [][]byte{[]byte("value 3.1"), []byte("value 3.2")} + + err = b.SetAdd(key1, orig1) + require.Nil(t, err) + err = b.SetAdd(key2, orig2) + require.Nil(t, err) + err = b.SetAdd(key3, orig3) + require.Nil(t, err) + + res, err := b.SetList(key1) + require.Nil(t, err) + assert.Equal(t, res, orig1) + res, err = b.SetList(key2) + require.Nil(t, err) + assert.Equal(t, res, orig2) + res, err = b.SetList(key3) + require.Nil(t, err) + assert.Equal(t, res, orig3) + }) + + t.Run("replace some, keep one", func(t *testing.T) { + orig1 := [][]byte{[]byte("value 1.1"), []byte("value 1.2")} + orig2 := [][]byte{[]byte("value 2.1"), []byte("value 2.2")} + orig3 := [][]byte{[]byte("value 3.1"), []byte("value 3.2")} + append2 := [][]byte{[]byte("value 2.3")} + append3 := [][]byte{[]byte("value 3.3")} + + err = b.SetAdd(key2, append2) + require.Nil(t, err) + err = b.SetAdd(key3, append3) + require.Nil(t, err) + + res, err := b.SetList(key1) + require.Nil(t, err) + 
assert.Equal(t, orig1, res) + res, err = b.SetList(key2) + require.Nil(t, err) + assert.Equal(t, append(orig2, append2...), res) + res, err = b.SetList(key3) + require.Nil(t, err) + assert.Equal(t, append(orig3, append3...), res) + }) + + t.Run("orderly shutdown", func(t *testing.T) { + b.Shutdown(context.Background()) + }) + + t.Run("init another bucket on the same files", func(t *testing.T) { + b2, err := NewBucketCreator().NewBucket(ctx, dirName, "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) + require.Nil(t, err) + + orig1 := [][]byte{[]byte("value 1.1"), []byte("value 1.2")} + orig2 := [][]byte{[]byte("value 2.1"), []byte("value 2.2")} + orig3 := [][]byte{[]byte("value 3.1"), []byte("value 3.2")} + append2 := [][]byte{[]byte("value 2.3")} + append3 := [][]byte{[]byte("value 3.3")} + + res, err := b2.SetList(key1) + require.Nil(t, err) + assert.Equal(t, orig1, res) + res, err = b2.SetList(key2) + require.Nil(t, err) + assert.Equal(t, append(orig2, append2...), res) + res, err = b2.SetList(key3) + require.Nil(t, err) + assert.Equal(t, append(orig3, append3...), res) + }) + }) +} + +func collectionInsertAndSetAddInsertAndDelete(ctx context.Context, t *testing.T, opts []BucketOption) { + dirName := t.TempDir() + + t.Run("memtable-only", func(t *testing.T) { + b, err := NewBucketCreator().NewBucket(ctx, dirName, "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) 
+ require.Nil(t, err) + + defer b.Shutdown(ctx) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + key1 := []byte("test1-key-1") + key2 := []byte("test1-key-2") + key3 := []byte("test1-key-3") + + t.Run("set original values and verify", func(t *testing.T) { + orig1 := [][]byte{[]byte("value 1.1"), []byte("value 1.2")} + orig2 := [][]byte{[]byte("value 2.1"), []byte("value 2.2")} + orig3 := [][]byte{[]byte("value 3.1"), []byte("value 3.2")} + + err = b.SetAdd(key1, orig1) + require.Nil(t, err) + err = b.SetAdd(key2, orig2) + require.Nil(t, err) + err = b.SetAdd(key3, orig3) + require.Nil(t, err) + + res, err := b.SetList(key1) + require.Nil(t, err) + assert.Equal(t, orig1, res) + res, err = b.SetList(key2) + require.Nil(t, err) + assert.Equal(t, orig2, res) + res, err = b.SetList(key3) + require.Nil(t, err) + assert.Equal(t, orig3, res) + }) + + t.Run("delete individual keys", func(t *testing.T) { + delete2 := []byte("value 2.1") + delete3 := []byte("value 3.2") + + err = b.SetDeleteSingle(key2, delete2) + require.Nil(t, err) + err = b.SetDeleteSingle(key3, delete3) + require.Nil(t, err) + }) + + t.Run("validate the results", func(t *testing.T) { + expected1 := [][]byte{[]byte("value 1.1"), []byte("value 1.2")} // unchanged + expected2 := [][]byte{[]byte("value 2.2")} // value1 deleted + expected3 := [][]byte{[]byte("value 3.1")} // value2 deleted + + res, err := b.SetList(key1) + require.Nil(t, err) + assert.Equal(t, expected1, res) + res, err = b.SetList(key2) + require.Nil(t, err) + assert.Equal(t, expected2, res) + res, err = b.SetList(key3) + require.Nil(t, err) + assert.Equal(t, expected3, res) + }) + + t.Run("re-add keys which were previously deleted and new ones", func(t *testing.T) { + readd2 := [][]byte{[]byte("value 2.1"), []byte("value 2.3")} + readd3 := [][]byte{[]byte("value 3.2"), []byte("value 3.3")} + + err = b.SetAdd(key2, readd2) + require.Nil(t, err) + err = b.SetAdd(key3, readd3) + require.Nil(t, 
err) + }) + + t.Run("validate the results again", func(t *testing.T) { + expected1 := [][]byte{[]byte("value 1.1"), []byte("value 1.2")} // unchanged + expected2 := [][]byte{ + []byte("value 2.2"), // from original import + []byte("value 2.1"), // added again after initial deletion + []byte("value 2.3"), // newly added + } + expected3 := [][]byte{ + []byte("value 3.1"), // form original import + []byte("value 3.2"), // added again after initial deletion + []byte("value 3.3"), // newly added + } // value2 deleted + + res, err := b.SetList(key1) + require.Nil(t, err) + assert.Equal(t, expected1, res) + res, err = b.SetList(key2) + require.Nil(t, err) + assert.Equal(t, expected2, res) + res, err = b.SetList(key3) + require.Nil(t, err) + assert.Equal(t, expected3, res) + }) + }) + + t.Run("with a single flush between updates", func(t *testing.T) { + b, err := NewBucketCreator().NewBucket(ctx, dirName, "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) 
+ require.Nil(t, err) + + defer b.Shutdown(ctx) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + key1 := []byte("test2-key-1") + key2 := []byte("test2-key-2") + key3 := []byte("test2-key-3") + + t.Run("set original values and verify", func(t *testing.T) { + orig1 := [][]byte{[]byte("value 1.1"), []byte("value 1.2")} + orig2 := [][]byte{[]byte("value 2.1"), []byte("value 2.2")} + orig3 := [][]byte{[]byte("value 3.1"), []byte("value 3.2")} + + err = b.SetAdd(key1, orig1) + require.Nil(t, err) + err = b.SetAdd(key2, orig2) + require.Nil(t, err) + err = b.SetAdd(key3, orig3) + require.Nil(t, err) + + res, err := b.SetList(key1) + require.Nil(t, err) + assert.Equal(t, res, orig1) + res, err = b.SetList(key2) + require.Nil(t, err) + assert.Equal(t, res, orig2) + res, err = b.SetList(key3) + require.Nil(t, err) + assert.Equal(t, res, orig3) + }) + + t.Run("flush to disk", func(t *testing.T) { + require.Nil(t, b.FlushAndSwitch()) + }) + + t.Run("delete individual keys", func(t *testing.T) { + delete2 := []byte("value 2.1") + delete3 := []byte("value 3.2") + + err = b.SetDeleteSingle(key2, delete2) + require.Nil(t, err) + err = b.SetDeleteSingle(key3, delete3) + require.Nil(t, err) + }) + + t.Run("validate the results", func(t *testing.T) { + expected1 := [][]byte{[]byte("value 1.1"), []byte("value 1.2")} // unchanged + expected2 := [][]byte{[]byte("value 2.2")} // value1 deleted + expected3 := [][]byte{[]byte("value 3.1")} // value2 deleted + + res, err := b.SetList(key1) + require.Nil(t, err) + assert.Equal(t, expected1, res) + res, err = b.SetList(key2) + require.Nil(t, err) + assert.Equal(t, expected2, res) + res, err = b.SetList(key3) + require.Nil(t, err) + assert.Equal(t, expected3, res) + }) + + t.Run("re-add keys which were previously deleted and new ones", func(t *testing.T) { + readd2 := [][]byte{[]byte("value 2.1"), []byte("value 2.3")} + readd3 := [][]byte{[]byte("value 3.2"), []byte("value 3.3")} + + err = 
b.SetAdd(key2, readd2) + require.Nil(t, err) + err = b.SetAdd(key3, readd3) + require.Nil(t, err) + }) + + t.Run("validate the results again", func(t *testing.T) { + expected1 := [][]byte{[]byte("value 1.1"), []byte("value 1.2")} // unchanged + expected2 := [][]byte{ + []byte("value 2.2"), // from original import + []byte("value 2.1"), // added again after initial deletion + []byte("value 2.3"), // newly added + } + expected3 := [][]byte{ + []byte("value 3.1"), // form original import + []byte("value 3.2"), // added again after initial deletion + []byte("value 3.3"), // newly added + } // value2 deleted + + res, err := b.SetList(key1) + require.Nil(t, err) + assert.Equal(t, expected1, res) + res, err = b.SetList(key2) + require.Nil(t, err) + assert.Equal(t, expected2, res) + res, err = b.SetList(key3) + require.Nil(t, err) + assert.Equal(t, expected3, res) + }) + }) + + t.Run("with flushes in between and after the update", func(t *testing.T) { + b, err := NewBucketCreator().NewBucket(ctx, dirName, "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) 
+ require.Nil(t, err) + + defer b.Shutdown(ctx) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + key1 := []byte("test3-key-1") + key2 := []byte("test3-key-2") + key3 := []byte("test3-key-3") + + t.Run("set original values and verify", func(t *testing.T) { + orig1 := [][]byte{[]byte("value 1.1"), []byte("value 1.2")} + orig2 := [][]byte{[]byte("value 2.1"), []byte("value 2.2")} + orig3 := [][]byte{[]byte("value 3.1"), []byte("value 3.2")} + + err = b.SetAdd(key1, orig1) + require.Nil(t, err) + err = b.SetAdd(key2, orig2) + require.Nil(t, err) + err = b.SetAdd(key3, orig3) + require.Nil(t, err) + + res, err := b.SetList(key1) + require.Nil(t, err) + assert.Equal(t, res, orig1) + res, err = b.SetList(key2) + require.Nil(t, err) + assert.Equal(t, res, orig2) + res, err = b.SetList(key3) + require.Nil(t, err) + assert.Equal(t, res, orig3) + }) + + t.Run("flush to disk", func(t *testing.T) { + require.Nil(t, b.FlushAndSwitch()) + }) + + t.Run("delete individual keys", func(t *testing.T) { + delete2 := []byte("value 2.1") + delete3 := []byte("value 3.2") + + err = b.SetDeleteSingle(key2, delete2) + require.Nil(t, err) + err = b.SetDeleteSingle(key3, delete3) + require.Nil(t, err) + }) + + t.Run("flush to disk - again!", func(t *testing.T) { + require.Nil(t, b.FlushAndSwitch()) + }) + + t.Run("validate", func(t *testing.T) { + expected1 := [][]byte{[]byte("value 1.1"), []byte("value 1.2")} // unchanged + expected2 := [][]byte{[]byte("value 2.2")} // value1 deleted + expected3 := [][]byte{[]byte("value 3.1")} // value2 deleted + + res, err := b.SetList(key1) + require.Nil(t, err) + assert.Equal(t, expected1, res) + res, err = b.SetList(key2) + require.Nil(t, err) + assert.Equal(t, expected2, res) + res, err = b.SetList(key3) + require.Nil(t, err) + assert.Equal(t, expected3, res) + }) + + t.Run("re-add keys which were previously deleted and new ones", func(t *testing.T) { + readd2 := [][]byte{[]byte("value 2.1"), 
[]byte("value 2.3")} + readd3 := [][]byte{[]byte("value 3.2"), []byte("value 3.3")} + + err = b.SetAdd(key2, readd2) + require.Nil(t, err) + err = b.SetAdd(key3, readd3) + require.Nil(t, err) + }) + + t.Run("flush to disk - yet again!", func(t *testing.T) { + require.Nil(t, b.FlushAndSwitch()) + }) + + t.Run("validate the results again", func(t *testing.T) { + expected1 := [][]byte{[]byte("value 1.1"), []byte("value 1.2")} // unchanged + expected2 := [][]byte{ + []byte("value 2.2"), // from original import + []byte("value 2.1"), // added again after initial deletion + []byte("value 2.3"), // newly added + } + expected3 := [][]byte{ + []byte("value 3.1"), // form original import + []byte("value 3.2"), // added again after initial deletion + []byte("value 3.3"), // newly added + } // value2 deleted + + res, err := b.SetList(key1) + require.Nil(t, err) + assert.Equal(t, expected1, res) + res, err = b.SetList(key2) + require.Nil(t, err) + assert.Equal(t, expected2, res) + res, err = b.SetList(key3) + require.Nil(t, err) + assert.Equal(t, expected3, res) + }) + }) + + t.Run("update in memtable, make orderly shutdown, then create a new bucket from disk", + func(t *testing.T) { + b, err := NewBucketCreator().NewBucket(ctx, dirName, "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) 
+ require.Nil(t, err) + + defer b.Shutdown(ctx) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + key1 := []byte("test4-key-1") + key2 := []byte("test4-key-2") + key3 := []byte("test4-key-3") + + t.Run("set original values and verify", func(t *testing.T) { + orig1 := [][]byte{[]byte("value 1.1"), []byte("value 1.2")} + orig2 := [][]byte{[]byte("value 2.1"), []byte("value 2.2")} + orig3 := [][]byte{[]byte("value 3.1"), []byte("value 3.2")} + + err = b.SetAdd(key1, orig1) + require.Nil(t, err) + err = b.SetAdd(key2, orig2) + require.Nil(t, err) + err = b.SetAdd(key3, orig3) + require.Nil(t, err) + + res, err := b.SetList(key1) + require.Nil(t, err) + assert.Equal(t, res, orig1) + res, err = b.SetList(key2) + require.Nil(t, err) + assert.Equal(t, res, orig2) + res, err = b.SetList(key3) + require.Nil(t, err) + assert.Equal(t, res, orig3) + }) + + t.Run("delete individual keys", func(t *testing.T) { + delete2 := []byte("value 2.1") + delete3 := []byte("value 3.2") + + err = b.SetDeleteSingle(key2, delete2) + require.Nil(t, err) + err = b.SetDeleteSingle(key3, delete3) + require.Nil(t, err) + }) + + t.Run("orderly shutdown", func(t *testing.T) { + b.Shutdown(context.Background()) + }) + + t.Run("init another bucket on the same files", func(t *testing.T) { + b2, err := NewBucketCreator().NewBucket(ctx, dirName, "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) 
+ require.Nil(t, err) + + expected1 := [][]byte{[]byte("value 1.1"), []byte("value 1.2")} // unchanged + expected2 := [][]byte{[]byte("value 2.2")} // value1 deleted + expected3 := [][]byte{[]byte("value 3.1")} // value2 deleted + + res, err := b2.SetList(key1) + require.Nil(t, err) + assert.Equal(t, expected1, res) + res, err = b2.SetList(key2) + require.Nil(t, err) + assert.Equal(t, expected2, res) + res, err = b2.SetList(key3) + require.Nil(t, err) + assert.Equal(t, expected3, res) + }) + }) +} + +func collectionCursors(ctx context.Context, t *testing.T, opts []BucketOption) { + t.Run("memtable-only", func(t *testing.T) { + r := getRandomSeed() + dirName := t.TempDir() + + b, err := NewBucketCreator().NewBucket(ctx, dirName, "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) + require.Nil(t, err) + + defer b.Shutdown(ctx) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + t.Run("set original values", func(t *testing.T) { + pairs := 20 + valuesPerPair := 3 + keys := make([][]byte, pairs) + values := make([][][]byte, pairs) + + for i := range keys { + keys[i] = []byte(fmt.Sprintf("key-%03d", i)) + values[i] = make([][]byte, valuesPerPair) + for j := range values[i] { + values[i][j] = []byte(fmt.Sprintf("value-%03d.%d", i, j)) + } + } + + // shuffle to make sure the BST isn't accidentally in order + r.Shuffle(len(keys), func(i, j int) { + keys[i], keys[j] = keys[j], keys[i] + values[i], values[j] = values[j], values[i] + }) + + for i := range keys { + err = b.SetAdd(keys[i], values[i]) + require.Nil(t, err) + } + }) + + t.Run("seek from somewhere in the middle", func(t *testing.T) { + expectedKeys := [][]byte{ + []byte("key-016"), + []byte("key-017"), + []byte("key-018"), + []byte("key-019"), + } + expectedValues := [][][]byte{ + {[]byte("value-016.0"), []byte("value-016.1"), []byte("value-016.2")}, + {[]byte("value-017.0"), []byte("value-017.1"), 
[]byte("value-017.2")}, + {[]byte("value-018.0"), []byte("value-018.1"), []byte("value-018.2")}, + {[]byte("value-019.0"), []byte("value-019.1"), []byte("value-019.2")}, + } + + var retrievedKeys [][]byte + var retrievedValues [][][]byte + c := b.SetCursor() + defer c.Close() + for k, v := c.Seek([]byte("key-016")); k != nil; k, v = c.Next() { + retrievedKeys = append(retrievedKeys, k) + retrievedValues = append(retrievedValues, v) + } + + assert.Equal(t, expectedKeys, retrievedKeys) + assert.Equal(t, expectedValues, retrievedValues) + }) + + t.Run("start from the beginning", func(t *testing.T) { + expectedKeys := [][]byte{ + []byte("key-000"), + []byte("key-001"), + []byte("key-002"), + } + expectedValues := [][][]byte{ + {[]byte("value-000.0"), []byte("value-000.1"), []byte("value-000.2")}, + {[]byte("value-001.0"), []byte("value-001.1"), []byte("value-001.2")}, + {[]byte("value-002.0"), []byte("value-002.1"), []byte("value-002.2")}, + } + + var retrievedKeys [][]byte + var retrievedValues [][][]byte + c := b.SetCursor() + defer c.Close() + retrieved := 0 + for k, v := c.First(); k != nil && retrieved < 3; k, v = c.Next() { + retrieved++ + retrievedKeys = append(retrievedKeys, k) + retrievedValues = append(retrievedValues, v) + } + + assert.Equal(t, expectedKeys, retrievedKeys) + assert.Equal(t, expectedValues, retrievedValues) + }) + + t.Run("extend an existing key", func(t *testing.T) { + key := []byte("key-002") + extend := [][]byte{[]byte("value-002.3")} + + require.Nil(t, b.SetAdd(key, extend)) + }) + + t.Run("verify the extension is contained", func(t *testing.T) { + expectedKeys := [][]byte{ + []byte("key-001"), + []byte("key-002"), + } + expectedValues := [][][]byte{ + {[]byte("value-001.0"), []byte("value-001.1"), []byte("value-001.2")}, + { + []byte("value-002.0"), []byte("value-002.1"), []byte("value-002.2"), + []byte("value-002.3"), + }, + } + + var retrievedKeys [][]byte + var retrievedValues [][][]byte + c := b.SetCursor() + defer c.Close() + 
retrieved := 0 + for k, v := c.Seek([]byte("key-001")); k != nil && retrieved < 2; k, v = c.Next() { + retrieved++ + retrievedKeys = append(retrievedKeys, k) + retrievedValues = append(retrievedValues, v) + } + + assert.Equal(t, expectedKeys, retrievedKeys) + assert.Equal(t, expectedValues, retrievedValues) + }) + }) + + t.Run("with flushes", func(t *testing.T) { + r := getRandomSeed() + dirName := t.TempDir() + + b, err := NewBucketCreator().NewBucket(ctx, dirName, "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) + require.Nil(t, err) + + defer b.Shutdown(ctx) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + t.Run("first third (%3==0)", func(t *testing.T) { + pairs := 20 + valuesPerPair := 3 + var keys [][]byte + var values [][][]byte + + for i := 0; i < pairs; i++ { + if i%3 != 0 { + continue + } + keys = append(keys, []byte(fmt.Sprintf("key-%03d", i))) + curValues := make([][]byte, valuesPerPair) + for j := range curValues { + curValues[j] = []byte(fmt.Sprintf("value-%03d.%d", i, j)) + } + values = append(values, curValues) + } + + // shuffle to make sure the BST isn't accidentally in order + r.Shuffle(len(keys), func(i, j int) { + keys[i], keys[j] = keys[j], keys[i] + values[i], values[j] = values[j], values[i] + }) + + for i := range keys { + err = b.SetAdd(keys[i], values[i]) + require.Nil(t, err) + } + }) + + t.Run("flush to disk", func(t *testing.T) { + require.Nil(t, b.FlushAndSwitch()) + }) + + t.Run("second third (%3==1)", func(t *testing.T) { + pairs := 20 + valuesPerPair := 3 + var keys [][]byte + var values [][][]byte + + for i := 0; i < pairs; i++ { + if i%3 != 1 { + continue + } + keys = append(keys, []byte(fmt.Sprintf("key-%03d", i))) + curValues := make([][]byte, valuesPerPair) + for j := range curValues { + curValues[j] = []byte(fmt.Sprintf("value-%03d.%d", i, j)) + } + values = append(values, curValues) + } + + // shuffle to make sure the 
BST isn't accidentally in order + r.Shuffle(len(keys), func(i, j int) { + keys[i], keys[j] = keys[j], keys[i] + values[i], values[j] = values[j], values[i] + }) + + for i := range keys { + err = b.SetAdd(keys[i], values[i]) + require.Nil(t, err) + } + }) + + t.Run("flush to disk", func(t *testing.T) { + require.Nil(t, b.FlushAndSwitch()) + }) + + t.Run("third (%3==2) memtable-only", func(t *testing.T) { + pairs := 20 + valuesPerPair := 3 + var keys [][]byte + var values [][][]byte + + for i := 0; i < pairs; i++ { + if i%3 != 2 { + continue + } + keys = append(keys, []byte(fmt.Sprintf("key-%03d", i))) + curValues := make([][]byte, valuesPerPair) + for j := range curValues { + curValues[j] = []byte(fmt.Sprintf("value-%03d.%d", i, j)) + } + values = append(values, curValues) + } + + // shuffle to make sure the BST isn't accidentally in order + r.Shuffle(len(keys), func(i, j int) { + keys[i], keys[j] = keys[j], keys[i] + values[i], values[j] = values[j], values[i] + }) + + for i := range keys { + err = b.SetAdd(keys[i], values[i]) + require.Nil(t, err) + } + + // no flush for this one, so this segment stays in the memtable + }) + + t.Run("seek from somewhere in the middle", func(t *testing.T) { + expectedKeys := [][]byte{ + []byte("key-016"), + []byte("key-017"), + []byte("key-018"), + []byte("key-019"), + } + expectedValues := [][][]byte{ + {[]byte("value-016.0"), []byte("value-016.1"), []byte("value-016.2")}, + {[]byte("value-017.0"), []byte("value-017.1"), []byte("value-017.2")}, + {[]byte("value-018.0"), []byte("value-018.1"), []byte("value-018.2")}, + {[]byte("value-019.0"), []byte("value-019.1"), []byte("value-019.2")}, + } + + var retrievedKeys [][]byte + var retrievedValues [][][]byte + c := b.SetCursor() + defer c.Close() + for k, v := c.Seek([]byte("key-016")); k != nil; k, v = c.Next() { + retrievedKeys = append(retrievedKeys, k) + retrievedValues = append(retrievedValues, v) + } + + assert.Equal(t, expectedKeys, retrievedKeys) + assert.Equal(t, 
expectedValues, retrievedValues) + }) + + t.Run("start from the beginning", func(t *testing.T) { + expectedKeys := [][]byte{ + []byte("key-000"), + []byte("key-001"), + []byte("key-002"), + } + expectedValues := [][][]byte{ + {[]byte("value-000.0"), []byte("value-000.1"), []byte("value-000.2")}, + {[]byte("value-001.0"), []byte("value-001.1"), []byte("value-001.2")}, + {[]byte("value-002.0"), []byte("value-002.1"), []byte("value-002.2")}, + } + + var retrievedKeys [][]byte + var retrievedValues [][][]byte + c := b.SetCursor() + defer c.Close() + retrieved := 0 + for k, v := c.First(); k != nil && retrieved < 3; k, v = c.Next() { + retrieved++ + retrievedKeys = append(retrievedKeys, k) + retrievedValues = append(retrievedValues, v) + } + + assert.Equal(t, expectedKeys, retrievedKeys) + assert.Equal(t, expectedValues, retrievedValues) + }) + + t.Run("delete & extend an existing key", func(t *testing.T) { + key := []byte("key-002") + extend := [][]byte{[]byte("value-002.3")} + + require.Nil(t, b.SetAdd(key, extend)) + + key = []byte("key-001") + deleteValue := []byte("value-001.1") + require.Nil(t, b.SetDeleteSingle(key, deleteValue)) + }) + + t.Run("verify the extension is contained", func(t *testing.T) { + expectedKeys := [][]byte{ + []byte("key-001"), + []byte("key-002"), + } + expectedValues := [][][]byte{ + { + []byte("value-001.0"), + // "value-001.1" deleted + []byte("value-001.2"), + }, + { + []byte("value-002.0"), []byte("value-002.1"), []byte("value-002.2"), + []byte("value-002.3"), + }, + } + + var retrievedKeys [][]byte + var retrievedValues [][][]byte + c := b.SetCursor() + defer c.Close() + retrieved := 0 + for k, v := c.Seek([]byte("key-001")); k != nil && retrieved < 2; k, v = c.Next() { + retrieved++ + retrievedKeys = append(retrievedKeys, k) + retrievedValues = append(retrievedValues, v) + } + + assert.Equal(t, expectedKeys, retrievedKeys) + assert.Equal(t, expectedValues, retrievedValues) + }) + + t.Run("flush to disk", func(t *testing.T) { + 
require.Nil(t, b.FlushAndSwitch()) + }) + + t.Run("verify again after flush", func(t *testing.T) { + expectedKeys := [][]byte{ + []byte("key-001"), + []byte("key-002"), + } + expectedValues := [][][]byte{ + { + []byte("value-001.0"), + // "value-001.1" deleted + []byte("value-001.2"), + }, + { + []byte("value-002.0"), []byte("value-002.1"), []byte("value-002.2"), + []byte("value-002.3"), + }, + } + + var retrievedKeys [][]byte + var retrievedValues [][][]byte + c := b.SetCursor() + defer c.Close() + retrieved := 0 + for k, v := c.Seek([]byte("key-001")); k != nil && retrieved < 2; k, v = c.Next() { + retrieved++ + retrievedKeys = append(retrievedKeys, k) + retrievedValues = append(retrievedValues, v) + } + + assert.Equal(t, expectedKeys, retrievedKeys) + assert.Equal(t, expectedValues, retrievedValues) + }) + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/strategies_set_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/strategies_set_test.go new file mode 100644 index 0000000000000000000000000000000000000000..24d0ef1138e01fc5c021e1fe733e1cf44fa21f1c --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/strategies_set_test.go @@ -0,0 +1,180 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestSetDecoder(t *testing.T) { + type test struct { + name string + in []value + out [][]byte + } + + tests := []test{ + { + name: "single value", + in: []value{ + { + value: []byte("foo"), + }, + }, + out: [][]byte{ + []byte("foo"), + }, + }, + + { + name: "single value with tombstone", + in: []value{ + { + value: []byte("foo"), + tombstone: true, + }, + }, + out: [][]byte{}, + }, + { + name: "single value, then a tombstone added", + in: []value{ + { + value: []byte("foo"), + }, + { + value: []byte("foo"), + tombstone: true, + }, + }, + out: [][]byte{}, + }, + { + name: "single value, then a tombstone added, then added again", + in: []value{ + { + value: []byte("foo"), + }, + { + value: []byte("foo"), + tombstone: true, + }, + { + value: []byte("foo"), + }, + }, + out: [][]byte{ + []byte("foo"), + }, + }, + { + name: "one value, repeating", + in: []value{ + { + value: []byte("foo"), + }, + { + value: []byte("foo"), + }, + }, + out: [][]byte{ + []byte("foo"), + }, + }, + { + name: "multiple values, some tombstones, ending in everything present", + in: []value{ + { + value: []byte("foo"), + }, + { + value: []byte("bar"), + }, + { + value: []byte("foo"), + tombstone: true, + }, + { + value: []byte("foo"), + tombstone: true, + }, + { + value: []byte("foo"), + tombstone: true, + }, + { + value: []byte("foo"), + }, + { + value: []byte("bar"), + }, + { + value: []byte("bar"), + tombstone: true, + }, + { + value: []byte("bar"), + }, + }, + out: [][]byte{ + []byte("foo"), + []byte("bar"), + }, + }, + { + name: "multiple values, some tombstones, ending in everything deleted", + in: []value{ + { + value: []byte("foo"), + }, + { + value: []byte("bar"), + }, + { + value: []byte("foo"), + tombstone: true, + }, + { + value: []byte("foo"), + tombstone: true, + }, + { + value: []byte("foo"), + tombstone: true, + }, + { + value: []byte("foo"), + }, + { 
+ value: []byte("bar"), + }, + { + value: []byte("bar"), + tombstone: true, + }, + { + value: []byte("foo"), + tombstone: true, + }, + }, + out: [][]byte{}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + assert.Equal(t, test.out, newSetDecoder().Do(test.in)) + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/varenc/simple.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/varenc/simple.go new file mode 100644 index 0000000000000000000000000000000000000000..75c54cec8b723e578d84410cbf68247325535720 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/varenc/simple.go @@ -0,0 +1,95 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package varenc + +import "encoding/binary" + +type UintTypes interface { + uint64 | uint32 | uint16 | uint8 +} + +type SimpleEncoder[T UintTypes] struct { + values []T + buf []byte + elementSize int +} + +func (e *SimpleEncoder[T]) Init(expectedCount int) { + e.values = make([]T, expectedCount) + + switch any(*new(T)).(type) { + case uint64: + e.elementSize = 8 + case uint32: + e.elementSize = 4 + case uint16: + e.elementSize = 2 + case uint8: + e.elementSize = 1 + default: + panic("unsupported type") + } + + e.buf = make([]byte, 8+e.elementSize*expectedCount) +} + +func (e SimpleEncoder[T]) encode(value T, buf []byte) { + switch v := any(value).(type) { + case uint64: + binary.LittleEndian.PutUint64(buf, v) + case uint32: + binary.LittleEndian.PutUint32(buf, v) + case uint16: + binary.LittleEndian.PutUint16(buf, v) + case uint8: + buf[0] = byte(v) + } +} + +func (e SimpleEncoder[T]) decode(buf []byte, value *T) { + switch len(buf) { + case 8: + *value = 
any(binary.LittleEndian.Uint64(buf)).(T) + case 4: + *value = any(binary.LittleEndian.Uint32(buf)).(T) + case 2: + *value = any(binary.LittleEndian.Uint16(buf)).(T) + case 1: + *value = any(buf[0]).(T) + } +} + +func (e SimpleEncoder[T]) EncodeReusable(values []T, buf []byte) { + binary.LittleEndian.PutUint64(buf, uint64(len(values))) + len := e.elementSize + for i, value := range values { + e.encode(value, buf[8+i*len:8+(i+1)*len]) + } +} + +func (e SimpleEncoder[T]) DecodeReusable(data []byte, values []T) { + count := binary.LittleEndian.Uint64(data) + len := e.elementSize + for i := 0; i < int(count); i++ { + e.decode(data[8+i*len:8+(i+1)*len], &values[i]) + } +} + +func (e *SimpleEncoder[T]) Encode(values []T) []byte { + e.EncodeReusable(values, e.buf) + return e.buf +} + +func (e *SimpleEncoder[T]) Decode(data []byte) []T { + e.DecodeReusable(data, e.values) + return e.values +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/varenc/variable_encoding.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/varenc/variable_encoding.go new file mode 100644 index 0000000000000000000000000000000000000000..59a0eb254f0cafdf5ca42ceb607b33bddee4dcbd --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/varenc/variable_encoding.go @@ -0,0 +1,48 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package varenc + +type VarEncDataType uint8 + +const ( + SimpleUint64 VarEncDataType = iota + SimpleUint32 + SimpleUint16 + SimpleUint8 + SimpleFloat64 + SimpleFloat32 + VarIntUint64 // Variable length encoding for uint64 + + // Add new data types here + DeltaVarIntUint64 = VarIntUint64 + 64 +) + +type VarEncEncoder[T any] interface { + Init(expectedCount int) + Encode(values []T) []byte + Decode(data []byte) []T + EncodeReusable(values []T, buf []byte) + DecodeReusable(data []byte, values []T) +} + +func GetVarEncEncoder64(t VarEncDataType) VarEncEncoder[uint64] { + switch t { + case SimpleUint64: + return &SimpleEncoder[uint64]{} + case VarIntUint64: + return &VarIntEncoder{} + case DeltaVarIntUint64: + return &VarIntDeltaEncoder{} + default: + return nil + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/varenc/variable_encoding_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/varenc/variable_encoding_test.go new file mode 100644 index 0000000000000000000000000000000000000000..d4dce0f37ac7e961747c7c2323ac807b99c5553d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/varenc/variable_encoding_test.go @@ -0,0 +1,245 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package varenc + +import ( + "fmt" + "math" + "math/rand/v2" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/weaviate/weaviate/adapters/repos/db/inverted/terms" +) + +func TestSimpleEncodeDecode(t *testing.T) { + enc := &SimpleEncoder[uint64]{} + + valueCount := terms.BLOCK_SIZE + enc.Init(valueCount) + values := make([]uint64, valueCount) + for i := 0; i < valueCount; i++ { + values[i] = uint64(i) + } + + packed := enc.Encode(values) + assert.NotNil(t, packed) + + decoded := enc.Decode(packed) + assert.Equal(t, values, decoded) +} + +func TestVarIntEncodeDecode(t *testing.T) { + enc := &VarIntEncoder{} + valueCount := terms.BLOCK_SIZE + enc.Init(valueCount) + + values := make([]uint64, valueCount) + for i := 0; i < valueCount; i++ { + values[i] = uint64(i) + } + + packed := enc.Encode(values) + assert.NotNil(t, packed) + + decoded := enc.Decode(packed) + assert.Equal(t, values, decoded) +} + +func TestVarIntDeltaEncodeDecode(t *testing.T) { + enc := &VarIntDeltaEncoder{} + valueCount := terms.BLOCK_SIZE + enc.Init(valueCount) + + values := make([]uint64, valueCount) + for i := 0; i < valueCount; i++ { + values[i] = uint64(i) + } + + packed := enc.Encode(values) + assert.NotNil(t, packed) + + decoded := enc.Decode(packed) + assert.Equal(t, values, decoded) +} + +func TestCompareNonDeltaEncoders(t *testing.T) { + encs := []VarEncEncoder[uint64]{ + &SimpleEncoder[uint64]{}, + &VarIntEncoder{}, + } + + sizes := make([]int, len(encs)) + + valueCount := terms.BLOCK_SIZE + + for _, enc := range encs { + enc.Init(valueCount) + } + + values := make([]uint64, valueCount) + for i := 0; i < valueCount; i++ { + values[i] = uint64(rand.Uint32() / 2) + } + + for i, enc := range encs { + packed := enc.Encode(values) + assert.NotNil(t, packed) + + sizes[i] = len(packed) + + decoded := enc.Decode(packed) + assert.Equal(t, values, decoded) + } + + for i := range encs { + fmt.Printf("Encoder %d: %d %f\n", i, sizes[i], 
float64(sizes[0])/float64(sizes[i])) + } +} + +func TestCompareDeltaEncoders(t *testing.T) { + encs := []VarEncEncoder[uint64]{ + &SimpleEncoder[uint64]{}, + &VarIntEncoder{}, + &VarIntDeltaEncoder{}, + } + + sizes := make([]int, len(encs)) + + valueCount := terms.BLOCK_SIZE + + for _, enc := range encs { + enc.Init(valueCount) + } + + values := make([]uint64, valueCount) + values[0] = 100 + for i := 1; i < valueCount; i++ { + values[i] = values[i-1] + rand.Uint64N(10) + } + + for i, enc := range encs { + packed := enc.Encode(values) + assert.NotNil(t, packed) + + sizes[i] = len(packed) + + decoded := enc.Decode(packed) + assert.Equal(t, values, decoded) + } + + for i := range encs { + fmt.Printf("Encoder %d: %d %f\n", i, sizes[i], float64(sizes[0])/float64(sizes[i])) + } +} + +func BenchmarkDeltaEncoders(b *testing.B) { + encs := []VarEncEncoder[uint64]{ + &SimpleEncoder[uint64]{}, + &VarIntEncoder{}, + &VarIntDeltaEncoder{}, + } + + valueCount := terms.BLOCK_SIZE + + for _, enc := range encs { + enc.Init(valueCount) + } + + values := make([]uint64, valueCount) + values[0] = 100 + for i := 1; i < valueCount; i++ { + values[i] = values[i-1] + rand.Uint64N(10) + } + + for _, enc := range encs { + b.Run(fmt.Sprintf("%T", enc), func(b *testing.B) { + b.ReportAllocs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + packed := enc.Encode(values) + decoded := enc.Decode(packed) + assert.Equal(b, values, decoded) + } + }) + } +} + +func BenchmarkDecoder(b *testing.B) { + encs := []VarEncEncoder[uint64]{ + &SimpleEncoder[uint64]{}, + &VarIntEncoder{}, + &VarIntDeltaEncoder{}, + } + valueCount := terms.BLOCK_SIZE + + // Example input values + values := make([]uint64, valueCount) + + values[0] = 100 + for i := range values[1:] { + // values[i+1] = values[i] + rand.Uint64N(10) + values[i+1] = values[i] + 1 + // values[i] = uint64(math.Round(rand.Float64()*10)) + 1 + } + + for _, enc := range encs { + enc.Init(valueCount) + packed := enc.Encode(values) + 
b.Run(fmt.Sprintf("%T", enc), func(b *testing.B) { + for i := 0; i < b.N; i++ { + enc.Decode(packed) + } + b.ReportMetric(float64(((b.N*valueCount)/1000000))/b.Elapsed().Seconds(), "Mvalues/s") + }) + + } +} + +func BenchmarkDecoderMulti(b *testing.B) { + encs := [][]VarEncEncoder[uint64]{ + {&SimpleEncoder[uint64]{}, &SimpleEncoder[uint64]{}}, + {&VarIntDeltaEncoder{}, &VarIntEncoder{}}, + } + + valueCount := terms.BLOCK_SIZE + + // Example input values + docIds := make([]uint64, valueCount) + + docIds[0] = 100 + tfs := make([]uint64, valueCount) + tfs[0] = 1 + for i := range docIds[1:] { + docIds[i+1] = docIds[i] + rand.Uint64N(10) + // docIds[i+1] = docIds[i] + 1 + tfs[i] = uint64(math.Round(rand.Float64()*10)) + 1 + } + + for _, enc := range encs { + enc[0].Init(valueCount) + enc[1].Init(valueCount) + + packedDocIds := enc[0].Encode(docIds) + packedTfs := enc[1].Encode(tfs) + + b.Run(fmt.Sprintf("%T", enc), func(b *testing.B) { + for i := 0; i < b.N; i++ { + enc[0].Decode(packedDocIds) + enc[1].Decode(packedTfs) + } + b.ReportMetric(float64(((b.N*valueCount)/1000000))/b.Elapsed().Seconds(), "Mvalues/s") + }) + + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/varenc/varint.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/varenc/varint.go new file mode 100644 index 0000000000000000000000000000000000000000..c43ec86c4ac0cb78f10ccfcb6f8543fa261396fb --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/varenc/varint.go @@ -0,0 +1,208 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package varenc + +import ( + "encoding/binary" + "math/bits" +) + +func decodeReusable(deltas []uint64, packed []byte, deltaDiff bool) { + if len(packed) < 8 { + return // Error handling: insufficient input or output space + } + + // Read the first delta using BigEndian to handle byte order explicitly + deltas[0] = binary.BigEndian.Uint64(packed[0:8]) + + // Read bitsNeeded (6 bits starting from bit 2 of packed[8]) + bitsNeeded := int((packed[8] >> 2) & 0x3F) + if bitsNeeded == 0 || bitsNeeded > 64 { + // Handle invalid bitsNeeded + return + } + + // Starting bit position after reading bitsNeeded + bitPos := 6 + bytePos := 8 // Start from packed[8] + + // Initialize the bit buffer with the remaining bits in packed[8], if any + bitsLeft := 8 - bitPos + bitBuffer := uint64(packed[bytePos] & ((1 << bitsLeft) - 1)) + + bytePos++ + + // Precompute the mask for bitsNeeded bits + bitsMask := uint64((1 << bitsNeeded) - 1) + + // Read the deltas + for i := 1; i < len(deltas); i++ { + // Ensure we have enough bits in the buffer + for bitsLeft < bitsNeeded { + if bytePos >= len(packed) { + // Handle insufficient data + return + } + bitBuffer = (bitBuffer << 8) | uint64(packed[bytePos]) + bitsLeft += 8 + bytePos++ + } + // Extract bitsNeeded bits from the buffer + bitsLeft -= bitsNeeded + deltas[i] = (bitBuffer >> bitsLeft) & bitsMask + if deltaDiff { + deltas[i] += deltas[i-1] + } + } +} + +func encodeReusable(deltas []uint64, packed []byte, deltaDiff bool) int { + var currentByte byte + bitPos := 0 // Tracks the current bit position in the byte + + bitsNeeded := 0 + + binary.BigEndian.PutUint64(packed, deltas[0]) + currentByteIndex := 8 + + for i, delta := range deltas[1:] { + if deltaDiff { + delta -= deltas[i] + } + // Determine the number of bits needed to represent this delta + if bitsNeeded < bits.Len64(delta) { + bitsNeeded = bits.Len64(delta) + } + } + if bitsNeeded == 0 { + bitsNeeded = 1 // Ensure we use at least 1 bit for 0 
values + } + + bitsToStore := uint64(bitsNeeded) + + currentByte |= byte((bitsToStore>>(5-bitPos))&1) << (7 - bitPos) + bitPos++ + currentByte |= byte((bitsToStore>>(5-bitPos))&1) << (7 - bitPos) + bitPos++ + currentByte |= byte((bitsToStore>>(5-bitPos))&1) << (7 - bitPos) + bitPos++ + currentByte |= byte((bitsToStore>>(5-bitPos))&1) << (7 - bitPos) + bitPos++ + currentByte |= byte((bitsToStore>>(5-bitPos))&1) << (7 - bitPos) + bitPos++ + currentByte |= byte((bitsToStore>>(5-bitPos))&1) << (7 - bitPos) + bitPos++ + + for i, delta := range deltas[1:] { + if deltaDiff { + delta -= deltas[i] + } + // Pack the number of bits (using 6 bits for the bit length) + bitsNeededInteral := bitsNeeded + // Pack the bits of this delta into the byte slice + for bitsNeededInteral > 0 { + if bitPos == 8 { + // Move to a new byte when the current one is full + packed[currentByteIndex] = currentByte + currentByte = 0 + bitPos = 0 + currentByteIndex++ + } + + // Calculate how many bits can be written to the current byte + bitsToWrite := 8 - bitPos + if bitsNeededInteral < bitsToWrite { + bitsToWrite = bitsNeededInteral + } + + // Write bits from delta to current byte + currentByte |= byte((delta>>(bitsNeededInteral-bitsToWrite))&((1< 0 { + packed[currentByteIndex] = currentByte + } + + return currentByteIndex + 1 +} + +type VarIntEncoder struct { + values []uint64 + buf []byte +} + +func (e *VarIntEncoder) Init(expectedCount int) { + if len(e.values) < expectedCount { + e.values = make([]uint64, expectedCount) + } + if len(e.buf) < 8+8*expectedCount { + e.buf = make([]byte, 8+8*expectedCount) + } +} + +func (e VarIntEncoder) EncodeReusable(values []uint64, buf []byte) { + encodeReusable(values, buf, false) +} + +func (e VarIntEncoder) DecodeReusable(data []byte, values []uint64) { + decodeReusable(values, data, false) +} + +func (e *VarIntEncoder) Encode(values []uint64) []byte { + n := encodeReusable(values, e.buf, false) + output := make([]byte, n) + copy(output, e.buf[:n]) + return 
output +} + +func (e *VarIntEncoder) Decode(data []byte) []uint64 { + decodeReusable(e.values, data, false) + return e.values +} + +type VarIntDeltaEncoder struct { + values []uint64 + buf []byte +} + +func (e *VarIntDeltaEncoder) Init(expectedCount int) { + if len(e.values) < expectedCount { + e.values = make([]uint64, expectedCount) + } + if len(e.buf) < 8+8*expectedCount { + e.buf = make([]byte, 8+8*expectedCount) + } +} + +func (e VarIntDeltaEncoder) EncodeReusable(values []uint64, buf []byte) { + encodeReusable(values, buf, true) +} + +func (e VarIntDeltaEncoder) DecodeReusable(data []byte, values []uint64) { + decodeReusable(values, data, true) +} + +func (e *VarIntDeltaEncoder) Encode(values []uint64) []byte { + n := encodeReusable(values, e.buf, true) + output := make([]byte, n) + copy(output, e.buf[:n]) + return output +} + +func (e *VarIntDeltaEncoder) Decode(data []byte) []uint64 { + decodeReusable(e.values, data, true) + return e.values +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/priorityqueue/queue.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/priorityqueue/queue.go new file mode 100644 index 0000000000000000000000000000000000000000..13652a7c7418f3566381e18a5610484881d947b1 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/priorityqueue/queue.go @@ -0,0 +1,198 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
//
// CONTACT: hello@weaviate.io
//

package priorityqueue

// supportedValueType constrains the payload carried by queue items.
// Note: a union containing `any` collapses to `any`, so this constraint
// accepts every type; it is kept for documentation of intended uses.
type supportedValueType interface {
	any | uint64
}

// Item represents a queue item supporting an optional additional Value
type Item[T supportedValueType] struct {
	ID       uint64
	Dist     float32
	Rescored bool
	Value    T
}

// Queue is a priority queue supporting generic item values.
// Items are stored as an implicit binary heap in q.items, ordered by q.less.
type Queue[T supportedValueType] struct {
	items []Item[T]
	less  func(items []Item[T], i, j int) bool
}

// NewMin constructs a priority queue which prioritizes items with smaller distance
func NewMin[T supportedValueType](capacity int) *Queue[T] {
	return &Queue[T]{
		items: make([]Item[T], 0, capacity),
		less: func(items []Item[T], i, j int) bool {
			return items[i].Dist < items[j].Dist
		},
	}
}

// NewMinWithId constructs a min priority queue (smaller distance first) that
// breaks distance ties by ID, popping the larger ID first. The intent, per the
// original note, is to produce results with higher scores first and, on ties,
// lower document ids first — hence the opposite comparison signs for
// scores and ids in the less function.
func NewMinWithId[T supportedValueType](capacity int) *Queue[T] {
	return &Queue[T]{
		items: make([]Item[T], 0, capacity),
		less: func(items []Item[T], i, j int) bool {
			if items[i].Dist == items[j].Dist {
				return items[i].ID > items[j].ID
			}
			return items[i].Dist < items[j].Dist
		},
	}
}

// NewMax constructs a priority queue which prioritizes items with greater distance
func NewMax[T supportedValueType](capacity int) *Queue[T] {
	return &Queue[T]{
		items: make([]Item[T], 0, capacity),
		less: func(items []Item[T], i, j int) bool {
			return items[i].Dist > items[j].Dist
		},
	}
}

// ShouldEnqueue reports whether a candidate with the given distance should be
// added to a queue bounded by limit: either the queue still has room, or the
// current root (q.Top()) compares worse than the candidate.
// NOTE(review): when the queue is empty and limit <= 0 this calls Top() on an
// empty slice and panics — presumably callers guarantee limit > 0; confirm.
func (q *Queue[T]) ShouldEnqueue(distance float32, limit int) bool {
	return q.Len() < limit || q.Top().Dist < distance
}

// InsertAndPop inserts the item, evicts roots until the queue is back within
// limit, and records the new worst (root) distance into worstDist once the
// queue is full.
func (q *Queue[T]) InsertAndPop(id uint64, score float64, limit int, worstDist *float64, val T) {
	q.InsertWithValue(id, float32(score), val)
	for q.Len() > limit {
		q.Pop()
	}
	// only update the worst distance when the queue is full, otherwise results can be missing if the first
	// entry that is checked already has a very high score
	if q.Len() >= limit {
		*worstDist = float64(q.Top().Dist)
	}
}

// Pop removes the next item in the queue and returns it.
// Panics if the queue is empty (indexes items[0] unconditionally).
func (q *Queue[T]) Pop() Item[T] {
	out := q.items[0]
	q.items[0] = q.items[len(q.items)-1]
	q.items = q.items[:len(q.items)-1]
	q.heapify(0)
	return out
}

// Top peeks at the next item in the queue without removing it.
// Panics if the queue is empty.
func (q *Queue[T]) Top() Item[T] {
	return q.items[0]
}

// Len returns the length of the queue
func (q *Queue[T]) Len() int {
	return len(q.items)
}

// Cap returns the total capacity of the queue's backing array
// (cap of the underlying slice, not the remaining free slots).
func (q *Queue[T]) Cap() int {
	return cap(q.items)
}

// Reset clears all items from the queue, keeping the backing array for reuse.
func (q *Queue[T]) Reset() {
	q.items = q.items[:0]
}

// ResetCap drops existing queue items, and allocates a new queue with the given capacity
func (q *Queue[T]) ResetCap(capacity int) {
	q.items = make([]Item[T], 0, capacity)
}

// Insert creates a valueless item and adds it to the queue.
// It returns the heap index at which the item settled after sifting up.
func (q *Queue[T]) Insert(id uint64, distance float32) int {
	item := Item[T]{
		ID:   id,
		Dist: distance,
	}
	return q.insert(item)
}

// InsertWithValue creates an item with a T type value and adds it to the queue.
// It returns the heap index at which the item settled after sifting up.
func (q *Queue[T]) InsertWithValue(id uint64, distance float32, val T) int {
	item := Item[T]{
		ID:    id,
		Dist:  distance,
		Value: val,
	}
	return q.insert(item)
}

// DeleteItem deletes item meeting predicate's conditions
// TODO aliszka optimize?
func (q *Queue[T]) DeleteItem(match func(item Item[T]) bool) bool {
	// Linear scan: the heap carries no ordering by predicate, so every slot
	// may need inspection. Removes at most one matching item.
	for i := range q.items {
		if match(q.items[i]) {
			if i == 0 {
				// the root can be removed via the regular Pop
				q.Pop()
			} else {
				// replace the match with the last element and shrink
				last := q.Len() - 1
				q.items[i] = q.items[last]
				q.items = q.items[:last]
				// NOTE(review): heapify(0) only sifts down from the root; it
				// does not necessarily restore the heap property at index i,
				// where the moved element may now violate ordering with its
				// new parent or children. Likely related to the TODO above —
				// confirm whether a sift-down/sift-up at i is required.
				q.heapify(0)
			}
			return true
		}
	}
	return false
}

// insert appends the item and sifts it up until the heap property holds.
// It returns the final index of the inserted item.
func (q *Queue[T]) insert(item Item[T]) int {
	q.items = append(q.items, item)
	i := len(q.items) - 1
	for i != 0 && q.less(q.items, i, q.parent(i)) {
		q.swap(i, q.parent(i))
		i = q.parent(i)
	}
	return i
}

// left returns the index of the left child of i in the implicit binary heap.
func (q *Queue[T]) left(i int) int {
	return 2*i + 1
}

// right returns the index of the right child of i.
func (q *Queue[T]) right(i int) int {
	return 2*i + 2
}

// parent returns the index of the parent of i.
func (q *Queue[T]) parent(i int) int {
	return (i - 1) / 2
}

// swap exchanges the items at indices i and j.
func (q *Queue[T]) swap(i, j int) {
	q.items[i], q.items[j] = q.items[j], q.items[i]
}

// heapify sifts the item at index i down, swapping it with its "smallest"
// child (per q.less) until both children satisfy the ordering.
func (q *Queue[T]) heapify(i int) {
	left := q.left(i)
	right := q.right(i)
	smallest := i
	if left < len(q.items) && q.less(q.items, left, i) {
		smallest = left
	}

	if right < len(q.items) && q.less(q.items, right, smallest) {
		smallest = right
	}

	if smallest != i {
		q.swap(i, smallest)
		q.heapify(smallest)
	}
}
diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/priorityqueue/queue_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/priorityqueue/queue_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..40f74617da9c86f49e2ce7858ea20c51208e1b6e
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/priorityqueue/queue_test.go
@@ -0,0 +1,227 @@
+//                           _       _
+// __      _____  __ ___   ___  __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+//  \ V  V /  __/ (_| |\ V /| | (_| | || __/
+//   \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+//  Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+// +// CONTACT: hello@weaviate.io +// + +package priorityqueue + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestPriorityQueueMin(t *testing.T) { + values := map[uint64]float32{ + 0: 0.0, + 1: 0.23, + 2: 0.8, + 3: 0.222, + 4: 0.88, + 5: 1, + } + populateMinPq := func() *Queue[any] { + pq := NewMin[any](6) + for id, dist := range values { + pq.Insert(id, dist) + } + return pq + } + + t.Run("insert", func(t *testing.T) { + expectedResults := []Item[any]{ + {Dist: 0, ID: 0}, + {Dist: 0.222, ID: 3}, + {Dist: 0.23, ID: 1}, + {Dist: 0.8, ID: 2}, + {Dist: 0.88, ID: 4}, + {Dist: 1, ID: 5}, + } + + pq := populateMinPq() + + assertPqElementsMatch(t, expectedResults, pq) + }) + + t.Run("delete 1", func(t *testing.T) { + expectedResults := []Item[any]{ + {Dist: 0, ID: 0}, + {Dist: 0.222, ID: 3}, + {Dist: 0.23, ID: 1}, + {Dist: 0.88, ID: 4}, + {Dist: 1, ID: 5}, + } + + pq := populateMinPq() + deleted1 := pq.DeleteItem(func(item Item[any]) bool { + return item.ID == 2 && item.Dist == 0.8 + }) + notDeleted1 := pq.DeleteItem(func(item Item[any]) bool { + return item.ID == 100 + }) + + assert.True(t, deleted1) + assert.False(t, notDeleted1) + assertPqElementsMatch(t, expectedResults, pq) + }) + + t.Run("delete 2", func(t *testing.T) { + expectedResults := []Item[any]{ + {Dist: 0, ID: 0}, + {Dist: 0.23, ID: 1}, + {Dist: 0.88, ID: 4}, + {Dist: 1, ID: 5}, + } + + pq := populateMinPq() + deleted1 := pq.DeleteItem(func(item Item[any]) bool { + return item.ID == 2 && item.Dist == 0.8 + }) + deleted2 := pq.DeleteItem(func(item Item[any]) bool { + return item.ID == 3 && item.Dist == 0.222 + }) + + assert.True(t, deleted1) + assert.True(t, deleted2) + assertPqElementsMatch(t, expectedResults, pq) + }) + + t.Run("delete 3", func(t *testing.T) { + expectedResults := []Item[any]{ + {Dist: 0.23, ID: 1}, + {Dist: 0.88, ID: 4}, + {Dist: 1, ID: 5}, + } + + pq := populateMinPq() + deleted1 := pq.DeleteItem(func(item Item[any]) bool { + return (item.ID == 2 && 
item.Dist == 0.8) + }) + deleted2 := pq.DeleteItem(func(item Item[any]) bool { + return (item.ID == 3 && item.Dist == 0.222) + }) + deleted3 := pq.DeleteItem(func(item Item[any]) bool { + return (item.ID == 0 && item.Dist == 0) + }) + + assert.True(t, deleted1) + assert.True(t, deleted2) + assert.True(t, deleted3) + assertPqElementsMatch(t, expectedResults, pq) + }) +} + +func TestPriorityQueueMax(t *testing.T) { + values := map[uint64]float32{ + 0: 0.0, + 1: 0.23, + 2: 0.8, + 3: 0.222, + 4: 0.88, + 5: 1, + } + populateMaxPq := func() *Queue[any] { + pq := NewMax[any](6) + for id, dist := range values { + pq.Insert(id, dist) + } + return pq + } + + t.Run("insert", func(t *testing.T) { + expectedResults := []Item[any]{ + {Dist: 1, ID: 5}, + {Dist: 0.88, ID: 4}, + {Dist: 0.8, ID: 2}, + {Dist: 0.23, ID: 1}, + {Dist: 0.222, ID: 3}, + {Dist: 0, ID: 0}, + } + + pq := populateMaxPq() + + assertPqElementsMatch(t, expectedResults, pq) + }) + + t.Run("delete 1", func(t *testing.T) { + expectedResults := []Item[any]{ + {Dist: 0.88, ID: 4}, + {Dist: 0.8, ID: 2}, + {Dist: 0.23, ID: 1}, + {Dist: 0.222, ID: 3}, + {Dist: 0, ID: 0}, + } + + pq := populateMaxPq() + deleted1 := pq.DeleteItem(func(item Item[any]) bool { + return item.ID == 5 && item.Dist == 1 + }) + notDeleted1 := pq.DeleteItem(func(item Item[any]) bool { + return item.ID == 100 + }) + + assert.True(t, deleted1) + assert.False(t, notDeleted1) + assertPqElementsMatch(t, expectedResults, pq) + }) + + t.Run("delete 2", func(t *testing.T) { + expectedResults := []Item[any]{ + {Dist: 0.88, ID: 4}, + {Dist: 0.8, ID: 2}, + {Dist: 0.23, ID: 1}, + {Dist: 0, ID: 0}, + } + + pq := populateMaxPq() + deleted1 := pq.DeleteItem(func(item Item[any]) bool { + return item.ID == 5 && item.Dist == 1 + }) + deleted2 := pq.DeleteItem(func(item Item[any]) bool { + return (item.ID == 3 && item.Dist == 0.222) + }) + + assert.True(t, deleted1) + assert.True(t, deleted2) + assertPqElementsMatch(t, expectedResults, pq) + }) + + t.Run("delete 3", 
func(t *testing.T) { + expectedResults := []Item[any]{ + {Dist: 0.88, ID: 4}, + {Dist: 0.23, ID: 1}, + {Dist: 0, ID: 0}, + } + + pq := populateMaxPq() + deleted1 := pq.DeleteItem(func(item Item[any]) bool { + return item.ID == 5 && item.Dist == 1 + }) + deleted2 := pq.DeleteItem(func(item Item[any]) bool { + return (item.ID == 3 && item.Dist == 0.222) + }) + deleted3 := pq.DeleteItem(func(item Item[any]) bool { + return (item.ID == 2 && item.Dist == 0.8) + }) + + assert.True(t, deleted1) + assert.True(t, deleted2) + assert.True(t, deleted3) + assertPqElementsMatch(t, expectedResults, pq) + }) +} + +func assertPqElementsMatch(t *testing.T, expectedResults []Item[any], pq *Queue[any]) { + var results []Item[any] + for pq.Len() > 0 { + results = append(results, pq.Pop()) + } + + assert.ElementsMatch(t, expectedResults, results) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/propertyspecific/index.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/propertyspecific/index.go new file mode 100644 index 0000000000000000000000000000000000000000..c93a83e8d82900362c5b137603486474c3c75033 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/propertyspecific/index.go @@ -0,0 +1,58 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package propertyspecific + +import ( + "context" + + "github.com/pkg/errors" + "github.com/weaviate/weaviate/adapters/repos/db/vector/geo" + "github.com/weaviate/weaviate/entities/schema" +) + +// Index - for now - only supports a Geo index as a property-specific index. +// This could be extended in the future, for example to allow vectorization of +// single properties, as opposed to only allowing vectorization of the entire +// object. 
type Index struct {
	Name     string
	Type     schema.DataType
	GeoIndex *geo.Index // currently the only supported property-specific index
}

// Indices is a collection of property-specific Indices by propname
type Indices map[string]Index

// ByProp retrieves a property-specific index by prop name. Second argument is
// false, if the index doesn't exist.
func (i Indices) ByProp(propName string) (Index, bool) {
	index, ok := i[propName]
	return index, ok
}

// DropAll drops every property-specific index and removes it from the map.
// Only geo indices are supported; encountering any other type aborts with an
// error. Note that on error the drop is partial: entries processed before the
// failure have already been dropped and deleted from the map.
func (i Indices) DropAll(ctx context.Context) error {
	for propName, index := range i {
		if index.Type != schema.DataTypeGeoCoordinates {
			return errors.Errorf("no implementation to delete property %s index of type %v",
				propName, index.Type)
		}

		if err := index.GeoIndex.Drop(ctx); err != nil {
			return errors.Wrapf(err, "drop property %s", propName)
		}

		// deleting during range iteration is safe in Go maps
		index.GeoIndex = nil
		delete(i, propName)

	}
	return nil
}
diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/queue/metrics.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/queue/metrics.go
new file mode 100644
index 0000000000000000000000000000000000000000..12a1ea7106827a6468d9622eb4a555af39475e70
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/queue/metrics.go
@@ -0,0 +1,139 @@
+//                           _       _
+// __      _____  __ ___   ___  __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+//  \ V  V /  __/ (_| |\ V /| | (_| | || __/
+//   \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+//  Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+// +// CONTACT: hello@weaviate.io +// + +package queue + +import ( + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/sirupsen/logrus" + "github.com/weaviate/weaviate/usecases/monitoring" +) + +type Metrics struct { + logger *logrus.Entry + baseMetrics *monitoring.PrometheusMetrics + monitoring bool + queueSize prometheus.Gauge + queueDiskUsage prometheus.Gauge + queuesPaused prometheus.Gauge + queuesCount prometheus.Gauge + partitionProcessingDuration prometheus.Observer +} + +func NewMetrics( + logger logrus.FieldLogger, + prom *monitoring.PrometheusMetrics, + labels prometheus.Labels, +) *Metrics { + m := Metrics{ + logger: logger.WithField("monitoring", "queue"), + } + + if prom == nil { + return &m + } + + m.baseMetrics = prom + + m.monitoring = true + + m.queueSize = prom.QueueSize.With(labels) + m.queueDiskUsage = prom.QueueDiskUsage.With(labels) + m.queuesPaused = prom.QueuePaused.With(labels) + m.queuesCount = prom.QueueCount.With(labels) + m.partitionProcessingDuration = prom.QueuePartitionProcessingDuration.With(labels) + + return &m +} + +func (m *Metrics) Paused(id string) { + m.logger.WithField("action", "queue_pause"). + WithField("queue_id", id). + Trace("index queue paused") + + if !m.monitoring { + return + } + + m.queuesPaused.Inc() +} + +func (m *Metrics) Resumed(id string) { + m.logger.WithField("action", "queue_resume"). + WithField("queue_id", id). + Trace("index queue resumed") + + if !m.monitoring { + return + } + + m.queuesPaused.Dec() +} + +func (m *Metrics) Registered(id string) { + m.logger.WithField("action", "queue_register"). + WithField("queue_id", id). + Trace("queue registered") + + if !m.monitoring { + return + } + + m.queuesCount.Inc() +} + +func (m *Metrics) Unregistered(id string) { + m.logger.WithField("action", "queue_unregister"). + WithField("queue_id", id). 
+ Trace("queue unregistered") + + if !m.monitoring { + return + } + + m.queuesCount.Dec() +} + +func (m *Metrics) TasksProcessed(start time.Time, count int) { + took := time.Since(start) + m.logger.WithField("action", "dispatch_queue"). + WithField("partition_size", count). + WithField("took", took). + Tracef("partition processed by worker in %s", took) + + if !m.monitoring { + return + } + + m.partitionProcessingDuration.Observe(float64(took.Milliseconds())) +} + +func (m *Metrics) Size(size uint64) { + m.logger.WithField("size", size).Tracef("queue size %d", size) + + if !m.monitoring { + return + } + + m.queueSize.Set(float64(size)) +} + +func (m *Metrics) DiskUsage(size int64) { + m.logger.WithField("disk_usage", size).Tracef("disk usage of queue %d", size) + + if !m.monitoring { + return + } + + m.queueDiskUsage.Set(float64(size)) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/queue/queue.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/queue/queue.go new file mode 100644 index 0000000000000000000000000000000000000000..a50b3cc633cca675e8c893f7fc953211229a119a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/queue/queue.go @@ -0,0 +1,1093 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package queue + +import ( + "bufio" + "bytes" + "encoding/binary" + "fmt" + "io" + "io/fs" + "os" + "path/filepath" + "regexp" + "sync" + "time" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +const ( + // defaultChunkSize is the maximum size of each chunk file. Defaults to 10MB. + defaultChunkSize = 10 * 1024 * 1024 + + // defaultStaleTimeout is the duration after which a partial chunk is considered stale. 
+ // If no tasks are pushed to the queue for this duration, the partial chunk is scheduled. + defaultStaleTimeout = 100 * time.Millisecond + + // chunkWriterBufferSize is the size of the buffer used by the chunk writer. + // It should be large enough to hold a few records, but not too large to avoid + // taking up too much memory when the number of queues is large. + chunkWriterBufferSize = 256 * 1024 + + // chunkFileFmt is the format string for the chunk files, + chunkFileFmt = "chunk-%d.bin" + + magicHeader = "WV8Q" +) + +// regex pattern for the chunk files +var chunkFilePattern = regexp.MustCompile(`chunk-\d+\.bin`) + +type Queue interface { + ID() string + Size() int64 + DequeueBatch() (batch *Batch, err error) + Metrics() *Metrics +} + +type BeforeScheduleHook interface { + BeforeSchedule() bool +} + +type DiskQueue struct { + // Logger for the queue. Wrappers of this queue should use this logger. + Logger logrus.FieldLogger + staleTimeout time.Duration + taskDecoder TaskDecoder + scheduler *Scheduler + id string + dir string + onBatchProcessed func() + metrics *Metrics + chunkSize uint64 + + // m protects the disk operations + m sync.RWMutex + lastPushTime time.Time + closed bool + w *chunkWriter + r *chunkReader + recordCount uint64 + diskUsage int64 + + rmLock sync.Mutex +} + +type DiskQueueOptions struct { + // Required + ID string + Scheduler *Scheduler + Dir string + TaskDecoder TaskDecoder + + // Optional + Logger logrus.FieldLogger + StaleTimeout time.Duration + ChunkSize uint64 + OnBatchProcessed func() + Metrics *Metrics +} + +func NewDiskQueue(opt DiskQueueOptions) (*DiskQueue, error) { + if opt.ID == "" { + return nil, errors.New("id is required") + } + if opt.Scheduler == nil { + return nil, errors.New("scheduler is required") + } + if opt.Dir == "" { + return nil, errors.New("dir is required") + } + if opt.TaskDecoder == nil { + return nil, errors.New("task decoder is required") + } + if opt.Logger == nil { + opt.Logger = logrus.New() + } + 
opt.Logger = opt.Logger. + WithField("queue_id", opt.ID). + WithField("action", "disk_queue") + + if opt.Metrics == nil { + opt.Metrics = NewMetrics(opt.Logger, nil, nil) + } + if opt.StaleTimeout <= 0 { + opt.StaleTimeout = defaultStaleTimeout + } + if opt.ChunkSize <= 0 { + opt.ChunkSize = defaultChunkSize + } + + q := DiskQueue{ + id: opt.ID, + scheduler: opt.Scheduler, + dir: opt.Dir, + Logger: opt.Logger, + staleTimeout: opt.StaleTimeout, + taskDecoder: opt.TaskDecoder, + metrics: opt.Metrics, + onBatchProcessed: opt.OnBatchProcessed, + chunkSize: opt.ChunkSize, + } + + return &q, nil +} + +func (q *DiskQueue) Init() error { + // create the directory if it doesn't exist + err := os.MkdirAll(q.dir, 0o755) + if err != nil { + return errors.Wrap(err, "failed to create directory") + } + + // determine the number of records stored on disk + // and the disk usage + chunkList, err := q.analyzeDisk() + if err != nil { + return err + } + + // create chunk reader + q.r = newChunkReader(q.dir, chunkList) + + // create chunk writer + q.w, err = newChunkWriter(q.dir, q.r, q.Logger, q.chunkSize) + if err != nil { + return errors.Wrap(err, "failed to create chunk writer") + } + q.recordCount += q.w.recordCount + + // set the last push time to now + q.lastPushTime = time.Now() + + return nil +} + +// Close the queue, prevent further pushes and unregister it from the scheduler. 
+func (q *DiskQueue) Close() error { + if q == nil { + return nil + } + + q.m.Lock() + if q.closed { + q.m.Unlock() + return errors.New("queue already closed") + } + q.closed = true + q.m.Unlock() + + q.scheduler.UnregisterQueue(q.id) + + q.m.Lock() + defer q.m.Unlock() + + if q.w != nil { + err := q.w.Close() + if err != nil { + return errors.Wrap(err, "failed to close chunk writer") + } + } + + if q.r != nil { + err := q.r.Close() + if err != nil { + return errors.Wrap(err, "failed to close chunk reader") + } + } + + return nil +} + +func (q *DiskQueue) Metrics() *Metrics { + return q.metrics +} + +func (q *DiskQueue) ID() string { + return q.id +} + +var bufPool = sync.Pool{ + New: func() any { + return new(bytes.Buffer) + }, +} + +func (q *DiskQueue) Push(record []byte) error { + q.m.RLock() + if q.closed { + q.m.RUnlock() + return errors.New("queue closed") + } + q.m.RUnlock() + + if len(record) == 0 { + return errors.New("empty record") + } + + buf := bufPool.Get().(*bytes.Buffer) + defer bufPool.Put(buf) + + buf.Reset() + + var bytesBuf [4]byte + // length of the record in 4 bytes + binary.BigEndian.PutUint32(bytesBuf[:], uint32(len(record))) + _, err := buf.Write(bytesBuf[:]) + if err != nil { + return errors.Wrap(err, "failed to write record length") + } + + // write the record + _, err = buf.Write(record) + if err != nil { + return errors.Wrap(err, "failed to write record") + } + + q.m.Lock() + defer q.m.Unlock() + + q.lastPushTime = time.Now() + + n, err := q.w.Write(buf.Bytes()) + if err != nil { + return errors.Wrap(err, "failed to write record") + } + + q.recordCount++ + q.diskUsage += int64(n) + + return nil +} + +func (q *DiskQueue) Scheduler() *Scheduler { + return q.scheduler +} + +func (q *DiskQueue) Flush() error { + q.m.Lock() + defer q.m.Unlock() + + return q.w.Flush() +} + +func (q *DiskQueue) DequeueBatch() (batch *Batch, err error) { + c, err := q.r.ReadChunk() + if err != nil { + return nil, err + } + + // if there are no more chunks to 
read, + // check if the partial chunk is stale (e.g no tasks were pushed for a while) + if c == nil || c.f == nil { + c, err = q.checkIfStale() + if c == nil || err != nil || c.f == nil { + return nil, err + } + } + + if c.f == nil { + return nil, nil + } + defer c.Close() + + // decode all tasks from the chunk + // and partition them by worker + tasks := make([]Task, 0, c.count) + + buf := make([]byte, 4) + for { + buf = buf[:4] + + // read the record length + _, err := io.ReadFull(c.r, buf) + if errors.Is(err, io.EOF) { + break + } + if err != nil { + return nil, errors.Wrap(err, "failed to read record length") + } + length := binary.BigEndian.Uint32(buf) + if length == 0 { + return nil, errors.New("invalid record length") + } + + // read the record + if cap(buf) < int(length) { + buf = make([]byte, length) + } else { + buf = buf[:length] + } + _, err = io.ReadFull(c.r, buf) + if err != nil { + return nil, errors.Wrap(err, "failed to read record") + } + + // decode the task + t, err := q.taskDecoder.DecodeTask(buf) + if err != nil { + return nil, errors.Wrap(err, "failed to decode task") + } + + tasks = append(tasks, t) + } + + err = c.Close() + if err != nil { + q.Logger.WithField("file", c.path).WithError(err).Warn("failed to close chunk file") + } + + if len(tasks) == 0 { + // empty chunk, remove it + q.removeChunk(c) + return nil, nil + } + + doneFn := func() { + q.removeChunk(c) + if q.onBatchProcessed != nil { + q.onBatchProcessed() + } + } + + return &Batch{ + Tasks: tasks, + onDone: doneFn, + }, nil +} + +func (q *DiskQueue) checkIfStale() (*chunk, error) { + if q.Size() == 0 { + return nil, nil + } + + q.m.Lock() + + if q.w.f == nil { + q.m.Unlock() + return nil, nil + } + + if q.w.recordCount == 0 { + q.m.Unlock() + return nil, nil + } + + if time.Since(q.lastPushTime) < q.staleTimeout { + q.m.Unlock() + return nil, nil + } + + q.Logger.Debug("partial chunk is stale, scheduling") + + err := q.w.Promote() + if err != nil { + q.m.Unlock() + return nil, 
err + } + + q.m.Unlock() + + return q.r.ReadChunk() +} + +func (q *DiskQueue) Size() int64 { + if q == nil { + return 0 + } + + q.m.RLock() + defer q.m.RUnlock() + + q.metrics.Size(q.recordCount) + q.metrics.DiskUsage(q.diskUsage) + return int64(q.recordCount) +} + +func (q *DiskQueue) Pause() { + q.scheduler.PauseQueue(q.id) + q.metrics.Paused(q.id) +} + +func (q *DiskQueue) Resume() { + q.scheduler.ResumeQueue(q.id) + q.metrics.Resumed(q.id) +} + +func (q *DiskQueue) Wait() { + q.scheduler.Wait(q.id) +} + +func (q *DiskQueue) Drop() error { + if q == nil { + return nil + } + + err := q.Close() + if err != nil { + q.Logger.WithError(err).Error("failed to close queue") + } + + q.m.Lock() + defer q.m.Unlock() + + // remove the directory + err = os.RemoveAll(q.dir) + if err != nil { + return errors.Wrap(err, "failed to remove directory") + } + + return nil +} + +func (q *DiskQueue) removeChunk(c *chunk) { + q.rmLock.Lock() + defer q.rmLock.Unlock() + + deleted, err := q.r.RemoveChunk(c) + if err != nil { + q.Logger.WithError(err).WithField("file", c.path).Error("failed to remove chunk") + return + } + if !deleted { + return + } + + q.m.Lock() + q.recordCount -= c.count + q.diskUsage -= int64(c.size) + q.metrics.DiskUsage(q.diskUsage) + q.metrics.Size(q.recordCount) + q.m.Unlock() +} + +// analyzeDisk is a slow method that determines the number of records +// stored on disk and in the partial chunk by reading the header of all the files in the directory. +// It also calculates the disk usage. +// It is used when the queue is first initialized. 
+func (q *DiskQueue) analyzeDisk() ([]string, error) { + q.m.Lock() + defer q.m.Unlock() + + entries, err := os.ReadDir(q.dir) + if err != nil { + return nil, errors.Wrap(err, "failed to read directory") + } + + chunkList := make([]string, 0, len(entries)) + + for _, entry := range entries { + if entry.IsDir() { + continue + } + + // check if the entry name matches the regex pattern of a chunk file + if !chunkFilePattern.Match([]byte(entry.Name())) { + continue + } + + fi, err := entry.Info() + if err != nil { + return nil, errors.Wrap(err, "failed to get file info") + } + + filePath := filepath.Join(q.dir, entry.Name()) + + if fi.Size() == 0 { + // best effort to remove empty files + _ = os.Remove(filePath) + continue + } + + q.diskUsage += fi.Size() + + count, err := q.readChunkRecordCount(filePath) + if err != nil { + return nil, err + } + + // partial chunk + if count == 0 { + continue + } + + q.recordCount += count + + chunkList = append(chunkList, filePath) + continue + } + + return chunkList, nil +} + +func (q *DiskQueue) readChunkRecordCount(path string) (uint64, error) { + f, err := os.Open(path) + if err != nil { + return 0, err + } + defer f.Close() + + return readChunkHeader(f) +} + +var readerPool = sync.Pool{ + New: func() any { + return bufio.NewReaderSize(nil, defaultChunkSize) + }, +} + +type chunk struct { + path string + r *bufio.Reader + f *os.File + count uint64 + size uint64 +} + +func openChunk(path string) (*chunk, error) { + var err error + c := chunk{ + path: path, + } + + c.f, err = os.Open(path) + if err != nil { + return nil, err + } + + stat, err := c.f.Stat() + if err != nil { + return nil, err + } + + if stat.Size() == 0 { + // empty file + // remove it + err = c.f.Close() + if err != nil { + return nil, err + } + + err = os.Remove(path) + if err != nil { + return nil, err + } + + return nil, nil + } + + c.r = readerPool.Get().(*bufio.Reader) + c.r.Reset(c.f) + c.size = uint64(stat.Size()) + + // check the header + c.count, err = 
readChunkHeader(c.r) + if err != nil { + return nil, err + } + + return &c, nil +} + +func chunkFromFile(f *os.File) (*chunk, error) { + var err error + c := chunk{ + path: f.Name(), + f: f, + } + + _, err = f.Seek(0, 0) + if err != nil { + return nil, err + } + + c.r = readerPool.Get().(*bufio.Reader) + c.r.Reset(c.f) + + // check the header + c.count, err = readChunkHeader(c.r) + if err != nil { + return nil, err + } + + // get the file size + info, err := c.f.Stat() + if err != nil { + return nil, errors.Wrap(err, "failed to stat chunk file") + } + + c.size = uint64(info.Size()) + + return &c, nil +} + +func (c *chunk) Close() error { + if c.f == nil { + return nil + } + + err := c.f.Close() + readerPool.Put(c.r) + c.f = nil + return err +} + +func readChunkHeader(r io.Reader) (uint64, error) { + // read the header + header := make([]byte, len(magicHeader)+1+8) + _, err := io.ReadFull(r, header) + if err != nil { + return 0, errors.Wrap(err, "failed to read header") + } + + // check the magic number + if !bytes.Equal(header[:len(magicHeader)], []byte(magicHeader)) { + return 0, errors.New("invalid magic header") + } + + // check the version + if header[len(magicHeader)] != 1 { + return 0, errors.New("invalid version") + } + + // read the number of records + return binary.BigEndian.Uint64(header[len(magicHeader)+1:]), nil +} + +// chunkWriter is an io.Writer that writes records to a series of chunk files. +// Each chunk file has a header that contains the number of records in the file. +// The records are written as a 4-byte length followed by the record itself. +// The records are written to a partial chunk file until the target size is reached, +// at which point the partial chunk is promoted to a new chunk file. +// The chunkWriter is not thread-safe and should be used with a lock. 
+type chunkWriter struct { + logger logrus.FieldLogger + maxSize uint64 + dir string + w *bufio.Writer + f *os.File + size uint64 + recordCount uint64 + buf [8]byte + + reader *chunkReader +} + +func newChunkWriter(dir string, reader *chunkReader, logger logrus.FieldLogger, maxSize uint64) (*chunkWriter, error) { + ch := &chunkWriter{ + dir: dir, + reader: reader, + logger: logger, + maxSize: maxSize, + w: bufio.NewWriterSize(nil, chunkWriterBufferSize), + } + + err := ch.Open() + if err != nil { + return nil, err + } + + return ch, nil +} + +func (w *chunkWriter) Write(buf []byte) (int, error) { + var created bool + + if w.f == nil { + err := w.Create() + if err != nil { + return 0, err + } + created = true + } + + if w.IsFull() { + err := w.Promote() + if err != nil { + return 0, err + } + + err = w.Create() + if err != nil { + return 0, err + } + created = true + } + + n, err := w.w.Write(buf) + if err != nil { + return n, err + } + + w.size += uint64(n) + w.recordCount++ + + if created { + return int(w.size), nil + } + + return n, nil +} + +func (w *chunkWriter) Flush() error { + if w.f == nil { + return nil + } + + return w.w.Flush() +} + +func (w *chunkWriter) Close() error { + err := w.w.Flush() + if err != nil { + return err + } + + if w.f != nil { + err = w.f.Sync() + if err != nil { + return errors.Wrap(err, "failed to sync") + } + + err := w.f.Close() + if err != nil { + return errors.Wrap(err, "failed to close chunk") + } + + w.f = nil + } + + return nil +} + +func (w *chunkWriter) Create() error { + var err error + + path := filepath.Join(w.dir, fmt.Sprintf(chunkFileFmt, time.Now().UnixMicro())) + w.f, err = os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0o644) + if err != nil { + return errors.Wrap(err, "failed to create chunk file") + } + + w.w.Reset(w.f) + + // magic + _, err = w.w.Write([]byte(magicHeader)) + if err != nil { + return errors.Wrap(err, "failed to write header") + } + // version + err = w.w.WriteByte(1) + if err != nil { + return 
errors.Wrap(err, "failed to write version") + } + // number of records + binary.BigEndian.PutUint64(w.buf[:], uint64(0)) + _, err = w.w.Write(w.buf[:]) + if err != nil { + return errors.Wrap(err, "failed to write size") + } + w.size = uint64(len(magicHeader) + 1 + 8) + + return nil +} + +func (w *chunkWriter) Open() error { + entries, err := os.ReadDir(w.dir) + if err != nil { + return errors.Wrap(err, "failed to read directory") + } + + if len(entries) == 0 { + return nil + } + + lastChunk := entries[len(entries)-1].Name() + + w.f, err = os.OpenFile(filepath.Join(w.dir, lastChunk), os.O_RDWR, 0o644) + if err != nil { + return errors.Wrap(err, "failed to open chunk file") + } + + w.w.Reset(w.f) + + // get file size + info, err := w.f.Stat() + if err != nil { + return errors.Wrap(err, "failed to stat chunk file") + } + + // new file, write the header + if info.Size() == 0 { + // magic + _, err = w.w.Write([]byte(magicHeader)) + if err != nil { + return errors.Wrap(err, "failed to write header") + } + // version + err = w.w.WriteByte(1) + if err != nil { + return errors.Wrap(err, "failed to write version") + } + // number of records + binary.BigEndian.PutUint64(w.buf[:], uint64(0)) + _, err = w.w.Write(w.buf[:]) + if err != nil { + return errors.Wrap(err, "failed to write size") + } + w.size = uint64(len(magicHeader) + 1 + 8) + + return nil + } + + // existing file: + // either the record count is already written in the header + // of this is a partial chunk and we need to count the records + recordCount, err := readChunkHeader(w.f) + if err != nil { + return errors.Wrap(err, "failed to read chunk header") + } + + if recordCount > 0 { + // the file is a complete chunk + // close the file and open a new one + err = w.f.Close() + if err != nil { + return errors.Wrap(err, "failed to close chunk file") + } + + return w.Create() + } + + w.size = uint64(info.Size()) + + r := bufio.NewReader(w.f) + + // count the records by reading the length of each record + // and 
skipping it + var count uint64 + for { + // read the record length + n, err := io.ReadFull(r, w.buf[:4]) + if errors.Is(err, io.EOF) { + break + } + if errors.Is(err, io.ErrUnexpectedEOF) { + // a record was not fully written, probably because of a crash. + w.logger.WithField("action", "queue_log_corruption"). + WithField("path", filepath.Join(w.dir, lastChunk)). + Error(errors.Wrap(err, "queue ended abruptly, some elements may not have been recovered")) + + // truncate the file to the last complete record + err = w.f.Truncate(int64(w.size) - int64(n)) + if err != nil { + return errors.Wrap(err, "failed to truncate chunk file") + } + err = w.f.Sync() + if err != nil { + return errors.Wrap(err, "failed to sync chunk file") + } + w.size -= uint64(n) + break + } + if err != nil { + return errors.Wrap(err, "failed to read record length") + } + length := binary.BigEndian.Uint32(w.buf[:4]) + if length == 0 { + return errors.New("invalid record length") + } + + // skip the record + n, err = r.Discard(int(length)) + if err != nil { + if errors.Is(err, io.EOF) { + // a record was not fully written, probably because of a crash. + w.logger.WithField("action", "queue_log_corruption"). + WithField("path", filepath.Join(w.dir, lastChunk)). 
+ Error(errors.Wrap(err, "queue ended abruptly, some elements may not have been recovered")) + + // truncate the file to the last complete record + err = w.f.Truncate(int64(w.size) - 4 - int64(n)) + if err != nil { + return errors.Wrap(err, "failed to truncate chunk file") + } + err = w.f.Sync() + if err != nil { + return errors.Wrap(err, "failed to sync chunk file") + } + w.size -= 4 + uint64(n) + break + } + + return errors.Wrap(err, "failed to skip record") + } + + count++ + } + + w.recordCount = count + + // place the cursor at the end of the file + _, err = w.f.Seek(0, 2) + if err != nil { + return errors.Wrap(err, "failed to seek to the end of the file") + } + + return nil +} + +func (w *chunkWriter) IsFull() bool { + return w.f != nil && w.size >= w.maxSize +} + +func (w *chunkWriter) Promote() error { + if w.f == nil { + return nil + } + + // flush the buffer + err := w.w.Flush() + if err != nil { + return errors.Wrap(err, "failed to flush chunk") + } + + // update the number of records in the header + _, err = w.f.Seek(int64(len(magicHeader)+1), 0) + if err != nil { + return errors.Wrap(err, "failed to seek to record count") + } + err = binary.Write(w.f, binary.BigEndian, w.recordCount) + if err != nil { + return errors.Wrap(err, "failed to write record count") + } + + err = w.reader.PromoteChunk(w.f) + if err != nil { + return errors.Wrap(err, "failed to promote chunk") + } + + w.f = nil + w.size = 0 + w.recordCount = 0 + w.w.Reset(nil) + + return nil +} + +type chunkReader struct { + m sync.Mutex + dir string + cursor int + chunkList []string + chunks map[string]*os.File +} + +func newChunkReader(dir string, chunkList []string) *chunkReader { + return &chunkReader{ + dir: dir, + chunks: make(map[string]*os.File), + chunkList: chunkList, + } +} + +func (r *chunkReader) ReadChunk() (*chunk, error) { + r.m.Lock() + + if r.cursor >= len(r.chunkList) { + r.m.Unlock() + return nil, nil + } + + path := r.chunkList[r.cursor] + f, ok := r.chunks[path] + + 
r.cursor++ + + r.m.Unlock() + + if ok { + return chunkFromFile(f) + } + + return openChunk(path) +} + +func (r *chunkReader) Close() error { + r.m.Lock() + defer r.m.Unlock() + + for _, f := range r.chunks { + _ = f.Sync() + _ = f.Close() + } + + return nil +} + +func (r *chunkReader) PromoteChunk(f *os.File) error { + r.m.Lock() + // do not keep more than 10 files open + if len(r.chunks) > 10 { + r.m.Unlock() + + // sync and close the chunk + err := f.Sync() + if err != nil { + return errors.Wrap(err, "failed to sync chunk") + } + + err = f.Close() + if err != nil { + return errors.Wrap(err, "failed to close chunk") + } + + // add the file to the list + r.m.Lock() + r.chunkList = append(r.chunkList, f.Name()) + r.m.Unlock() + + return nil + } + defer r.m.Unlock() + + r.chunks[f.Name()] = f + r.chunkList = append(r.chunkList, f.Name()) + + return nil +} + +func (r *chunkReader) RemoveChunk(c *chunk) (bool, error) { + _ = c.Close() + + r.m.Lock() + delete(r.chunks, c.path) + r.m.Unlock() + + err := os.Remove(c.path) + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + // already removed + return false, nil + } + + return false, err + } + + return true, nil +} + +// compile time check for Queue interface +var _ = Queue(new(DiskQueue)) diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/queue/queue_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/queue/queue_test.go new file mode 100644 index 0000000000000000000000000000000000000000..3bc9edbcbf9763cf9040ea4a1ff9d1834faa8572 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/queue/queue_test.go @@ -0,0 +1,489 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package queue + +import ( + "bufio" + "bytes" + "encoding/binary" + "io" + "os" + "path/filepath" + "testing" + "time" + + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/require" +) + +func TestNewDiskQueue(t *testing.T) { + tempDir := t.TempDir() + + s := makeScheduler(t) + + q, err := NewDiskQueue(DiskQueueOptions{ + ID: "test_queue", + Scheduler: s, + Logger: newTestLogger(), Dir: tempDir, + TaskDecoder: &mockTaskDecoder{}, + }) + require.NoError(t, err) + require.NotNil(t, q) + require.Equal(t, "test_queue", q.ID()) +} + +func TestQueuePush(t *testing.T) { + s := makeScheduler(t) + s.Start() + defer s.Close() + + t.Run("a few tasks", func(t *testing.T) { + q := makeQueue(t, s, discardExecutor()) + pushMany(t, q, 1, 100, 200, 300) + require.Equal(t, int64(3), q.Size()) + q.Close() + }) + + t.Run("push when closed", func(t *testing.T) { + q := makeQueue(t, s, discardExecutor()) + + err := q.Close() + require.NoError(t, err) + + err = q.Push(makeRecord(1, 100)) + require.Error(t, err) + }) + + t.Run("lazily creates chunk", func(t *testing.T) { + q := makeQueue(t, s, discardExecutor()) + + entries, err := os.ReadDir(q.dir) + require.NoError(t, err) + + require.Len(t, entries, 0) + + err = q.Push(makeRecord(1, 100)) + require.NoError(t, err) + + entries, err = os.ReadDir(q.dir) + require.NoError(t, err) + + require.Len(t, entries, 1) + }) + + t.Run("re-open", func(t *testing.T) { + dir := t.TempDir() + decoder := &mockTaskDecoder{} + q, err := NewDiskQueue(DiskQueueOptions{ + ID: "test_queue", + Scheduler: s, + Logger: newTestLogger(), + Dir: dir, + TaskDecoder: decoder, + StaleTimeout: 500 * time.Millisecond, + ChunkSize: 50, + }) + require.NoError(t, err) + err = q.Init() + require.NoError(t, err) + + pushMany(t, q, 1, 100, 200, 300) + + err = q.Close() + require.NoError(t, err) + + q, err = NewDiskQueue(DiskQueueOptions{ + ID: "test_queue", + Scheduler: s, + Logger: newTestLogger(), + Dir: dir, + TaskDecoder: 
decoder, + StaleTimeout: 500 * time.Millisecond, + ChunkSize: 50, + }) + require.NoError(t, err) + err = q.Init() + require.NoError(t, err) + + require.Equal(t, int64(3), q.Size()) + + err = q.Push(makeRecord(1, 100)) + require.NoError(t, err) + + require.Equal(t, int64(4), q.Size()) + }) + + t.Run("keeps track of last push time", func(t *testing.T) { + q := makeQueue(t, s, discardExecutor()) + + lpt := q.lastPushTime + require.NotNil(t, lpt) + + pushMany(t, q, 1, 100, 200, 300) + + lpt = q.lastPushTime + require.NotNil(t, lpt) + + pushMany(t, q, 1, 400, 500, 600) + + lpt2 := q.lastPushTime + require.NotEqual(t, lpt, lpt2) + }) + + t.Run("persistence", func(t *testing.T) { + q := makeQueueSize(t, s, discardExecutor(), 1000) + + // ensure the queue doesn't get processed + q.Pause() + + for i := 0; i < 100; i++ { + pushMany(t, q, 1, 100, 200, 300) + } + + err := q.Flush() + require.NoError(t, err) + + entries, err := os.ReadDir(q.dir) + require.NoError(t, err) + require.Len(t, entries, 4) + + // first 3 are full + for i := 0; i < 3; i++ { + stat, err := os.Stat(filepath.Join(q.dir, entries[i].Name())) + require.NoError(t, err) + require.EqualValues(t, 1001, stat.Size()) + } + + // last one is partial + stat, err := os.Stat(filepath.Join(q.dir, entries[3].Name())) + require.NoError(t, err) + require.EqualValues(t, 949, stat.Size()) + + // ensure the last chunk is stale + time.Sleep(q.staleTimeout) + + // dequeue all tasks + for i := 0; i < 4; i++ { + batch, err := q.DequeueBatch() + require.NoError(t, err) + require.NotNil(t, batch) + + batch.Done() + } + + // ensure all chunks are removed + entries, err = os.ReadDir(q.dir) + require.NoError(t, err) + + require.Len(t, entries, 0) + + // ensure the queue reports the correct size + require.EqualValues(t, 0, q.Size()) + require.EqualValues(t, 0, q.diskUsage) + }) +} + +func TestQueueDecodeTask(t *testing.T) { + s := makeScheduler(t) + s.Start() + defer s.Close() + + t.Run("a few tasks", func(t *testing.T) { + exec := 
discardExecutor() + q := makeQueueSize(t, s, exec, 50) + + pushMany(t, q, 1, 100, 200, 300, 400, 500, 600) + + entries, err := os.ReadDir(q.dir) + require.NoError(t, err) + require.Len(t, entries, 2) + + f, err := os.Open(filepath.Join(q.dir, entries[0].Name())) + require.NoError(t, err) + defer f.Close() + + batch, err := q.DequeueBatch() + require.NoError(t, err) + require.NotNil(t, batch) + require.Len(t, batch.Tasks, 3) + + for i := 0; i < 3; i++ { + task := batch.Tasks[i] + require.NotNil(t, task) + require.Equal(t, uint8(1), task.Op()) + require.Equal(t, uint64(100*(i+1)), task.Key()) + } + + require.Equal(t, int64(6), q.Size()) + + // decoding more tasks should return nil + batch, err = q.DequeueBatch() + require.NoError(t, err) + require.Nil(t, batch) + + err = q.Close() + require.NoError(t, err) + }) + + t.Run("many tasks", func(t *testing.T) { + exec := discardExecutor() + q := makeQueueSize(t, s, exec, 660) + q.Pause() + + // encode 120 records + for i := 0; i < 120; i++ { + err := q.Push(makeRecord(uint8(i), uint64(i+1))) + require.NoError(t, err) + } + + // check the number of files + entries, err := os.ReadDir(q.dir) + require.NoError(t, err) + require.Len(t, entries, 3) + // check if the entry name matches the regex pattern + require.Regexp(t, `chunk-\d+\.bin`, entries[0].Name()) + require.Regexp(t, `chunk-\d+\.bin`, entries[1].Name()) + require.Regexp(t, `chunk-\d+\.bin`, entries[2].Name()) + + // check the content of the files + checkContent := func(fName string, size int, start, end int) { + f, err := os.Open(filepath.Join(q.dir, fName)) + require.NoError(t, err) + defer f.Close() + + gotSize, err := readChunkHeader(f) + if size == 0 { + require.ErrorIs(t, err, io.EOF) + return + } + require.NoError(t, err) + if gotSize != 0 { + require.Equal(t, size, int(gotSize)) + } + + buf := bufio.NewReader(f) + + for i := start; i < end; i++ { + // read the record length + var rsizeBuf [4]byte + _, err := io.ReadFull(buf, rsizeBuf[:]) + require.NoError(t, 
err) + rSize := binary.BigEndian.Uint32(rsizeBuf[:]) + require.Equal(t, uint32(9), rSize) + + // read the record + var recordBuf [9]byte + _, err = io.ReadFull(buf, recordBuf[:]) + require.NoError(t, err) + op := recordBuf[0] + key := binary.BigEndian.Uint64(recordBuf[1:]) + require.Equal(t, uint8(i), op) + require.Equal(t, uint64(i+1), key) + } + } + + checkContent(entries[0].Name(), 50, 0, 49) + checkContent(entries[1].Name(), 50, 50, 99) + + // partial file should have 0 records because it was not flushed + checkContent(entries[2].Name(), 0, 100, 119) + + // flush the queue + err = q.Flush() + require.NoError(t, err) + + // check the content of the partial file + checkContent(entries[2].Name(), 20, 100, 119) + + // check the queue size + size := q.Size() + require.EqualValues(t, 120, size) + + // promote the partial file + err = q.w.Promote() + require.NoError(t, err) + + // check the number of files + entries, err = os.ReadDir(q.dir) + require.NoError(t, err) + + require.Len(t, entries, 3) + require.Regexp(t, `chunk-\d+\.bin`, entries[0].Name()) + require.Regexp(t, `chunk-\d+\.bin`, entries[1].Name()) + require.Regexp(t, `chunk-\d+\.bin`, entries[2].Name()) + + // check the content of the 3rd file + checkContent(entries[2].Name(), 20, 100, 119) + + // check the queue size again + size = q.Size() + require.EqualValues(t, 120, size) + + // promote again, no-op + err = q.w.Promote() + require.NoError(t, err) + }) + + t.Run("restart", func(t *testing.T) { + exec := discardExecutor() + q := makeQueueSize(t, s, exec, 50) + + pushMany(t, q, 1, 100, 200, 300, 400, 500, 600) + + entries, err := os.ReadDir(q.dir) + require.NoError(t, err) + require.Len(t, entries, 2) + + err = q.Close() + require.NoError(t, err) + + q, err = NewDiskQueue(DiskQueueOptions{ + ID: "test_queue", + Scheduler: s, + Logger: newTestLogger(), + Dir: q.dir, + TaskDecoder: &mockTaskDecoder{}, + StaleTimeout: 500 * time.Millisecond, + ChunkSize: 50, + }) + require.NoError(t, err) + + err = q.Init() 
+ require.NoError(t, err) + + batch, err := q.DequeueBatch() + require.NoError(t, err) + require.NotNil(t, batch) + require.Len(t, batch.Tasks, 3) + + for i := 0; i < 3; i++ { + task := batch.Tasks[i] + require.NotNil(t, task) + require.Equal(t, uint8(1), task.Op()) + require.Equal(t, uint64(100*(i+1)), task.Key()) + } + + require.Equal(t, int64(6), q.Size()) + + // decoding more tasks should return nil + batch, err = q.DequeueBatch() + require.NoError(t, err) + require.Nil(t, batch) + + err = q.Close() + require.NoError(t, err) + }) +} + +func newTestLogger() logrus.FieldLogger { + logger := logrus.New() + logger.SetLevel(logrus.DebugLevel) + return logger +} + +func BenchmarkQueuePush(b *testing.B) { + rec := make([]byte, 10*1024) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + b.StopTimer() + d, err := NewDiskQueue(DiskQueueOptions{ + Dir: filepath.Join(b.TempDir(), "test_queue"), + ID: "test_queue", + Scheduler: makeScheduler(b), + TaskDecoder: &mockTaskDecoder{}, + }) + require.NoError(b, err) + b.StartTimer() + + for j := 0; j < 20_000; j++ { + d.Push(rec) + } + } +} + +func TestPartialChunkRecovery(t *testing.T) { + tests := []struct { + name string + truncate int64 + records int + }{ + {"truncate full record", -10, 4}, + {"truncate mid record", -2, 4}, + {"truncate record length", -11, 4}, + {"truncate after header", 13, 0}, + {"empty file", 0, 0}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + s := makeScheduler(t, 1) + s.Start() + + tmpDir := t.TempDir() + + _, e := streamExecutor() + q := makeQueueWith(t, s, e, 500, tmpDir) + + // write 5 records + for i := 0; i < 5; i++ { + err := q.Push(bytes.Repeat([]byte{1}, 10)) + require.NoError(t, err) + } + + // close the queue to ensure all records are flushed + err := q.Close() + require.NoError(t, err) + + // ensure there is a chunk file + entries, err := os.ReadDir(tmpDir) + require.NoError(t, err) + require.Len(t, entries, 1) + + stat, err := os.Stat(filepath.Join(tmpDir, 
entries[0].Name())) + require.NoError(t, err) + size := stat.Size() + + // manually corrupt the file + chunkFile := filepath.Join(tmpDir, entries[0].Name()) + if test.truncate < 0 { + // truncate the file from the end + err = os.Truncate(chunkFile, int64(size+test.truncate)) + } else { + // truncate the file from the beginning + err = os.Truncate(chunkFile, test.truncate) + } + require.NoError(t, err) + + // open the queue again + // this should not return an error + q = makeQueueWith(t, s, e, 500, tmpDir) + require.NoError(t, err) + + s.RegisterQueue(q) + q.Pause() + + // manually promote a partial chunk to a full chunk + err = q.w.Promote() + require.NoError(t, err) + + batch, err := q.DequeueBatch() + require.NoError(t, err) + if test.records == 0 { + require.Nil(t, batch) + } else { + require.NotNil(t, batch) + require.Len(t, batch.Tasks, test.records) + } + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/queue/scheduler.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/queue/scheduler.go new file mode 100644 index 0000000000000000000000000000000000000000..4d048be6488abea75483448f09468c97beff5729 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/queue/scheduler.go @@ -0,0 +1,574 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package queue + +import ( + "context" + "os" + "runtime" + "sync" + "time" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/weaviate/weaviate/adapters/repos/db/vector/common" + enterrors "github.com/weaviate/weaviate/entities/errors" +) + +type Scheduler struct { + SchedulerOptions + + queues struct { + sync.Mutex + + m map[string]*queueState + } + + // context used to close pending tasks + ctx context.Context + cancelFn context.CancelFunc + + activeTasks *common.SharedGauge + + wg sync.WaitGroup + chans []chan *Batch + triggerCh chan chan struct{} +} + +type SchedulerOptions struct { + Logger logrus.FieldLogger + // Number of workers to process tasks. Defaults to the number of CPUs - 1. + Workers int + // The interval at which the scheduler checks the queues for tasks. Defaults to 1 second. + ScheduleInterval time.Duration + // The interval between retries for failed tasks. + RetryInterval time.Duration + // Function to be called when the scheduler is closed + OnClose func() +} + +func NewScheduler(opts SchedulerOptions) *Scheduler { + var err error + + if opts.Logger == nil { + opts.Logger = logrus.New() + } + opts.Logger = opts.Logger.WithField("component", "queue-scheduler") + + if opts.Workers <= 0 { + opts.Workers = max(1, runtime.GOMAXPROCS(0)-1) + } + + if opts.ScheduleInterval == 0 { + var it time.Duration + v := os.Getenv("QUEUE_SCHEDULER_INTERVAL") + + if v != "" { + it, err = time.ParseDuration(v) + if err != nil { + opts.Logger.WithError(err).WithField("value", v).Warn("failed to parse QUEUE_SCHEDULER_INTERVAL, using default") + } + } + + if it == 0 { + it = 1 * time.Second + } + opts.ScheduleInterval = it + } + + if opts.RetryInterval == 0 { + var ri time.Duration + v := os.Getenv("QUEUE_RETRY_INTERVAL") + + if v != "" { + ri, err = time.ParseDuration(v) + if err != nil { + opts.Logger.WithError(err).WithField("value", v).Warn("failed to parse QUEUE_RETRY_INTERVAL, using default") + } + 
} + + if ri == 0 { + ri = 5 * time.Second + } + opts.RetryInterval = ri + } + + s := Scheduler{ + SchedulerOptions: opts, + activeTasks: common.NewSharedGauge(), + } + s.queues.m = make(map[string]*queueState) + s.triggerCh = make(chan chan struct{}) + + return &s +} + +func (s *Scheduler) RegisterQueue(q Queue) { + if s.ctx == nil { + // scheduler not started + return + } + + s.queues.Lock() + defer s.queues.Unlock() + + s.queues.m[q.ID()] = newQueueState(s.ctx, q) + + q.Metrics().Registered(q.ID()) +} + +func (s *Scheduler) UnregisterQueue(id string) { + if s.ctx == nil { + // scheduler not started + return + } + + q := s.getQueue(id) + if q == nil { + return + } + + s.PauseQueue(id) + + q.cancelFn() + + // wait for the workers to finish processing the queue's tasks + s.Wait(id) + + // the queue is paused, so it's safe to remove it + s.queues.Lock() + delete(s.queues.m, id) + s.queues.Unlock() + + q.q.Metrics().Unregistered(q.q.ID()) +} + +func (s *Scheduler) Start() { + if s.ctx != nil { + // scheduler already started + return + } + + s.ctx, s.cancelFn = context.WithCancel(context.Background()) + + // run workers + chans := make([]chan *Batch, s.Workers) + + for i := 0; i < s.Workers; i++ { + worker, ch := NewWorker(s.Logger, s.RetryInterval) + chans[i] = ch + + s.wg.Add(1) + f := func() { + defer s.wg.Done() + + worker.Run(s.ctx) + } + enterrors.GoWrapper(f, s.Logger) + } + + s.chans = chans + + // run scheduler goroutine + s.wg.Add(1) + f := func() { + defer s.wg.Done() + + s.runScheduler() + } + enterrors.GoWrapper(f, s.Logger) +} + +func (s *Scheduler) Close() error { + if s == nil || s.ctx == nil { + // scheduler not initialized. No op. 
+ return nil + } + + // check if the scheduler is already closed + if s.ctx.Err() != nil { + return nil + } + + // stop scheduling + s.cancelFn() + + // wait for the workers to finish processing tasks + s.activeTasks.Wait() + + // wait for the spawned goroutines to stop + s.wg.Wait() + + // close the channels + for _, ch := range s.chans { + close(ch) + } + + s.Logger.Debug("scheduler closed") + + if s.OnClose != nil { + s.OnClose() + } + + return nil +} + +func (s *Scheduler) PauseQueue(id string) { + if s.ctx == nil { + // scheduler not started + return + } + + q := s.getQueue(id) + if q == nil { + return + } + + q.m.Lock() + q.paused = true + q.m.Unlock() + + s.Logger.WithField("id", id).Debug("queue paused") +} + +func (s *Scheduler) ResumeQueue(id string) { + if s.ctx == nil { + // scheduler not started + return + } + + q := s.getQueue(id) + if q == nil { + return + } + + q.m.Lock() + q.paused = false + q.m.Unlock() + + s.Logger.WithField("id", id).Debug("queue resumed") +} + +func (s *Scheduler) Wait(id string) { + if s.ctx == nil { + // scheduler not started + return + } + + q := s.getQueue(id) + if q == nil { + return + } + + q.scheduled.Wait() + q.activeTasks.Wait() +} + +func (s *Scheduler) WaitAll() { + if s.ctx == nil { + // scheduler not started + return + } + + s.activeTasks.Wait() +} + +func (s *Scheduler) getQueue(id string) *queueState { + s.queues.Lock() + defer s.queues.Unlock() + + return s.queues.m[id] +} + +func (s *Scheduler) runScheduler() { + t := time.NewTicker(s.ScheduleInterval) + + for { + select { + case <-s.ctx.Done(): + // stop the ticker + t.Stop() + return + case <-t.C: + s.schedule() + case ch := <-s.triggerCh: + s.scheduleQueues() + close(ch) + } + } +} + +// Manually schedule the queues. 
+func (s *Scheduler) Schedule(ctx context.Context) { + if s.ctx == nil { + // scheduler not started + return + } + + ch := make(chan struct{}) + select { + case s.triggerCh <- ch: + select { + case <-ch: + case <-ctx.Done(): + } + default: + } +} + +func (s *Scheduler) schedule() { + // as long as there are tasks to schedule, keep running + // in a tight loop + for { + if s.ctx.Err() != nil { + return + } + + if nothingScheduled := s.scheduleQueues(); nothingScheduled { + return + } + } +} + +func (s *Scheduler) scheduleQueues() (nothingScheduled bool) { + // loop over the queues in random order + s.queues.Lock() + ids := make([]string, 0, len(s.queues.m)) + for id := range s.queues.m { + ids = append(ids, id) + } + s.queues.Unlock() + + nothingScheduled = true + + for _, id := range ids { + if s.ctx.Err() != nil { + return + } + + q := s.getQueue(id) + if q == nil { + continue + } + + // mark it as scheduled + q.MarkAsScheduled() + + if q.Paused() { + q.MarkAsUnscheduled() + continue + } + + // run the before-schedule hook if it is implemented + if hook, ok := q.q.(BeforeScheduleHook); ok { + if skip := hook.BeforeSchedule(); skip { + q.MarkAsUnscheduled() + continue + } + } + + if q.q.Size() == 0 { + q.MarkAsUnscheduled() + continue + } + + count, err := s.dispatchQueue(q) + if err != nil { + s.Logger.WithError(err).WithField("id", id).Error("failed to schedule queue") + } + + q.MarkAsUnscheduled() + + nothingScheduled = count <= 0 + } + + return +} + +func (s *Scheduler) dispatchQueue(q *queueState) (int64, error) { + if q.ctx.Err() != nil { + return 0, nil + } + + batch, err := q.q.DequeueBatch() + if err != nil { + return 0, errors.Wrap(err, "failed to dequeue batch") + } + if batch == nil || len(batch.Tasks) == 0 { + return 0, nil + } + + partitions := make([][]Task, s.Workers) + + var taskCount int64 + for _, t := range batch.Tasks { + // TODO: introduce other partitioning strategies if needed + slot := t.Key() % uint64(s.Workers) + partitions[slot] = 
append(partitions[slot], t) + taskCount++ + } + + // compress the tasks before sending them to the workers + // i.e. group consecutive tasks with the same operation as a single task + for i := range partitions { + partitions[i] = s.compressTasks(partitions[i]) + } + + // keep track of the number of active tasks + // for this chunk to remove it when all tasks are done + var counter int + for i, partition := range partitions { + if len(partition) == 0 { + continue + } + q.m.Lock() + counter++ + q.m.Unlock() + + // increment the global active tasks counter + s.activeTasks.Incr() + // increment the queue's active tasks counter + q.activeTasks.Incr() + + start := time.Now() + + select { + case <-s.ctx.Done(): + s.activeTasks.Decr() + q.activeTasks.Decr() + return taskCount, nil + case <-q.ctx.Done(): + s.activeTasks.Decr() + q.activeTasks.Decr() + return taskCount, nil + case s.chans[i] <- &Batch{ + Tasks: partitions[i], + Ctx: q.ctx, + onDone: func() { + defer q.q.Metrics().TasksProcessed(start, int(taskCount)) + defer q.activeTasks.Decr() + defer s.activeTasks.Decr() + + q.m.Lock() + counter-- + c := counter + q.m.Unlock() + if c == 0 { + // It is important to unlock the queue here + // to avoid a deadlock when the last worker calls Done. + batch.Done() + s.Logger. + WithField("queue_id", q.q.ID()). + WithField("queue_size", q.q.Size()). + WithField("count", taskCount). + Debug("tasks processed") + } + }, + onCanceled: func() { + q.activeTasks.Decr() + s.activeTasks.Decr() + }, + }: + } + } + + s.logQueueStats(q.q, taskCount) + + return taskCount, nil +} + +func (s *Scheduler) logQueueStats(q Queue, tasksDequeued int64) { + s.Logger. + WithField("queue_id", q.ID()). + WithField("queue_size", q.Size()). + WithField("count", tasksDequeued). 
+ Debug("processing tasks") +} + +func (s *Scheduler) compressTasks(tasks []Task) []Task { + if len(tasks) == 0 { + return tasks + } + grouper, ok := tasks[0].(TaskGrouper) + if !ok { + return tasks + } + + var cur uint8 + var group []Task + var compressed []Task + + for i, t := range tasks { + if i == 0 { + cur = t.Op() + group = append(group, t) + continue + } + + if t.Op() == cur { + group = append(group, t) + continue + } + + compressed = append(compressed, grouper.NewGroup(cur, group...)) + + cur = t.Op() + group = []Task{t} + } + + compressed = append(compressed, grouper.NewGroup(cur, group...)) + + return compressed +} + +type queueState struct { + m sync.RWMutex + q Queue + paused bool + scheduled *common.SharedGauge + activeTasks *common.SharedGauge + ctx context.Context + cancelFn context.CancelFunc +} + +func newQueueState(ctx context.Context, q Queue) *queueState { + qs := queueState{ + q: q, + scheduled: common.NewSharedGauge(), + activeTasks: common.NewSharedGauge(), + } + + if ctx != nil { + qs.ctx, qs.cancelFn = context.WithCancel(ctx) + } + + return &qs +} + +func (qs *queueState) Paused() bool { + qs.m.RLock() + defer qs.m.RUnlock() + + return qs.paused +} + +func (qs *queueState) Scheduled() bool { + return qs.scheduled.Count() > 0 +} + +func (qs *queueState) MarkAsScheduled() { + qs.scheduled.Incr() +} + +func (qs *queueState) MarkAsUnscheduled() { + qs.scheduled.Decr() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/queue/scheduler_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/queue/scheduler_test.go new file mode 100644 index 0000000000000000000000000000000000000000..9299c54483fa64011cbcccc290741c4833f28f74 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/queue/scheduler_test.go @@ -0,0 +1,485 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// 
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package queue + +import ( + "context" + "encoding/binary" + "os" + "slices" + "testing" + "time" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/repos/db/vector/common" +) + +func TestScheduler(t *testing.T) { + t.Run("start and close", func(t *testing.T) { + s := makeScheduler(t) + s.Start() + time.Sleep(100 * time.Millisecond) + err := s.Close() + require.NoError(t, err) + }) + + t.Run("start twice", func(t *testing.T) { + s := makeScheduler(t) + s.Start() + s.Start() + err := s.Close() + require.NoError(t, err) + }) + + t.Run("commands before start", func(t *testing.T) { + s := makeScheduler(t) + err := s.Close() + require.NoError(t, err) + s.PauseQueue("test") + s.ResumeQueue("test") + s.Wait("test") + }) + + t.Run("paused queue should not process tasks", func(t *testing.T) { + s := makeScheduler(t) + s.Start() + + ch, e := streamExecutor() + q := makeQueue(t, s, e) + + pushMany(t, q, 1, 100, 200, 300) + + require.EqualValues(t, 100, <-ch) + require.EqualValues(t, 200, <-ch) + require.EqualValues(t, 300, <-ch) + + s.PauseQueue(q.ID()) + + pushMany(t, q, 1, 400, 500, 600) + + select { + case <-ch: + t.Fatal("should not have been called") + case <-time.After(500 * time.Millisecond): + } + + s.ResumeQueue(q.ID()) + + require.EqualValues(t, 400, <-ch) + require.EqualValues(t, 500, <-ch) + require.EqualValues(t, 600, <-ch) + + err := q.Close() + require.NoError(t, err) + }) + + t.Run("chunk files are removed properly", func(t *testing.T) { + s := makeScheduler(t, 3) + s.Start() + + ch, e := streamExecutor() + q := makeQueue(t, s, e) + + pushMany(t, q, 1, 100, 200, 300) + + res := make([]uint64, 3) + res[0] = <-ch + res[1] = <-ch + res[2] = <-ch + slices.Sort(res) + require.Equal(t, []uint64{100, 200, 300}, res) + + time.Sleep(100 * time.Millisecond) + + entries, err := 
os.ReadDir(q.dir) + require.NoError(t, err) + require.Len(t, entries, 0) + + err = q.Close() + require.NoError(t, err) + }) + + t.Run("chunks are processed in order", func(t *testing.T) { + s := makeScheduler(t, 1) + s.Start() + + ch, e := streamExecutor() + q := makeQueue(t, s, e) + // override chunk size for testing + q.w.maxSize = 9000 + + // consume the channel in a separate goroutine + var res []uint64 + done := make(chan struct{}) + go func() { + defer close(done) + + for i := range ch { + res = append(res, i) + } + }() + + for i := 0; i < 10; i++ { + var batch []uint64 + for j := 0; j < 1000; j++ { + batch = append(batch, uint64(i*1000+j)) + } + + pushMany(t, q, 1, batch...) + } + + for i := 0; i < 10; i++ { + if q.Size() == 0 { + break + } + + time.Sleep(100 * time.Millisecond) + } + require.Zero(t, q.Size()) + + close(ch) + <-done + + for i := 0; i < 10000; i++ { + require.EqualValues(t, i, res[i]) + } + + entries, err := os.ReadDir(q.dir) + require.NoError(t, err) + require.Len(t, entries, 0) + + err = q.Close() + require.NoError(t, err) + }) + + t.Run("chunk is promoted if full", func(t *testing.T) { + s := makeScheduler(t, 1) + s.Start() + + _, e := streamExecutor() + q := makeQueue(t, s, e) + q.staleTimeout = 1 * time.Second + + // override chunk size for testing + q.w.maxSize = 90 + + var batch []uint64 + for i := 0; i < 11; i++ { + batch = append(batch, uint64(i)) + } + pushMany(t, q, 1, batch...) + + entries, err := os.ReadDir(q.dir) + require.NoError(t, err) + require.Len(t, entries, 2) + + err = q.Close() + require.NoError(t, err) + }) + + t.Run("does not read partial chunk", func(t *testing.T) { + s := makeScheduler(t, 1) + s.Start() + + ch, e := streamExecutor() + q := makeQueue(t, s, e) + q.staleTimeout = 1 * time.Second + + var batch []uint64 + for i := 0; i < 10; i++ { + batch = append(batch, uint64(i)) + } + pushMany(t, q, 1, batch...) 
+ + entries, err := os.ReadDir(q.dir) + require.NoError(t, err) + require.Len(t, entries, 1) + + select { + case <-time.After(500 * time.Millisecond): + case <-ch: + t.Fatal("should not have been called") + } + + err = q.Close() + require.NoError(t, err) + }) + + t.Run("invalid tasks", func(t *testing.T) { + s := makeScheduler(t, 1) + s.ScheduleInterval = 200 * time.Millisecond + s.RetryInterval = 100 * time.Millisecond + s.Start() + + called := make(map[uint64]int) + + started := make(chan struct{}) + e := mockTaskDecoder{ + execFn: func(ctx context.Context, t *mockTask) error { + if t.key == 0 { + close(started) + } + + called[t.key]++ + if t.key == 3 { + return errors.New("invalid task") + } + + return nil + }, + } + + q := makeQueue(t, s, &e) + + var batch []uint64 + for i := 0; i < 30; i++ { + batch = append(batch, uint64(i)) + } + pushMany(t, q, 1, batch...) + + s.Schedule(t.Context()) + <-started + s.Wait(q.ID()) + + for i := 0; i < 30; i++ { + if i == 3 { + require.Equal(t, 3, called[uint64(i)]) + continue + } + + require.Equal(t, 1, called[uint64(i)], "task %d should have been executed once", i) + } + + err := q.Close() + require.NoError(t, err) + }) + + t.Run("transient error", func(t *testing.T) { + s := makeScheduler(t, 1) + s.ScheduleInterval = 200 * time.Millisecond + s.RetryInterval = 100 * time.Millisecond + s.Start() + + called := make(map[uint64]int) + + started := make(chan struct{}) + e := mockTaskDecoder{ + execFn: func(ctx context.Context, t *mockTask) error { + if t.key == 0 { + close(started) + } + + called[t.key]++ + if t.key == 3 && called[t.key] < 3 { + return errors.New("invalid task") + } + + return nil + }, + } + + q := makeQueue(t, s, &e) + + var batch []uint64 + for i := 0; i < 30; i++ { + batch = append(batch, uint64(i)) + } + pushMany(t, q, 1, batch...) 
+ + s.Schedule(t.Context()) + <-started + s.Wait(q.ID()) + + for i := 0; i < 30; i++ { + if i == 3 { + require.Equal(t, 3, called[uint64(i)]) + continue + } + + require.Equal(t, 1, called[uint64(i)], "task %d should have been executed once", i) + } + + err := q.Close() + require.NoError(t, err) + }) + + t.Run("permanent error", func(t *testing.T) { + s := makeScheduler(t, 1) + s.ScheduleInterval = 200 * time.Millisecond + s.RetryInterval = 100 * time.Millisecond + s.Start() + + called := make(map[uint64]int) + + started := make(chan struct{}) + e := mockTaskDecoder{ + execFn: func(ctx context.Context, t *mockTask) error { + if t.key == 0 { + close(started) + } + + called[t.key]++ + if t.key == 3 { + return common.ErrWrongDimensions + } + + return nil + }, + } + + q := makeQueue(t, s, &e) + + var batch []uint64 + for i := 0; i < 30; i++ { + batch = append(batch, uint64(i)) + } + pushMany(t, q, 1, batch...) + + s.Schedule(t.Context()) + <-started + s.Wait(q.ID()) + + for i := 0; i < 30; i++ { + require.Equal(t, 1, called[uint64(i)], "task %d should have been executed once", i) + } + + err := q.Close() + require.NoError(t, err) + }) +} + +func makeScheduler(t testing.TB, workers ...int) *Scheduler { + t.Helper() + + logger := logrus.New() + logger.SetLevel(logrus.DebugLevel) + + w := 1 + if len(workers) > 0 { + w = workers[0] + } + + return NewScheduler(SchedulerOptions{ + Logger: logger, + Workers: w, + ScheduleInterval: 50 * time.Millisecond, + RetryInterval: 100 * time.Millisecond, + }) +} + +func makeQueueWith(t *testing.T, s *Scheduler, decoder TaskDecoder, chunkSize uint64, dir string) *DiskQueue { + t.Helper() + + logger := logrus.New() + logger.SetLevel(logrus.DebugLevel) + + q, err := NewDiskQueue(DiskQueueOptions{ + ID: "test_queue", + Scheduler: s, + Logger: newTestLogger(), + Dir: dir, + TaskDecoder: decoder, + StaleTimeout: 500 * time.Millisecond, + ChunkSize: chunkSize, + }) + require.NoError(t, err) + + err = q.Init() + require.NoError(t, err) + + 
s.RegisterQueue(q) + + return q +} + +func makeQueueSize(t *testing.T, s *Scheduler, decoder TaskDecoder, chunkSize uint64) *DiskQueue { + return makeQueueWith(t, s, decoder, chunkSize, t.TempDir()) +} + +func makeQueue(t *testing.T, s *Scheduler, decoder TaskDecoder) *DiskQueue { + return makeQueueSize(t, s, decoder, 0) +} + +func makeRecord(op uint8, id uint64) []byte { + buf := make([]byte, 9) + buf[0] = op + binary.BigEndian.PutUint64(buf[1:], id) + return buf +} + +func pushMany(t testing.TB, q *DiskQueue, op uint8, ids ...uint64) { + t.Helper() + + for _, id := range ids { + err := q.Push(makeRecord(op, id)) + require.NoError(t, err) + } + + err := q.Flush() + require.NoError(t, err) +} + +func streamExecutor() (chan uint64, *mockTaskDecoder) { + ch := make(chan uint64) + + return ch, &mockTaskDecoder{ + execFn: func(ctx context.Context, t *mockTask) error { + ch <- t.key + return nil + }, + } +} + +func discardExecutor() *mockTaskDecoder { + return &mockTaskDecoder{ + execFn: func(ctx context.Context, t *mockTask) error { + return nil + }, + } +} + +type mockTaskDecoder struct { + execFn func(context.Context, *mockTask) error +} + +func (m *mockTaskDecoder) DecodeTask(data []byte) (Task, error) { + t := mockTask{ + op: data[0], + key: binary.BigEndian.Uint64(data[1:]), + } + + t.execFn = func(ctx context.Context) error { + return m.execFn(ctx, &t) + } + + return &t, nil +} + +type mockTask struct { + op uint8 + key uint64 + execFn func(context.Context) error +} + +func (m *mockTask) Op() uint8 { + return m.op +} + +func (m *mockTask) Key() uint64 { + return m.key +} + +func (m *mockTask) Execute(ctx context.Context) error { + return m.execFn(ctx) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/queue/task.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/queue/task.go new file mode 100644 index 0000000000000000000000000000000000000000..02cd160f87f1888cd3e4a4709def6fab0072b36c --- /dev/null +++ 
//                           _       _
// __      _____  __ ___   ___  __ _| |_ ___
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
//  \ V  V /  __/ (_| |\ V /| | (_| | ||  __/
//   \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
//
//  Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
//
//  CONTACT: hello@weaviate.io
//

// Task is a single unit of queued work.
type Task interface {
	// Op identifies the kind of operation the task performs.
	Op() uint8
	// Key identifies the record the task operates on.
	Key() uint64
	// Execute runs the task; implementations should honor ctx cancellation.
	Execute(ctx context.Context) error
}

// TaskGrouper combines several tasks sharing an operation into a single task.
type TaskGrouper interface {
	NewGroup(op uint8, tasks ...Task) Task
}

// Batch is a set of tasks that are dispatched together. Exactly one of
// Done or Cancel is expected to be called once processing finishes.
type Batch struct {
	Tasks      []Task
	Ctx        context.Context
	onDone     func()
	onCanceled func()
	once       sync.Once
}

// Done signals successful processing. The onDone callback runs at most
// once, no matter how many times Done is invoked.
func (b *Batch) Done() {
	if b.onDone == nil {
		return
	}

	b.once.Do(b.onDone)
}

// Cancel signals that processing was aborted. Unlike Done, the callback
// is not guarded by the once and runs on every call.
func (b *Batch) Cancel() {
	if b.onCanceled == nil {
		return
	}

	b.onCanceled()
}

// MergeBatches merges multiple batches into a single batch.
// It will ignore nil batches.
// It will execute the onDone and onCanceled functions of all batches.
func MergeBatches(batches ...*Batch) *Batch {
	// pre-compute the total task count so the merged slice is allocated once
	var total int
	for _, b := range batches {
		if b != nil {
			total += len(b.Tasks)
		}
	}

	merged := make([]Task, 0, total)
	doneFns := make([]func(), 0, len(batches))
	cancelFns := make([]func(), 0, len(batches))

	for _, b := range batches {
		if b == nil {
			continue
		}

		if len(b.Tasks) > 0 {
			merged = append(merged, b.Tasks...)
		}
		if b.onDone != nil {
			doneFns = append(doneFns, b.onDone)
		}
		if b.onCanceled != nil {
			cancelFns = append(cancelFns, b.onCanceled)
		}
	}

	// the merged batch fans out its callbacks to every source batch
	return &Batch{
		Tasks: merged,
		onDone: func() {
			for _, fn := range doneFns {
				fn()
			}
		},
		onCanceled: func() {
			for _, fn := range cancelFns {
				fn()
			}
		},
	}
}

// TaskDecoder turns a serialized record back into an executable Task.
type TaskDecoder interface {
	DecodeTask([]byte) (Task, error)
}
+// +// CONTACT: hello@weaviate.io +// + +package queue + +import ( + "context" + "errors" + "time" + + "github.com/sirupsen/logrus" + "github.com/weaviate/weaviate/adapters/repos/db/vector/common" +) + +const ( + maxRetry = 3 +) + +type Worker struct { + logger logrus.FieldLogger + retryInterval time.Duration + ch chan *Batch +} + +func NewWorker(logger logrus.FieldLogger, retryInterval time.Duration) (*Worker, chan *Batch) { + ch := make(chan *Batch) + + return &Worker{ + logger: logger, + retryInterval: retryInterval, + ch: ch, + }, ch +} + +func (w *Worker) Run(ctx context.Context) { + for { + select { + case <-ctx.Done(): + return + case batch := <-w.ch: + _ = w.do(batch) + } + } +} + +func (w *Worker) do(batch *Batch) (err error) { + defer func() { + if err != nil { + batch.Cancel() + } else { + batch.Done() + } + }() + + attempts := 1 + + // keep track of failed tasks + var failed []Task + var errs []error + + for { + tasks := batch.Tasks + + if len(failed) > 0 { + tasks = failed + failed = nil // reset failed tasks for the next iteration + errs = nil + } + + for i, t := range tasks { + err = t.Execute(batch.Ctx) + // check if the full batch was canceled + if errors.Is(err, context.Canceled) { + return err + } + if errors.Is(err, common.ErrWrongDimensions) { + w.logger. + WithError(err). + Error("task failed due to wrong dimensions, discarding") + continue // skip this task + } + + // if the task failed, add it to the failed list + if err != nil { + errs = append(errs, err) + failed = append(failed, tasks[i]) + } + } + + if len(failed) == 0 { + return nil // all tasks succeeded + } + + if attempts >= maxRetry { + w.logger. + WithError(errors.Join(errs...)). + WithField("failed", len(failed)). + WithField("attempts", attempts). + Error("failed to process task, discarding") + return nil + } + + w.logger. + WithError(errors.Join(errs...)). + WithField("failed", len(failed)). + WithField("attempts", attempts). 
+ Infof("failed to process task, retrying in %s", w.retryInterval.String()) + attempts++ + + t := time.NewTimer(w.retryInterval) + select { + case <-batch.Ctx.Done(): + // drain the timer + if !t.Stop() { + <-t.C + } + + return batch.Ctx.Err() + case <-t.C: + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/refcache/cacher.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/refcache/cacher.go new file mode 100644 index 0000000000000000000000000000000000000000..15809239ab2d45e0d8a6bb058800511dba745e83 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/refcache/cacher.go @@ -0,0 +1,406 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package refcache + +import ( + "context" + "fmt" + "sync" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/multi" + "github.com/weaviate/weaviate/entities/schema/crossref" + "github.com/weaviate/weaviate/entities/search" +) + +type repo interface { + MultiGet(ctx context.Context, query []multi.Identifier, + additional additional.Properties, tenant string) ([]search.Result, error) +} + +func NewCacher(repo repo, logger logrus.FieldLogger, tenant string) *Cacher { + return &Cacher{ + logger: logger, + repo: repo, + store: map[multi.Identifier]search.Result{}, + tenant: tenant, + } +} + +type cacherJob struct { + si multi.Identifier + props search.SelectProperties + complete bool +} + +type Cacher struct { + sync.Mutex + jobs []cacherJob + logger logrus.FieldLogger + repo repo + store map[multi.Identifier]search.Result + additional additional.Properties // meta is immutable for the lifetime of 
the request cacher, so we can safely store it + tenant string + groupByProps search.SelectProperties +} + +func (c *Cacher) Get(si multi.Identifier) (search.Result, bool) { + sr, ok := c.store[si] + return sr, ok +} + +// Build builds the lookup cache recursively and tries to be smart about it. This +// means that it aims to use only a single (multiget) transaction per layer. +// The recursion exit condition is jobs marked as done. At some point +// the cacher will realise that for every nested prop there is already a +// complete job, so it it stop the recursion. +// +// build is called on a "level" i.e. the search result. After working +// on the job list for the first time if the resolved items still contain +// references and the user set the SelectProperty to indicate they want to +// resolve them, build is called again on all the results (plural!) from the +// previous run. We thus end up with one request to the backend per level +// regardless of the amount of lookups per level. +// +// This keeps request times to a minimum even on deeply nested requests. +func (c *Cacher) Build(ctx context.Context, objects []search.Result, + properties search.SelectProperties, additional additional.Properties, groupByProperties search.SelectProperties, +) error { + c.additional = additional + c.groupByProps = groupByProperties + err := c.findJobsFromResponse(objects, properties) + if err != nil { + return fmt.Errorf("build request cache: %w", err) + } + + c.dedupJobList() + err = c.fetchJobs(ctx) + if err != nil { + return fmt.Errorf("build request cache: %w", err) + } + + return nil +} + +// A response is a []search.Result which has all primitive props parsed (and +// even ref-beacons parsed into their respective types, but not resolved!) +// findJobsFromResponse will traverse through it and check if there are +// references. 
In a recursive lookup this can both be done on the rootlevel to +// start the first lookup as well as recursively on the results of a lookup to +// further look if a next-level call is required. +func (c *Cacher) findJobsFromResponse(objects []search.Result, properties search.SelectProperties) error { + for _, obj := range objects { + var err error + + // we can only set SelectProperties on the rootlevel since this is the only + // place where we have a single root class. In nested lookups we need to + // first identify the correct path in the SelectProperties graph which + // correspends with the path we're currently traversing through. Thus we + // always cache the original SelectProps with the job. This call goes + // through the job history and looks up the correct SelectProperties + // subpath to use in this place. + // tl;dr: On root level (root=base) take props from the outside, on a + // nested level lookup the SelectProps matching the current base element + propertiesReplaced, err := c.ReplaceInitialPropertiesWithSpecific(obj, properties) + if err != nil { + return err + } + + if obj.Schema == nil { + return nil + } + + schemaMap, ok := obj.Schema.(map[string]interface{}) + if !ok { + return fmt.Errorf("object schema is present, but not a map: %T", obj) + } + + if err := c.parseSchemaMap(schemaMap, propertiesReplaced); err != nil { + return err + } + + if c.groupByProps != nil { + if err := c.parseAdditionalGroup(obj); err != nil { + return err + } + } + } + + return nil +} + +func (c *Cacher) parseAdditionalGroup(obj search.Result) error { + if obj.AdditionalProperties != nil && obj.AdditionalProperties["group"] != nil { + if group, ok := obj.AdditionalProperties["group"].(*additional.Group); ok { + for _, hitMap := range group.Hits { + if err := c.parseSchemaMap(hitMap, c.groupByProps); err != nil { + return err + } + } + } + } + return nil +} + +func (c *Cacher) parseSchemaMap(schemaMap map[string]interface{}, propertiesReplaced search.SelectProperties) 
error { + for key, value := range schemaMap { + selectProp := propertiesReplaced.FindProperty(key) + skip, unresolved := c.skipProperty(key, value, selectProp) + if skip { + continue + } + + for _, selectPropRef := range selectProp.Refs { + innerProperties := selectPropRef.RefProperties + + for _, item := range unresolved { + ref, err := c.extractAndParseBeacon(item) + if err != nil { + return err + } + c.addJob(multi.Identifier{ + ID: ref.TargetID.String(), + ClassName: selectPropRef.ClassName, + }, innerProperties) + } + } + } + return nil +} + +func (c *Cacher) skipProperty(key string, value interface{}, selectProp *search.SelectProperty) (bool, models.MultipleRef) { + // the cacher runs at a point where primitive props have already been + // parsed, so we can simply look for parsed, but not resolved refenereces + parsed, ok := value.(models.MultipleRef) + if !ok { + // must be another kind of prop, not interesting for us + return true, nil + } + + if selectProp == nil { + // while we did hit a ref propr, the user is not interested in resolving + // this prop + return true, nil + } + + return false, parsed +} + +func (c *Cacher) extractAndParseBeacon(item *models.SingleRef) (*crossref.Ref, error) { + return crossref.Parse(item.Beacon.String()) +} + +func (c *Cacher) ReplaceInitialPropertiesWithSpecific(obj search.Result, + properties search.SelectProperties, +) (search.SelectProperties, error) { + if properties != nil { + // don't overwrite the properties if the caller has explicitly set them, + // this can only mean they're at the root level + return properties, nil + } + + // this is a nested level, we cannot rely on global initialSelectProperties + // anymore, instead we need to find the selectProperties for exactly this + // ID + job, ok := c.findJob(multi.Identifier{ + ID: obj.ID.String(), + ClassName: obj.ClassName, + }) + if ok { + return job.props, nil + } + + return properties, nil +} + +func (c *Cacher) addJob(si multi.Identifier, props 
search.SelectProperties) { + c.jobs = append(c.jobs, cacherJob{si, props, false}) +} + +func (c *Cacher) findJob(si multi.Identifier) (cacherJob, bool) { + for _, job := range c.jobs { + if job.si == si { + return job, true + } + } + + return cacherJob{}, false +} + +// finds incompleteJobs without altering the original job list +func (c *Cacher) incompleteJobs() []cacherJob { + out := make([]cacherJob, len(c.jobs)) + n := 0 + for _, job := range c.jobs { + if !job.complete { + out[n] = job + n++ + } + } + + return out[:n] +} + +// finds complete jobs without altering the original job list +func (c *Cacher) completeJobs() []cacherJob { + out := make([]cacherJob, len(c.jobs)) + n := 0 + for _, job := range c.jobs { + if job.complete { + out[n] = job + n++ + } + } + + return out[:n] +} + +// alters the list, removes duplicates. +func (c *Cacher) dedupJobList() { + incompleteJobs := c.incompleteJobs() + before := len(incompleteJobs) + if before == 0 { + // nothing to do + return + } + + c.logger. + WithFields(logrus.Fields{ + "action": "request_cacher_dedup_joblist_start", + "jobs": before, + }). + Debug("starting job list deduplication") + deduped := make([]cacherJob, len(incompleteJobs)) + found := map[multi.Identifier]struct{}{} + + // don't look up refs that are already completed - this can for example happen with cyclic refs + for _, job := range c.completeJobs() { + found[job.si] = struct{}{} + } + + n := 0 + for _, job := range incompleteJobs { + if _, ok := found[job.si]; ok { + continue + } + + found[job.si] = struct{}{} + deduped[n] = job + n++ + } + + c.jobs = append(c.completeJobs(), deduped[:n]...) + + c.logger. + WithFields(logrus.Fields{ + "action": "request_cacher_dedup_joblist_complete", + "jobs": n, + "removedJobs": before - n, + }). 
+ Debug("completed job list deduplication") +} + +func (c *Cacher) fetchJobs(ctx context.Context) error { + jobs := c.incompleteJobs() + if len(jobs) == 0 { + c.logSkipFetchJobs() + return nil + } + + query := jobListToMultiGetQuery(jobs) + res, err := c.repo.MultiGet(ctx, query, c.additional, c.tenant) + if err != nil { + return errors.Wrap(err, "fetch job list") + } + + return c.parseAndStore(ctx, res) +} + +func (c *Cacher) logSkipFetchJobs() { + c.logger. + WithFields( + logrus.Fields{ + "action": "request_cacher_fetch_jobs_skip", + }). + Trace("skip fetch jobs, have no incomplete jobs") +} + +// parseAndStore parses the results for nested refs. Since it is already a +// []search.Result no other parsing is required, as we can expect this type to +// have all primitive props parsed correctly +// +// If nested refs are found, the recursion is started. +// +// Once no more nested refs can be found, the recursion triggers its exit +// condition and all jobs are stored. +func (c *Cacher) parseAndStore(ctx context.Context, res []search.Result) error { + // mark all current jobs as done, as we use the amount of incomplete jobs as + // the exit condition for the recursion. Next up, we will start a nested + // Build() call. If the Build call returns no new jobs, we are done and the + // recursion stops. 
If it does return more jobs, we will enter a nested + // iteration which will eventually come to this place again + c.markAllJobsAsDone() + + err := c.Build(ctx, removeEmptyResults(res), nil, c.additional, nil) + if err != nil { + return errors.Wrap(err, "build nested cache") + } + + err = c.storeResults(res) + if err != nil { + return err + } + + return nil +} + +func removeEmptyResults(in []search.Result) []search.Result { + out := make([]search.Result, len(in)) + n := 0 + for _, obj := range in { + if obj.ID != "" { + out[n] = obj + n++ + } + } + + return out[0:n] +} + +func (c *Cacher) storeResults(res search.Results) error { + for _, item := range res { + c.store[multi.Identifier{ + ID: item.ID.String(), + ClassName: item.ClassName, + }] = item + } + + return nil +} + +func (c *Cacher) markAllJobsAsDone() { + for i := range c.jobs { + c.jobs[i].complete = true + } +} + +func jobListToMultiGetQuery(jobs []cacherJob) []multi.Identifier { + query := make([]multi.Identifier, len(jobs)) + for i, job := range jobs { + query[i] = job.si + } + + return query +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/refcache/cacher_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/refcache/cacher_test.go new file mode 100644 index 0000000000000000000000000000000000000000..c0254c001acc67c7b090f7af9492c85494ddc081 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/refcache/cacher_test.go @@ -0,0 +1,1062 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package refcache + +import ( + "context" + "fmt" + "testing" + + "github.com/go-openapi/strfmt" + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/multi" + "github.com/weaviate/weaviate/entities/search" +) + +func TestCacher(t *testing.T) { + // some ids to be used in the tests, they carry no meaning outside each test + id1 := "132bdf92-ffec-4a52-9196-73ea7cbb5a5e" + id2 := "a60a26dc-791a-41fc-8dda-c0f21f90cc98" + id3 := "a60a26dc-791a-41fc-8dda-c0f21f90cc99" + id4 := "a60a26dc-791a-41fc-8dda-c0f21f90cc97" + + t.Run("with empty results", func(t *testing.T) { + repo := newFakeRepo() + logger, _ := test.NewNullLogger() + cr := NewCacher(repo, logger, "") + err := cr.Build(context.Background(), nil, nil, additional.Properties{}, nil) + assert.Nil(t, err) + }) + + t.Run("with results with nil-schemas", func(t *testing.T) { + repo := newFakeRepo() + logger, _ := test.NewNullLogger() + cr := NewCacher(repo, logger, "") + input := []search.Result{ + { + ID: "foo", + ClassName: "BestClass", + }, + } + err := cr.Build(context.Background(), input, nil, additional.Properties{}, nil) + assert.Nil(t, err) + }) + + t.Run("with results without refs in the schema", func(t *testing.T) { + repo := newFakeRepo() + logger, _ := test.NewNullLogger() + cr := NewCacher(repo, logger, "") + input := []search.Result{ + { + ID: "foo", + ClassName: "BestClass", + Schema: map[string]interface{}{ + "foo": "bar", + "baz": &models.PhoneNumber{}, + }, + }, + } + err := cr.Build(context.Background(), input, nil, additional.Properties{}, nil) + assert.Nil(t, err) + }) + + t.Run("with a single ref, but no selectprops", func(t *testing.T) { + repo := newFakeRepo() + logger, _ := test.NewNullLogger() + cr := NewCacher(repo, logger, "") + input := 
[]search.Result{ + { + ID: "foo", + ClassName: "BestClass", + Schema: map[string]interface{}{ + "refProp": models.MultipleRef{ + &models.SingleRef{ + Beacon: "weaviate://localhost/123", + }, + }, + }, + }, + } + err := cr.Build(context.Background(), input, nil, additional.Properties{}, nil) + require.Nil(t, err) + _, ok := cr.Get(multi.Identifier{ID: "123", ClassName: "SomeClass"}) + assert.False(t, ok) + }) + + t.Run("with a single ref, and a matching select prop", func(t *testing.T) { + repo := newFakeRepo() + repo.lookup[multi.Identifier{ID: id1, ClassName: "SomeClass"}] = search.Result{ + ClassName: "SomeClass", + ID: strfmt.UUID(id1), + Schema: map[string]interface{}{ + "bar": "some string", + }, + } + logger, _ := test.NewNullLogger() + cr := NewCacher(repo, logger, "") + input := []search.Result{ + { + ID: "foo", + ClassName: "BestClass", + Schema: map[string]interface{}{ + "refProp": models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI(fmt.Sprintf("weaviate://localhost/%s", id1)), + }, + }, + }, + }, + } + selectProps := search.SelectProperties{ + search.SelectProperty{ + Name: "refProp", + Refs: []search.SelectClass{ + { + ClassName: "SomeClass", + RefProperties: search.SelectProperties{ + search.SelectProperty{ + Name: "bar", + IsPrimitive: true, + }, + }, + }, + }, + }, + } + + expected := search.Result{ + ID: strfmt.UUID(id1), + ClassName: "SomeClass", + Schema: map[string]interface{}{ + "bar": "some string", + }, + } + + err := cr.Build(context.Background(), input, selectProps, additional.Properties{}, nil) + require.Nil(t, err) + res, ok := cr.Get(multi.Identifier{ID: id1, ClassName: "SomeClass"}) + require.True(t, ok) + assert.Equal(t, expected, res) + assert.Equal(t, 1, repo.counter, "required the expected amount of lookups") + }) + + t.Run("with a nested lookup, partially resolved", func(t *testing.T) { + repo := newFakeRepo() + repo.lookup[multi.Identifier{ID: id1, ClassName: "SomeClass"}] = search.Result{ + ClassName: "SomeClass", + ID: 
strfmt.UUID(id1), + Schema: map[string]interface{}{ + "primitive": "foobar", + "ignoredRef": models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI("weaviate://localhost/ignoreMe"), + }, + }, + "nestedRef": models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI(fmt.Sprintf("weaviate://localhost/%s", id2)), + }, + }, + }, + } + repo.lookup[multi.Identifier{ID: id2, ClassName: "SomeNestedClass"}] = search.Result{ + ClassName: "SomeNestedClass", + ID: strfmt.UUID(id2), + Schema: map[string]interface{}{ + "name": "John Doe", + }, + } + logger, _ := test.NewNullLogger() + cr := NewCacher(repo, logger, "") + input := []search.Result{ + { + ID: "foo", + ClassName: "BestClass", + Schema: map[string]interface{}{ + "refProp": models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI(fmt.Sprintf("weaviate://localhost/%s", id1)), + }, + }, + }, + }, + } + selectProps := search.SelectProperties{ + search.SelectProperty{ + Name: "refProp", + Refs: []search.SelectClass{ + { + ClassName: "SomeClass", + RefProperties: search.SelectProperties{ + search.SelectProperty{ + Name: "primitive", + IsPrimitive: true, + }, + search.SelectProperty{ + Name: "nestedRef", + Refs: []search.SelectClass{ + { + ClassName: "SomeNestedClass", + RefProperties: []search.SelectProperty{ + { + Name: "name", + IsPrimitive: true, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + expectedOuter := search.Result{ + ID: strfmt.UUID(id1), + ClassName: "SomeClass", + Schema: map[string]interface{}{ + "primitive": "foobar", + "ignoredRef": models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI("weaviate://localhost/ignoreMe"), + }, + }, + "nestedRef": models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI(fmt.Sprintf("weaviate://localhost/%s", id2)), + }, + }, + }, + } + + expectedInner := search.Result{ + ClassName: "SomeNestedClass", + ID: strfmt.UUID(id2), + Schema: map[string]interface{}{ + "name": "John Doe", + }, + } + + err := cr.Build(context.Background(), input, 
selectProps, additional.Properties{}, nil) + require.Nil(t, err) + res, ok := cr.Get(multi.Identifier{ID: id1, ClassName: "SomeClass"}) + require.True(t, ok) + assert.Equal(t, expectedOuter, res) + res, ok = cr.Get(multi.Identifier{ID: id2, ClassName: "SomeNestedClass"}) + require.True(t, ok) + assert.Equal(t, expectedInner, res) + assert.Equal(t, 2, repo.counter, "required the expected amount of lookups") + }) + + t.Run("with multiple items pointing to the same ref", func(t *testing.T) { + // this test asserts that we do not make unnecessary requests if an object + // is linked twice on the list. (This is very common if the reference is + // used for something like a product category, e.g. it would not be + // uncommon at all if all search results are of the same category) + repo := newFakeRepo() + repo.lookup[multi.Identifier{ID: id1, ClassName: "SomeClass"}] = search.Result{ + ClassName: "SomeClass", + ID: strfmt.UUID(id1), + Schema: map[string]interface{}{ + "primitive": "foobar", + "nestedRef": models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI(fmt.Sprintf("weaviate://localhost/%s", id2)), + }, + }, + }, + } + repo.lookup[multi.Identifier{ID: id2, ClassName: "SomeNestedClass"}] = search.Result{ + ClassName: "SomeNestedClass", + ID: strfmt.UUID(id2), + Schema: map[string]interface{}{ + "name": "John Doe", + }, + } + logger, _ := test.NewNullLogger() + cr := NewCacher(repo, logger, "") + + // contains three items, all pointing to the same inner class + input := []search.Result{ + { + ID: "foo", + ClassName: "BestClass", + Schema: map[string]interface{}{ + "refProp": models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI(fmt.Sprintf("weaviate://localhost/%s", id1)), + }, + }, + }, + }, + { + ID: "bar", + ClassName: "BestClass", + Schema: map[string]interface{}{ + "refProp": models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI(fmt.Sprintf("weaviate://localhost/%s", id1)), + }, + }, + }, + }, + { + ID: "baz", + ClassName: "BestClass", + 
Schema: map[string]interface{}{ + "refProp": models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI(fmt.Sprintf("weaviate://localhost/%s", id1)), + }, + }, + }, + }, + } + selectProps := search.SelectProperties{ + search.SelectProperty{ + Name: "refProp", + Refs: []search.SelectClass{ + { + ClassName: "SomeClass", + RefProperties: search.SelectProperties{ + search.SelectProperty{ + Name: "primitive", + IsPrimitive: true, + }, + search.SelectProperty{ + Name: "nestedRef", + Refs: []search.SelectClass{ + { + ClassName: "SomeNestedClass", + RefProperties: []search.SelectProperty{ + { + Name: "name", + IsPrimitive: true, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + expectedOuter := search.Result{ + ID: strfmt.UUID(id1), + ClassName: "SomeClass", + Schema: map[string]interface{}{ + "primitive": "foobar", + "nestedRef": models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI(fmt.Sprintf("weaviate://localhost/%s", id2)), + }, + }, + }, + } + + expectedInner := search.Result{ + ClassName: "SomeNestedClass", + ID: strfmt.UUID(id2), + Schema: map[string]interface{}{ + "name": "John Doe", + }, + } + + err := cr.Build(context.Background(), input, selectProps, additional.Properties{}, nil) + require.Nil(t, err) + res, ok := cr.Get(multi.Identifier{ID: id1, ClassName: "SomeClass"}) + require.True(t, ok) + assert.Equal(t, expectedOuter, res) + res, ok = cr.Get(multi.Identifier{ID: id2, ClassName: "SomeNestedClass"}) + require.True(t, ok) + assert.Equal(t, expectedInner, res) + assert.Equal(t, 2, repo.counter, "required the expected amount of lookup queries") + assert.Equal(t, 2, repo.counter, "required the expected amount of objects on the lookup queries") + }) + + t.Run("with a nested lookup, and nested refs in nested refs", func(t *testing.T) { + repo := newFakeRepo() + idNested2ID := "132bdf92-ffec-4a52-9196-73ea7cbb5a00" + idNestedInNestedID := "132bdf92-ffec-4a52-9196-73ea7cbb5a01" + repo.lookup[multi.Identifier{ID: id1, ClassName: "SomeClass"}] = 
search.Result{ + ClassName: "SomeClass", + ID: strfmt.UUID(id1), + Schema: map[string]interface{}{ + "primitive": "foobar", + "nestedRef": models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI(fmt.Sprintf("weaviate://localhost/%s", id2)), + }, + }, + "nestedRef2": models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI(fmt.Sprintf("weaviate://localhost/%s", idNested2ID)), + Schema: map[string]interface{}{ + "title": "nestedRef2Title", + "nestedRefInNestedRef": models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI(fmt.Sprintf("weaviate://localhost/%s", idNestedInNestedID)), + }, + }, + }, + }, + }, + }, + } + repo.lookup[multi.Identifier{ID: id2, ClassName: "SomeNestedClass"}] = search.Result{ + ClassName: "SomeNestedClass", + ID: strfmt.UUID(id2), + Schema: map[string]interface{}{ + "name": "John Doe", + }, + } + repo.lookup[multi.Identifier{ID: idNested2ID, ClassName: "SomeNestedClass2"}] = search.Result{ + ClassName: "SomeNestedClass2", + ID: strfmt.UUID(idNested2ID), + Schema: map[string]interface{}{ + "title": "nestedRef2Title", + "nestedRefInNestedRef": models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI(fmt.Sprintf("weaviate://localhost/%s", idNestedInNestedID)), + }, + }, + }, + } + repo.lookup[multi.Identifier{ID: idNestedInNestedID, ClassName: "SomeNestedClassNested2"}] = search.Result{ + ClassName: "SomeNestedClassNested2", + ID: strfmt.UUID(idNestedInNestedID), + Schema: map[string]interface{}{ + "titleNested": "Nested In Nested Title", + }, + } + logger, _ := test.NewNullLogger() + cr := NewCacher(repo, logger, "") + input := []search.Result{ + { + ID: "foo", + ClassName: "BestClass", + Schema: map[string]interface{}{ + "refProp": models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI(fmt.Sprintf("weaviate://localhost/%s", id1)), + }, + }, + }, + }, + } + selectProps := search.SelectProperties{ + search.SelectProperty{ + Name: "refProp", + Refs: []search.SelectClass{ + { + ClassName: "SomeClass", + RefProperties: 
search.SelectProperties{ + search.SelectProperty{ + Name: "primitive", + IsPrimitive: true, + }, + search.SelectProperty{ + Name: "nestedRef", + Refs: []search.SelectClass{ + { + ClassName: "SomeNestedClass", + RefProperties: []search.SelectProperty{ + { + Name: "name", + IsPrimitive: true, + }, + }, + }, + }, + }, + search.SelectProperty{ + Name: "nestedRef2", + Refs: []search.SelectClass{ + { + ClassName: "SomeNestedClass2", + RefProperties: []search.SelectProperty{ + { + Name: "title", + IsPrimitive: true, + }, + { + Name: "nestedRefInNestedRef", + Refs: []search.SelectClass{ + { + ClassName: "SomeNestedClassNested2", + RefProperties: []search.SelectProperty{ + { + Name: "titleNested", + IsPrimitive: true, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + expectedOuter := search.Result{ + ID: strfmt.UUID(id1), + ClassName: "SomeClass", + Schema: map[string]interface{}{ + "primitive": "foobar", + "nestedRef": models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI(fmt.Sprintf("weaviate://localhost/%s", id2)), + }, + }, + "nestedRef2": models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI(fmt.Sprintf("weaviate://localhost/%s", idNested2ID)), + Schema: map[string]interface{}{ + "title": "nestedRef2Title", + "nestedRefInNestedRef": models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI(fmt.Sprintf("weaviate://localhost/%s", idNestedInNestedID)), + }, + }, + }, + }, + }, + }, + } + + expectedInner := search.Result{ + ClassName: "SomeNestedClass", + ID: strfmt.UUID(id2), + Schema: map[string]interface{}{ + "name": "John Doe", + }, + } + + expectedInner2 := search.Result{ + ClassName: "SomeNestedClass2", + ID: strfmt.UUID(idNested2ID), + Schema: map[string]interface{}{ + "title": "nestedRef2Title", + "nestedRefInNestedRef": models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI(fmt.Sprintf("weaviate://localhost/%s", idNestedInNestedID)), + }, + }, + }, + } + + expectedInnerInner := search.Result{ + ClassName: 
"SomeNestedClassNested2", + ID: strfmt.UUID(idNestedInNestedID), + Schema: map[string]interface{}{ + "titleNested": "Nested In Nested Title", + }, + } + + err := cr.Build(context.Background(), input, selectProps, additional.Properties{}, nil) + require.Nil(t, err) + res, ok := cr.Get(multi.Identifier{ID: id1, ClassName: "SomeClass"}) + require.True(t, ok) + assert.Equal(t, expectedOuter, res) + input2 := []search.Result{expectedInner, expectedInner2} + err = cr.Build(context.Background(), input2, nil, additional.Properties{}, nil) + require.Nil(t, err) + nested1, ok := cr.Get(multi.Identifier{ID: id2, ClassName: "SomeNestedClass"}) + require.True(t, ok) + assert.Equal(t, expectedInner, nested1) + nested2, ok := cr.Get(multi.Identifier{ID: idNested2ID, ClassName: "SomeNestedClass2"}) + require.True(t, ok) + assert.Equal(t, expectedInner2, nested2) + nestedSchema, ok := nested2.Schema.(map[string]interface{}) + require.True(t, ok) + nestedRefInNestedRef, ok := nestedSchema["nestedRefInNestedRef"] + require.True(t, ok) + require.NotNil(t, nestedRefInNestedRef) + nestedRefInNestedMultiRef, ok := nestedRefInNestedRef.(models.MultipleRef) + require.True(t, ok) + require.NotNil(t, nestedRefInNestedMultiRef) + require.Nil(t, err) + res, ok = cr.Get(multi.Identifier{ID: idNestedInNestedID, ClassName: "SomeNestedClassNested2"}) + require.True(t, ok) + assert.Equal(t, expectedInnerInner, res) + assert.Equal(t, 3, repo.counter, "required the expected amount of lookups") + }) + + t.Run("with group and with a additional group lookup", func(t *testing.T) { + repo := newFakeRepo() + repo.lookup[multi.Identifier{ID: id1, ClassName: "SomeClass"}] = search.Result{ + ClassName: "SomeClass", + ID: strfmt.UUID(id1), + Schema: map[string]interface{}{ + "primitive": "foobar", + }, + AdditionalProperties: models.AdditionalProperties{ + "group": &additional.Group{ + Hits: []map[string]interface{}{ + { + "primitive": "foobar", + "ignoredRef": models.MultipleRef{ + &models.SingleRef{ + 
Beacon: strfmt.URI("weaviate://localhost/ignoreMe"), + }, + }, + "nestedRef": models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI(fmt.Sprintf("weaviate://localhost/%s", id2)), + }, + }, + }, + }, + }, + }, + } + repo.lookup[multi.Identifier{ID: id2, ClassName: "SomeNestedClass"}] = search.Result{ + ClassName: "SomeNestedClass", + ID: strfmt.UUID(id2), + Schema: map[string]interface{}{ + "name": "John Doe", + }, + } + logger, _ := test.NewNullLogger() + cr := NewCacher(repo, logger, "") + input := []search.Result{ + { + ID: "foo", + ClassName: "BestClass", + Schema: map[string]interface{}{ + "refProp": models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI(fmt.Sprintf("weaviate://localhost/%s", id1)), + }, + }, + }, + AdditionalProperties: models.AdditionalProperties{ + "group": &additional.Group{ + Hits: []map[string]interface{}{ + { + "primitive": "foobar", + "ignoredRef": models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI("weaviate://localhost/ignoreMe"), + }, + }, + "nestedRef": models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI(fmt.Sprintf("weaviate://localhost/%s", id2)), + }, + }, + }, + }, + }, + }, + }, + } + groupByProps := search.SelectProperties{ + search.SelectProperty{ + Name: "nestedRef", + Refs: []search.SelectClass{ + { + ClassName: "SomeNestedClass", + RefProperties: []search.SelectProperty{ + { + Name: "name", + IsPrimitive: true, + }, + }, + }, + }, + }, + } + + expectedInner := search.Result{ + ClassName: "SomeNestedClass", + ID: strfmt.UUID(id2), + Schema: map[string]interface{}{ + "name": "John Doe", + }, + } + + err := cr.Build(context.Background(), input, nil, additional.Properties{}, groupByProps) + require.Nil(t, err) + res, ok := cr.Get(multi.Identifier{ID: id2, ClassName: "SomeNestedClass"}) + require.True(t, ok) + assert.Equal(t, expectedInner, res) + assert.Equal(t, 1, repo.counter, "required the expected amount of lookups") + }) + + t.Run("with group and with 2 additional group lookups", func(t 
*testing.T) { + repo := newFakeRepo() + repo.lookup[multi.Identifier{ID: id1, ClassName: "SomeClass"}] = search.Result{ + ClassName: "SomeClass", + ID: strfmt.UUID(id1), + Schema: map[string]interface{}{ + "primitive": "foobar", + }, + AdditionalProperties: models.AdditionalProperties{ + "group": &additional.Group{ + Hits: []map[string]interface{}{ + { + "primitive": "foobar", + "ignoredRef": models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI("weaviate://localhost/ignoreMe"), + }, + }, + "nestedRef": models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI(fmt.Sprintf("weaviate://localhost/%s", id2)), + }, + }, + }, + }, + }, + }, + } + repo.lookup[multi.Identifier{ID: id2, ClassName: "SomeNestedClass"}] = search.Result{ + ClassName: "SomeNestedClass", + ID: strfmt.UUID(id2), + Schema: map[string]interface{}{ + "name": "John Doe", + }, + } + repo.lookup[multi.Identifier{ID: id3, ClassName: "OtherNestedClass"}] = search.Result{ + ClassName: "OtherNestedClass", + ID: strfmt.UUID(id3), + Schema: map[string]interface{}{ + "name": "John Doe", + }, + } + logger, _ := test.NewNullLogger() + cr := NewCacher(repo, logger, "") + input := []search.Result{ + { + ID: "foo", + ClassName: "BestClass", + Schema: map[string]interface{}{ + "refProp": models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI(fmt.Sprintf("weaviate://localhost/%s", id1)), + }, + }, + }, + AdditionalProperties: models.AdditionalProperties{ + "group": &additional.Group{ + Hits: []map[string]interface{}{ + { + "primitive": "foobar", + "nestedRef": models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI(fmt.Sprintf("weaviate://localhost/%s", id2)), + }, + }, + "otherNestedRef": models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI(fmt.Sprintf("weaviate://localhost/%s", id3)), + }, + }, + }, + }, + }, + }, + }, + } + groupByProps := search.SelectProperties{ + search.SelectProperty{ + Name: "nestedRef", + Refs: []search.SelectClass{ + { + ClassName: "SomeNestedClass", + 
RefProperties: []search.SelectProperty{ + { + Name: "name", + IsPrimitive: true, + }, + }, + }, + }, + }, + search.SelectProperty{ + Name: "otherNestedRef", + Refs: []search.SelectClass{ + { + ClassName: "OtherNestedClass", + RefProperties: []search.SelectProperty{ + { + Name: "name", + IsPrimitive: true, + }, + }, + }, + }, + }, + } + + expectedSomeNestedClass := search.Result{ + ClassName: "SomeNestedClass", + ID: strfmt.UUID(id2), + Schema: map[string]interface{}{ + "name": "John Doe", + }, + } + + expectedOtherNestedClass := search.Result{ + ClassName: "OtherNestedClass", + ID: strfmt.UUID(id3), + Schema: map[string]interface{}{ + "name": "John Doe", + }, + } + + err := cr.Build(context.Background(), input, nil, additional.Properties{}, groupByProps) + require.Nil(t, err) + res, ok := cr.Get(multi.Identifier{ID: id2, ClassName: "SomeNestedClass"}) + require.True(t, ok) + assert.Equal(t, expectedSomeNestedClass, res) + res, ok = cr.Get(multi.Identifier{ID: id3, ClassName: "OtherNestedClass"}) + require.True(t, ok) + assert.Equal(t, expectedOtherNestedClass, res) + assert.Equal(t, 1, repo.counter, "required the expected amount of lookups") + }) + + t.Run("with group with a nested lookup and with 2 additional group lookups", func(t *testing.T) { + repo := newFakeRepo() + repo.lookup[multi.Identifier{ID: id1, ClassName: "SomeClass"}] = search.Result{ + ClassName: "SomeClass", + ID: strfmt.UUID(id1), + Schema: map[string]interface{}{ + "primitive": "foobar", + "ignoredRef": models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI("weaviate://localhost/ignoreMe"), + }, + }, + "nestedRef": models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI(fmt.Sprintf("weaviate://localhost/%s", id2)), + }, + }, + }, + } + repo.lookup[multi.Identifier{ID: id2, ClassName: "SomeNestedClass"}] = search.Result{ + ClassName: "SomeNestedClass", + ID: strfmt.UUID(id2), + Schema: map[string]interface{}{ + "name": "John Doe", + }, + } + repo.lookup[multi.Identifier{ID: id3, 
ClassName: "InnerNestedClass"}] = search.Result{ + ClassName: "InnerNestedClass", + ID: strfmt.UUID(id3), + Schema: map[string]interface{}{ + "name": "John Doe", + }, + } + repo.lookup[multi.Identifier{ID: id4, ClassName: "OtherNestedClass"}] = search.Result{ + ClassName: "OtherNestedClass", + ID: strfmt.UUID(id4), + Schema: map[string]interface{}{ + "name": "John Doe", + }, + } + logger, _ := test.NewNullLogger() + cr := NewCacher(repo, logger, "") + input := []search.Result{ + { + ID: "foo", + ClassName: "BestClass", + Schema: map[string]interface{}{ + "refProp": models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI(fmt.Sprintf("weaviate://localhost/%s", id1)), + }, + }, + }, + AdditionalProperties: models.AdditionalProperties{ + "group": &additional.Group{ + Hits: []map[string]interface{}{ + { + "primitive": "foobar", + "innerNestedRef": models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI(fmt.Sprintf("weaviate://localhost/%s", id3)), + }, + }, + "otherNestedRef": models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI(fmt.Sprintf("weaviate://localhost/%s", id4)), + }, + }, + }, + }, + }, + }, + }, + } + selectProps := search.SelectProperties{ + search.SelectProperty{ + Name: "refProp", + Refs: []search.SelectClass{ + { + ClassName: "SomeClass", + RefProperties: search.SelectProperties{ + search.SelectProperty{ + Name: "primitive", + IsPrimitive: true, + }, + search.SelectProperty{ + Name: "nestedRef", + Refs: []search.SelectClass{ + { + ClassName: "SomeNestedClass", + RefProperties: []search.SelectProperty{ + { + Name: "name", + IsPrimitive: true, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + groupByProps := search.SelectProperties{ + search.SelectProperty{ + Name: "innerNestedRef", + Refs: []search.SelectClass{ + { + ClassName: "InnerNestedClass", + RefProperties: []search.SelectProperty{ + { + Name: "name", + IsPrimitive: true, + }, + }, + }, + }, + }, + search.SelectProperty{ + Name: "otherNestedRef", + Refs: []search.SelectClass{ 
+ { + ClassName: "OtherNestedClass", + RefProperties: []search.SelectProperty{ + { + Name: "name", + IsPrimitive: true, + }, + }, + }, + }, + }, + } + + expectedOuter := search.Result{ + ID: strfmt.UUID(id1), + ClassName: "SomeClass", + Schema: map[string]interface{}{ + "primitive": "foobar", + "ignoredRef": models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI("weaviate://localhost/ignoreMe"), + }, + }, + "nestedRef": models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI(fmt.Sprintf("weaviate://localhost/%s", id2)), + }, + }, + }, + } + + expectedInner := search.Result{ + ClassName: "SomeNestedClass", + ID: strfmt.UUID(id2), + Schema: map[string]interface{}{ + "name": "John Doe", + }, + } + + expectedInnerNestedClass := search.Result{ + ClassName: "InnerNestedClass", + ID: strfmt.UUID(id3), + Schema: map[string]interface{}{ + "name": "John Doe", + }, + } + + expectedOtherNestedClass := search.Result{ + ClassName: "OtherNestedClass", + ID: strfmt.UUID(id4), + Schema: map[string]interface{}{ + "name": "John Doe", + }, + } + + err := cr.Build(context.Background(), input, selectProps, additional.Properties{}, groupByProps) + require.Nil(t, err) + res, ok := cr.Get(multi.Identifier{ID: id1, ClassName: "SomeClass"}) + require.True(t, ok) + assert.Equal(t, expectedOuter, res) + res, ok = cr.Get(multi.Identifier{ID: id2, ClassName: "SomeNestedClass"}) + require.True(t, ok) + assert.Equal(t, expectedInner, res) + res, ok = cr.Get(multi.Identifier{ID: id3, ClassName: "InnerNestedClass"}) + require.True(t, ok) + assert.Equal(t, expectedInnerNestedClass, res) + res, ok = cr.Get(multi.Identifier{ID: id4, ClassName: "OtherNestedClass"}) + require.True(t, ok) + assert.Equal(t, expectedOtherNestedClass, res) + assert.Equal(t, 2, repo.counter, "required the expected amount of lookups") + }) +} + +type fakeRepo struct { + lookup map[multi.Identifier]search.Result + counter int // count request + objectCounter int // count total objects on request(s) +} + +func 
newFakeRepo() *fakeRepo { + return &fakeRepo{ + lookup: map[multi.Identifier]search.Result{}, + } +} + +func (f *fakeRepo) MultiGet(ctx context.Context, query []multi.Identifier, additional additional.Properties, tenant string) ([]search.Result, error) { + f.counter++ + f.objectCounter += len(query) + out := make([]search.Result, len(query)) + for i, q := range query { + if res, ok := f.lookup[q]; ok { + out[i] = res + } else { + out[i] = search.Result{} + } + } + + return out, nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/refcache/resolver.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/refcache/resolver.go new file mode 100644 index 0000000000000000000000000000000000000000..e598fdb681ab7d98067565b24e4b61bb5961b3e6 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/refcache/resolver.go @@ -0,0 +1,251 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package refcache + +import ( + "context" + "fmt" + + "github.com/pkg/errors" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/multi" + "github.com/weaviate/weaviate/entities/schema/crossref" + "github.com/weaviate/weaviate/entities/search" +) + +type Resolver struct { + cacher cacher + // for groupBy feature + withGroup bool + groupByProps search.SelectProperties +} + +type cacher interface { + Build(ctx context.Context, objects []search.Result, properties search.SelectProperties, additional additional.Properties, groupByProperties search.SelectProperties) error + Get(si multi.Identifier) (search.Result, bool) +} + +func NewResolver(cacher cacher) *Resolver { + return &Resolver{cacher: cacher} +} + +func NewResolverWithGroup(cacher cacher, groupByProps search.SelectProperties) *Resolver { + return &Resolver{ + cacher: cacher, + // for groupBy feature + withGroup: true, + groupByProps: groupByProps, + } +} + +func (r *Resolver) Do(ctx context.Context, objects []search.Result, + properties search.SelectProperties, additional additional.Properties, +) ([]search.Result, error) { + cacherProps := properties + if !r.withGroup { + cacherProps = append(properties, r.groupByProps...) 
+ } + + if err := r.cacher.Build(ctx, objects, cacherProps, additional, r.groupByProps); err != nil { + return nil, errors.Wrap(err, "build reference cache") + } + + return r.parseObjects(objects, properties, additional) +} + +func (r *Resolver) parseObjects(objects []search.Result, properties search.SelectProperties, + additional additional.Properties, +) ([]search.Result, error) { + for i, obj := range objects { + parsed, err := r.parseObject(obj, properties, additional) + if err != nil { + return nil, errors.Wrapf(err, "parse at position %d", i) + } + + objects[i] = parsed + } + + return objects, nil +} + +func (r *Resolver) parseObject(object search.Result, properties search.SelectProperties, + additional additional.Properties, +) (search.Result, error) { + if object.Schema == nil { + return object, nil + } + + schemaMap, ok := object.Schema.(map[string]interface{}) + if !ok { + return object, fmt.Errorf("schema is not a map: %T", object.Schema) + } + + schema, err := r.parseSchema(schemaMap, properties) + if err != nil { + return object, err + } + + object.Schema = schema + + if r.withGroup { + additionalProperties, err := r.parseAdditionalGroup(object.AdditionalProperties, properties) + if err != nil { + return object, err + } + object.AdditionalProperties = additionalProperties + } + return object, nil +} + +func (r *Resolver) parseAdditionalGroup( + additionalProperties models.AdditionalProperties, + properties search.SelectProperties, +) (models.AdditionalProperties, error) { + if additionalProperties != nil && additionalProperties["group"] != nil { + if group, ok := additionalProperties["group"].(*additional.Group); ok { + for j, hit := range group.Hits { + schema, err := r.parseSchema(hit, r.groupByProps) + if err != nil { + return additionalProperties, fmt.Errorf("resolve group hit: %w", err) + } + group.Hits[j] = schema + } + } + } + return additionalProperties, nil +} + +func (r *Resolver) parseSchema(schema map[string]interface{}, + properties 
search.SelectProperties, +) (map[string]interface{}, error) { + for propName, value := range schema { + refs, ok := value.(models.MultipleRef) + if !ok { + // not a ref, not interesting for us + continue + } + + selectProp := properties.FindProperty(propName) + if selectProp == nil { + // user is not interested in this prop + continue + } + + parsed, err := r.parseRefs(refs, propName, *selectProp) + if err != nil { + return schema, errors.Wrapf(err, "parse refs for prop %q", propName) + } + + if parsed != nil { + schema[propName] = parsed + } + } + + return schema, nil +} + +func (r *Resolver) parseRefs(input models.MultipleRef, prop string, + selectProp search.SelectProperty, +) ([]interface{}, error) { + var refs []interface{} + for _, selectPropRef := range selectProp.Refs { + innerProperties := selectPropRef.RefProperties + additionalProperties := selectPropRef.AdditionalProperties + perClass, err := r.resolveRefs(input, selectPropRef.ClassName, innerProperties, additionalProperties) + if err != nil { + return nil, errors.Wrap(err, "resolve ref") + } + + refs = append(refs, perClass...) 
+ } + return refs, nil +} + +func (r *Resolver) resolveRefs(input models.MultipleRef, desiredClass string, + innerProperties search.SelectProperties, + additionalProperties additional.Properties, +) ([]interface{}, error) { + var output []interface{} + for i, item := range input { + resolved, err := r.resolveRef(item, desiredClass, innerProperties, additionalProperties) + if err != nil { + return nil, errors.Wrapf(err, "at position %d", i) + } + + if resolved == nil { + continue + } + + output = append(output, *resolved) + } + + return output, nil +} + +func (r *Resolver) resolveRef(item *models.SingleRef, desiredClass string, + innerProperties search.SelectProperties, + additionalProperties additional.Properties, +) (*search.LocalRef, error) { + var out search.LocalRef + + ref, err := crossref.Parse(item.Beacon.String()) + if err != nil { + return nil, err + } + + si := multi.Identifier{ + ID: ref.TargetID.String(), + ClassName: desiredClass, + } + res, ok := r.cacher.Get(si) + if !ok { + // silently ignore, could have been deleted in the meantime, or we're + // asking for a non-matching selectProperty, for example if we ask for + // Article { published { ... on { Magazine { name } ... on { Journal { name } } + // we don't know at resolve time if this ID will point to a Magazine or a + // Journal, so we will get a few empty responses when trying both for any + // given ID. 
+ // + // In turn this means we need to validate through automated and explorative + // tests, that we never skip results that should be contained, as we + // wouldn't throw an error, so the user would never notice + return nil, nil + } + + out.Class = res.ClassName + schema := res.Schema.(map[string]interface{}) + nested, err := r.parseSchema(schema, innerProperties) + if err != nil { + return nil, errors.Wrap(err, "resolve nested ref") + } + + if additionalProperties.Vector { + nested["vector"] = res.Vector + } + if len(additionalProperties.Vectors) > 0 { + vectors := make(map[string]models.Vector) + for _, targetVector := range additionalProperties.Vectors { + vectors[targetVector] = res.Vectors[targetVector] + } + nested["vectors"] = vectors + } + if additionalProperties.CreationTimeUnix { + nested["creationTimeUnix"] = res.Created + } + if additionalProperties.LastUpdateTimeUnix { + nested["lastUpdateTimeUnix"] = res.Updated + } + out.Fields = nested + + return &out, nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/refcache/resolver_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/refcache/resolver_test.go new file mode 100644 index 0000000000000000000000000000000000000000..277d315a75c0530fae267d21b252920592fb8c75 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/refcache/resolver_test.go @@ -0,0 +1,556 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package refcache + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/go-openapi/strfmt" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/multi" + "github.com/weaviate/weaviate/entities/search" +) + +func TestResolver(t *testing.T) { + id1 := "df5d4e49-0c56-4b87-ade1-3d46cc9b425f" + id2 := "3a08d808-8eb5-49ee-86b2-68b6035e8b69" + + t.Run("with nil input", func(t *testing.T) { + r := NewResolver(newFakeCacher()) + res, err := r.Do(context.Background(), nil, nil, additional.Properties{}) + require.Nil(t, err) + assert.Nil(t, res) + }) + + t.Run("with nil-schemas", func(t *testing.T) { + r := NewResolver(newFakeCacher()) + input := []search.Result{ + { + ID: "foo", + ClassName: "BestClass", + }, + } + + expected := input + res, err := r.Do(context.Background(), input, nil, additional.Properties{}) + require.Nil(t, err) + assert.Equal(t, expected, res) + }) + + t.Run("with single ref but no select props", func(t *testing.T) { + r := NewResolver(newFakeCacher()) + input := []search.Result{ + { + ID: "foo", + ClassName: "BestClass", + Schema: map[string]interface{}{ + "refProp": models.MultipleRef{ + &models.SingleRef{ + Beacon: "weaviate://localhost/123", + }, + }, + }, + }, + } + + expected := input + res, err := r.Do(context.Background(), input, nil, additional.Properties{}) + require.Nil(t, err) + assert.Equal(t, expected, res) + }) + + t.Run("with single ref with vector and matching select prop", func(t *testing.T) { + getInput := func() []search.Result { + return []search.Result{ + { + ID: "foo", + ClassName: "BestClass", + Schema: map[string]interface{}{ + "refProp": models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI(fmt.Sprintf("weaviate://localhost/%s", id1)), + }, + }, + }, + }, + } + } + getResolver := func() *Resolver { + 
cacher := newFakeCacher() + r := NewResolver(cacher) + cacher.lookup[multi.Identifier{ID: id1, ClassName: "SomeClass"}] = search.Result{ + ClassName: "SomeClass", + ID: strfmt.UUID(id1), + Schema: map[string]interface{}{ + "bar": "some string", + }, + Vector: []float32{0.1, 0.2}, + } + return r + } + getSelectProps := func(withVector bool) search.SelectProperties { + return search.SelectProperties{ + search.SelectProperty{ + Name: "refProp", + Refs: []search.SelectClass{ + { + ClassName: "SomeClass", + RefProperties: search.SelectProperties{ + search.SelectProperty{ + Name: "bar", + IsPrimitive: true, + }, + }, + AdditionalProperties: additional.Properties{ + Vector: withVector, + }, + }, + }, + }, + } + } + getExpectedResult := func(withVector bool) []search.Result { + fields := map[string]interface{}{ + "bar": "some string", + } + if withVector { + fields["vector"] = []float32{0.1, 0.2} + } + return []search.Result{ + { + ID: "foo", + ClassName: "BestClass", + Schema: map[string]interface{}{ + "refProp": []interface{}{ + search.LocalRef{ + Class: "SomeClass", + Fields: fields, + }, + }, + }, + }, + } + } + // ask for vector in ref property + res, err := getResolver().Do(context.Background(), getInput(), getSelectProps(true), additional.Properties{}) + require.Nil(t, err) + assert.Equal(t, getExpectedResult(true), res) + // don't ask for vector in ref property + res, err = getResolver().Do(context.Background(), getInput(), getSelectProps(false), additional.Properties{}) + require.Nil(t, err) + assert.Equal(t, getExpectedResult(false), res) + }) + + t.Run("with single ref with creation/update timestamps and matching select prop", func(t *testing.T) { + now := time.Now().UnixMilli() + getInput := func() []search.Result { + return []search.Result{ + { + ID: "foo", + ClassName: "BestClass", + Schema: map[string]interface{}{ + "refProp": models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI(fmt.Sprintf("weaviate://localhost/%s", id1)), + }, + }, + }, + }, + } + 
} + getResolver := func() *Resolver { + cacher := newFakeCacher() + r := NewResolver(cacher) + cacher.lookup[multi.Identifier{ID: id1, ClassName: "SomeClass"}] = search.Result{ + ClassName: "SomeClass", + ID: strfmt.UUID(id1), + Schema: map[string]interface{}{ + "bar": "some string", + }, + Created: now, + Updated: now, + } + return r + } + selectProps := search.SelectProperties{ + search.SelectProperty{ + Name: "refProp", + Refs: []search.SelectClass{ + { + ClassName: "SomeClass", + RefProperties: search.SelectProperties{ + search.SelectProperty{ + Name: "bar", + IsPrimitive: true, + }, + }, + AdditionalProperties: additional.Properties{ + CreationTimeUnix: true, + LastUpdateTimeUnix: true, + }, + }, + }, + }, + } + expected := []search.Result{ + { + ID: "foo", + ClassName: "BestClass", + Schema: map[string]interface{}{ + "refProp": []interface{}{ + search.LocalRef{ + Class: "SomeClass", + Fields: map[string]interface{}{ + "bar": "some string", + "creationTimeUnix": now, + "lastUpdateTimeUnix": now, + }, + }, + }, + }, + }, + } + res, err := getResolver().Do(context.Background(), getInput(), selectProps, additional.Properties{}) + require.Nil(t, err) + assert.Equal(t, expected, res) + }) + + t.Run("with single ref and matching select prop", func(t *testing.T) { + cacher := newFakeCacher() + r := NewResolver(cacher) + cacher.lookup[multi.Identifier{ID: id1, ClassName: "SomeClass"}] = search.Result{ + ClassName: "SomeClass", + ID: strfmt.UUID(id1), + Schema: map[string]interface{}{ + "bar": "some string", + }, + } + input := []search.Result{ + { + ID: "foo", + ClassName: "BestClass", + Schema: map[string]interface{}{ + "refProp": models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI(fmt.Sprintf("weaviate://localhost/%s", id1)), + }, + }, + }, + }, + } + selectProps := search.SelectProperties{ + search.SelectProperty{ + Name: "refProp", + Refs: []search.SelectClass{ + { + ClassName: "SomeClass", + RefProperties: search.SelectProperties{ + 
search.SelectProperty{ + Name: "bar", + IsPrimitive: true, + }, + }, + }, + }, + }, + } + + expected := []search.Result{ + { + ID: "foo", + ClassName: "BestClass", + Schema: map[string]interface{}{ + "refProp": []interface{}{ + search.LocalRef{ + Class: "SomeClass", + Fields: map[string]interface{}{ + "bar": "some string", + }, + }, + }, + }, + }, + } + res, err := r.Do(context.Background(), input, selectProps, additional.Properties{}) + require.Nil(t, err) + assert.Equal(t, expected, res) + }) + + t.Run("with a nested lookup", func(t *testing.T) { + cacher := newFakeCacher() + r := NewResolver(cacher) + cacher.lookup[multi.Identifier{ID: id1, ClassName: "SomeClass"}] = search.Result{ + ClassName: "SomeClass", + ID: strfmt.UUID(id1), + Schema: map[string]interface{}{ + "primitive": "foobar", + "ignoredRef": models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI("weaviate://localhost/ignoreMe"), + }, + }, + "nestedRef": models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI(fmt.Sprintf("weaviate://localhost/%s", id2)), + }, + }, + }, + } + cacher.lookup[multi.Identifier{ID: id2, ClassName: "SomeNestedClass"}] = search.Result{ + ClassName: "SomeNestedClass", + ID: strfmt.UUID(id2), + Schema: map[string]interface{}{ + "name": "John Doe", + }, + } + input := []search.Result{ + { + ID: "foo", + ClassName: "BestClass", + Schema: map[string]interface{}{ + "refProp": models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI(fmt.Sprintf("weaviate://localhost/%s", id1)), + }, + }, + }, + }, + } + selectProps := search.SelectProperties{ + search.SelectProperty{ + Name: "refProp", + Refs: []search.SelectClass{ + { + ClassName: "SomeClass", + RefProperties: search.SelectProperties{ + search.SelectProperty{ + Name: "primitive", + IsPrimitive: true, + }, + search.SelectProperty{ + Name: "nestedRef", + Refs: []search.SelectClass{ + { + ClassName: "SomeNestedClass", + RefProperties: []search.SelectProperty{ + { + Name: "name", + IsPrimitive: true, + }, + }, + }, + 
}, + }, + }, + }, + }, + }, + } + + expected := []search.Result{ + { + ID: "foo", + ClassName: "BestClass", + Schema: map[string]interface{}{ + "refProp": []interface{}{ + search.LocalRef{ + Class: "SomeClass", + Fields: map[string]interface{}{ + "primitive": "foobar", + "ignoredRef": models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI("weaviate://localhost/ignoreMe"), + }, + }, + "nestedRef": []interface{}{ + search.LocalRef{ + Class: "SomeNestedClass", + Fields: map[string]interface{}{ + "name": "John Doe", + }, + }, + }, + }, + }, + }, + }, + }, + } + res, err := r.Do(context.Background(), input, selectProps, additional.Properties{}) + require.Nil(t, err) + assert.Equal(t, expected, res) + }) + + t.Run("with single ref with vector and matching select prop and group", func(t *testing.T) { + getInput := func() []search.Result { + return []search.Result{ + { + ID: "foo", + ClassName: "BestClass", + Schema: map[string]interface{}{ + "refProp": models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI(fmt.Sprintf("weaviate://localhost/%s", id1)), + }, + }, + }, + AdditionalProperties: models.AdditionalProperties{ + "group": &additional.Group{ + Hits: []map[string]interface{}{ + { + "nestedRef": models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI(fmt.Sprintf("weaviate://localhost/SomeNestedClass/%s", id2)), + }, + }, + }, + }, + }, + }, + }, + } + } + + getSelectProps := func(withVector bool) search.SelectProperties { + return search.SelectProperties{ + search.SelectProperty{ + Name: "refProp", + Refs: []search.SelectClass{ + { + ClassName: "SomeClass", + RefProperties: search.SelectProperties{ + search.SelectProperty{ + Name: "bar", + IsPrimitive: true, + }, + }, + AdditionalProperties: additional.Properties{ + Vector: withVector, + }, + }, + }, + }, + } + } + getGroupBySelectProps := func() search.SelectProperties { + return search.SelectProperties{ + search.SelectProperty{ + Name: "nestedRef", + Refs: []search.SelectClass{ + { + ClassName: 
"SomeNestedClass", + RefProperties: []search.SelectProperty{ + { + Name: "name", + IsPrimitive: true, + }, + }, + }, + }, + }, + } + } + getResolver := func() *Resolver { + cacher := newFakeCacher() + r := NewResolverWithGroup(cacher, getGroupBySelectProps()) + cacher.lookup[multi.Identifier{ID: id1, ClassName: "SomeClass"}] = search.Result{ + ClassName: "SomeClass", + ID: strfmt.UUID(id1), + Schema: map[string]interface{}{ + "bar": "some string", + }, + Vector: []float32{0.1, 0.2}, + } + cacher.lookup[multi.Identifier{ID: id2, ClassName: "SomeNestedClass"}] = search.Result{ + ClassName: "SomeNestedClass", + ID: strfmt.UUID(id2), + Schema: map[string]interface{}{ + "name": "John Doe", + }, + } + return r + } + getExpectedResult := func(withVector bool) []search.Result { + fields := map[string]interface{}{ + "bar": "some string", + } + if withVector { + fields["vector"] = []float32{0.1, 0.2} + } + return []search.Result{ + { + ID: "foo", + ClassName: "BestClass", + Schema: map[string]interface{}{ + "refProp": []interface{}{ + search.LocalRef{ + Class: "SomeClass", + Fields: fields, + }, + }, + }, + AdditionalProperties: models.AdditionalProperties{ + "group": &additional.Group{ + Hits: []map[string]interface{}{ + { + "nestedRef": []interface{}{ + search.LocalRef{ + Class: "SomeNestedClass", + Fields: map[string]interface{}{ + "name": "John Doe", + }, + }, + }, + }, + }, + }, + }, + }, + } + } + // ask for vector in ref property + res, err := getResolver().Do(context.Background(), getInput(), getSelectProps(true), additional.Properties{}) + require.Nil(t, err) + assert.Equal(t, getExpectedResult(true), res) + // don't ask for vector in ref property + res, err = getResolver().Do(context.Background(), getInput(), getSelectProps(false), additional.Properties{}) + require.Nil(t, err) + assert.Equal(t, getExpectedResult(false), res) + }) +} + +func newFakeCacher() *fakeCacher { + return &fakeCacher{ + lookup: map[multi.Identifier]search.Result{}, + } +} + +type fakeCacher 
struct { + lookup map[multi.Identifier]search.Result +} + +func (f *fakeCacher) Build(ctx context.Context, objects []search.Result, properties search.SelectProperties, + additional additional.Properties, groupByProps search.SelectProperties, +) error { + return nil +} + +func (f *fakeCacher) Get(si multi.Identifier) (search.Result, bool) { + res, ok := f.lookup[si] + return res, ok +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/binary_search_tree.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/binary_search_tree.go new file mode 100644 index 0000000000000000000000000000000000000000..389e70a6f578b0466ea9d2c0e7f1a7d5bb5dbe5e --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/binary_search_tree.go @@ -0,0 +1,271 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package roaringset + +import ( + "bytes" + + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv/rbtree" + "github.com/weaviate/weaviate/entities/lsmkv" +) + +type BinarySearchTree struct { + root *BinarySearchNode +} + +type Insert struct { + Additions []uint64 + Deletions []uint64 +} + +func (t *BinarySearchTree) Insert(key []byte, values Insert) { + if t.root == nil { + t.root = &BinarySearchNode{ + Key: key, + Value: BitmapLayer{ + Additions: NewBitmap(values.Additions...), + Deletions: NewBitmap(values.Deletions...), + }, + colourIsRed: false, // root node is always black + } + return + } + + if newRoot := t.root.insert(key, values); newRoot != nil { + t.root = newRoot + } + t.root.colourIsRed = false // Can be flipped in the process of balancing, but root is always black +} + +// Get creates copies of underlying bitmaps to prevent future (concurrent) +// read and writes after layer being returned +func (t *BinarySearchTree) Get(key []byte) (BitmapLayer, error) { + if t.root == nil { + return BitmapLayer{}, lsmkv.NotFound + } + + return t.root.get(key) +} + +// FlattenInOrder creates list of ordered copies of bst nodes +// Only Key and Value fields are populated +func (t *BinarySearchTree) FlattenInOrder() []*BinarySearchNode { + if t.root == nil { + return nil + } + + return t.root.flattenInOrder() +} + +type BinarySearchNode struct { + Key []byte + Value BitmapLayer + left *BinarySearchNode + right *BinarySearchNode + parent *BinarySearchNode + colourIsRed bool +} + +func (n *BinarySearchNode) Parent() rbtree.Node { + if n == nil { + return nil + } + return n.parent +} + +func (n *BinarySearchNode) SetParent(parent rbtree.Node) { + if n == nil { + addNewSearchNodeRoaringSetReceiver(&n) + } + + if parent == nil { + n.parent = nil + return + } + + n.parent = parent.(*BinarySearchNode) +} + +func (n *BinarySearchNode) Left() rbtree.Node { + if n == nil { + return nil + } + return n.left +} + +func (n 
*BinarySearchNode) SetLeft(left rbtree.Node) { + if n == nil { + addNewSearchNodeRoaringSetReceiver(&n) + } + + if left == nil { + n.left = nil + return + } + + n.left = left.(*BinarySearchNode) +} + +func (n *BinarySearchNode) Right() rbtree.Node { + if n == nil { + return nil + } + return n.right +} + +func (n *BinarySearchNode) SetRight(right rbtree.Node) { + if n == nil { + addNewSearchNodeRoaringSetReceiver(&n) + } + + if right == nil { + n.right = nil + return + } + + n.right = right.(*BinarySearchNode) +} + +func (n *BinarySearchNode) IsRed() bool { + if n == nil { + return false + } + return n.colourIsRed +} + +func (n *BinarySearchNode) SetRed(isRed bool) { + n.colourIsRed = isRed +} + +func (n *BinarySearchNode) IsNil() bool { + return n == nil +} + +func addNewSearchNodeRoaringSetReceiver(nodePtr **BinarySearchNode) { + *nodePtr = &BinarySearchNode{} +} + +func (n *BinarySearchNode) insert(key []byte, values Insert) *BinarySearchNode { + if bytes.Equal(key, n.Key) { + // Merging the new additions and deletions into the existing ones is a + // four-step process: + // + // 1. make sure anything that's added is not part of the deleted list, in + // case it was previously deleted + // 2. actually add the new entries to additions + // 3. make sure anything that's deleted is not part of the additions list, + // in case it was recently added + // 4. 
actually add the new entries to deletions (this step is vital in case + // a delete points to an entry of a previous segment that's not added in + // this memtable) + for _, x := range values.Additions { + n.Value.Deletions.Remove(x) + n.Value.Additions.Set(x) + } + + for _, x := range values.Deletions { + n.Value.Additions.Remove(x) + n.Value.Deletions.Set(x) + } + + return nil + } + + if bytes.Compare(key, n.Key) < 0 { + if n.left != nil { + return n.left.insert(key, values) + } else { + n.left = &BinarySearchNode{ + Key: key, + Value: BitmapLayer{ + Additions: NewBitmap(values.Additions...), + Deletions: NewBitmap(values.Deletions...), + }, + parent: n, + colourIsRed: true, + } + return BinarySearchNodeFromRB(rbtree.Rebalance(n.left)) + } + } else { + if n.right != nil { + return n.right.insert(key, values) + } else { + n.right = &BinarySearchNode{ + Key: key, + Value: BitmapLayer{ + Additions: NewBitmap(values.Additions...), + Deletions: NewBitmap(values.Deletions...), + }, + parent: n, + colourIsRed: true, + } + return BinarySearchNodeFromRB(rbtree.Rebalance(n.right)) + } + } +} + +func (n *BinarySearchNode) get(key []byte) (BitmapLayer, error) { + if bytes.Equal(n.Key, key) { + return n.Value.Clone(), nil + } + + if bytes.Compare(key, n.Key) < 0 { + if n.left == nil { + return BitmapLayer{}, lsmkv.NotFound + } + + return n.left.get(key) + } else { + if n.right == nil { + return BitmapLayer{}, lsmkv.NotFound + } + + return n.right.get(key) + } +} + +func BinarySearchNodeFromRB(rbNode rbtree.Node) (bsNode *BinarySearchNode) { + if rbNode == nil { + bsNode = nil + return + } + bsNode = rbNode.(*BinarySearchNode) + return +} + +func (n *BinarySearchNode) flattenInOrder() []*BinarySearchNode { + var left []*BinarySearchNode + var right []*BinarySearchNode + + if n.left != nil { + left = n.left.flattenInOrder() + } + + if n.right != nil { + right = n.right.flattenInOrder() + } + + key := make([]byte, len(n.Key)) + copy(key, n.Key) + + // Node's Value has to be 
copied, not to be mutated when BST is updated. + // Since memtable flush needs condensing, Condense here serves as cloning here + // instead of separate clone + optional condense calls + right = append([]*BinarySearchNode{{ + Key: key, + Value: BitmapLayer{ + Additions: Condense(n.Value.Additions), + Deletions: Condense(n.Value.Deletions), + }, + }}, right...) + return append(left, right...) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/binary_search_tree_cursor.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/binary_search_tree_cursor.go new file mode 100644 index 0000000000000000000000000000000000000000..f71776b89cce3f41751e3c2839c4f9ef1e1b7585 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/binary_search_tree_cursor.go @@ -0,0 +1,69 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package roaringset + +import ( + "bytes" + + "github.com/weaviate/weaviate/entities/lsmkv" +) + +type BinarySearchTreeCursor struct { + nodes []*BinarySearchNode + nextNodePos int +} + +func NewBinarySearchTreeCursor(bst *BinarySearchTree) *BinarySearchTreeCursor { + return &BinarySearchTreeCursor{nodes: bst.FlattenInOrder()} +} + +func (c *BinarySearchTreeCursor) First() ([]byte, BitmapLayer, error) { + c.nextNodePos = 0 + return c.Next() +} + +func (c *BinarySearchTreeCursor) Next() ([]byte, BitmapLayer, error) { + if c.nextNodePos >= len(c.nodes) { + return nil, BitmapLayer{}, nil + } + + pos := c.nextNodePos + c.nextNodePos++ + return c.nodes[pos].Key, c.nodes[pos].Value, nil +} + +func (c *BinarySearchTreeCursor) Seek(key []byte) ([]byte, BitmapLayer, error) { + pos := c.posKeyGreaterThanEqual(key) + if pos == -1 { + return nil, BitmapLayer{}, lsmkv.NotFound + } + c.nextNodePos = pos + return c.Next() +} + +func (c *BinarySearchTreeCursor) posKeyGreaterThanEqual(key []byte) int { + // seek from the end, return position of first (from the beginning) node with key >= given key + // if key > node_key return previous pos + // if key == node_key return current pos + // if key < node_key continue or return current pos if all nodes checked + pos := -1 + for i := len(c.nodes) - 1; i >= 0; i-- { + if cmp := bytes.Compare(key, c.nodes[i].Key); cmp > 0 { + break + } else if cmp == 0 { + pos = i + break + } + pos = i + } + return pos +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/binary_search_tree_cursor_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/binary_search_tree_cursor_test.go new file mode 100644 index 0000000000000000000000000000000000000000..4d02572298dc1e8ce1655345e5c704411850ec1e --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/binary_search_tree_cursor_test.go @@ -0,0 +1,175 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ 
___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package roaringset + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/weaviate/weaviate/entities/lsmkv" +) + +func TestBSTCursor(t *testing.T) { + bst := &BinarySearchTree{} + + in := []struct { + key string + addVal uint64 + delVal uint64 + }{ + {"aaa", 1, 11}, + {"bbb", 2, 22}, + {"ccc", 3, 33}, + {"ddd", 4, 44}, + } + + for _, v := range in { + bst.Insert([]byte(v.key), Insert{Additions: []uint64{v.addVal}, Deletions: []uint64{v.delVal}}) + } + + t.Run("start from beginning", func(t *testing.T) { + cursor := NewBinarySearchTreeCursor(bst) + + key, layer, err := cursor.First() + + assert.Equal(t, []byte(in[0].key), key) + assert.Equal(t, 1, layer.Additions.GetCardinality()) + assert.True(t, layer.Additions.Contains(in[0].addVal)) + assert.Equal(t, 1, layer.Deletions.GetCardinality()) + assert.True(t, layer.Deletions.Contains(in[0].delVal)) + assert.Nil(t, err) + }) + + t.Run("start from beginning and go through all", func(t *testing.T) { + cursor := NewBinarySearchTreeCursor(bst) + + i := 0 // 1st match is "aaa" + for key, layer, err := cursor.First(); key != nil; key, layer, err = cursor.Next() { + assert.Equal(t, []byte(in[i].key), key) + assert.Equal(t, 1, layer.Additions.GetCardinality()) + assert.True(t, layer.Additions.Contains(in[i].addVal)) + assert.Equal(t, 1, layer.Deletions.GetCardinality()) + assert.True(t, layer.Deletions.Contains(in[i].delVal)) + assert.Nil(t, err) + i++ + } + assert.Equal(t, i, len(in)) + }) + + t.Run("seek matching element and go through rest", func(t *testing.T) { + cursor := NewBinarySearchTreeCursor(bst) + + i := 1 // 1st match is "bbb" + matching := []byte("bbb") + for key, layer, err := cursor.Seek(matching); key != nil; key, layer, err = cursor.Next() { 
+ assert.Equal(t, []byte(in[i].key), key) + assert.Equal(t, 1, layer.Additions.GetCardinality()) + assert.True(t, layer.Additions.Contains(in[i].addVal)) + assert.Equal(t, 1, layer.Deletions.GetCardinality()) + assert.True(t, layer.Deletions.Contains(in[i].delVal)) + assert.Nil(t, err) + i++ + } + assert.Equal(t, i, len(in)) + }) + + t.Run("seek non-matching element and go through rest", func(t *testing.T) { + cursor := NewBinarySearchTreeCursor(bst) + + i := 2 // 1st match is "ccc" + nonMatching := []byte("bcde") + for key, layer, err := cursor.Seek(nonMatching); key != nil; key, layer, err = cursor.Next() { + assert.Equal(t, []byte(in[i].key), key) + assert.Equal(t, 1, layer.Additions.GetCardinality()) + assert.True(t, layer.Additions.Contains(in[i].addVal)) + assert.Equal(t, 1, layer.Deletions.GetCardinality()) + assert.True(t, layer.Deletions.Contains(in[i].delVal)) + assert.Nil(t, err) + i++ + } + assert.Equal(t, i, len(in)) + }) + + t.Run("seek missing element", func(t *testing.T) { + cursor := NewBinarySearchTreeCursor(bst) + + missing := []byte("eee") + key, layer, err := cursor.Seek(missing) + + assert.Nil(t, key) + assert.True(t, layer.Additions.IsEmpty()) + assert.True(t, layer.Deletions.IsEmpty()) + assert.ErrorIs(t, err, lsmkv.NotFound) + }) + + t.Run("next after seek missing element does not change cursor's position", func(t *testing.T) { + cursor := NewBinarySearchTreeCursor(bst) + + key1, _, err1 := cursor.First() + + missing := []byte("eee") + cursor.Seek(missing) + + key2, _, err2 := cursor.Next() + + assert.Equal(t, []byte("aaa"), key1) + assert.Nil(t, err1) + assert.Equal(t, []byte("bbb"), key2) + assert.Nil(t, err2) + }) + + t.Run("next after last is nil/empty", func(t *testing.T) { + cursor := NewBinarySearchTreeCursor(bst) + + last := []byte("ddd") + cursor.Seek(last) + key, layer, err := cursor.Next() + + assert.Nil(t, key) + assert.True(t, layer.Additions.IsEmpty()) + assert.True(t, layer.Deletions.IsEmpty()) + assert.Nil(t, err) + }) + + 
t.Run("first after final/empty next", func(t *testing.T) { + cursor := NewBinarySearchTreeCursor(bst) + + last := []byte("ddd") + cursor.Seek(last) + cursor.Next() + key, layer, err := cursor.First() + + assert.Equal(t, []byte(in[0].key), key) + assert.Equal(t, 1, layer.Additions.GetCardinality()) + assert.True(t, layer.Additions.Contains(in[0].addVal)) + assert.Equal(t, 1, layer.Deletions.GetCardinality()) + assert.True(t, layer.Deletions.Contains(in[0].delVal)) + assert.Nil(t, err) + }) + + t.Run("seek after final/empty next", func(t *testing.T) { + cursor := NewBinarySearchTreeCursor(bst) + + last := []byte("ddd") + matching := []byte("bbb") + cursor.Seek(last) + cursor.Next() + key, layer, err := cursor.Seek(matching) + + assert.Equal(t, []byte(in[1].key), key) + assert.Equal(t, 1, layer.Additions.GetCardinality()) + assert.True(t, layer.Additions.Contains(in[1].addVal)) + assert.Equal(t, 1, layer.Deletions.GetCardinality()) + assert.True(t, layer.Deletions.Contains(in[1].delVal)) + assert.Nil(t, err) + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/binary_search_tree_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/binary_search_tree_test.go new file mode 100644 index 0000000000000000000000000000000000000000..073ba4c4e2bab9b6d12ff89b8e4748c98fd551c5 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/binary_search_tree_test.go @@ -0,0 +1,295 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package roaringset + +import ( + "bytes" + "encoding/binary" + "math/rand" + "testing" + "time" + + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestBSTRoaringSet(t *testing.T) { + t.Run("single key, single set entry", func(t *testing.T) { + bst := &BinarySearchTree{} + key := []byte("my-key") + + bst.Insert(key, Insert{Additions: []uint64{7}}) + + res, err := bst.Get(key) + require.Nil(t, err) + + assert.False(t, res.Additions.Contains(6)) + assert.True(t, res.Additions.Contains(7)) + }) + + t.Run("single key, set updated multiple times", func(t *testing.T) { + bst := &BinarySearchTree{} + key := []byte("my-key") + + for i := uint64(7); i < 14; i++ { + bst.Insert(key, Insert{Additions: []uint64{i}}) + } + + res, err := bst.Get(key) + require.Nil(t, err) + + assert.False(t, res.Additions.Contains(6)) + for i := uint64(7); i < 14; i++ { + assert.True(t, res.Additions.Contains(i)) + } + assert.False(t, res.Additions.Contains(15)) + }) + + t.Run("single key, entry added, then deleted", func(t *testing.T) { + bst := &BinarySearchTree{} + key := []byte("my-key") + + for i := uint64(7); i < 11; i++ { + bst.Insert(key, Insert{Additions: []uint64{i}}) + } + + bst.Insert(key, Insert{Deletions: []uint64{9}}) + + res, err := bst.Get(key) + require.Nil(t, err) + + // check Additions + assert.True(t, res.Additions.Contains(7)) + assert.True(t, res.Additions.Contains(8)) + assert.False(t, res.Additions.Contains(9)) + assert.True(t, res.Additions.Contains(10)) + + // check Deletions + assert.True(t, res.Deletions.Contains(9)) + }) + + t.Run("single key, entry added, then deleted, then re-added", func(t *testing.T) { + bst := &BinarySearchTree{} + key := []byte("my-key") + + for i := uint64(7); i < 11; i++ { + bst.Insert(key, Insert{Additions: []uint64{i}}) + } + + bst.Insert(key, Insert{Deletions: []uint64{9}}) + + bst.Insert(key, Insert{Additions: []uint64{9}}) + + res, 
err := bst.Get(key) + require.Nil(t, err) + + // check Additions + assert.True(t, res.Additions.Contains(7)) + assert.True(t, res.Additions.Contains(8)) + assert.True(t, res.Additions.Contains(9)) + assert.True(t, res.Additions.Contains(10)) + + // check Deletions + assert.False(t, res.Deletions.Contains(9)) + }) + + t.Run("get is snapshot of underlying bitmaps", func(t *testing.T) { + bst := &BinarySearchTree{} + key := []byte("my-key") + + for i := uint64(1); i <= 3; i++ { + bst.Insert(key, Insert{ + Additions: []uint64{10 + i}, + Deletions: []uint64{10 - i}, + }) + } + + getBeforeUpdate, err := bst.Get(key) + require.Nil(t, err) + + expectedAdditionsBeforeUpdate := []uint64{11, 12, 13} + expectedDeletionsBeforeUpdate := []uint64{7, 8, 9} + + assert.ElementsMatch(t, expectedAdditionsBeforeUpdate, getBeforeUpdate.Additions.ToArray()) + assert.ElementsMatch(t, expectedDeletionsBeforeUpdate, getBeforeUpdate.Deletions.ToArray()) + + t.Run("gotten layer does not change on bst update", func(t *testing.T) { + bst.Insert(key, Insert{Additions: []uint64{100}, Deletions: []uint64{1}}) + + getAfterUpdate, err := bst.Get(key) + require.Nil(t, err) + + expectedAdditionsAfterUpdate := []uint64{11, 12, 13, 100} + expectedDeletionsAfterUpdate := []uint64{1, 7, 8, 9} + + assert.ElementsMatch(t, expectedAdditionsBeforeUpdate, getBeforeUpdate.Additions.ToArray()) + assert.ElementsMatch(t, expectedDeletionsBeforeUpdate, getBeforeUpdate.Deletions.ToArray()) + + assert.ElementsMatch(t, expectedAdditionsAfterUpdate, getAfterUpdate.Additions.ToArray()) + assert.ElementsMatch(t, expectedDeletionsAfterUpdate, getAfterUpdate.Deletions.ToArray()) + }) + }) +} + +func TestBSTRoaringSet_Flatten(t *testing.T) { + t.Run("flattened bst is snapshot of current bst", func(t *testing.T) { + key1 := "key-1" + key2 := "key-2" + key3 := "key-3" + + bst := &BinarySearchTree{} + // mixed order + bst.Insert([]byte(key3), Insert{Additions: []uint64{7, 8, 9}, Deletions: []uint64{77, 88, 99}}) + 
bst.Insert([]byte(key1), Insert{Additions: []uint64{1, 2, 3}, Deletions: []uint64{11, 22, 33}}) + bst.Insert([]byte(key2), Insert{Additions: []uint64{4, 5, 6}, Deletions: []uint64{44, 55, 66}}) + + flatBeforeUpdate := bst.FlattenInOrder() + + expectedBeforeUpdate := []struct { + key string + additions []uint64 + deletions []uint64 + }{ + {key1, []uint64{1, 2, 3}, []uint64{11, 22, 33}}, + {key2, []uint64{4, 5, 6}, []uint64{44, 55, 66}}, + {key3, []uint64{7, 8, 9}, []uint64{77, 88, 99}}, + } + + assert.Len(t, flatBeforeUpdate, len(expectedBeforeUpdate)) + for i, exp := range expectedBeforeUpdate { + assert.Equal(t, []byte(exp.key), flatBeforeUpdate[i].Key) + assert.ElementsMatch(t, exp.additions, flatBeforeUpdate[i].Value.Additions.ToArray()) + assert.ElementsMatch(t, exp.deletions, flatBeforeUpdate[i].Value.Deletions.ToArray()) + } + + t.Run("flattened bst does not change on bst update", func(t *testing.T) { + key4 := "key-4" + + // mixed order + bst.Insert([]byte(key4), Insert{Additions: []uint64{111, 222, 333}, Deletions: []uint64{444, 555, 666}}) + bst.Insert([]byte(key3), Insert{Additions: []uint64{77, 88}, Deletions: []uint64{7, 8}}) + bst.Insert([]byte(key1), Insert{Additions: []uint64{11, 22}, Deletions: []uint64{1, 2}}) + + flatAfterUpdate := bst.FlattenInOrder() + + expectedAfterUpdate := []struct { + key string + additions []uint64 + deletions []uint64 + }{ + {key1, []uint64{3, 11, 22}, []uint64{1, 2, 33}}, + {key2, []uint64{4, 5, 6}, []uint64{44, 55, 66}}, + {key3, []uint64{9, 77, 88}, []uint64{7, 8, 99}}, + {key4, []uint64{111, 222, 333}, []uint64{444, 555, 666}}, + } + + assert.Len(t, flatBeforeUpdate, len(expectedBeforeUpdate)) + for i, exp := range expectedBeforeUpdate { + assert.Equal(t, []byte(exp.key), flatBeforeUpdate[i].Key) + assert.ElementsMatch(t, exp.additions, flatBeforeUpdate[i].Value.Additions.ToArray()) + assert.ElementsMatch(t, exp.deletions, flatBeforeUpdate[i].Value.Deletions.ToArray()) + } + + assert.Len(t, flatAfterUpdate, 
len(expectedAfterUpdate)) + for i, exp := range expectedAfterUpdate { + assert.Equal(t, []byte(exp.key), flatAfterUpdate[i].Key) + assert.ElementsMatch(t, exp.additions, flatAfterUpdate[i].Value.Additions.ToArray()) + assert.ElementsMatch(t, exp.deletions, flatAfterUpdate[i].Value.Deletions.ToArray()) + } + }) + }) +} + +func BenchmarkBinarySearchTreeInsert(b *testing.B) { + count := uint64(100_000) + keys := make([][]byte, count) + + // generate + for i := range keys { + bytes, err := lexicographicallySortableFloat64(float64(i) / 3) + require.NoError(b, err) + keys[i] = bytes + } + + // shuffle + r := rand.New(rand.NewSource(time.Now().UnixNano())) + for i := range keys { + j := r.Intn(i + 1) + keys[i], keys[j] = keys[j], keys[i] + } + + insert := Insert{Additions: make([]uint64, 1)} + for i := 0; i < b.N; i++ { + m := &BinarySearchTree{} + for value := uint64(0); value < count; value++ { + insert.Additions[0] = value + m.Insert(keys[value], insert) + } + } +} + +func BenchmarkBinarySearchTreeFlatten(b *testing.B) { + count := uint64(100_000) + keys := make([][]byte, count) + + // generate + for i := range keys { + bytes, err := lexicographicallySortableFloat64(float64(i) / 3) + require.NoError(b, err) + keys[i] = bytes + } + + // shuffle + r := rand.New(rand.NewSource(time.Now().UnixNano())) + for i := range keys { + j := r.Intn(i + 1) + keys[i], keys[j] = keys[j], keys[i] + } + + // insert + insert := Insert{Additions: make([]uint64, 1)} + m := &BinarySearchTree{} + for value := uint64(0); value < count; value++ { + insert.Additions[0] = value + m.Insert(keys[value], insert) + } + + for i := 0; i < b.N; i++ { + m.FlattenInOrder() + } +} + +func lexicographicallySortableFloat64(in float64) ([]byte, error) { + buf := bytes.NewBuffer(nil) + + err := binary.Write(buf, binary.BigEndian, in) + if err != nil { + return nil, errors.Wrap(err, "serialize float64 value as big endian") + } + + var out []byte + if in >= 0 { + // on positive numbers only flip the sign + out = 
buf.Bytes() + firstByte := out[0] ^ 0x80 + out = append([]byte{firstByte}, out[1:]...) + } else { + // on negative numbers flip every bit + out = make([]byte, 8) + for i, b := range buf.Bytes() { + out[i] = b ^ 0xFF + } + } + + return out, nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/buf_pool.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/buf_pool.go new file mode 100644 index 0000000000000000000000000000000000000000..c9bdbd8061b615a747be0c2941386976665c4adf --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/buf_pool.go @@ -0,0 +1,451 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package roaringset + +import ( + "context" + "math" + "math/bits" + "slices" + "sync" + "time" + + "github.com/dustin/go-humanize" + "github.com/prometheus/client_golang/prometheus" + "github.com/sirupsen/logrus" + "github.com/weaviate/sroar" + "github.com/weaviate/weaviate/entities/errors" + "github.com/weaviate/weaviate/usecases/monitoring" +) + +type BitmapBufPool interface { + Get(minCap int) (buf []byte, put func()) + CloneToBuf(bm *sroar.Bitmap) (cloned *sroar.Bitmap, put func()) +} + +func cloneToBuf(pool BitmapBufPool, bm *sroar.Bitmap) (cloned *sroar.Bitmap, put func()) { + buf, put := pool.Get(bm.LenInBytes()) + return bm.CloneToBuf(buf), put +} + +func NewBitmapBufPoolDefault(logger logrus.FieldLogger, metrics *monitoring.PrometheusMetrics, + inMemoMaxBufSize int, maxMemoSizeForBufs int, +) (pool BitmapBufPool, close func()) { + syncMinRangeP2 := 9 // 2^9 = 512B + syncMaxRangeP2 := 20 // 2^20 = 1MB + syncRanges := calculateSyncBufferRanges(syncMinRangeP2, syncMaxRangeP2) + syncMaxBufSize := syncRanges[len(syncRanges)-1] + + 
inMemoMinRangeP2 := syncMaxRangeP2 + 1 + inMemoRanges, inMemoBufsLimits := calculateInMemoBufferRangesAndLimits(syncMaxBufSize, inMemoMinRangeP2, + inMemoMaxBufSize, maxMemoSizeForBufs) + + allRanges := syncRanges + if len(inMemoRanges) > 0 { + allRanges = append(allRanges, inMemoRanges...) + } + + stopCleanup := func() {} + p := NewBitmapBufPoolRanged(metrics, syncMaxBufSize, inMemoBufsLimits, allRanges...) + if ln := len(inMemoRanges); ln > 0 { + limitMaxRange := inMemoBufsLimits[inMemoRanges[ln-1]] + nBuffers := (limitMaxRange + 1) / 2 + cleanupInterval := 1 * time.Minute + // try to clean half of buffers every minute + stopCleanup = p.StartPeriodicCleanup(logger, nBuffers, cleanupInterval) + } + + return p, stopCleanup +} + +// ----------------------------------------------------------------------------- + +type bitmapBufPoolNoop struct{} + +func NewBitmapBufPoolNoop() *bitmapBufPoolNoop { + return &bitmapBufPoolNoop{} +} + +func (p *bitmapBufPoolNoop) Get(minCap int) (buf []byte, put func()) { + return make([]byte, 0, minCap), func() {} +} + +func (p *bitmapBufPoolNoop) CloneToBuf(bm *sroar.Bitmap) (cloned *sroar.Bitmap, put func()) { + return cloneToBuf(p, bm) +} + +// ----------------------------------------------------------------------------- + +type bitmapBufPoolRanged struct { + ranges []int + firstInMemoRngIdx int + poolsSync []*BufPoolFixedSync + poolsInMemo []*BufPoolFixedInMemory + disposableMetrics bufDisposableMetrics +} + +// Creates multiple pools, one for specified range of sizes (given in bytes). +// E.g. for ranges 1024, 2048 and 4096, 3 internal buffer pools will be +// created to handle ranges of size: [1-1024], [1025-2048], [2049-4096]. +// Buffers of sizes bigger than highest range will be created but not kept in pool +// (to be removed by GC when no longer needed) +// Ranges <=0 or duplicated will be ignored. 
+func NewBitmapBufPoolRanged(metrics *monitoring.PrometheusMetrics, + syncMaxBufSize int, inMemoBufsLimits map[int]int, ranges ...int, +) *bitmapBufPoolRanged { + ranges = validateBufferRanges(ranges) + poolsSync := []*BufPoolFixedSync{} + poolsInMemo := []*BufPoolFixedInMemory{} + + var inMemoMetrics bufPoolInMemoMetrics + var disposableMetrics bufDisposableMetrics + if metrics == nil { + inMemoMetrics = &bufPoolNoopMetrics{} + disposableMetrics = &bufDisposableNoopMetrics{} + } else { + disposableMetrics = newPromBufDisposableMetrics(metrics) + } + + i := 0 + for ; i < len(ranges) && ranges[i] <= syncMaxBufSize; i++ { + poolsSync = append(poolsSync, NewBufPoolFixedSync(ranges[i])) + } + firstInMemoRngIdx := i + for ; i < len(ranges); i++ { + limit := 1 + if lmt, ok := inMemoBufsLimits[ranges[i]]; ok { + limit = lmt + } + if metrics != nil { + inMemoMetrics = newPromBufPoolInMemoMetrics(metrics, ranges[i]) + } + poolsInMemo = append(poolsInMemo, NewBufPoolFixedInMemory(inMemoMetrics, ranges[i], limit)) + } + + return &bitmapBufPoolRanged{ + ranges: ranges, + firstInMemoRngIdx: firstInMemoRngIdx, + poolsSync: poolsSync, + poolsInMemo: poolsInMemo, + disposableMetrics: disposableMetrics, + } +} + +func (p *bitmapBufPoolRanged) Get(minCap int) (buf []byte, put func()) { + for i := 0; i < p.firstInMemoRngIdx; i++ { + if minCap <= p.ranges[i] { + return p.poolsSync[i].Get() + } + } + for i := p.firstInMemoRngIdx; i < len(p.ranges); i++ { + if minCap <= p.ranges[i] { + return p.poolsInMemo[i-p.firstInMemoRngIdx].Get() + } + } + p.disposableMetrics.bufCreated(minCap) + return make([]byte, 0, minCap), func() {} +} + +func (p *bitmapBufPoolRanged) CloneToBuf(bm *sroar.Bitmap) (cloned *sroar.Bitmap, put func()) { + return cloneToBuf(p, bm) +} + +func (p *bitmapBufPoolRanged) cleanup(n int) map[int]int { + cleaned := map[int]int{} + for i := p.firstInMemoRngIdx; i < len(p.ranges); i++ { + cleaned[p.ranges[i]] = p.poolsInMemo[i-p.firstInMemoRngIdx].Cleanup(n) + } + return 
cleaned +} + +func (p *bitmapBufPoolRanged) StartPeriodicCleanup(logger logrus.FieldLogger, n int, interval time.Duration) (stop func()) { + ctx, cancel := context.WithCancel(context.Background()) + errors.GoWrapper(func() { + ticker := time.NewTicker(interval) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + p.cleanup(n) + } + } + }, logger) + return cancel +} + +// ----------------------------------------------------------------------------- + +type bitmapBufPoolFactorWrapper struct { + pool BitmapBufPool + factor float64 +} + +func NewBitmapBufPoolFactorWrapper(pool BitmapBufPool, factor float64) *bitmapBufPoolFactorWrapper { + factor = max(factor, 1.0) + return &bitmapBufPoolFactorWrapper{pool: pool, factor: factor} +} + +func (p *bitmapBufPoolFactorWrapper) Get(minCap int) (buf []byte, put func()) { + newMinCap := int(math.Ceil(float64(minCap) * p.factor)) + return p.pool.Get(newMinCap) +} + +func (p *bitmapBufPoolFactorWrapper) CloneToBuf(bm *sroar.Bitmap) (cloned *sroar.Bitmap, put func()) { + return cloneToBuf(p, bm) +} + +// ----------------------------------------------------------------------------- + +type BufPoolFixedSync struct { + pool *sync.Pool +} + +func NewBufPoolFixedSync(cap int) *BufPoolFixedSync { + return &BufPoolFixedSync{ + pool: &sync.Pool{ + New: func() any { + buf := make([]byte, 0, cap) + return &buf + }, + }, + } +} + +func (p *BufPoolFixedSync) Get() (buf []byte, put func()) { + ptr := p.pool.Get().(*[]byte) + return *ptr, func() { p.pool.Put(ptr) } +} + +// ----------------------------------------------------------------------------- + +type BufPoolFixedInMemory struct { + cap int + limit int + bufsCh chan *[]byte + metrics bufPoolInMemoMetrics +} + +func NewBufPoolFixedInMemory(metrics bufPoolInMemoMetrics, cap int, limit int) *BufPoolFixedInMemory { + return &BufPoolFixedInMemory{ + cap: cap, + limit: limit, + bufsCh: make(chan *[]byte, limit), + metrics: metrics, + } +} + +func (p 
*BufPoolFixedInMemory) Get() (buf []byte, put func()) { + var ptr *[]byte + select { + case ptr = <-p.bufsCh: + buf = *ptr + p.metrics.bufGot() + default: + buf = make([]byte, 0, p.cap) + ptr = &buf + p.metrics.bufCreated() + } + return buf, func() { p.put(ptr) } +} + +func (p *BufPoolFixedInMemory) put(ptr *[]byte) bool { + select { + case p.bufsCh <- ptr: + p.metrics.bufPut() + // successfully returned + return true + default: + p.metrics.bufDiscarded() + // chan full, discard buffer + return false + } +} + +// Cleanup removes available buffers from the pool and returns number of buffers removed. +// Buffers are removed up to configured limit to prevent indefinite removal in case +// new ones are created in parallel. +func (p *BufPoolFixedInMemory) Cleanup(n int) int { + i := 0 +outer: + for n = min(n, p.limit); i < n; i++ { + select { + case <-p.bufsCh: + p.metrics.bufCleanedUp() + // discard taken buffer + default: + break outer + } + } + return i +} + +// ----------------------------------------------------------------------------- + +func validateBufferRanges(ranges []int) []int { + if ln := len(ranges); ln > 0 { + // cleanup ranges, keep unique and > 0 + unique_gt0 := map[int]struct{}{} + for i := 0; i < ln; i++ { + if rng := ranges[i]; rng > 0 { + unique_gt0[rng] = struct{}{} + } + } + i := 0 + for rng := range unique_gt0 { + ranges[i] = rng + i++ + } + ranges = ranges[:i] + slices.Sort(ranges) + } + return ranges +} + +func calculateSyncBufferRanges(minRangeP2, maxRangeP2 int) []int { + if minRangeP2 < 0 || maxRangeP2 < 0 || minRangeP2 > maxRangeP2 { + return []int{} + } + + rangesLn := maxRangeP2 - minRangeP2 + 1 + ranges := make([]int, rangesLn) + for i := range ranges { + ranges[i] = 1 << (i + minRangeP2) + } + return ranges +} + +func calculateInMemoBufferRangesAndLimits(maxSyncBufSize, minRangeP2, maxBufSize, maxMemoSize int, +) ([]int, map[int]int) { + if maxBufSize > maxSyncBufSize { + maxRangeP2 := 63 - bits.LeadingZeros64(uint64(maxBufSize)) + 
+ rangesLn := maxRangeP2 - minRangeP2 + 1 + ranges := make([]int, rangesLn, rangesLn+1) + for i := 0; i < rangesLn; i++ { + ranges[i] = 1 << (i + minRangeP2) + } + if maxBufSize != 1< maxMemoSize { + sums = sums[:i] + ranges = ranges[:i] + break + } + } + + rangesLn = len(ranges) + bufsLimits := make(map[int]int, len(ranges)) // range -> limit + for i := rangesLn - 1; i >= 0; i-- { + bufsLimits[ranges[i]] = maxMemoSize / sums[i] + maxMemoSize -= sums[i] * bufsLimits[ranges[i]] + + if i != rangesLn-1 { + bufsLimits[ranges[i]] += bufsLimits[ranges[i+1]] + } + } + + return ranges, bufsLimits + } + return []int{}, map[int]int{} +} + +// ----------------------------------------------------------------------------- + +type bufPoolInMemoMetrics interface { + bufCreated() + bufGot() + bufPut() + bufDiscarded() + bufCleanedUp() +} + +type promBufPoolInMemoMetrics struct { + usageCounter *prometheus.CounterVec + size string +} + +func newPromBufPoolInMemoMetrics(metrics *monitoring.PrometheusMetrics, sizeInBytes int) *promBufPoolInMemoMetrics { + return &promBufPoolInMemoMetrics{ + usageCounter: metrics.LSMBitmapBuffersUsage, + size: humanize.IBytes(uint64(sizeInBytes)), + } +} + +func (m *promBufPoolInMemoMetrics) bufCreated() { + m.usageCounter.WithLabelValues(m.size, "inmemo_created").Inc() +} + +func (m *promBufPoolInMemoMetrics) bufGot() { + m.usageCounter.WithLabelValues(m.size, "inmemo_got").Inc() +} + +func (m *promBufPoolInMemoMetrics) bufPut() { + m.usageCounter.WithLabelValues(m.size, "inmemo_put").Inc() +} + +func (m *promBufPoolInMemoMetrics) bufDiscarded() { + m.usageCounter.WithLabelValues(m.size, "inmemo_discarded").Inc() +} + +func (m *promBufPoolInMemoMetrics) bufCleanedUp() { + m.usageCounter.WithLabelValues(m.size, "inmemo_cleanedUp").Inc() +} + +type bufPoolNoopMetrics struct{} + +func (m *bufPoolNoopMetrics) bufCreated() {} +func (m *bufPoolNoopMetrics) bufGot() {} +func (m *bufPoolNoopMetrics) bufPut() {} +func (m *bufPoolNoopMetrics) bufDiscarded() {} 
+func (m *bufPoolNoopMetrics) bufCleanedUp() {} + +// ----------------------------------------------------------------------------- + +type bufDisposableMetrics interface { + bufCreated(sizeInBytes int) +} + +type promBufDisposableMetrics struct { + usageCounter *prometheus.CounterVec +} + +func newPromBufDisposableMetrics(metrics *monitoring.PrometheusMetrics) *promBufDisposableMetrics { + return &promBufDisposableMetrics{ + usageCounter: metrics.LSMBitmapBuffersUsage, + } +} + +func (m *promBufDisposableMetrics) bufCreated(sizeInBytes int) { + s := uint64(sizeInBytes) + ceil := uint64(1 << bits.Len64(s)) + if s^ceil != 0 { + ceil *= 2 + } + size := humanize.IBytes(ceil) + m.usageCounter.WithLabelValues(size, "disposable_created").Inc() +} + +type bufDisposableNoopMetrics struct{} + +func (m *bufDisposableNoopMetrics) bufCreated(sizeInBytes int) {} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/buf_pool_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/buf_pool_test.go new file mode 100644 index 0000000000000000000000000000000000000000..f425086ac85c5b6ba8176f0f5cf4062673be836d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/buf_pool_test.go @@ -0,0 +1,832 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package roaringset + +import ( + "encoding/binary" + "fmt" + "testing" + "time" + + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/usecases/monitoring" +) + +func TestBufPoolFixedSync(t *testing.T) { + t.Run("pool returns buffers of given cap", func(t *testing.T) { + pool123 := NewBufPoolFixedSync(123) + pool234 := NewBufPoolFixedSync(234) + pool345 := NewBufPoolFixedSync(345) + + t.Run("buf1", func(t *testing.T) { + buf1_123, put := pool123.Get() + defer put() + buf1_234, put := pool234.Get() + defer put() + buf1_345, put := pool345.Get() + defer put() + + assert.Equal(t, 0, len(buf1_123)) + assert.Equal(t, 123, cap(buf1_123)) + assert.Equal(t, 0, len(buf1_234)) + assert.Equal(t, 234, cap(buf1_234)) + assert.Equal(t, 0, len(buf1_345)) + assert.Equal(t, 345, cap(buf1_345)) + }) + + t.Run("buf2", func(t *testing.T) { + buf2_123, put := pool123.Get() + defer put() + buf2_234, put := pool234.Get() + defer put() + buf2_345, put := pool345.Get() + defer put() + + assert.Equal(t, 0, len(buf2_123)) + assert.Equal(t, 123, cap(buf2_123)) + assert.Equal(t, 0, len(buf2_234)) + assert.Equal(t, 234, cap(buf2_234)) + assert.Equal(t, 0, len(buf2_345)) + assert.Equal(t, 345, cap(buf2_345)) + }) + }) +} + +func TestBufPoolFixedInMemory(t *testing.T) { + metrics := &bufPoolNoopMetrics{} + + t.Run("pool returns buffers of given cap", func(t *testing.T) { + pool123 := NewBufPoolFixedInMemory(metrics, 123, 2) + pool234 := NewBufPoolFixedInMemory(metrics, 234, 2) + pool345 := NewBufPoolFixedInMemory(metrics, 345, 2) + + t.Run("buf1", func(t *testing.T) { + buf1_123, put := pool123.Get() + defer put() + buf1_234, put := pool234.Get() + defer put() + buf1_345, put := pool345.Get() + defer put() + + assert.Equal(t, 0, len(buf1_123)) + assert.Equal(t, 123, cap(buf1_123)) + assert.Equal(t, 0, len(buf1_234)) + assert.Equal(t, 234, cap(buf1_234)) + 
assert.Equal(t, 0, len(buf1_345)) + assert.Equal(t, 345, cap(buf1_345)) + }) + + t.Run("buf2", func(t *testing.T) { + buf2_123, put := pool123.Get() + defer put() + buf2_234, put := pool234.Get() + defer put() + buf2_345, put := pool345.Get() + defer put() + + assert.Equal(t, 0, len(buf2_123)) + assert.Equal(t, 123, cap(buf2_123)) + assert.Equal(t, 0, len(buf2_234)) + assert.Equal(t, 234, cap(buf2_234)) + assert.Equal(t, 0, len(buf2_345)) + assert.Equal(t, 345, cap(buf2_345)) + }) + }) + + t.Run("pool reuses buffers up to given limit", func(t *testing.T) { + val1 := uint16(1001) + val2 := uint16(2002) + val3 := uint16(3003) + val4 := uint16(4004) + val5 := uint16(5005) + + // pool has 3 buffers. first 3 buffers got from the pool are reused + // (once written values stay in the buffers). + // following buffers are created as temporary ones and are not put back to the pool + limit := 3 + pool := NewBufPoolFixedInMemory(metrics, 2, limit) + + t.Run("get buffers and write unique values", func(t *testing.T) { + buf1_1, put1 := pool.Get() + binary.BigEndian.PutUint16(buf1_1[:2], val1) + + buf2_1, put2 := pool.Get() + binary.BigEndian.PutUint16(buf2_1[:2], val2) + + buf3_1, put3 := pool.Get() + binary.BigEndian.PutUint16(buf3_1[:2], val3) + + buf4_1, put4 := pool.Get() + binary.BigEndian.PutUint16(buf4_1[:2], val4) + + buf5_1, put5 := pool.Get() + binary.BigEndian.PutUint16(buf5_1[:2], val5) + + // put in order + put1() + put2() + put3() + put4() // should be discarded + put5() // should be discarded + }) + + t.Run("get buffers - only 3 (limit) have values", func(t *testing.T) { + buf1_2, put1 := pool.Get() + val1_2 := binary.BigEndian.Uint16(buf1_2[:2]) + + buf2_2, put2 := pool.Get() + val2_2 := binary.BigEndian.Uint16(buf2_2[:2]) + + buf3_2, put3 := pool.Get() + val3_2 := binary.BigEndian.Uint16(buf3_2[:2]) + + buf4_2, put4 := pool.Get() + val4_2 := binary.BigEndian.Uint16(buf4_2[:2]) + + buf5_2, put5 := pool.Get() + val5_2 := binary.BigEndian.Uint16(buf5_2[:2]) + + 
assert.Equal(t, val1, val1_2) + assert.Equal(t, val2, val2_2) + assert.Equal(t, val3, val3_2) + assert.Equal(t, uint16(0), val4_2) + assert.Equal(t, uint16(0), val5_2) + + // write again to temp buffers + binary.BigEndian.PutUint16(buf4_2[:2], val4) + binary.BigEndian.PutUint16(buf5_2[:2], val5) + + // put in reverse order + put5() + put4() + put3() + put2() // should be discarded + put1() // should be discarded + }) + + t.Run("get buffers - only 3 (limit) have values (in reverse order)", func(t *testing.T) { + buf5_3, put := pool.Get() + val5_3 := binary.BigEndian.Uint16(buf5_3[:2]) + defer put() + + buf4_3, put := pool.Get() + val4_3 := binary.BigEndian.Uint16(buf4_3[:2]) + defer put() + + buf3_3, put := pool.Get() + val3_3 := binary.BigEndian.Uint16(buf3_3[:2]) + defer put() + + buf2_3, put := pool.Get() + val2_3 := binary.BigEndian.Uint16(buf2_3[:2]) + defer put() + + buf1_3, put := pool.Get() + val1_3 := binary.BigEndian.Uint16(buf1_3[:2]) + defer put() + + assert.Equal(t, uint16(0), val1_3) + assert.Equal(t, uint16(0), val2_3) + assert.Equal(t, val3, val3_3) + assert.Equal(t, val4, val4_3) + assert.Equal(t, val5, val5_3) + }) + }) + + t.Run("pool creates buffers lazily", func(t *testing.T) { + val1 := uint16(1001) + val2 := uint16(2002) + val3 := uint16(3003) + limit := 3 + + t.Run("1 buffer used at once, 1 buffer is created", func(t *testing.T) { + pool := NewBufPoolFixedInMemory(metrics, 2, limit) + + buf1_1, put1 := pool.Get() + binary.BigEndian.PutUint16(buf1_1[:2], val1) + put1() + + buf1_2, put := pool.Get() + val1_2 := binary.BigEndian.Uint16(buf1_2[:2]) + put() + + assert.Equal(t, val1, val1_2) + }) + + t.Run("2 buffers used at once, 2 buffers are created", func(t *testing.T) { + pool := NewBufPoolFixedInMemory(metrics, 2, limit) + + buf1_3, put1 := pool.Get() + binary.BigEndian.PutUint16(buf1_3[:2], val1) + buf2_3, put2 := pool.Get() + binary.BigEndian.PutUint16(buf2_3[:2], val2) + put1() + put2() + + buf1_4, put := pool.Get() + val1_4 := 
binary.BigEndian.Uint16(buf1_4[:2]) + put() + buf2_4, put := pool.Get() + val2_4 := binary.BigEndian.Uint16(buf2_4[:2]) + put() + + assert.Equal(t, val1, val1_4) + assert.Equal(t, val2, val2_4) + }) + + t.Run("3 buffers used at once, 3 buffers are created", func(t *testing.T) { + pool := NewBufPoolFixedInMemory(metrics, 2, limit) + + buf1_5, put1 := pool.Get() + binary.BigEndian.PutUint16(buf1_5[:2], val1) + buf2_5, put2 := pool.Get() + binary.BigEndian.PutUint16(buf2_5[:2], val2) + buf3_5, put3 := pool.Get() + binary.BigEndian.PutUint16(buf3_5[:2], val3) + put1() + put2() + put3() + + buf1_6, put := pool.Get() + val1_6 := binary.BigEndian.Uint16(buf1_6[:2]) + put() + buf2_6, put := pool.Get() + val2_6 := binary.BigEndian.Uint16(buf2_6[:2]) + put() + buf3_6, put := pool.Get() + val3_6 := binary.BigEndian.Uint16(buf3_6[:2]) + put() + + assert.Equal(t, val1, val1_6) + assert.Equal(t, val2, val2_6) + assert.Equal(t, val3, val3_6) + }) + }) + + t.Run("pool cleanup unused buffers", func(t *testing.T) { + val1 := uint16(1001) + val2 := uint16(2002) + val3 := uint16(3003) + limit := 3 + + t.Run("all buffers in use, nothing is cleaned up", func(t *testing.T) { + pool := NewBufPoolFixedInMemory(metrics, 2, limit) + + buf1_1, put1 := pool.Get() + binary.BigEndian.PutUint16(buf1_1[:2], val1) + buf2_1, put2 := pool.Get() + binary.BigEndian.PutUint16(buf2_1[:2], val2) + buf3_1, put3 := pool.Get() + binary.BigEndian.PutUint16(buf3_1[:2], val3) + + cleaned := pool.Cleanup(limit) + put1() + put2() + put3() + + buf1_2, put := pool.Get() + val1_2 := binary.BigEndian.Uint16(buf1_2[:2]) + put() + buf2_2, put := pool.Get() + val2_2 := binary.BigEndian.Uint16(buf2_2[:2]) + put() + buf3_2, put := pool.Get() + val3_2 := binary.BigEndian.Uint16(buf3_2[:2]) + put() + + assert.Equal(t, 0, cleaned) + assert.Equal(t, val1, val1_2) + assert.Equal(t, val2, val2_2) + assert.Equal(t, val3, val3_2) + }) + + t.Run("2 buffers in use, 1 is cleaned up", func(t *testing.T) { + pool := 
NewBufPoolFixedInMemory(metrics, 2, limit) + + buf1_1, put1 := pool.Get() + binary.BigEndian.PutUint16(buf1_1[:2], val1) + buf2_1, put2 := pool.Get() + binary.BigEndian.PutUint16(buf2_1[:2], val2) + buf3_1, put3 := pool.Get() + binary.BigEndian.PutUint16(buf3_1[:2], val3) + + put1() + cleaned := pool.Cleanup(limit) + put2() + put3() + + buf2_2, put := pool.Get() + val2_2 := binary.BigEndian.Uint16(buf2_2[:2]) + put() + buf3_2, put := pool.Get() + val3_2 := binary.BigEndian.Uint16(buf3_2[:2]) + put() + buf2_3, put := pool.Get() + val2_3 := binary.BigEndian.Uint16(buf2_3[:2]) + put() + buf3_3, put := pool.Get() + val3_3 := binary.BigEndian.Uint16(buf3_3[:2]) + put() + + assert.Equal(t, 1, cleaned) + assert.Equal(t, val2, val2_2) + assert.Equal(t, val3, val3_2) + assert.Equal(t, val2, val2_3) + assert.Equal(t, val3, val3_3) + }) + + t.Run("1 buffer in use, 2 are cleaned up", func(t *testing.T) { + pool := NewBufPoolFixedInMemory(metrics, 2, limit) + + buf1_1, put1 := pool.Get() + binary.BigEndian.PutUint16(buf1_1[:2], val1) + buf2_1, put2 := pool.Get() + binary.BigEndian.PutUint16(buf2_1[:2], val2) + buf3_1, put3 := pool.Get() + binary.BigEndian.PutUint16(buf3_1[:2], val3) + + put1() + put2() + cleaned := pool.Cleanup(limit) + put3() + + buf3_2, put := pool.Get() + val3_2 := binary.BigEndian.Uint16(buf3_2[:2]) + put() + buf3_3, put := pool.Get() + val3_3 := binary.BigEndian.Uint16(buf3_3[:2]) + put() + + assert.Equal(t, 2, cleaned) + assert.Equal(t, val3, val3_2) + assert.Equal(t, val3, val3_3) + }) + + t.Run("no buffers in use, all are cleaned up", func(t *testing.T) { + pool := NewBufPoolFixedInMemory(metrics, 2, limit) + + buf1_1, put1 := pool.Get() + binary.BigEndian.PutUint16(buf1_1[:2], val1) + buf2_1, put2 := pool.Get() + binary.BigEndian.PutUint16(buf2_1[:2], val2) + buf3_1, put3 := pool.Get() + binary.BigEndian.PutUint16(buf3_1[:2], val3) + + put1() + put2() + put3() + cleaned := pool.Cleanup(limit) + + buf0_2, put := pool.Get() + val0_2 := 
binary.BigEndian.Uint16(buf0_2[:2]) + put() + + assert.Equal(t, 3, cleaned) + assert.Equal(t, uint16(0), val0_2) + }) + }) +} + +func TestBitmapBufPoolRanged(t *testing.T) { + var metrics *monitoring.PrometheusMetrics = nil + + t.Run("pool returns buffers of next higher range", func(t *testing.T) { + ranges := []int{32, 64, 128, 256, 512, 1024} + + testCases := []struct { + cap int + expectedCap int + }{ + { + cap: 1, + expectedCap: 32, + }, + { + cap: 16, + expectedCap: 32, + }, + { + cap: 32, + expectedCap: 32, + }, + { + cap: 33, + expectedCap: 64, + }, + { + cap: 64, + expectedCap: 64, + }, + { + cap: 65, + expectedCap: 128, + }, + { + cap: 128, + expectedCap: 128, + }, + { + cap: 129, + expectedCap: 256, + }, + { + cap: 256, + expectedCap: 256, + }, + { + cap: 257, + expectedCap: 512, + }, + { + cap: 512, + expectedCap: 512, + }, + { + cap: 513, + expectedCap: 1024, + }, + { + cap: 1025, + expectedCap: 1025, + }, + { + cap: 2345, + expectedCap: 2345, + }, + } + + t.Run("sync pool", func(t *testing.T) { + syncMaxBufSize := 1024 + pool := NewBitmapBufPoolRanged(metrics, syncMaxBufSize, nil, ranges...) + + for i, tc := range testCases { + t.Run(fmt.Sprintf("test case #%d", i), func(t *testing.T) { + buf, put := pool.Get(tc.cap) + defer put() + + assert.Equal(t, 0, len(buf)) + assert.Equal(t, tc.expectedCap, cap(buf)) + }) + } + }) + + t.Run("sync + inmemo pools", func(t *testing.T) { + syncMaxBufSize := 256 + pool := NewBitmapBufPoolRanged(metrics, syncMaxBufSize, nil, ranges...) 
+ + for i, tc := range testCases { + t.Run(fmt.Sprintf("test case #%d", i), func(t *testing.T) { + buf, put := pool.Get(tc.cap) + defer put() + + assert.Equal(t, 0, len(buf)) + assert.Equal(t, tc.expectedCap, cap(buf)) + }) + } + }) + }) + + t.Run("inmemo buffers are cleaned up", func(t *testing.T) { + syncMaxBufSize := 128 + limits := map[int]int{256: 4, 512: 3, 1024: 2} + ranges := []int{32, 64, 128, 256, 512, 1024} + pool := NewBitmapBufPoolRanged(metrics, syncMaxBufSize, limits, ranges...) + + // get and write to 3 buffers of each inmemo size + buf256_1, put256_1 := pool.Get(254) + binary.BigEndian.PutUint16(buf256_1[:2], 10254) + buf256_2, put256_2 := pool.Get(255) + binary.BigEndian.PutUint16(buf256_2[:2], 10255) + buf256_3, put256_3 := pool.Get(255) + binary.BigEndian.PutUint16(buf256_3[:2], 10256) + buf512_1, put512_1 := pool.Get(512) + binary.BigEndian.PutUint16(buf512_1[:2], 10510) + buf512_2, put512_2 := pool.Get(512) + binary.BigEndian.PutUint16(buf512_2[:2], 10511) + buf512_3, put512_3 := pool.Get(512) + binary.BigEndian.PutUint16(buf512_3[:2], 10512) + buf1024_1, put1024_1 := pool.Get(1024) + binary.BigEndian.PutUint16(buf1024_1[:2], 11022) + buf1024_2, put1024_2 := pool.Get(1024) + binary.BigEndian.PutUint16(buf1024_2[:2], 11023) + buf1024_3tmp, put1024_3tmp := pool.Get(1024) + binary.BigEndian.PutUint16(buf1024_3tmp[:2], 11024) + put256_1() + put256_2() + put256_3() + put512_1() + put512_2() + put512_3() + put1024_1() + put1024_2() + put1024_3tmp() + + buf256_4, put256_4 := pool.Get(256) + val256_4 := binary.BigEndian.Uint16(buf256_4[:2]) + buf256_5, put256_5 := pool.Get(256) + val256_5 := binary.BigEndian.Uint16(buf256_5[:2]) + buf256_6, put256_6 := pool.Get(256) + val256_6 := binary.BigEndian.Uint16(buf256_6[:2]) + buf512_4, put512_4 := pool.Get(512) + val512_4 := binary.BigEndian.Uint16(buf512_4[:2]) + buf512_5, put512_5 := pool.Get(512) + val512_5 := binary.BigEndian.Uint16(buf512_5[:2]) + buf512_6, put512_6 := pool.Get(512) + val512_6 := 
binary.BigEndian.Uint16(buf512_6[:2]) + buf1024_4, put1024_4 := pool.Get(1024) + val1024_4 := binary.BigEndian.Uint16(buf1024_4[:2]) + buf1024_5, put1024_5 := pool.Get(1024) + val1024_5 := binary.BigEndian.Uint16(buf1024_5[:2]) + buf1024_6tmp, put1024_6tmp := pool.Get(1024) + val1024_6tmp := binary.BigEndian.Uint16(buf1024_6tmp[:2]) + put256_4() + put256_5() + put256_6() + put512_4() + put512_5() + put512_6() + put1024_4() + put1024_5() + put1024_6tmp() + + assert.Equal(t, uint16(10254), val256_4) + assert.Equal(t, uint16(10255), val256_5) + assert.Equal(t, uint16(10256), val256_6) + assert.Equal(t, uint16(10510), val512_4) + assert.Equal(t, uint16(10511), val512_5) + assert.Equal(t, uint16(10512), val512_6) + assert.Equal(t, uint16(11022), val1024_4) + assert.Equal(t, uint16(11023), val1024_5) + assert.Equal(t, uint16(0), val1024_6tmp) + + cleaned := pool.cleanup(3) + + // 3 of 4 256s, 3 of 3 512s, 2 of 2 1024s buffers should be cleaned + assert.Equal(t, map[int]int{256: 3, 512: 3, 1024: 2}, cleaned) + + buf256_7, put := pool.Get(256) + val256_7 := binary.BigEndian.Uint16(buf256_7[:2]) + put() + buf512_7, put := pool.Get(512) + val512_7 := binary.BigEndian.Uint16(buf512_7[:2]) + put() + buf1024_7, put := pool.Get(1024) + val1024_7 := binary.BigEndian.Uint16(buf1024_7[:2]) + put() + + assert.Equal(t, uint16(0), val256_7) + assert.Equal(t, uint16(0), val512_7) + assert.Equal(t, uint16(0), val1024_7) + }) + + t.Run("inmemo buffers are cleaned up periodically", func(t *testing.T) { + logger, _ := test.NewNullLogger() + + syncMaxBufSize := 128 + limits := map[int]int{256: 2, 512: 2, 1024: 2} + ranges := []int{32, 64, 128, 256, 512, 1024} + pool := NewBitmapBufPoolRanged(metrics, syncMaxBufSize, limits, ranges...) 
+ + buf256_1, put256_1 := pool.Get(256) + binary.BigEndian.PutUint16(buf256_1[:2], 10256) + buf512_1, put512_1 := pool.Get(512) + binary.BigEndian.PutUint16(buf512_1[:2], 10512) + buf1024_1, put1024_1 := pool.Get(1024) + binary.BigEndian.PutUint16(buf1024_1[:2], 11024) + put256_1() + put512_1() + put1024_1() + + buf256_2, put := pool.Get(256) + val256_2 := binary.BigEndian.Uint16(buf256_2[:2]) + put() + buf512_2, put := pool.Get(512) + val512_2 := binary.BigEndian.Uint16(buf512_2[:2]) + put() + buf1024_2, put := pool.Get(1024) + val1024_2 := binary.BigEndian.Uint16(buf1024_2[:2]) + put() + + assert.Equal(t, uint16(10256), val256_2) + assert.Equal(t, uint16(10512), val512_2) + assert.Equal(t, uint16(11024), val1024_2) + + stop := pool.StartPeriodicCleanup(logger, 2, 500*time.Microsecond) + defer stop() + + // wait for cleanup + time.Sleep(5 * time.Millisecond) + + buf256_3, put := pool.Get(256) + val256_3 := binary.BigEndian.Uint16(buf256_3[:2]) + put() + buf512_3, put := pool.Get(512) + val512_3 := binary.BigEndian.Uint16(buf512_3[:2]) + put() + buf1024_3, put := pool.Get(1024) + val1024_3 := binary.BigEndian.Uint16(buf1024_3[:2]) + put() + + assert.Equal(t, uint16(0), val256_3) + assert.Equal(t, uint16(0), val512_3) + assert.Equal(t, uint16(0), val1024_3) + }) +} + +func TestCalculateSyncBufferRanges(t *testing.T) { + testCases := []struct { + minRangeP2 int + maxRangeP2 int + expectedRanges []int + }{ + { + minRangeP2: 1, + maxRangeP2: 5, + expectedRanges: []int{2, 4, 8, 16, 32}, + }, + { + minRangeP2: 7, + maxRangeP2: 10, + expectedRanges: []int{128, 256, 512, 1024}, + }, + { + minRangeP2: 9, + maxRangeP2: 20, + expectedRanges: []int{512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576}, + }, + { + minRangeP2: 7, + maxRangeP2: 10, + expectedRanges: []int{128, 256, 512, 1024}, + }, + { + minRangeP2: 0, + maxRangeP2: 0, + expectedRanges: []int{1}, + }, + { + minRangeP2: 0, + maxRangeP2: 1, + expectedRanges: []int{1, 2}, + }, + { + 
minRangeP2: -1, + maxRangeP2: 0, + expectedRanges: []int{}, + }, + { + minRangeP2: 0, + maxRangeP2: -1, + expectedRanges: []int{}, + }, + { + minRangeP2: 9, + maxRangeP2: 7, + expectedRanges: []int{}, + }, + } + + for i, tc := range testCases { + t.Run(fmt.Sprintf("test case #%d", i), func(t *testing.T) { + ranges := calculateSyncBufferRanges(tc.minRangeP2, tc.maxRangeP2) + require.Equal(t, tc.expectedRanges, ranges) + }) + } +} + +func TestCalculateInMemoBufferRangesAndLimits(t *testing.T) { + MiB := 1 << 20 + GiB := 1 << 30 + + testCases := []struct { + maxSyncBufSize int + minRangeP2 int + maxBufSize int + maxMemoSize int + expectedRanges []int + expectedLimits map[int]int + }{ + { + maxSyncBufSize: 1024, + minRangeP2: 11, // 2^11 = 2048 + maxBufSize: 32768, + maxMemoSize: 32768, + expectedRanges: []int{2048, 4096, 8192, 16384}, + expectedLimits: map[int]int{2048: 2, 4096: 1, 8192: 1, 16384: 1}, + }, + { + maxSyncBufSize: 1024, + minRangeP2: 11, // 2^11 = 2048 + maxBufSize: 32768, + maxMemoSize: 16384, + expectedRanges: []int{2048, 4096, 8192}, + expectedLimits: map[int]int{2048: 2, 4096: 1, 8192: 1}, + }, + { + maxSyncBufSize: 1024, + minRangeP2: 11, // 2^11 = 2048 + maxBufSize: 32768, + maxMemoSize: 65536, + expectedRanges: []int{2048, 4096, 8192, 16384, 32768}, + expectedLimits: map[int]int{2048: 2, 4096: 1, 8192: 1, 16384: 1, 32768: 1}, + }, + { + maxSyncBufSize: 1024, + minRangeP2: 11, // 2^11 = 2048 + maxBufSize: 32768, + maxMemoSize: 262144, + expectedRanges: []int{2048, 4096, 8192, 16384, 32768}, + expectedLimits: map[int]int{2048: 6, 4096: 5, 8192: 4, 16384: 4, 32768: 4}, + }, + { + maxSyncBufSize: 1024, + minRangeP2: 11, // 2^11 = 2048 + maxBufSize: 40000, + maxMemoSize: 262144, + expectedRanges: []int{2048, 4096, 8192, 16384, 32768, 40000}, + expectedLimits: map[int]int{2048: 6, 4096: 5, 8192: 4, 16384: 3, 32768: 2, 40000: 2}, + }, + { + maxSyncBufSize: 1024, + minRangeP2: 11, // 2^11 = 2048 + maxBufSize: 65536, + maxMemoSize: 262144, + 
expectedRanges: []int{2048, 4096, 8192, 16384, 32768, 65536}, + expectedLimits: map[int]int{2048: 4, 4096: 2, 8192: 2, 16384: 2, 32768: 2, 65536: 2}, + }, + { + maxSyncBufSize: 1 * MiB, + minRangeP2: 21, // 2^21 = 2MiB + maxBufSize: 128 * MiB, + maxMemoSize: 2 * GiB, + expectedRanges: []int{2 * MiB, 4 * MiB, 8 * MiB, 16 * MiB, 32 * MiB, 64 * MiB, 128 * MiB}, + expectedLimits: map[int]int{2 * MiB: 10, 4 * MiB: 9, 8 * MiB: 9, 16 * MiB: 8, 32 * MiB: 8, 64 * MiB: 8, 128 * MiB: 8}, + }, + { + maxSyncBufSize: 1024, + minRangeP2: 11, // 2^11 = 2048 + maxBufSize: 1024, + maxMemoSize: 32768, + expectedRanges: []int{}, + expectedLimits: map[int]int{}, + }, + { + maxSyncBufSize: 1024, + minRangeP2: 11, // 2^11 = 2048 + maxBufSize: 2048, + maxMemoSize: 32768, + expectedRanges: []int{2048}, + expectedLimits: map[int]int{2048: 16}, + }, + } + + for i, tc := range testCases { + t.Run(fmt.Sprintf("test case #%d", i), func(t *testing.T) { + ranges, limits := calculateInMemoBufferRangesAndLimits(tc.maxSyncBufSize, tc.minRangeP2, tc.maxBufSize, tc.maxMemoSize) + require.Equal(t, tc.expectedRanges, ranges) + require.Equal(t, tc.expectedLimits, limits) + }) + } +} + +func TestValidateBufferRanges(t *testing.T) { + testCases := []struct { + ranges []int + expectedRanges []int + }{ + { + ranges: []int{1, 2, 3, 4, 5, 6, 7, 8, 9}, + expectedRanges: []int{1, 2, 3, 4, 5, 6, 7, 8, 9}, + }, + { + ranges: []int{1, 2, 3, 1, 2, 3, 1, 2, 3}, + expectedRanges: []int{1, 2, 3}, + }, + { + ranges: []int{-3, -2, -1, 0, 1, 2, 3}, + expectedRanges: []int{1, 2, 3}, + }, + { + ranges: []int{3, 2, 1, 0, -1, -2, -3}, + expectedRanges: []int{1, 2, 3}, + }, + } + + for i, tc := range testCases { + t.Run(fmt.Sprintf("test case #%d", i), func(t *testing.T) { + ranges := validateBufferRanges(tc.ranges) + require.Equal(t, tc.expectedRanges, ranges) + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/compactor.go 
b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/compactor.go new file mode 100644 index 0000000000000000000000000000000000000000..96bf99b60fadf5c0b46afa5b7c835008e5709bf7 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/compactor.go @@ -0,0 +1,376 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package roaringset + +import ( + "bytes" + "fmt" + "io" + + "github.com/weaviate/weaviate/usecases/memwatch" + + "github.com/weaviate/weaviate/adapters/repos/db/compactor" + + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/weaviate/sroar" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv/segmentindex" + "github.com/weaviate/weaviate/entities/diskio" + "github.com/weaviate/weaviate/usecases/monitoring" +) + +// Compactor takes in a left and a right segment and merges them into a single +// segment. The input segments are represented by cursors without their +// respective segmentindexes. A new segmentindex is built from the merged nodes +// without taking the old indexes into account at all. +// +// The left segment must precede the right one in its creation time, as the +// compactor applies latest-takes-presence rules when there is a conflict. +// +// # Merging independent key/value pairs +// +// The new segment's nodes will be in sorted fashion (this is a requirement for +// the segment index and segment cursors to function). To achieve a sorted end +// result, the Compactor goes over both input cursors simultaneously and always +// works on the smaller of the two keys. After a key/value pair has been added +// to the output only the input cursor that provided the pair is advanced. 
+// +// # Merging key/value pairs with identical keys +// +// When both segment have a key/value pair with an overlapping key, the value +// has to be merged. The merge logic is not part of the compactor itself. +// Instead it makes use of [BitmapLayers.Merge]. +// +// # Exit Criterium +// +// When both cursors no longer return values, all key/value pairs are +// considered compacted. The compactor then deals with metadata. +// +// # Index and Header metadata +// +// Only once the key/value pairs have been compacted, will the compactor write +// the primary index based on the new key/value payload. Finally, the input +// writer is rewinded to be able to write the header metadata at the beginning +// of the file. Because of this, the input writer must be an [io.WriteSeeker], +// such as [*os.File]. +// +// The level of the resulting segment is the input level increased by one. +// Levels help the "eligible for compaction" cycle to find suitable compaction +// pairs. +type Compactor struct { + left, right *SegmentCursor + currentLevel uint16 + // Tells if deletions or keys without corresponding values + // can be removed from merged segment. + // (left segment is root (1st) one, keepTombstones is off for bucket) + cleanupDeletions bool + + w io.WriteSeeker + bufw compactor.Writer + mw *compactor.MemoryWriter + + scratchSpacePath string + + enableChecksumValidation bool + + maxNewFileSize int64 + allocChecker memwatch.AllocChecker +} + +// NewCompactor from left (older) and right (newer) segment. See [Compactor] +// for an explanation of what goes on under the hood, and why the input +// requirements are the way they are. 
+// +// # Segment Layout +// +// The layout of the segment is +// - header +// - data +// - check-sum +// +// However, it is challenging to calculate the length of the data (which is +// part of the header) before writing the file: +// +// big files (overhead is not that relevant) +// - write empty header +// - write data +// - seek back to start +// - write real header +// - seek to original position (after data) +// - write checksum +// +// # Decision Logic +// +// For small files we use a custom buffered writer, that buffers everything +// and writes just once at the end. For larger files, we use the regular +// approach as outlined above using a standard go buffered writer. +// +// The threshold to consider a file small vs large is simply the size of the +// regular buffered writer. The idea is that we would allocate +// [SegmentWriterBufferSize] bytes in any case, so if we anticipate being able +// to write the entire file in less than [SegmentWriterBufferSize] bytes, there +// is no additional cost to using the fully-in-memory approach. +func NewCompactor(w io.WriteSeeker, + left, right *SegmentCursor, level uint16, + scratchSpacePath string, cleanupDeletions bool, + enableChecksumValidation bool, maxNewFileSize int64, allocChecker memwatch.AllocChecker, +) *Compactor { + observeWrite := monitoring.GetMetrics().FileIOWrites.With(prometheus.Labels{ + "operation": "compaction", + "strategy": "roaringset", + }) + writeCB := func(written int64) { + observeWrite.Observe(float64(written)) + } + meteredW := diskio.NewMeteredWriter(w, writeCB) + writer, mw := compactor.NewWriter(meteredW, maxNewFileSize) + + return &Compactor{ + left: left, + right: right, + w: meteredW, + bufw: writer, + mw: mw, + currentLevel: level, + cleanupDeletions: cleanupDeletions, + scratchSpacePath: scratchSpacePath, + enableChecksumValidation: enableChecksumValidation, + maxNewFileSize: maxNewFileSize, + allocChecker: allocChecker, + } +} + +// Do starts a compaction. 
See [Compactor] for an explanation of this process. +func (c *Compactor) Do() error { + if err := c.init(); err != nil { + return fmt.Errorf("init: %w", err) + } + + segmentFile := segmentindex.NewSegmentFile( + segmentindex.WithBufferedWriter(c.bufw), + segmentindex.WithChecksumsDisabled(!c.enableChecksumValidation), + ) + + kis, err := c.writeNodes(segmentFile) + if err != nil { + return fmt.Errorf("write keys: %w", err) + } + + if err := c.writeIndexes(segmentFile, kis); err != nil { + return fmt.Errorf("write index: %w", err) + } + + // flush buffered, so we can safely seek on underlying writer + if c.mw == nil { + if err := c.bufw.Flush(); err != nil { + return fmt.Errorf("flush buffered: %w", err) + } + } + + var dataEnd uint64 = segmentindex.HeaderSize + if len(kis) > 0 { + dataEnd = uint64(kis[len(kis)-1].ValueEnd) + } + + version := segmentindex.ChooseHeaderVersion(c.enableChecksumValidation) + if err := compactor.WriteHeader(c.mw, c.w, c.bufw, segmentFile, c.currentLevel, version, + 0, dataEnd, segmentindex.StrategyRoaringSet); err != nil { + return errors.Wrap(err, "write header") + } + + if _, err := segmentFile.WriteChecksum(); err != nil { + return fmt.Errorf("write compactorRoaringSet segment checksum: %w", err) + } + + return nil +} + +func (c *Compactor) init() error { + // write a dummy header, we don't know the contents of the actual header yet, + // we will seek to the beginning and overwrite the actual header at the very + // end + + if _, err := c.bufw.Write(make([]byte, segmentindex.HeaderSize)); err != nil { + return errors.Wrap(err, "write empty header") + } + + return nil +} + +// nodeCompactor is a helper type to improve the code structure of merging +// nodes in a compaction +type nodeCompactor struct { + left, right *SegmentCursor + keyLeft, keyRight []byte + valueLeft, valueRight BitmapLayer + output []segmentindex.Key + offset int + bufw io.Writer + + cleanupDeletions bool + emptyBitmap *sroar.Bitmap +} + +func (c *Compactor) 
writeNodes(f *segmentindex.SegmentFile) ([]segmentindex.Key, error) { + nc := &nodeCompactor{ + left: c.left, + right: c.right, + bufw: f.BodyWriter(), + cleanupDeletions: c.cleanupDeletions, + emptyBitmap: sroar.NewBitmap(), + } + + nc.init() + + if err := nc.loopThroughKeys(); err != nil { + return nil, err + } + + return nc.output, nil +} + +func (c *nodeCompactor) init() { + c.keyLeft, c.valueLeft, _ = c.left.First() + c.keyRight, c.valueRight, _ = c.right.First() + + // the (dummy) header was already written, this is our initial offset + c.offset = segmentindex.HeaderSize +} + +func (c *nodeCompactor) loopThroughKeys() error { + for { + if c.keyLeft == nil && c.keyRight == nil { + return nil + } + + if c.keysEqual() { + if err := c.mergeIdenticalKeys(); err != nil { + return err + } + } else if c.leftKeySmallerOrRightNotSet() { + if err := c.takeLeftKey(); err != nil { + return err + } + } else { + if err := c.takeRightKey(); err != nil { + return err + } + } + } +} + +func (c *nodeCompactor) keysEqual() bool { + return bytes.Equal(c.keyLeft, c.keyRight) +} + +func (c *nodeCompactor) leftKeySmallerOrRightNotSet() bool { + return (c.keyLeft != nil && bytes.Compare(c.keyLeft, c.keyRight) == -1) || c.keyRight == nil +} + +func (c *nodeCompactor) mergeIdenticalKeys() error { + layers := BitmapLayers{ + {Additions: c.valueLeft.Additions, Deletions: c.valueLeft.Deletions}, + {Additions: c.valueRight.Additions, Deletions: c.valueRight.Deletions}, + } + merged, err := layers.Merge() + if err != nil { + return fmt.Errorf("merge bitmap layers for identical keys: %w", err) + } + + if additions, deletions, skip := c.cleanupValues(merged.Additions, merged.Deletions); !skip { + sn, err := NewSegmentNode(c.keyRight, additions, deletions) + if err != nil { + return fmt.Errorf("new segment node for merged key: %w", err) + } + + ki, err := sn.KeyIndexAndWriteTo(c.bufw, c.offset) + if err != nil { + return fmt.Errorf("write individual node (merged key): %w", err) + } + + 
c.offset = ki.ValueEnd + c.output = append(c.output, ki) + } + + // advance both! + c.keyLeft, c.valueLeft, _ = c.left.Next() + c.keyRight, c.valueRight, _ = c.right.Next() + return nil +} + +func (c *nodeCompactor) takeLeftKey() error { + if additions, deletions, skip := c.cleanupValues(c.valueLeft.Additions, c.valueLeft.Deletions); !skip { + sn, err := NewSegmentNode(c.keyLeft, additions, deletions) + if err != nil { + return fmt.Errorf("new segment node for left key: %w", err) + } + + ki, err := sn.KeyIndexAndWriteTo(c.bufw, c.offset) + if err != nil { + return fmt.Errorf("write individual node (left key): %w", err) + } + + c.offset = ki.ValueEnd + c.output = append(c.output, ki) + } + + c.keyLeft, c.valueLeft, _ = c.left.Next() + return nil +} + +func (c *nodeCompactor) takeRightKey() error { + if additions, deletions, skip := c.cleanupValues(c.valueRight.Additions, c.valueRight.Deletions); !skip { + sn, err := NewSegmentNode(c.keyRight, additions, deletions) + if err != nil { + return fmt.Errorf("new segment node for right key: %w", err) + } + + ki, err := sn.KeyIndexAndWriteTo(c.bufw, c.offset) + if err != nil { + return fmt.Errorf("write individual node (right key): %w", err) + } + + c.offset = ki.ValueEnd + c.output = append(c.output, ki) + } + + c.keyRight, c.valueRight, _ = c.right.Next() + return nil +} + +func (c *nodeCompactor) cleanupValues(additions, deletions *sroar.Bitmap, +) (add, del *sroar.Bitmap, skip bool) { + if !c.cleanupDeletions { + return Condense(additions), Condense(deletions), false + } + if !additions.IsEmpty() { + return Condense(additions), c.emptyBitmap, false + } + return nil, nil, true +} + +func (c *Compactor) writeIndexes(f *segmentindex.SegmentFile, + keys []segmentindex.Key, +) error { + indexes := &segmentindex.Indexes{ + Keys: keys, + SecondaryIndexCount: 0, + ScratchSpacePath: c.scratchSpacePath, + ObserveWrite: monitoring.GetMetrics().FileIOWrites.With(prometheus.Labels{ + "strategy": "roaringset", + "operation": 
"writeIndices", + }), + AllocChecker: c.allocChecker, + } + _, err := f.WriteIndexes(indexes, c.maxNewFileSize) + return err +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/compactor_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/compactor_test.go new file mode 100644 index 0000000000000000000000000000000000000000..87642e6daf7e52288cfdcc9e5e593b2d44a10c7b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/compactor_test.go @@ -0,0 +1,631 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package roaringset + +import ( + "fmt" + "io" + "os" + "path/filepath" + "testing" + + "github.com/weaviate/weaviate/adapters/repos/db/compactor" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv/segmentindex" +) + +func Test_Compactor(t *testing.T) { + type test struct { + name string + left []byte + right []byte + expected []keyWithBML + expectedRoot []keyWithBML + } + + tests := []test{ + { + name: "independent segments without overlap", + left: createSegmentsFromKeys(t, []keyWithBML{ + { + key: []byte("aaa"), + additions: []uint64{0}, + deletions: []uint64{1}, + }, + { + key: []byte("ccc"), + additions: []uint64{4}, + deletions: []uint64{5}, + }, + }), + right: createSegmentsFromKeys(t, []keyWithBML{ + { + key: []byte("bbb"), + additions: []uint64{2}, + deletions: []uint64{3}, + }, + { + key: []byte("ddd"), + additions: []uint64{6}, + deletions: []uint64{7}, + }, + }), + expected: []keyWithBML{ + { + key: []byte("aaa"), + additions: []uint64{0}, + deletions: []uint64{1}, + }, + { + key: []byte("bbb"), + additions: []uint64{2}, + deletions: []uint64{3}, + }, + 
{ + key: []byte("ccc"), + additions: []uint64{4}, + deletions: []uint64{5}, + }, + { + key: []byte("ddd"), + additions: []uint64{6}, + deletions: []uint64{7}, + }, + }, + expectedRoot: []keyWithBML{ + { + key: []byte("aaa"), + additions: []uint64{0}, + }, + { + key: []byte("bbb"), + additions: []uint64{2}, + }, + { + key: []byte("ccc"), + additions: []uint64{4}, + }, + { + key: []byte("ddd"), + additions: []uint64{6}, + }, + }, + }, + { + name: "some segments overlap", + // note: there is no need to test every possible edge case for the + // overlapping segments in this place, as this logic is outsourced to + // BitmapLayer.Merge() which already has tests for edge cases + left: createSegmentsFromKeys(t, []keyWithBML{ + { + key: []byte("aaa"), + additions: []uint64{0}, + deletions: []uint64{1}, + }, + { + key: []byte("overlap"), + additions: []uint64{4, 5, 6}, + deletions: []uint64{1, 3, 7}, + }, + }), + right: createSegmentsFromKeys(t, []keyWithBML{ + { + key: []byte("overlap"), + additions: []uint64{3, 8}, + deletions: []uint64{5}, + }, + { + key: []byte("zzz"), + additions: []uint64{6}, + deletions: []uint64{7}, + }, + }), + expected: []keyWithBML{ + { + key: []byte("aaa"), + additions: []uint64{0}, + deletions: []uint64{1}, + }, + { + key: []byte("overlap"), + additions: []uint64{3, 4, 6, 8}, + deletions: []uint64{1, 5, 7}, + }, + { + key: []byte("zzz"), + additions: []uint64{6}, + deletions: []uint64{7}, + }, + }, + expectedRoot: []keyWithBML{ + { + key: []byte("aaa"), + additions: []uint64{0}, + }, + { + key: []byte("overlap"), + additions: []uint64{3, 4, 6, 8}, + }, + { + key: []byte("zzz"), + additions: []uint64{6}, + }, + }, + }, + { + name: "everything but one is deleted", + left: createSegmentsFromKeys(t, []keyWithBML{ + { + key: []byte("aaa"), + additions: []uint64{0}, + deletions: []uint64{}, + }, + { + key: []byte("bbb"), + additions: []uint64{4, 5, 6}, + deletions: []uint64{}, + }, + { + key: []byte("ddd"), + additions: []uint64{11, 12, 111}, + 
deletions: []uint64{}, + }, + }), + right: createSegmentsFromKeys(t, []keyWithBML{ + { + key: []byte("aaa"), + additions: []uint64{}, + deletions: []uint64{0}, + }, + { + key: []byte("bbb"), + additions: []uint64{}, + deletions: []uint64{4, 5, 6}, + }, + { + key: []byte("ccc"), + additions: []uint64{}, + deletions: []uint64{7, 8}, + }, + { + key: []byte("ddd"), + additions: []uint64{222}, + deletions: []uint64{11, 12, 13, 14}, + }, + }), + expected: []keyWithBML{ + { + key: []byte("aaa"), + additions: []uint64{}, + deletions: []uint64{0}, + }, + { + key: []byte("bbb"), + additions: []uint64{}, + deletions: []uint64{4, 5, 6}, + }, + { + key: []byte("ccc"), + additions: []uint64{}, + deletions: []uint64{7, 8}, + }, + { + key: []byte("ddd"), + additions: []uint64{111, 222}, + deletions: []uint64{11, 12, 13, 14}, + }, + }, + expectedRoot: []keyWithBML{ + { + key: []byte("ddd"), + additions: []uint64{111, 222}, + }, + }, + }, + + // the key loop is essentially a state machine. The next tests try to cover + // all possible states: + // + // 1. only the left key is set -> take left key + // 2. both left key and right key are set, but left is smaller -> take left + // key + // 3. only the right key is set -> take right key + // 4. both right and left keys are set, but right key is smaller -> take + // the right key + // 5. both keys are identical -> merge them + // + // Note: There is also an implicit 6th case: both keys are not set, this is + // the exit condition which is part of every test. 
+ { + name: "state 1 - only left key is set", + left: createSegmentsFromKeys(t, []keyWithBML{ + { + key: []byte("aaa"), + additions: []uint64{0}, + deletions: []uint64{1}, + }, + }), + right: createSegmentsFromKeys(t, []keyWithBML{}), + expected: []keyWithBML{ + { + key: []byte("aaa"), + additions: []uint64{0}, + deletions: []uint64{1}, + }, + }, + expectedRoot: []keyWithBML{ + { + key: []byte("aaa"), + additions: []uint64{0}, + }, + }, + }, + { + name: "state 2 - left+right, left is smaller", + left: createSegmentsFromKeys(t, []keyWithBML{ + { + key: []byte("aaa"), + additions: []uint64{0}, + deletions: []uint64{1}, + }, + }), + right: createSegmentsFromKeys(t, []keyWithBML{ + { + key: []byte("bbb"), + additions: []uint64{2}, + deletions: []uint64{3}, + }, + }), + expected: []keyWithBML{ + { + key: []byte("aaa"), + additions: []uint64{0}, + deletions: []uint64{1}, + }, + { + key: []byte("bbb"), + additions: []uint64{2}, + deletions: []uint64{3}, + }, + }, + expectedRoot: []keyWithBML{ + { + key: []byte("aaa"), + additions: []uint64{0}, + }, + { + key: []byte("bbb"), + additions: []uint64{2}, + }, + }, + }, + { + name: "state 3 - only the right key is set", + left: createSegmentsFromKeys(t, []keyWithBML{}), + right: createSegmentsFromKeys(t, []keyWithBML{ + { + key: []byte("bbb"), + additions: []uint64{2}, + deletions: []uint64{3}, + }, + }), + expected: []keyWithBML{ + { + key: []byte("bbb"), + additions: []uint64{2}, + deletions: []uint64{3}, + }, + }, + expectedRoot: []keyWithBML{ + { + key: []byte("bbb"), + additions: []uint64{2}, + }, + }, + }, + { + name: "state 4 - left+right, right is smaller", + left: createSegmentsFromKeys(t, []keyWithBML{ + { + key: []byte("ccc"), + additions: []uint64{0}, + deletions: []uint64{1}, + }, + }), + right: createSegmentsFromKeys(t, []keyWithBML{ + { + key: []byte("bbb"), + additions: []uint64{2}, + deletions: []uint64{3}, + }, + }), + expected: []keyWithBML{ + { + key: []byte("bbb"), + additions: []uint64{2}, + deletions: 
[]uint64{3}, + }, + { + key: []byte("ccc"), + additions: []uint64{0}, + deletions: []uint64{1}, + }, + }, + expectedRoot: []keyWithBML{ + { + key: []byte("bbb"), + additions: []uint64{2}, + }, + { + key: []byte("ccc"), + additions: []uint64{0}, + }, + }, + }, + { + name: "state 5 - left+right are identical", + left: createSegmentsFromKeys(t, []keyWithBML{ + { + key: []byte("aaa"), + additions: []uint64{0}, + deletions: []uint64{1}, + }, + }), + right: createSegmentsFromKeys(t, []keyWithBML{ + { + key: []byte("aaa"), + additions: []uint64{2}, + deletions: []uint64{3}, + }, + }), + expected: []keyWithBML{ + { + key: []byte("aaa"), + additions: []uint64{0, 2}, + deletions: []uint64{1, 3}, + }, + }, + expectedRoot: []keyWithBML{ + { + key: []byte("aaa"), + additions: []uint64{0, 2}, + }, + }, + }, + } + + for _, test := range tests { + leftCursor := NewSegmentCursor(test.left, nil) + rightCursor := NewSegmentCursor(test.right, nil) + for _, checkSum := range []bool{true, false} { + maxNewFileSize := int64(len(test.left)+len(test.right)) + segmentindex.HeaderSize + if checkSum { + maxNewFileSize += 8 // for checksum + } + t.Run("[keep]"+test.name+fmt.Sprintf("checksum: %v", checkSum), func(t *testing.T) { + segmentBytesInMem := cursorCompactor(t, leftCursor, rightCursor, maxNewFileSize, false, checkSum) + segmentBytesWriter := cursorCompactor(t, leftCursor, rightCursor, compactor.SegmentWriterBufferSize+1, false, checkSum) + + require.Equal(t, segmentBytesInMem, segmentBytesWriter) + + header, err := segmentindex.ParseHeader(segmentBytesInMem[:segmentindex.HeaderSize]) + require.NoError(t, err) + + cu := NewSegmentCursor(segmentBytesInMem[segmentindex.HeaderSize:header.IndexStart], nil) + + i := 0 + for k, v, _ := cu.First(); k != nil; k, v, _ = cu.Next() { + assert.Equal(t, test.expected[i].key, k) + assert.Equal(t, test.expected[i].additions, v.Additions.ToArray()) + assert.Equal(t, test.expected[i].deletions, v.Deletions.ToArray()) + i++ + } + + assert.Equal(t, 
len(test.expected), i, "all expected keys must have been hit") + }) + } + } + + for _, test := range tests { + for _, checkSum := range []bool{true, false} { + t.Run("[cleanup] "+test.name, func(t *testing.T) { + leftCursor := NewSegmentCursor(test.left, nil) + rightCursor := NewSegmentCursor(test.right, nil) + + maxNewFileSize := int64(len(test.left)+len(test.right)) + segmentindex.HeaderSize + if checkSum { + maxNewFileSize += 8 // for checksum + } + + segmentBytesInMem := cursorCompactor(t, leftCursor, rightCursor, maxNewFileSize, true, checkSum) + segmentBytesWriter := cursorCompactor(t, leftCursor, rightCursor, compactor.SegmentWriterBufferSize+1, true, checkSum) + require.Equal(t, segmentBytesInMem, segmentBytesWriter) + + header, err := segmentindex.ParseHeader(segmentBytesInMem[:segmentindex.HeaderSize]) + require.NoError(t, err) + + cu := NewSegmentCursor(segmentBytesInMem[segmentindex.HeaderSize:header.IndexStart], nil) + + i := 0 + for k, v, _ := cu.First(); k != nil; k, v, _ = cu.Next() { + assert.Equal(t, test.expectedRoot[i].key, k) + assert.Equal(t, test.expectedRoot[i].additions, v.Additions.ToArray()) + assert.Empty(t, v.Deletions.ToArray()) + i++ + } + + assert.Equal(t, len(test.expectedRoot), i, "all expected keys must have been hit") + }) + } + } +} + +func cursorCompactor(t *testing.T, leftCursor, rightCursor *SegmentCursor, maxNewFileSize int64, cleanup, checkSum bool) []byte { + t.Helper() + dir := t.TempDir() + + segmentFile := filepath.Join(dir, fmt.Sprintf("result-%v-%v-%v.db", cleanup, checkSum, maxNewFileSize)) + f, err := os.Create(segmentFile) + require.NoError(t, err) + + c := NewCompactor(f, leftCursor, rightCursor, 5, dir+"/scratch", cleanup, checkSum, maxNewFileSize, nil) + require.NoError(t, c.Do()) + + require.NoError(t, f.Close()) + + f, err = os.Open(segmentFile) + require.NoError(t, err) + + segmentBytes, err := io.ReadAll(f) + require.NoError(t, err) + require.NoError(t, f.Close()) + + return segmentBytes +} + +func 
TestCompactor_InMemoryWritesEfficency(t *testing.T) { + // The point of the in-memory write path is to prevent using up too many + // Write syscalls for tiny segments/tiny compactions. This test proves that + // this is actually the case. + compactionSetup := func(inMem bool) (int, int, []byte) { + leftCursor := NewSegmentCursor(createSegmentsFromKeys(t, []keyWithBML{ + { + additions: []uint64{0}, + }, + }), nil) + rightCursor := NewSegmentCursor(createSegmentsFromKeys(t, []keyWithBML{ + { + additions: []uint64{1}, + }, + }), nil) + + ws, err := NewCountingWriteSeeker() + require.Nil(t, err) + defer ws.Close() + + maxNewFileSize := int64(len(leftCursor.data)+len(rightCursor.data)) + segmentindex.HeaderSize + 156 // for checksum + // if the maxNewFileSize is already larger than our + // SegmentWriterBufferSize there is no point in this test, both paths + // would use the regular buffer. + require.Less(t, maxNewFileSize, int64(compactor.SegmentWriterBufferSize)) + + if !inMem { + // we can force the "regular" write path by setting the maxNewFileSize + // to a value larger than the threshold + maxNewFileSize = compactor.SegmentWriterBufferSize + 1 + } + + c := NewCompactor(ws, leftCursor, rightCursor, 0, t.TempDir(), true, true, maxNewFileSize, nil) + + err = c.Do() + require.Nil(t, err) + + b, err := ws.Bytes() + require.Nil(t, err) + + return ws.WriteCalls, ws.SeekCalls, b + } + + writeCallsRegular, seekCallsRegular, bytesRegular := compactionSetup(false) + writeCallsInMem, seekCallsInMem, bytesInMem := compactionSetup(true) + + avgWriteSizeRegular := float64(len(bytesRegular)) / float64(writeCallsRegular) + avgWriteSizeInMem := float64(len(bytesInMem)) / float64(writeCallsInMem) + + t.Logf("Regular write calls: %d (%d seek calls), avg write size: %fB", writeCallsRegular, seekCallsRegular, avgWriteSizeRegular) + t.Logf("In memory write calls: %d (%d seek calls), avg write size: %fB", writeCallsInMem, seekCallsInMem, avgWriteSizeInMem) + + assert.Equal(t, 1, 
writeCallsInMem) + assert.Less(t, writeCallsInMem, writeCallsRegular) + assert.Equal(t, bytesRegular, bytesInMem) +} + +type keyWithBML struct { + key []byte + additions []uint64 + deletions []uint64 +} + +func createSegmentsFromKeys(t *testing.T, keys []keyWithBML) []byte { + out := []byte{} + + for _, k := range keys { + add := NewBitmap(k.additions...) + del := NewBitmap(k.deletions...) + sn, err := NewSegmentNode(k.key, add, del) + require.Nil(t, err) + out = append(out, sn.ToBuffer()...) + } + + return out +} + +// CountingWriteSeeker wraps an *os.File and records how it is used. +// It is intended for single‑goroutine unit tests. Guard with a sync.Mutex +// if your code writes concurrently. +type CountingWriteSeeker struct { + f *os.File + + // Statistics + WriteCalls int // how many times Write was invoked + BytesWritten int // total bytes written + SeekCalls int // how many times Seek was invoked +} + +// NewCountingWriteSeeker creates a temporary on‑disk file, unlinks it +// immediately (so nothing is left behind), and returns the wrapper. +// The file is removed automatically when it is closed or the process exits. +func NewCountingWriteSeeker() (*CountingWriteSeeker, error) { + tmp, err := os.CreateTemp("", "counting-ws-*") + if err != nil { + return nil, err + } + // Remove from directory tree right away; the fd keeps it alive. + _ = os.Remove(tmp.Name()) + + return &CountingWriteSeeker{f: tmp}, nil +} + +// Write records the call and forwards to the underlying *os.File. +func (c *CountingWriteSeeker) Write(p []byte) (int, error) { + c.WriteCalls++ + n, err := c.f.Write(p) + c.BytesWritten += n + return n, err +} + +// Seek records the call and forwards to the underlying *os.File. +func (c *CountingWriteSeeker) Seek(offset int64, whence int) (int64, error) { + c.SeekCalls++ + return c.f.Seek(offset, whence) +} + +// Bytes returns the full contents written so far. +// It leaves the file offset unchanged. 
+func (c *CountingWriteSeeker) Bytes() ([]byte, error) { + // Save current position + pos, err := c.f.Seek(0, io.SeekCurrent) + if err != nil { + return nil, err + } + // Read everything + _, _ = c.f.Seek(0, io.SeekStart) + b, err := io.ReadAll(c.f) + // Restore previous position (ignore error) + _, _ = c.f.Seek(pos, io.SeekStart) + return b, err +} + +// Close closes the underlying file. +func (c *CountingWriteSeeker) Close() error { return c.f.Close() } diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/cursor.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/cursor.go new file mode 100644 index 0000000000000000000000000000000000000000..d48b96df381bfde061d7fd823978b218d2eb08a5 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/cursor.go @@ -0,0 +1,157 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package roaringset + +import ( + "bytes" + "errors" + "fmt" + + "github.com/weaviate/sroar" + "github.com/weaviate/weaviate/entities/lsmkv" +) + +type CombinedCursor struct { + cursors []InnerCursor + states []innerCursorState + keyOnly bool +} + +type InnerCursor interface { + First() ([]byte, BitmapLayer, error) + Next() ([]byte, BitmapLayer, error) + Seek(key []byte) ([]byte, BitmapLayer, error) +} + +type innerCursorState struct { + key []byte + layer BitmapLayer + err error +} + +// When keyOnly flag is set, only keys are returned by First/Next/Seek access methods, +// 2nd value returned is expected to be nil +// When keyOnly is not set, 2nd value is always bitmap. Returned bitmap can be empty (e.g. 
for Next call after last element was already returned) +func NewCombinedCursor(innerCursors []InnerCursor, keyOnly bool) *CombinedCursor { + return &CombinedCursor{cursors: innerCursors, keyOnly: keyOnly} +} + +func (c *CombinedCursor) First() ([]byte, *sroar.Bitmap) { + states := c.runAll(func(ic InnerCursor) ([]byte, BitmapLayer, error) { + return ic.First() + }) + return c.getResultFromStates(states) +} + +func (c *CombinedCursor) Next() ([]byte, *sroar.Bitmap) { + // fallback to First if no previous calls of First or Seek + if c.states == nil { + return c.First() + } + return c.getResultFromStates(c.states) +} + +func (c *CombinedCursor) Seek(key []byte) ([]byte, *sroar.Bitmap) { + states := c.runAll(func(ic InnerCursor) ([]byte, BitmapLayer, error) { + return ic.Seek(key) + }) + return c.getResultFromStates(states) +} + +type cursorRun func(ic InnerCursor) ([]byte, BitmapLayer, error) + +func (c *CombinedCursor) runAll(cursorRun cursorRun) []innerCursorState { + states := make([]innerCursorState, len(c.cursors)) + for id, ic := range c.cursors { + states[id] = c.createState(cursorRun(ic)) + } + return states +} + +func (c *CombinedCursor) createState(key []byte, layer BitmapLayer, err error) innerCursorState { + if errors.Is(err, lsmkv.NotFound) { + return innerCursorState{err: err} + } + if err != nil { + panic(fmt.Errorf("unexpected error: %w", err)) // TODO necessary? + } + state := innerCursorState{key: key} + state.layer = layer + + return state +} + +func (c *CombinedCursor) getResultFromStates(states []innerCursorState) ([]byte, *sroar.Bitmap) { + // NotFound is returned only by Seek call. 
+ // If all cursors returned NotFound, combined Seek has no result, therefore inner cursors' states + // should not be updated to allow combined cursor to proceed with following Next calls + + for { + key, ids, allNotFound := c.getCursorIdsWithLowestKey(states) + if !allNotFound { + c.states = states + } + layers := BitmapLayers{} + for _, id := range ids { + layers = append(layers, c.states[id].layer) + // forward cursors used in final result + c.states[id] = c.createState(c.cursors[id].Next()) + } + + if key == nil && c.keyOnly { + return nil, nil + } + + bm := layers.Flatten(true) + if key == nil { + return nil, bm + } + + if bm.IsEmpty() { + // all values deleted, skip key + continue + } + + // TODO remove keyOnly option, not used anyway + if !c.keyOnly { + return key, bm + } + return key, nil + } +} + +func (c *CombinedCursor) getCursorIdsWithLowestKey(states []innerCursorState) ([]byte, []int, bool) { + var lowestKey []byte + ids := []int{} + allNotFound := true + + for id, state := range states { + if errors.Is(state.err, lsmkv.NotFound) { + continue + } + allNotFound = false + if state.key == nil { + continue + } + if lowestKey == nil { + lowestKey = state.key + ids = []int{id} + } else if cmp := bytes.Compare(lowestKey, state.key); cmp > 0 { + lowestKey = state.key + ids = []int{id} + } else if cmp == 0 { + ids = append(ids, id) + } + } + + return lowestKey, ids, allNotFound +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/cursor_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/cursor_test.go new file mode 100644 index 0000000000000000000000000000000000000000..b8eee54eee57dea274fae0d71884b8125a74fcae --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/cursor_test.go @@ -0,0 +1,451 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// 
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package roaringset + +import ( + "encoding/binary" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/weaviate/sroar" +) + +func TestCombinedCursor(t *testing.T) { + bst1 := createBst(t, []bstIn{ + { + key: "aaa", + additions: []uint64{1}, + deletions: []uint64{}, + }, + { + key: "bbb", + additions: []uint64{22}, + deletions: []uint64{}, + }, + { + key: "ccc", + additions: []uint64{333}, + deletions: []uint64{}, + }, + { + key: "ddd", + additions: []uint64{4444}, + deletions: []uint64{}, + }, + }) + + bst2 := createBst(t, []bstIn{ + { + key: "aaa", + additions: []uint64{2, 3}, + deletions: []uint64{1}, + }, + { + key: "bbb", + additions: []uint64{33}, + deletions: []uint64{22}, + }, + { + key: "ggg", + additions: []uint64{7777777, 8888888}, + deletions: []uint64{}, + }, + }) + + bst3 := createBst(t, []bstIn{ + { + key: "bbb", + additions: []uint64{22}, + deletions: []uint64{}, + }, + { + key: "ccc", + additions: []uint64{}, + deletions: []uint64{333}, + }, + { + key: "eee", + additions: []uint64{55555, 66666}, + deletions: []uint64{}, + }, + { + key: "fff", + additions: []uint64{666666}, + deletions: []uint64{}, + }, + { + key: "hhh", + additions: []uint64{999999999}, + deletions: []uint64{111111111, 222222222, 333333333}, + }, + }) + + expected := []struct { + key string + values []uint64 + }{ + { // 0 + key: "aaa", + values: []uint64{2, 3}, + }, + { // 1 + key: "bbb", + values: []uint64{22, 33}, + }, + { // 2 + key: "ddd", + values: []uint64{4444}, + }, + { // 3 + key: "eee", + values: []uint64{55555, 66666}, + }, + { // 4 + key: "fff", + values: []uint64{666666}, + }, + { // 5 + key: "ggg", + values: []uint64{7777777, 8888888}, + }, + { // 6 + key: "hhh", + values: []uint64{999999999}, + }, + } + + t.Run("default cursor", func(t *testing.T) { + t.Run("start from beginning", func(t *testing.T) { + cursor := createCursor(t, bst1, bst2, bst3) + + 
key, bm := cursor.First() + + assert.Equal(t, []byte(expected[0].key), key) + assert.Equal(t, len(expected[0].values), bm.GetCardinality()) + for _, v := range expected[0].values { + assert.True(t, bm.Contains(v)) + } + }) + + t.Run("start from beginning and go through all", func(t *testing.T) { + cursor := createCursor(t, bst1, bst2, bst3) + + i := 0 // 1st match is "aaa" + for key, bm := cursor.First(); key != nil; key, bm = cursor.Next() { + assert.Equal(t, []byte(expected[i].key), key) + assert.Equal(t, len(expected[i].values), bm.GetCardinality()) + for _, v := range expected[i].values { + assert.True(t, bm.Contains(v)) + } + i++ + } + }) + + t.Run("start from beginning using Next and go through all", func(t *testing.T) { + cursor := createCursor(t, bst1, bst2, bst3) + + i := 0 // 1st match is "aaa" + for key, bm := cursor.Next(); key != nil; key, bm = cursor.Next() { + assert.Equal(t, []byte(expected[i].key), key) + assert.Equal(t, len(expected[i].values), bm.GetCardinality()) + for _, v := range expected[i].values { + assert.True(t, bm.Contains(v)) + } + i++ + } + }) + + t.Run("seek matching element and go through rest", func(t *testing.T) { + cursor := createCursor(t, bst1, bst2, bst3) + + i := 2 // 1st match is "ddd" + matching := []byte("ddd") + for key, bm := cursor.Seek(matching); key != nil; key, bm = cursor.Next() { + assert.Equal(t, []byte(expected[i].key), key) + assert.Equal(t, len(expected[i].values), bm.GetCardinality()) + for _, v := range expected[i].values { + assert.True(t, bm.Contains(v)) + } + i++ + } + }) + + t.Run("seek non-matching element and go through rest", func(t *testing.T) { + cursor := createCursor(t, bst1, bst2, bst3) + + i := 4 // 1st match is "fff" + nonMatching := []byte("efg") + for key, bm := cursor.Seek(nonMatching); key != nil; key, bm = cursor.Next() { + assert.Equal(t, []byte(expected[i].key), key) + assert.Equal(t, len(expected[i].values), bm.GetCardinality()) + for _, v := range expected[i].values { + assert.True(t, 
bm.Contains(v)) + } + i++ + } + }) + + t.Run("seek missing element", func(t *testing.T) { + cursor := createCursor(t, bst1, bst2, bst3) + + missing := []byte("lll") + key, bm := cursor.Seek(missing) + + assert.Nil(t, key) + assert.NotNil(t, bm) + assert.Equal(t, 0, bm.GetCardinality()) + }) + + t.Run("next after seek missing element does not change cursor's position", func(t *testing.T) { + cursor := createCursor(t, bst1, bst2, bst3) + + key1, _ := cursor.First() + + missing := []byte("lll") + cursor.Seek(missing) + + key2, _ := cursor.Next() + + assert.Equal(t, []byte("aaa"), key1) + assert.Equal(t, []byte("bbb"), key2) + }) + + t.Run("next after last is nil/empty", func(t *testing.T) { + cursor := createCursor(t, bst1, bst2, bst3) + + last := []byte("hhh") + cursor.Seek(last) + key, bm := cursor.Next() + + assert.Nil(t, key) + assert.NotNil(t, bm) + assert.Equal(t, 0, bm.GetCardinality()) + }) + + t.Run("first after final/empty next", func(t *testing.T) { + cursor := createCursor(t, bst1, bst2, bst3) + + last := []byte("hhh") + cursor.Seek(last) + cursor.Next() + key, bm := cursor.First() + + assert.Equal(t, []byte(expected[0].key), key) + assert.Equal(t, len(expected[0].values), bm.GetCardinality()) + for _, v := range expected[0].values { + assert.True(t, bm.Contains(v)) + } + }) + + t.Run("seek after final/empty next", func(t *testing.T) { + cursor := createCursor(t, bst1, bst2, bst3) + + last := []byte("hhh") + matching := []byte("eee") + cursor.Seek(last) + cursor.Next() + key, bm := cursor.Seek(matching) + + assert.Equal(t, []byte(expected[3].key), key) + assert.Equal(t, len(expected[3].values), bm.GetCardinality()) + for _, v := range expected[3].values { + assert.True(t, bm.Contains(v)) + } + }) + }) + + t.Run("cursor key only", func(t *testing.T) { + t.Run("start from beginning", func(t *testing.T) { + cursor := createCursorKeyOnly(t, bst1, bst2, bst3) + + key, bm := cursor.First() + + assert.Equal(t, []byte(expected[0].key), key) + assert.Nil(t, bm) + 
}) + + t.Run("start from beginning and go through all", func(t *testing.T) { + cursor := createCursorKeyOnly(t, bst1, bst2, bst3) + + i := 0 // 1st match is "aaa" + for key, bm := cursor.First(); key != nil; key, bm = cursor.Next() { + assert.Equal(t, []byte(expected[i].key), key) + assert.Nil(t, bm) + i++ + } + }) + + t.Run("start from beginning using Next and go through all", func(t *testing.T) { + cursor := createCursorKeyOnly(t, bst1, bst2, bst3) + + i := 0 // 1st match is "aaa" + for key, bm := cursor.Next(); key != nil; key, bm = cursor.Next() { + assert.Equal(t, []byte(expected[i].key), key) + assert.Nil(t, bm) + i++ + } + }) + + t.Run("seek matching element and go through rest", func(t *testing.T) { + cursor := createCursorKeyOnly(t, bst1, bst2, bst3) + + i := 2 // 1st match is "ddd" + matching := []byte("ddd") + for key, bm := cursor.Seek(matching); key != nil; key, bm = cursor.Next() { + assert.Equal(t, []byte(expected[i].key), key) + assert.Nil(t, bm) + i++ + } + }) + + t.Run("seek non-matching element and go through rest", func(t *testing.T) { + cursor := createCursorKeyOnly(t, bst1, bst2, bst3) + + i := 4 // 1st match is "fff" + nonMatching := []byte("efg") + for key, bm := cursor.Seek(nonMatching); key != nil; key, bm = cursor.Next() { + assert.Equal(t, []byte(expected[i].key), key) + assert.Nil(t, bm) + i++ + } + }) + + t.Run("seek missing element", func(t *testing.T) { + cursor := createCursorKeyOnly(t, bst1, bst2, bst3) + + missing := []byte("lll") + key, bm := cursor.Seek(missing) + + assert.Nil(t, key) + assert.Nil(t, bm) + }) + + t.Run("next after seek missing element does not change cursor's position", func(t *testing.T) { + cursor := createCursorKeyOnly(t, bst1, bst2, bst3) + + key1, _ := cursor.First() + + missing := []byte("lll") + cursor.Seek(missing) + + key2, _ := cursor.Next() + + assert.Equal(t, []byte("aaa"), key1) + assert.Equal(t, []byte("bbb"), key2) + }) + + t.Run("next after last is nil/empty", func(t *testing.T) { + cursor := 
createCursorKeyOnly(t, bst1, bst2, bst3) + + last := []byte("hhh") + cursor.Seek(last) + key, bm := cursor.Next() + + assert.Nil(t, key) + assert.Nil(t, bm) + }) + + t.Run("first after final/empty next", func(t *testing.T) { + cursor := createCursorKeyOnly(t, bst1, bst2, bst3) + + last := []byte("hhh") + cursor.Seek(last) + cursor.Next() + key, bm := cursor.First() + + assert.Equal(t, []byte(expected[0].key), key) + assert.Nil(t, bm) + }) + + t.Run("seek after final/empty next", func(t *testing.T) { + cursor := createCursorKeyOnly(t, bst1, bst2, bst3) + + last := []byte("hhh") + matching := []byte("eee") + cursor.Seek(last) + cursor.Next() + key, bm := cursor.Seek(matching) + + assert.Equal(t, []byte(expected[3].key), key) + assert.Nil(t, bm) + }) + }) +} + +type bstIn struct { + key string + additions []uint64 + deletions []uint64 +} + +func createBst(t *testing.T, in []bstIn) *BinarySearchTree { + bst := &BinarySearchTree{} + for i := range in { + bst.Insert([]byte(in[i].key), Insert{Additions: in[i].additions, Deletions: in[i].deletions}) + } + return bst +} + +func createCursor(t *testing.T, bsts ...*BinarySearchTree) *CombinedCursor { + innerCursors := []InnerCursor{} + for _, bst := range bsts { + innerCursors = append(innerCursors, NewBinarySearchTreeCursor(bst)) + } + return NewCombinedCursor(innerCursors, false) +} + +func createCursorKeyOnly(t *testing.T, bsts ...*BinarySearchTree) *CombinedCursor { + c := createCursor(t, bsts...) + c.keyOnly = true + return c +} + +// Previous implementation of cursor called recursively Next() when empty entry occurred, +// which could lead to stack overflow. This test prevents a regression. 
+func TestCombinedCursor_StackOverflow(t *testing.T) { + cursor := NewCombinedCursor([]InnerCursor{&emptyInnerCursor{}}, false) + + k, bm := cursor.First() + assert.Nil(t, k) + assert.True(t, bm.IsEmpty()) +} + +type emptyInnerCursor struct { + key uint64 +} + +func (c *emptyInnerCursor) First() ([]byte, BitmapLayer, error) { + c.key = 0 + return c.bytes(), c.layer(), nil +} + +func (c *emptyInnerCursor) Next() ([]byte, BitmapLayer, error) { + if c.key > 1<<22 { + return nil, BitmapLayer{}, nil + } + c.key++ + return c.bytes(), c.layer(), nil +} + +func (c *emptyInnerCursor) Seek(key []byte) ([]byte, BitmapLayer, error) { + return c.First() +} + +func (c *emptyInnerCursor) bytes() []byte { + b := make([]byte, 8) + binary.BigEndian.PutUint64(b, c.key) + return b +} + +func (c *emptyInnerCursor) layer() BitmapLayer { + return BitmapLayer{Additions: sroar.NewBitmap(), Deletions: sroar.NewBitmap()} +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/doc.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..73c3a70d67c60dd60cb96f8614b04aad043211b2 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/doc.go @@ -0,0 +1,85 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Package roaringset contains all the LSM business logic that is unique +// to the "RoaringSet" strategy +// +// This package alone does not contain an entire LSM store. It's intended to be +// used as part of the [github.com/weaviate/weaviate/adapters/repos/db/lsmkv] package. 
+// +// # Motivation +// +// What makes the RoaringSet strategy unique is that it's essentially a fully +// persistent Roaring Bitmap that can be built up and updated incrementally +// (without write amplification) while being extremely fast to query. +// +// Without this specific strategy, it would not be efficient to use roaring +// bitmaps in an LSM store. For example: +// +// - Lucene uses posting lists in the inverted index on disk and supports +// converting them to a Roaring Bitmap at query time. This resulting bitmap +// can then be cached. However, the cost to initially convert a posting list +// to a roaring bitmap is quite huge. In our own tests, inserting 90M out of +// 100M possible ids into a [github.com/weaviate/sroar.Bitmap] takes about +// 3.5s. +// +// - You could store a regular roaring bitmap, such as +// [github.com/weaviate/sroar.Bitmap] in a regular LSM store, such as +// RocksDB. This would fix the retrieval issue and you should be able to +// retrieve and initialize a bitmap containing 90M objects in a few +// milliseconds. However, the cost to incrementally update this bitmap would +// be extreme. You would have to use a read-modify-write pattern which would +// lead to huge write-amplification on large setups. A 90M roaring bitmap +// is about 10.5MB, so to add a single entry (which would take up anywhere +// from 1 bit to 2 bytes), you would have to read 10.5MB and write 10.5MB +// again. That's not feasible except for bulk-loading. In Weaviate we cannot +// always assume bulk loading, as user behavior and insert orders are +// generally unpredictable. +// +// We solve this issue by making the LSM store roaring-bitmap-native. This way, +// we can keep the benefits of an LSM store (very fast writes) with the +// benefits of a serialized roaring bitmap (very fast reads/initializations). +// +// Essentially this means the RoaringSet strategy behaves like a fully +// persistent (and durable) Roaring Bitmap. 
See the next section to learn how +// it works under the hood. +// +// # Internals +// +// The public-facing methods make use of [github.com/weaviate/sroar.Bitmap]. +// This serialized bitmap already fulfills many of the criteria needed in +// Weaviate. It can be initialized at almost no cost (sharing memory) or very +// little cost (copying some memory). Furthermore, its set, remove, and +// intersection methods work well for the inverted index use cases in Weaviate. +// +// So, the novel part in the lsmkv.RoaringSet strategy does not sit in the +// roaring bitmap itself, but rather in the way it's persisted. It uses the +// standard principles of an LSM store where each new write is first cached in +// a memtable (and of course written into a Write-Ahead-Log to make it +// durable). The memtable is flushed into a disk segment when specific criteria +// are met (memtable size, WAL size, idle time, time since last flush, etc.). +// +// This means that each layer (represented by [BitmapLayer]) only contains the +// deltas that were written in a specific time interval. When reading, all +// layers must be combined into a single bitmap (see [BitmapLayers.Flatten]). +// +// Over time segments can be combined into fewer, larger segments using an LSM +// Compaction process. The logic for that can be found in [BitmapLayers.Merge]. +// +// To make sure access is efficient the entire RoaringSet strategy is built to +// avoid encoding/decoding steps. Instead we internally store data as simple +// byte slices. For example, see [SegmentNode]. You can access bitmaps without +// any meaningful allocations using [SegmentNode.Additions] and +// [SegmentNode.Deletions]. If you plan to hold on to the bitmap for a time +// window that is longer than holding a lock that prevents a compaction, you +// need to copy data (e.g. using [SegmentNode.AdditionsWithCopy]). Even with +// such a copy, reading a 90M-ids bitmap takes only single-digit milliseconds. 
+package roaringset diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/helpers.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/helpers.go new file mode 100644 index 0000000000000000000000000000000000000000..4775ecad005b8f942a082e297da314abe275dac3 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/helpers.go @@ -0,0 +1,126 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package roaringset + +import ( + "sync" + + "github.com/weaviate/sroar" +) + +func NewBitmap(values ...uint64) *sroar.Bitmap { + bm := sroar.NewBitmap() + bm.SetMany(values) + return bm +} + +// Operations on bitmaps may result in oversized instances in relation to +// number of elements currently contained in bitmap +// Examples of such operations: +// - And-ing bitmaps may results in size being sum of both sizes +// (especially and-ing bitmap with itself) +// - Removing elements from bitmap results in size not being reduced +// (even if there is only few or no elements left) +// +// Method should be used before saving bitmap to file, to ensure +// minimal required size +// +// For most cases Or between empty bitmap and used bitmap +// works pretty well for reducing its final size, except for use case, +// where used bitmap uses internally bitmap - it will not be converted +// to underlying array, even if there are single elements left +func Condense(bm *sroar.Bitmap) *sroar.Bitmap { + condensed := sroar.NewBitmap() + condensed.Or(bm) + return condensed +} + +// defaultIdIncrement is the amount of bits greater than +// to reduce the amount of times BitmapFactory has to reallocate. 
+const defaultIdIncrement = uint64(1024) + +type MaxIdGetterFunc func() uint64 + +// BitmapFactory exists to provide prefilled bitmaps using pool (reducing allocation of memory) +// and favor cloning (faster) over prefilling bitmap from scratch each time bitmap is requested +type BitmapFactory struct { + bufPool BitmapBufPool + maxIdGetter MaxIdGetterFunc + lock *sync.RWMutex + prefilled *sroar.Bitmap + prefilledMaxId uint64 +} + +func NewBitmapFactory(bufPool BitmapBufPool, maxIdGetter MaxIdGetterFunc) *BitmapFactory { + prefilledMaxId := maxIdGetter() + defaultIdIncrement + + return &BitmapFactory{ + bufPool: bufPool, + maxIdGetter: maxIdGetter, + lock: new(sync.RWMutex), + prefilled: sroar.Prefill(prefilledMaxId), + prefilledMaxId: prefilledMaxId, + } +} + +// GetBitmap returns a prefilled bitmap, which is cloned from a shared internal. +// This method is safe to call concurrently. The purpose behind sharing an +// internal bitmap, is that a Clone() operation is cheaper than prefilling +// a bitmap up to +func (bmf *BitmapFactory) GetBitmap() (cloned *sroar.Bitmap, release func()) { + var maxId, prefilledMaxId uint64 + + cloned, release = func() (*sroar.Bitmap, func()) { + bmf.lock.RLock() + defer bmf.lock.RUnlock() + + maxId = bmf.maxIdGetter() + prefilledMaxId = bmf.prefilledMaxId + + // No need to expand, maxId is included + if maxId <= prefilledMaxId { + return bmf.bufPool.CloneToBuf(bmf.prefilled) + } + return nil, nil + }() + + if cloned == nil { + cloned, release = func() (*sroar.Bitmap, func()) { + bmf.lock.Lock() + defer bmf.lock.Unlock() + + maxId = bmf.maxIdGetter() + prefilledMaxId = bmf.prefilledMaxId + + // 2nd check to ensure bitmap wasn't expanded by + // concurrent request white waiting for write lock + if maxId <= prefilledMaxId { + return bmf.bufPool.CloneToBuf(bmf.prefilled) + } + + // expand bitmap with additional ids + prefilledMaxId = maxId + defaultIdIncrement + bmf.prefilled.FillUp(prefilledMaxId) + bmf.prefilledMaxId = prefilledMaxId + 
return bmf.bufPool.CloneToBuf(bmf.prefilled) + }() + } + cloned.RemoveRange(maxId+1, prefilledMaxId+1) + return +} + +func (bmf *BitmapFactory) Remove(ids *sroar.Bitmap) { + bmf.lock.Lock() + defer bmf.lock.Unlock() + + bmf.prefilled.AndNot(ids) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/helpers_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/helpers_test.go new file mode 100644 index 0000000000000000000000000000000000000000..de3a4eac59fda4596f5a0b51bb47d8ca1b6fe367 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/helpers_test.go @@ -0,0 +1,198 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package roaringset + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestBitmap_Condense(t *testing.T) { + t.Run("And with itself (internal array)", func(t *testing.T) { + bm := NewBitmap(slice(0, 1000)...) + for i := 0; i < 10; i++ { + bm.And(bm) + } + bmLen := len(bm.ToBuffer()) + + condensed := Condense(bm) + condensedLen := len(condensed.ToBuffer()) + + // As of sroar 0.0.5 "And" merge is optimized not to expand + // existing bitmap when not needed. Therefore calling Condense + // does not guarantee decreasing bitmap size + assert.GreaterOrEqual(t, bmLen, condensedLen) + assert.ElementsMatch(t, bm.ToArray(), condensed.ToArray()) + }) + + t.Run("And with itself (internal bitmap)", func(t *testing.T) { + bm := NewBitmap(slice(0, 5000)...) 
+ for i := 0; i < 10; i++ { + bm.And(bm) + } + bmLen := len(bm.ToBuffer()) + + condensed := Condense(bm) + condensedLen := len(condensed.ToBuffer()) + + // As of sroar 0.0.5 "And" merge is optimized not to expand + // existing bitmap when not needed. Therefore calling Condense + // does not guarantee decreasing bitmap size + assert.GreaterOrEqual(t, bmLen, condensedLen) + assert.ElementsMatch(t, bm.ToArray(), condensed.ToArray()) + }) + + t.Run("And (internal arrays)", func(t *testing.T) { + bm1 := NewBitmap(slice(0, 1000)...) + bm2 := NewBitmap(slice(500, 1500)...) + bm := bm1.Clone() + bm.And(bm2) + bmLen := len(bm.ToBuffer()) + + condensed := Condense(bm) + condensedLen := len(condensed.ToBuffer()) + + assert.Greater(t, bmLen, condensedLen) + assert.ElementsMatch(t, bm.ToArray(), condensed.ToArray()) + }) + + t.Run("And (internal bitmaps)", func(t *testing.T) { + bm1 := NewBitmap(slice(0, 5000)...) + bm2 := NewBitmap(slice(1000, 6000)...) + bm := bm1.Clone() + bm.And(bm2) + bmLen := len(bm.ToBuffer()) + + condensed := Condense(bm) + condensedLen := len(condensed.ToBuffer()) + + // As of sroar 0.0.5 "And" merge is optimized not to expand + // existing bitmap when not needed. Therefore calling Condense + // does not guarantee decreasing bitmap size + assert.GreaterOrEqual(t, bmLen, condensedLen) + assert.ElementsMatch(t, bm.ToArray(), condensed.ToArray()) + }) + + t.Run("And (internal bitmaps to bitmap with few elements)", func(t *testing.T) { + bm1 := NewBitmap(slice(0, 5000)...) + bm2 := NewBitmap(slice(4000, 9000)...) + bm := bm1.Clone() + bm.And(bm2) + bmLen := len(bm.ToBuffer()) + + condensed := Condense(bm) + condensedLen := len(condensed.ToBuffer()) + + // As of sroar 0.0.5 "And" merge is optimized not to expand + // existing bitmap when not needed. 
Therefore calling Condense + // does not guarantee decreasing bitmap size + assert.GreaterOrEqual(t, bmLen, condensedLen) + assert.ElementsMatch(t, bm.ToArray(), condensed.ToArray()) + }) + + t.Run("Remove (array)", func(t *testing.T) { + bm := NewBitmap(slice(0, 1000)...) + for i := uint64(2); i < 1000; i++ { + bm.Remove(i) + } + bmLen := len(bm.ToBuffer()) + + condensed := Condense(bm) + condensedLen := len(condensed.ToBuffer()) + + assert.Greater(t, bmLen, condensedLen) + assert.ElementsMatch(t, bm.ToArray(), condensed.ToArray()) + }) + + t.Run("Remove (bitmap)", func(t *testing.T) { + bm := NewBitmap(slice(0, 100_000)...) + for i := uint64(10_000); i < 100_000; i++ { + bm.Remove(i) + } + bmLen := len(bm.ToBuffer()) + + condensed := Condense(bm) + condensedLen := len(condensed.ToBuffer()) + + assert.Greater(t, bmLen, condensedLen) + assert.ElementsMatch(t, bm.ToArray(), condensed.ToArray()) + }) +} + +func slice(from, to uint64) []uint64 { + len := to - from + s := make([]uint64, len) + for i := uint64(0); i < len; i++ { + s[i] = from + i + } + return s +} + +func TestBitmapFactory(t *testing.T) { + maxId := uint64(10) + maxIdGetter := func() uint64 { return maxId } + bmf := NewBitmapFactory(NewBitmapBufPoolNoop(), maxIdGetter) + + t.Run("prefilled bitmap includes increment", func(t *testing.T) { + expPrefilledMaxId := maxId + defaultIdIncrement + expPrefilledCardinality := int(maxId + defaultIdIncrement + 1) + + bm, release := bmf.GetBitmap() + defer release() + + require.NotNil(t, bm) + assert.Equal(t, expPrefilledMaxId, bmf.prefilled.Maximum()) + assert.Equal(t, expPrefilledCardinality, bmf.prefilled.GetCardinality()) + assert.Equal(t, maxId, bm.Maximum()) + assert.Equal(t, int(maxId)+1, bm.GetCardinality()) + }) + + t.Run("maxId increased up to increment threshold does not change internal bitmap", func(t *testing.T) { + expPrefilledMaxId := bmf.prefilled.Maximum() + + maxId += 10 + bm1, release1 := bmf.GetBitmap() + defer release1() + + require.NotNil(t, 
bm1) + assert.Equal(t, expPrefilledMaxId, bmf.prefilled.Maximum()) + assert.Equal(t, int(expPrefilledMaxId)+1, bmf.prefilled.GetCardinality()) + assert.Equal(t, maxId, bm1.Maximum()) + assert.Equal(t, int(maxId)+1, bm1.GetCardinality()) + + maxId += (defaultIdIncrement - 10) + bm2, release2 := bmf.GetBitmap() + defer release2() + + require.NotNil(t, bm2) + assert.Equal(t, expPrefilledMaxId, bmf.prefilled.Maximum()) + assert.Equal(t, int(expPrefilledMaxId)+1, bmf.prefilled.GetCardinality()) + assert.Equal(t, maxId, bm2.Maximum()) + assert.Equal(t, int(maxId)+1, bm2.GetCardinality()) + }) + + t.Run("maxId surpasses increment threshold changes internal bitmap", func(t *testing.T) { + maxId += 1 + expPrefilledMaxId := maxId + defaultIdIncrement + + bm, release := bmf.GetBitmap() + defer release() + + require.NotNil(t, bm) + assert.Equal(t, expPrefilledMaxId, bmf.prefilled.Maximum()) + assert.Equal(t, int(expPrefilledMaxId)+1, bmf.prefilled.GetCardinality()) + assert.Equal(t, maxId, bm.Maximum()) + assert.Equal(t, int(maxId)+1, bm.GetCardinality()) + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/layers.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/layers.go new file mode 100644 index 0000000000000000000000000000000000000000..e2f7f8140137795fadc12291bfa1912fff0602f7 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/layers.go @@ -0,0 +1,130 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package roaringset + +import ( + "fmt" + + "github.com/weaviate/sroar" + "github.com/weaviate/weaviate/entities/concurrency" +) + +// A BitmapLayer contains all the bitmap related delta-information stored for a +// specific key in one layer. 
A layer typically corresponds to one disk segment +// or a memtable layer +// +// A layer is essentially a snapshot in time and to get an accurate few of the +// set in its entirety multiple layers need to be combined using +// [BitmapLayers]. +// +// The contents of Additions and Deletions must be mutually exclusive. A layer +// cannot both add and delete an element. The only way to create new layers is +// through inserting into a Memtable. The memtable must make sure that: +// +// - When an element is added, any previous deletion of this element is +// removed +// - When an element is deleted, any previous addition of this element is +// removed. +// +// As a result, an element is either a net addition or a net deletion in a +// layer, but it can never be both. +type BitmapLayer struct { + Additions *sroar.Bitmap + Deletions *sroar.Bitmap +} + +func (l *BitmapLayer) Clone() BitmapLayer { + clone := BitmapLayer{} + if l.Additions != nil { + clone.Additions = l.Additions.Clone() + } + if l.Deletions != nil { + clone.Deletions = l.Deletions.Clone() + } + return clone +} + +// BitmapLayers are a helper type to perform operations on multiple layers, +// such as [BitmapLayers.Flatten] or [BitmapLayers.Merge]. +type BitmapLayers []BitmapLayer + +// Flatten reduces all snapshots into a single Bitmap. This bitmap no longer +// contains separate additions and deletions, but a single set where all +// additions and deletions have been applied in the correct order. +// +// If you do not wish to flatten all of history, but rather combine two layers, +// such as would happen in a Compaction, use [BitmapLayers.Merge] instead. +// +// Flatten is typically used when serving a specific key to the user: It +// flattens all disk segments, a currently flushing memtable if it exists, and +// the active memtable into a single bitmap. The final bitmap is returned to +// the user. +// +// # Flattening Logic +// +// - The first layer is seen as chronologically first. 
Deletions in the +// first layers are ignored, as there is nothing to be deleted. As a +// result, the additions of the first segment become the root state in the +// first iteration. +// - Any subsequent layer is merged into the root layer in the following way: +// Deletions remove any existing additions, Additions are added. +// - This process happens one layer at a time. This way delete-and-readd +// cycles are reflected correctly. For example, if layer 2 deletes an element +// X and layer 3 adds element X, then it is a net addition overall, and X +// should be represented in the final bitmap. If the order is reversed and +// layer 2 adds X, whereas layer 3 removes X, it is should not be contained +// in the final map. +func (bml BitmapLayers) Flatten(clone bool) *sroar.Bitmap { + if len(bml) == 0 { + return sroar.NewBitmap() + } + + merged := bml[0].Additions + if clone { + merged = merged.Clone() + } + + for i := 1; i < len(bml); i++ { + merged.AndNotConc(bml[i].Deletions, concurrency.SROAR_MERGE) + merged.OrConc(bml[i].Additions, concurrency.SROAR_MERGE) + } + + return merged +} + +// Merge turns two successive layers into one. It does not flatten the segment, +// but keeps additions and deletions separate. This is because there are no +// guarantees that the first segment was the root segment. A merge could run on +// segments 3+4 and they could contain deletions of elements that were added in +// segments 1 or 2. +// +// Merge is intended to be used as part of compactions. 
+func (bml BitmapLayers) Merge() (BitmapLayer, error) { + out := BitmapLayer{} + if len(bml) != 2 { + return out, fmt.Errorf("merge requires exactly two input segments") + } + + left, right := bml[0], bml[1] + + additions := left.Additions.Clone() + additions.AndNotConc(right.Deletions, concurrency.SROAR_MERGE) + additions.OrConc(right.Additions, concurrency.SROAR_MERGE) + + deletions := left.Deletions.Clone() + deletions.AndNotConc(right.Additions, concurrency.SROAR_MERGE) + deletions.OrConc(right.Deletions, concurrency.SROAR_MERGE) + + out.Additions = additions + out.Deletions = deletions + return out, nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/layers_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/layers_test.go new file mode 100644 index 0000000000000000000000000000000000000000..66253217d58e73b82f1466f2ed60cdd69447ef30 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/layers_test.go @@ -0,0 +1,328 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package roaringset + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/sroar" +) + +func Test_BitmapLayers_Flatten(t *testing.T) { + type inputSegment struct { + additions []uint64 + deletions []uint64 + } + + type test struct { + name string + inputs []inputSegment + expectedContained []uint64 + expectedNotContained []uint64 + } + + tests := []test{ + { + name: "no inputs", + inputs: nil, + expectedContained: nil, + expectedNotContained: nil, + }, + { + name: "single segment", + inputs: []inputSegment{ + { + additions: []uint64{4, 5}, + }, + }, + expectedContained: []uint64{4, 5}, + expectedNotContained: nil, + }, + { + name: "three segments, only additions", + inputs: []inputSegment{ + { + additions: []uint64{4, 5}, + }, + { + additions: []uint64{5, 6}, + }, + { + additions: []uint64{6, 7, 8}, + }, + }, + expectedContained: []uint64{4, 5, 6, 7, 8}, + expectedNotContained: nil, + }, + { + name: "two segments, including a delete", + inputs: []inputSegment{ + { + additions: []uint64{4, 5}, + }, + { + additions: []uint64{5, 6}, + deletions: []uint64{4}, + }, + }, + expectedContained: []uint64{5, 6}, + expectedNotContained: []uint64{4}, + }, + { + name: "three segments, including a delete, and a re-add", + inputs: []inputSegment{ + { + additions: []uint64{3, 4, 5}, + }, + { + additions: []uint64{6}, + deletions: []uint64{4, 5}, + }, + { + additions: []uint64{5}, + }, + }, + expectedContained: []uint64{3, 5, 6}, + expectedNotContained: []uint64{4}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + input := make(BitmapLayers, len(test.inputs)) + for i, inp := range test.inputs { + input[i].Additions = NewBitmap(inp.additions...) + input[i].Deletions = NewBitmap(inp.deletions...) 
+ } + + res := input.Flatten(false) + for _, x := range test.expectedContained { + assert.True(t, res.Contains(x)) + } + + for _, x := range test.expectedNotContained { + assert.False(t, res.Contains(x)) + } + }) + } +} + +func Test_BitmapLayers_Merge(t *testing.T) { + type inputSegment struct { + additions []uint64 + deletions []uint64 + } + + type test struct { + name string + inputs []inputSegment + expectedAdditions []uint64 + expectedDeletions []uint64 + expectErr bool + } + + tests := []test{ + { + name: "no inputs - should error", + inputs: nil, + expectedAdditions: nil, + expectedDeletions: nil, + expectErr: true, + }, + { + name: "single layer - should error", + inputs: []inputSegment{ + { + additions: []uint64{4, 5}, + }, + }, + expectedAdditions: nil, + expectedDeletions: nil, + expectErr: true, + }, + { + name: "three layers - should error", + inputs: []inputSegment{ + { + additions: []uint64{4, 5}, + }, + { + additions: []uint64{4, 5}, + }, + { + additions: []uint64{4, 5}, + }, + }, + expectedAdditions: nil, + expectedDeletions: nil, + expectErr: true, + }, + { + name: "two layers, only additions", + inputs: []inputSegment{ + { + additions: []uint64{4, 5}, + }, + { + additions: []uint64{5, 6, 7}, + }, + }, + expectedAdditions: []uint64{4, 5, 6, 7}, + expectedDeletions: nil, + }, + { + name: "additions and deletions without overlap", + inputs: []inputSegment{ + { + additions: []uint64{4, 5}, + deletions: []uint64{1, 2}, + }, + { + additions: []uint64{5, 6, 7}, + deletions: []uint64{2, 3}, + }, + }, + expectedAdditions: []uint64{4, 5, 6, 7}, + expectedDeletions: []uint64{1, 2, 3}, + }, + { + name: "previously deleted element, re-added", + inputs: []inputSegment{ + { + additions: []uint64{}, + deletions: []uint64{1, 2}, + }, + { + additions: []uint64{2}, + deletions: []uint64{}, + }, + }, + expectedAdditions: []uint64{2}, + expectedDeletions: []uint64{1}, + }, + { + name: "previously added element deleted later", + inputs: []inputSegment{ + { + additions: 
[]uint64{3, 4}, + deletions: []uint64{}, + }, + { + additions: []uint64{}, + deletions: []uint64{3}, + }, + }, + expectedAdditions: []uint64{4}, + expectedDeletions: []uint64{3}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + input := make(BitmapLayers, len(test.inputs)) + for i, inp := range test.inputs { + input[i].Additions = NewBitmap(inp.additions...) + input[i].Deletions = NewBitmap(inp.deletions...) + } + + res, err := input.Merge() + if test.expectErr { + require.NotNil(t, err) + return + } else { + require.Nil(t, err) + } + for _, x := range test.expectedAdditions { + assert.True(t, res.Additions.Contains(x)) + } + + for _, x := range test.expectedDeletions { + assert.True(t, res.Deletions.Contains(x)) + } + + intersect := sroar.And(res.Additions, res.Deletions) + assert.True(t, intersect.IsEmpty(), + "verify that additions and deletions never intersect") + }) + } +} + +func Test_BitmapLayer_Clone(t *testing.T) { + t.Run("cloning empty BitmapLayer", func(t *testing.T) { + layerEmpty := BitmapLayer{} + + cloned := layerEmpty.Clone() + + assert.Nil(t, cloned.Additions) + assert.Nil(t, cloned.Deletions) + }) + + t.Run("cloning partially inited BitmapLayer", func(t *testing.T) { + additions := NewBitmap(1) + deletions := NewBitmap(100) + + layerAdditions := BitmapLayer{Additions: additions} + layerDeletions := BitmapLayer{Deletions: deletions} + + clonedLayerAdditions := layerAdditions.Clone() + clonedLayerDeletions := layerDeletions.Clone() + additions.Remove(1) + deletions.Remove(100) + + assert.True(t, layerAdditions.Additions.IsEmpty()) + assert.ElementsMatch(t, []uint64{1}, clonedLayerAdditions.Additions.ToArray()) + assert.Nil(t, clonedLayerAdditions.Deletions) + + assert.True(t, layerDeletions.Deletions.IsEmpty()) + assert.Nil(t, clonedLayerDeletions.Additions) + assert.ElementsMatch(t, []uint64{100}, clonedLayerDeletions.Deletions.ToArray()) + }) + + t.Run("cloning fully inited BitmapLayer", func(t *testing.T) { + 
additions := NewBitmap(1) + deletions := NewBitmap(100) + + layer := BitmapLayer{Additions: additions, Deletions: deletions} + + clonedLayer := layer.Clone() + additions.Remove(1) + deletions.Remove(100) + + assert.True(t, layer.Additions.IsEmpty()) + assert.True(t, layer.Deletions.IsEmpty()) + assert.ElementsMatch(t, []uint64{1}, clonedLayer.Additions.ToArray()) + assert.ElementsMatch(t, []uint64{100}, clonedLayer.Deletions.ToArray()) + }) +} + +// This test aims to prevent a regression on +// https://github.com/weaviate/sroar/issues/1 +// found in Serialized Roaring Bitmaps library +func Test_BitmapLayers_Merge_PanicSliceBoundOutOfRange(t *testing.T) { + genSlice := func(fromInc, toExc uint64) []uint64 { + slice := []uint64{} + for i := fromInc; i < toExc; i++ { + slice = append(slice, i) + } + return slice + } + + leftLayer := BitmapLayer{Deletions: NewBitmap(genSlice(289_800, 290_100)...)} + rightLayer := BitmapLayer{Additions: NewBitmap(genSlice(290_000, 293_000)...)} + + failingDeletionsLayer, err := BitmapLayers{leftLayer, rightLayer}.Merge() + assert.Nil(t, err) + + assert.ElementsMatch(t, genSlice(289_800, 290_000), failingDeletionsLayer.Deletions.ToArray()) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/segment_cursor.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/segment_cursor.go new file mode 100644 index 0000000000000000000000000000000000000000..e55445be066956171404efed471990f4dd5e4741 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/segment_cursor.go @@ -0,0 +1,69 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package roaringset + +import ( + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv/segmentindex" +) + +type Seeker interface { + Seek(key []byte) (segmentindex.Node, error) +} + +// A SegmentCursor iterates over all key-value pairs in a single disk segment. +// You can either start at the beginning using [*SegmentCursor.First] or start +// at an arbitrary key that you may find using [*SegmentCursor.Seek] +type SegmentCursor struct { + index Seeker + data []byte + nextOffset uint64 +} + +// NewSegmentCursor creates a cursor for a single disk segment. Make sure that +// the data buf is already sliced correctly to start at the payload, as calling +// [*SegmentCursor.First] will start reading at offset 0 relative to the passed +// in buffer. Similarly, the buffer may only contain payloads, as the buffer end +// is used to determine if more keys can be found. +// +// Therefore if the payload is part of a longer continuous buffer, the cursor +// should be initialized with data[payloadStartPos:payloadEndPos] +func NewSegmentCursor(data []byte, index Seeker) *SegmentCursor { + return &SegmentCursor{index: index, data: data, nextOffset: 0} +} + +func (c *SegmentCursor) Next() ([]byte, BitmapLayer, error) { + if c.nextOffset >= uint64(len(c.data)) { + return nil, BitmapLayer{}, nil + } + + sn := NewSegmentNodeFromBuffer(c.data[c.nextOffset:]) + c.nextOffset += sn.Len() + layer := BitmapLayer{ + Additions: sn.Additions(), + Deletions: sn.Deletions(), + } + return sn.PrimaryKey(), layer, nil +} + +func (c *SegmentCursor) First() ([]byte, BitmapLayer, error) { + c.nextOffset = 0 + return c.Next() +} + +func (c *SegmentCursor) Seek(key []byte) ([]byte, BitmapLayer, error) { + node, err := c.index.Seek(key) + if err != nil { + return nil, BitmapLayer{}, err + } + c.nextOffset = node.Start + return c.Next() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/segment_cursor_test.go 
b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/segment_cursor_test.go new file mode 100644 index 0000000000000000000000000000000000000000..c7673cfa3a370429524656aaa9fb9b20e90b1511 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/segment_cursor_test.go @@ -0,0 +1,117 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package roaringset + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv/segmentindex" +) + +func TestSegmentCursor(t *testing.T) { + seg, offsets := createDummySegment(t, 5) + + t.Run("starting from beginning", func(t *testing.T) { + c := NewSegmentCursor(seg, nil) + key, layer, err := c.First() + require.Nil(t, err) + assert.Equal(t, []byte("00000"), key) + assert.True(t, layer.Additions.Contains(0)) + assert.True(t, layer.Additions.Contains(1)) + assert.True(t, layer.Deletions.Contains(2)) + assert.True(t, layer.Deletions.Contains(3)) + }) + + t.Run("starting from beginning, page through all", func(t *testing.T) { + c := NewSegmentCursor(seg, nil) + it := uint64(0) + for key, layer, err := c.First(); key != nil; key, layer, err = c.Next() { + require.Nil(t, err) + assert.Equal(t, []byte(fmt.Sprintf("%05d", it)), key) + assert.True(t, layer.Additions.Contains(it*4)) + assert.True(t, layer.Additions.Contains(it*4+1)) + assert.True(t, layer.Deletions.Contains(it*4+2)) + assert.True(t, layer.Deletions.Contains(it*4+3)) + it++ + } + + assert.Equal(t, uint64(5), it) + }) + + t.Run("seek and iterate from there", func(t *testing.T) { + seeker := createDummySeeker(t, offsets, 3) + c := NewSegmentCursor(seg, seeker) + + // start on it 3 as this is where 
the seeker points us + it := uint64(3) + for key, layer, err := c.Seek([]byte("dummyseeker")); key != nil; key, layer, err = c.Next() { + require.Nil(t, err) + assert.Equal(t, []byte(fmt.Sprintf("%05d", it)), key) + assert.True(t, layer.Additions.Contains(it*4)) + assert.True(t, layer.Additions.Contains(it*4+1)) + assert.True(t, layer.Deletions.Contains(it*4+2)) + assert.True(t, layer.Deletions.Contains(it*4+3)) + it++ + } + + assert.Equal(t, uint64(5), it) + }) + + t.Run("seeker returns error", func(t *testing.T) { + seeker := createDummySeeker(t, offsets, 3) + seeker.err = fmt.Errorf("seek and fail") + c := NewSegmentCursor(seg, seeker) + + _, _, err := c.Seek([]byte("dummyseeker")) + require.NotNil(t, err) + assert.Contains(t, err.Error(), "seek and fail") + }) +} + +func createDummySegment(t *testing.T, count uint64) ([]byte, []uint64) { + out := []byte{} + offsets := []uint64{} + + for i := uint64(0); i < count; i++ { + key := []byte(fmt.Sprintf("%05d", i)) + add := NewBitmap(i*4, i*4+1) + del := NewBitmap(i*4+2, i*4+3) + sn, err := NewSegmentNode(key, add, del) + require.Nil(t, err) + offsets = append(offsets, uint64(len(out))) + out = append(out, sn.ToBuffer()...) 
+ } + + return out, offsets +} + +func createDummySeeker(t *testing.T, offsets []uint64, pos int) *dummySeeker { + return &dummySeeker{offsets, pos, nil} +} + +type dummySeeker struct { + offsets []uint64 + pos int + err error +} + +// Seek returns the hard-coded pos that was set on init time, it ignores the +// key +func (s dummySeeker) Seek(key []byte) (segmentindex.Node, error) { + return segmentindex.Node{ + Start: s.offsets[s.pos], + End: s.offsets[s.pos+1], + }, s.err +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/serialization.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/serialization.go new file mode 100644 index 0000000000000000000000000000000000000000..55564ac79df3fd236d1440e1d14f26e9ee5aaabd --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/serialization.go @@ -0,0 +1,217 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package roaringset + +import ( + "encoding/binary" + "fmt" + "io" + "math" + + "github.com/weaviate/sroar" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv/segmentindex" + "github.com/weaviate/weaviate/usecases/byteops" +) + +// SegmentNode was replaced by SegmentNodeList for WAL, but it is still used in +// the bitmap layers. +// +// SegmentNode stores one Key-Value pair (without its index) in +// the LSM Segment. It uses a single []byte internally. As a result there is +// no decode step required at runtime. Instead you can use +// +// - [*SegmentNode.Additions] +// - [*SegmentNode.AdditionsWithCopy] +// - [*SegmentNode.Deletions] +// - [*SegmentNode.DeletionsWithCopy] +// - [*SegmentNode.PrimaryKey] +// +// to access the contents. Those helpers in turn do not require a decoding +// step. 
The accessor methods that return Roaring Bitmaps only point to +// existing memory (methods without WithCopy suffix), or in the worst case copy +// one byte slice (methods with WithCopy suffix). +// +// This makes the SegmentNode very fast to access at query time, even when it +// contains a large amount of data. +// +// The internal structure of the data is: +// +// byte begin-start | description +// --------------------|----------------------------------------------------- +// 0-8 | uint64 indicating the total length of the node, +// | this is used in cursors to identify the next node. +// 8-16 | uint64 length indicator for additions sraor bm -> x +// 16-(x+16) | additions bitmap +// (x+16)-(x+24) | uint64 length indicator for deletions sroar bm -> y +// (x+24)-(x+y+24) | deletions bitmap +// (x+y+24)-(x+y+28) | uint32 length indicator for primary key length -> z +// (x+y+28)-(x+y+z+28) | primary key +type SegmentNode struct { + data []byte +} + +// Len indicates the total length of the [SegmentNode]. When reading multiple +// segments back-2-back, such as in a cursor situation, the offset of element +// (n+1) is the offset of element n + Len() +func (sn *SegmentNode) Len() uint64 { + return binary.LittleEndian.Uint64(sn.data[0:8]) +} + +// Additions returns the additions roaring bitmap with shared state. Only use +// this method if you can guarantee that you will only use it while holding a +// maintenance lock or can otherwise be sure that no compaction can occur. If +// you can't guarantee that, instead use [*SegmentNode.AdditionsWithCopy]. +func (sn *SegmentNode) Additions() *sroar.Bitmap { + rw := byteops.NewReadWriter(sn.data) + rw.MoveBufferToAbsolutePosition(8) + return sroar.FromBuffer(rw.ReadBytesFromBufferWithUint64LengthIndicator()) +} + +// AdditionsWithCopy returns the additions roaring bitmap without sharing state. It +// creates a copy of the underlying buffer. 
This is safe to use indefinitely, +// but much slower than [*SegmentNode.Additions] as it requires copying all the +// memory. If you know that you will only need the contents of the node for a +// duration of time where a lock is held that prevents compactions, it is more +// efficient to use [*SegmentNode.Additions]. +func (sn *SegmentNode) AdditionsWithCopy() *sroar.Bitmap { + rw := byteops.NewReadWriter(sn.data) + rw.MoveBufferToAbsolutePosition(8) + return sroar.FromBufferWithCopy(rw.ReadBytesFromBufferWithUint64LengthIndicator()) +} + +// AdditionsUnlimited returns the additions roaring bitmap with shared state. Only use +// this method if you can guarantee that you will only use it while holding a +// maintenance lock or can otherwise be sure that no compaction can occur. If +// you can't guarantee that, instead use [*SegmentNode.AdditionsWithCopy]. +// CAUTION: bitmap uses entire capacity of underlying buffer. By expanding it may overwrite +// node's data after additions bitmap +func (sn *SegmentNode) AdditionsUnlimited() *sroar.Bitmap { + rw := byteops.NewReadWriter(sn.data) + rw.MoveBufferToAbsolutePosition(8) + return sroar.FromBufferUnlimited(rw.ReadBytesFromBufferWithUint64LengthIndicator()) +} + +// Deletions returns the deletions roaring bitmap with shared state. Only use +// this method if you can guarantee that you will only use it while holding a +// maintenance lock or can otherwise be sure that no compaction can occur. If +// you can't guarantee that, instead use [*SegmentNode.DeletionsWithCopy]. +func (sn *SegmentNode) Deletions() *sroar.Bitmap { + rw := byteops.NewReadWriter(sn.data) + rw.MoveBufferToAbsolutePosition(8) + rw.DiscardBytesFromBufferWithUint64LengthIndicator() + return sroar.FromBuffer(rw.ReadBytesFromBufferWithUint64LengthIndicator()) +} + +// DeletionsWithCopy returns the deletions roaring bitmap without sharing state. It +// creates a copy of the underlying buffer. 
This is safe to use indefinitely, +// but much slower than [*SegmentNode.Deletions] as it requires copying all the +// memory. If you know that you will only need the contents of the node for a +// duration of time where a lock is held that prevents compactions, it is more +// efficient to use [*SegmentNode.Deletions]. +func (sn *SegmentNode) DeletionsWithCopy() *sroar.Bitmap { + rw := byteops.NewReadWriter(sn.data) + rw.MoveBufferToAbsolutePosition(8) + rw.DiscardBytesFromBufferWithUint64LengthIndicator() + return sroar.FromBufferWithCopy(rw.ReadBytesFromBufferWithUint64LengthIndicator()) +} + +func (sn *SegmentNode) PrimaryKey() []byte { + rw := byteops.NewReadWriter(sn.data) + rw.MoveBufferToAbsolutePosition(8) + rw.DiscardBytesFromBufferWithUint64LengthIndicator() + rw.DiscardBytesFromBufferWithUint64LengthIndicator() + return rw.ReadBytesFromBufferWithUint32LengthIndicator() +} + +func NewSegmentNode( + key []byte, additions, deletions *sroar.Bitmap, +) (*SegmentNode, error) { + if len(key) > math.MaxUint32 { + return nil, fmt.Errorf("key too long, max length is %d", math.MaxUint32) + } + + additionsBuf := additions.ToBuffer() + deletionsBuf := deletions.ToBuffer() + + // offset + 2*uint64 length indicators + uint32 length indicator + payloads + expectedSize := 8 + 8 + 8 + 4 + len(additionsBuf) + len(deletionsBuf) + len(key) + sn := SegmentNode{ + data: make([]byte, expectedSize), + } + + rw := byteops.NewReadWriter(sn.data) + + // reserve the first 8 bytes for the offset, which we will write at the very + // end + rw.MoveBufferPositionForward(8) + if err := rw.CopyBytesToBufferWithUint64LengthIndicator(additionsBuf); err != nil { + return nil, err + } + + if err := rw.CopyBytesToBufferWithUint64LengthIndicator(deletionsBuf); err != nil { + return nil, err + } + + if err := rw.CopyBytesToBufferWithUint32LengthIndicator(key); err != nil { + return nil, err + } + + offset := rw.Position + rw.MoveBufferToAbsolutePosition(0) + rw.WriteUint64(uint64(offset)) + + 
return &sn, nil +} + +// ToBuffer returns the internal buffer without copying data. Only use this, +// when you can be sure that it's safe to share the data, or create your own +// copy. +// +// It truncates the buffer at is own length, in case it was initialized with a +// long buffer that only had a beginning offset, but no end. Such a situation +// may occur with cursors. If we then returned the whole buffer and don't know +// what the caller plans on doing with the data, we risk passing around too +// much memory. Truncating at the length prevents this and has no other +// negative effects. +func (sn *SegmentNode) ToBuffer() []byte { + return sn.data[:sn.Len()] +} + +// NewSegmentNodeFromBuffer creates a new segment node by using the underlying +// buffer without copying data. Only use this when you can be sure that it's +// safe to share the data or create your own copy. +func NewSegmentNodeFromBuffer(buf []byte) *SegmentNode { + return &SegmentNode{data: buf} +} + +// KeyIndexAndWriteTo is a helper to flush a memtables full of SegmentNodes. It +// writes itself into the given writer and returns a [segmentindex.Key] with +// start and end indicators (respecting SegmentNode.Offset). Those keys can +// then be used to build an index for the nodes. The combination of index and +// node make up an LSM segment. +// +// RoaringSets do not support secondary keys, thus the segmentindex.Key will +// only ever contain a primary key. 
+func (sn *SegmentNode) KeyIndexAndWriteTo(w io.Writer, offset int) (segmentindex.Key, error) { + out := segmentindex.Key{} + + n, err := w.Write(sn.data) + if err != nil { + return out, err + } + + out.ValueStart = offset + out.ValueEnd = offset + n + out.Key = sn.PrimaryKey() + + return out, nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/serialization_list.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/serialization_list.go new file mode 100644 index 0000000000000000000000000000000000000000..470ea638591aae89eb8b981eacd31993e9d51497 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/serialization_list.go @@ -0,0 +1,224 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package roaringset + +import ( + "encoding/binary" + "fmt" + "io" + "math" + + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv/segmentindex" + "github.com/weaviate/weaviate/usecases/byteops" +) + +// SegmentNodeList is inspired by the roaringset.SegmentNode, keeping the +// same fuctionality, but stores lists of []uint64 instead of bitmaps. +// It stores one Key-Value pair (without its index) in the LSM Segment. +// As with SegmentNode, it uses a single []byte internally. As a result +// there is no decode step required at runtime. Instead you can use +// +// - [*SegmentNodeList.Additions] +// - [*SegmentNodeList.AdditionsWithCopy] +// - [*SegmentNodeList.Deletions] +// - [*SegmentNodeList.DeletionsWithCopy] +// - [*SegmentNodeList.PrimaryKey] +// +// to access the contents. Those helpers in turn do not require a decoding +// step. Instead of returning Roaring Bitmaps, it returns []uint64 slices. 
+// This makes the indexing time much faster, as we don't have to create the +// roaring bitmaps for each WAL insert. It also makes it smaller on disk, as we +// don't have to store the roaring bitmap, but only the list of uint64s. +// +// The internal structure of the data is close to the original SegmentNode, +// storing []uint64 instead of roaring bitmaps. The structure is as follows: +// +// byte begin-start | description +// --------------------|----------------------------------------------------- +// 0-8 | uint64 indicating the total length of the node, +// | this is used in cursors to identify the next node. +// 8-16 | uint64 length indicator for additions +// | len(additions)*size(uint64) -> x +// 16-(x+16) | additions []uint64 +// (x+16)-(x+24) | uint64 length indicator for deletions +// | len(deletions)*size(uint64) -> y +// (x+24)-(x+y+24) | deletions []uint64 +// (x+y+24)-(x+y+28) | uint32 length indicator for primary key length -> z +// (x+y+28)-(x+y+z+28) | primary key +type SegmentNodeList struct { + data []byte +} + +// Len indicates the total length of the [SegmentNodeList]. When reading multiple +// segments back-2-back, such as in a cursor situation, the offset of element +// (n+1) is the offset of element n + Len() +func (sn *SegmentNodeList) Len() uint64 { + return binary.LittleEndian.Uint64(sn.data[0:8]) +} + +// Additions returns the additions []uint64 with shared state. Only use +// this method if you can guarantee that you will only use it while holding a +// maintenance lock or can otherwise be sure that no compaction can occur. If +// you can't guarantee that, instead use [*SegmentNodeList.AdditionsWithCopy]. 
+func (sn *SegmentNodeList) Additions() []uint64 { + rw := byteops.NewReadWriter(sn.data) + rw.MoveBufferToAbsolutePosition(8) + bData := rw.ReadBytesFromBufferWithUint64LengthIndicator() + results := make([]uint64, len(bData)/8) + for i := 0; i < len(bData); i += 8 { + results[i/8] = binary.LittleEndian.Uint64(bData[i : i+8]) + } + return results +} + +// AdditionsWithCopy returns the additions []uint64 without sharing state. It +// creates a copy of the underlying buffer. This is safe to use indefinitely, +// but much slower than [*SegmentNodeList.Additions] as it requires copying all the +// memory. If you know that you will only need the contents of the node for a +// duration of time where a lock is held that prevents compactions, it is more +// efficient to use [*SegmentNodeList.Additions]. +func (sn *SegmentNodeList) AdditionsWithCopy() []uint64 { + rw := byteops.NewReadWriter(sn.data) + rw.MoveBufferToAbsolutePosition(8) + bData := rw.ReadBytesFromBufferWithUint64LengthIndicator() + results := make([]uint64, len(bData)/8) + for i := 0; i < len(bData); i += 8 { + results[i/8] = binary.LittleEndian.Uint64(bData[i : i+8]) + } + return results +} + +// Deletions returns the deletions []uint64 with shared state. Only use +// this method if you can guarantee that you will only use it while holding a +// maintenance lock or can otherwise be sure that no compaction can occur. If +// you can't guarantee that, instead use [*SegmentNodeList.DeletionsWithCopy]. +func (sn *SegmentNodeList) Deletions() []uint64 { + rw := byteops.NewReadWriter(sn.data) + rw.MoveBufferToAbsolutePosition(8) + rw.DiscardBytesFromBufferWithUint64LengthIndicator() + bData := rw.ReadBytesFromBufferWithUint64LengthIndicator() + results := make([]uint64, len(bData)/8) + for i := 0; i < len(bData); i += 8 { + results[i/8] = binary.LittleEndian.Uint64(bData[i : i+8]) + } + return results +} + +// DeletionsWithCopy returns the deletions []uint64 without sharing state. 
It +// creates a copy of the underlying buffer. This is safe to use indefinitely, +// but much slower than [*SegmentNodeList.Deletions] as it requires copying all the +// memory. If you know that you will only need the contents of the node for a +// duration of time where a lock is held that prevents compactions, it is more +// efficient to use [*SegmentNodeList.Deletions]. +func (sn *SegmentNodeList) DeletionsWithCopy() []uint64 { + rw := byteops.NewReadWriter(sn.data) + rw.MoveBufferToAbsolutePosition(8) + rw.DiscardBytesFromBufferWithUint64LengthIndicator() + bData := rw.ReadBytesFromBufferWithUint64LengthIndicator() + results := make([]uint64, len(bData)/8) + for i := 0; i < len(bData); i += 8 { + results[i/8] = binary.LittleEndian.Uint64(bData[i : i+8]) + } + return results +} + +func (sn *SegmentNodeList) PrimaryKey() []byte { + rw := byteops.NewReadWriter(sn.data) + rw.MoveBufferToAbsolutePosition(8) + rw.DiscardBytesFromBufferWithUint64LengthIndicator() + rw.DiscardBytesFromBufferWithUint64LengthIndicator() + return rw.ReadBytesFromBufferWithUint32LengthIndicator() +} + +func NewSegmentNodeList( + key []byte, additions, deletions []uint64, +) (*SegmentNodeList, error) { + if len(key) > math.MaxUint32 { + return nil, fmt.Errorf("key too long, max length is %d", math.MaxUint32) + } + + // offset + 2*uint64 length indicators + uint32 length indicator + payloads + lenAdditions := 8 * len(additions) + lenDeletions := 8 * len(deletions) + expectedSize := 8 + 8 + 8 + 4 + lenAdditions + lenDeletions + len(key) + sn := SegmentNodeList{ + data: make([]byte, expectedSize), + } + + rw := byteops.NewReadWriter(sn.data) + + // reserve the first 8 bytes for the offset, which we will write at the very + // end + rw.MoveBufferPositionForward(8) + rw.WriteUint64(uint64(lenAdditions)) + for _, v := range additions { + rw.WriteUint64(v) + } + rw.WriteUint64(uint64(lenDeletions)) + for _, v := range deletions { + rw.WriteUint64(v) + } + + if err := 
rw.CopyBytesToBufferWithUint32LengthIndicator(key); err != nil { + return nil, err + } + + offset := rw.Position + rw.MoveBufferToAbsolutePosition(0) + rw.WriteUint64(uint64(offset)) + + return &sn, nil +} + +// ToBuffer returns the internal buffer without copying data. Only use this, +// when you can be sure that it's safe to share the data, or create your own +// copy. +// +// It truncates the buffer at is own length, in case it was initialized with a +// long buffer that only had a beginning offset, but no end. Such a situation +// may occur with cursors. If we then returned the whole buffer and don't know +// what the caller plans on doing with the data, we risk passing around too +// much memory. Truncating at the length prevents this and has no other +// negative effects. +func (sn *SegmentNodeList) ToBuffer() []byte { + return sn.data[:sn.Len()] +} + +// NewSegmentNodeFromBuffer creates a new segment node by using the underlying +// buffer without copying data. Only use this when you can be sure that it's +// safe to share the data or create your own copy. +func NewSegmentNodeListFromBuffer(buf []byte) *SegmentNodeList { + return &SegmentNodeList{data: buf} +} + +// KeyIndexAndWriteTo is a helper to flush a memtables full of SegmentNodes. It +// writes itself into the given writer and returns a [segmentindex.Key] with +// start and end indicators (respecting SegmentNodeList.Offset). Those keys can +// then be used to build an index for the nodes. The combination of index and +// node make up an LSM segment. +// +// RoaringSets do not support secondary keys, thus the segmentindex.Key will +// only ever contain a primary key. 
+func (sn *SegmentNodeList) KeyIndexAndWriteTo(w io.Writer, offset int) (segmentindex.Key, error) { + out := segmentindex.Key{} + + n, err := w.Write(sn.data) + if err != nil { + return out, err + } + + out.ValueStart = offset + out.ValueEnd = offset + n + out.Key = sn.PrimaryKey() + + return out, nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/serialization_list_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/serialization_list_test.go new file mode 100644 index 0000000000000000000000000000000000000000..dacb8a23aa8835fecaa961e84f86b13691ead427 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/serialization_list_test.go @@ -0,0 +1,119 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package roaringset + +import ( + "bytes" + "math" + "slices" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestSerializationList_HappyPath(t *testing.T) { + additions := []uint64{1, 2, 3, 4, 6} + deletions := []uint64{5, 7} + key := []byte("my-key") + + sn, err := NewSegmentNodeList(key, additions, deletions) + require.Nil(t, err) + + buf := sn.ToBuffer() + assert.Equal(t, sn.Len(), uint64(len(buf))) + + newSN := NewSegmentNodeListFromBuffer(buf) + assert.Equal(t, newSN.Len(), uint64(len(buf))) + + // without copying + newAdditions := newSN.Additions() + + assert.True(t, slices.Index(newAdditions, 4) != -1) + assert.False(t, slices.Index(newAdditions, 5) != -1) + newDeletions := newSN.Deletions() + assert.False(t, slices.Index(newDeletions, 4) != -1) + assert.True(t, slices.Index(newDeletions, 5) != -1) + assert.True(t, slices.Index(newDeletions, 7) != -1) + assert.Equal(t, []byte("my-key"), 
newSN.PrimaryKey()) + + // with copying + newAdditions = newSN.AdditionsWithCopy() + assert.True(t, slices.Index(newAdditions, 4) != -1) + assert.False(t, slices.Index(newAdditions, 5) != -1) + newDeletions = newSN.DeletionsWithCopy() + assert.False(t, slices.Index(newDeletions, 4) != -1) + assert.True(t, slices.Index(newDeletions, 5) != -1) + assert.True(t, slices.Index(newDeletions, 7) != -1) +} + +func TestSerializationList_InitializingFromBufferTooLarge(t *testing.T) { + additions := []uint64{1, 2, 3, 4, 6} + deletions := []uint64{5, 7} + key := []byte("my-key") + + sn, err := NewSegmentNodeList(key, additions, deletions) + require.Nil(t, err) + + buf := sn.ToBuffer() + assert.Equal(t, sn.Len(), uint64(len(buf))) + + bufTooLarge := make([]byte, 3*len(buf)) + copy(bufTooLarge, buf) + + newSN := NewSegmentNodeListFromBuffer(bufTooLarge) + // assert that the buffer self reports the useful length, not the length of + // the initialization buffer + assert.Equal(t, newSN.Len(), uint64(len(buf))) + // assert that ToBuffer() returns a buffer that is no longer than the useful + // length + assert.Equal(t, len(buf), len(newSN.ToBuffer())) +} + +func TestSerializationList_UnhappyPath(t *testing.T) { + t.Run("with primary key that's too long", func(t *testing.T) { + key := make([]byte, math.MaxUint32+3) + _, err := NewSegmentNodeList(key, nil, nil) + + require.NotNil(t, err) + assert.Contains(t, err.Error(), "key too long") + }) +} + +func TestSerializationList_KeyIndexAndWriteTo(t *testing.T) { + buf := &bytes.Buffer{} + offset := 7 + // write some dummy data, so we have an offset + buf.Write(make([]byte, offset)) + + additions := []uint64{1, 2, 3, 4, 6} + deletions := []uint64{5, 7} + key := []byte("my-key") + + sn, err := NewSegmentNodeList(key, additions, deletions) + require.Nil(t, err) + + keyIndex, err := sn.KeyIndexAndWriteTo(buf, offset) + require.Nil(t, err) + + res := buf.Bytes() + assert.Equal(t, keyIndex.ValueEnd, len(res)) + + newSN := 
NewSegmentNodeListFromBuffer(res[keyIndex.ValueStart:keyIndex.ValueEnd]) + newAdditions := newSN.Additions() + assert.True(t, slices.Index(newAdditions, 4) != -1) + assert.False(t, slices.Index(newAdditions, 5) != -1) + newDeletions := newSN.Deletions() + assert.False(t, slices.Index(newDeletions, 4) != -1) + assert.True(t, slices.Index(newDeletions, 5) != -1) + assert.Equal(t, []byte("my-key"), newSN.PrimaryKey()) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/serialization_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/serialization_test.go new file mode 100644 index 0000000000000000000000000000000000000000..0bb840002418feecb3b9daa7ea7516a4daba294f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringset/serialization_test.go @@ -0,0 +1,115 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package roaringset + +import ( + "bytes" + "math" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestSerialization_HappyPath(t *testing.T) { + additions := NewBitmap(1, 2, 3, 4, 6) + deletions := NewBitmap(5, 7) + key := []byte("my-key") + + sn, err := NewSegmentNode(key, additions, deletions) + require.Nil(t, err) + + buf := sn.ToBuffer() + assert.Equal(t, sn.Len(), uint64(len(buf))) + + newSN := NewSegmentNodeFromBuffer(buf) + assert.Equal(t, newSN.Len(), uint64(len(buf))) + + // without copying + newAdditions := newSN.Additions() + assert.True(t, newAdditions.Contains(4)) + assert.False(t, newAdditions.Contains(5)) + newDeletions := newSN.Deletions() + assert.False(t, newDeletions.Contains(4)) + assert.True(t, newDeletions.Contains(5)) + assert.Equal(t, []byte("my-key"), newSN.PrimaryKey()) + + // with copying + newAdditions = newSN.AdditionsWithCopy() + assert.True(t, newAdditions.Contains(4)) + assert.False(t, newAdditions.Contains(5)) + newDeletions = newSN.DeletionsWithCopy() + assert.False(t, newDeletions.Contains(4)) + assert.True(t, newDeletions.Contains(5)) +} + +func TestSerialization_InitializingFromBufferTooLarge(t *testing.T) { + additions := NewBitmap(1, 2, 3, 4, 6) + deletions := NewBitmap(5, 7) + key := []byte("my-key") + + sn, err := NewSegmentNode(key, additions, deletions) + require.Nil(t, err) + + buf := sn.ToBuffer() + assert.Equal(t, sn.Len(), uint64(len(buf))) + + bufTooLarge := make([]byte, 3*len(buf)) + copy(bufTooLarge, buf) + + newSN := NewSegmentNodeFromBuffer(bufTooLarge) + // assert that the buffer self reports the useful length, not the length of + // the initialization buffer + assert.Equal(t, newSN.Len(), uint64(len(buf))) + // assert that ToBuffer() returns a buffer that is no longer than the useful + // length + assert.Equal(t, len(buf), len(newSN.ToBuffer())) +} + +func TestSerialization_UnhappyPath(t *testing.T) { + t.Run("with 
primary key that's too long", func(t *testing.T) { + key := make([]byte, math.MaxUint32+3) + _, err := NewSegmentNode(key, nil, nil) + + require.NotNil(t, err) + assert.Contains(t, err.Error(), "key too long") + }) +} + +func TestSerialization_KeyIndexAndWriteTo(t *testing.T) { + buf := &bytes.Buffer{} + offset := 7 + // write some dummy data, so we have an offset + buf.Write(make([]byte, offset)) + + additions := NewBitmap(1, 2, 3, 4, 6) + deletions := NewBitmap(5, 7) + key := []byte("my-key") + + sn, err := NewSegmentNode(key, additions, deletions) + require.Nil(t, err) + + keyIndex, err := sn.KeyIndexAndWriteTo(buf, offset) + require.Nil(t, err) + + res := buf.Bytes() + assert.Equal(t, keyIndex.ValueEnd, len(res)) + + newSN := NewSegmentNodeFromBuffer(res[keyIndex.ValueStart:keyIndex.ValueEnd]) + newAdditions := newSN.Additions() + assert.True(t, newAdditions.Contains(4)) + assert.False(t, newAdditions.Contains(5)) + newDeletions := newSN.Deletions() + assert.False(t, newDeletions.Contains(4)) + assert.True(t, newDeletions.Contains(5)) + assert.Equal(t, []byte("my-key"), newSN.PrimaryKey()) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringsetrange/compactor.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringsetrange/compactor.go new file mode 100644 index 0000000000000000000000000000000000000000..b97cc7af8acbabca9cbff904b18e7a9b724202e5 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringsetrange/compactor.go @@ -0,0 +1,323 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package roaringsetrange + +import ( + "fmt" + "io" + + "github.com/weaviate/weaviate/adapters/repos/db/compactor" + + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/weaviate/sroar" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv/segmentindex" + "github.com/weaviate/weaviate/adapters/repos/db/roaringset" + "github.com/weaviate/weaviate/entities/concurrency" + "github.com/weaviate/weaviate/entities/diskio" + "github.com/weaviate/weaviate/usecases/monitoring" +) + +// Compactor takes in a left and a right segment and merges them into a single +// segment. The input segments are represented by cursors without their +// respective segmentindexes. A new segmentindex is built from the merged nodes +// without taking the old indexes into account at all. +// +// The left segment must precede the right one in its creation time, as the +// compactor applies latest-takes-presence rules when there is a conflict. +// +// # Merging independent key/value pairs +// +// The new segment's nodes will be in sorted fashion (this is a requirement for +// the segment index and segment cursors to function). To achieve a sorted end +// result, the Compactor goes over both input cursors simultaneously and always +// works on the smaller of the two keys. After a key/value pair has been added +// to the output only the input cursor that provided the pair is advanced. +// +// # Merging key/value pairs with identical keys +// +// When both segment have a key/value pair with an overlapping key, the value +// has to be merged. The merge logic is not part of the compactor itself. +// Instead it makes use of [BitmapLayers.Merge]. +// +// # Exit Criterium +// +// When both cursors no longer return values, all key/value pairs are +// considered compacted. The compactor then deals with metadata. 
+// +// # Index and Header metadata +// +// Once the key/value pairs have been compacted, the input writer is rewinded +// to be able to write the header metadata at the beginning of the file +// Because of this, the input writer must be an [io.WriteSeeker], +// such as [*os.File]. +// +// The level of the resulting segment is the input level increased by one. +// Levels help the "eligible for compaction" cycle to find suitable compaction +// pairs. +type Compactor struct { + left, right SegmentCursor + currentLevel uint16 + // Tells if deletions or keys without corresponding values + // can be removed from merged segment. + // (left segment is root (1st) one, keepTombstones is off for bucket) + cleanupDeletions bool + enableChecksumValidation bool + + w io.WriteSeeker + bufw compactor.Writer + mw *compactor.MemoryWriter +} + +// NewCompactor from left (older) and right (newer) seeker. See [Compactor] for +// an explanation of what goes on under the hood, and why the input +// requirements are the way they are. +func NewCompactor(w io.WriteSeeker, left, right SegmentCursor, + level uint16, cleanupDeletions bool, enableChecksumValidation bool, maxNewFileSize int64, +) *Compactor { + observeWrite := monitoring.GetMetrics().FileIOWrites.With(prometheus.Labels{ + "operation": "compaction", + "strategy": "roaringsetrange", + }) + writeCB := func(written int64) { + observeWrite.Observe(float64(written)) + } + meteredW := diskio.NewMeteredWriter(w, writeCB) + writer, mw := compactor.NewWriter(meteredW, maxNewFileSize) + + return &Compactor{ + left: left, + right: right, + w: meteredW, + bufw: writer, + mw: mw, + currentLevel: level, + cleanupDeletions: cleanupDeletions, + enableChecksumValidation: enableChecksumValidation, + } +} + +// Do starts a compaction. See [Compactor] for an explanation of this process. 
// Do runs the compaction: dummy header, merged node stream, real header
// (written over the dummy via seek/rewind), optional trailing checksum.
func (c *Compactor) Do() error {
	if err := c.init(); err != nil {
		return fmt.Errorf("init: %w", err)
	}

	segmentFile := segmentindex.NewSegmentFile(
		segmentindex.WithBufferedWriter(c.bufw),
		segmentindex.WithChecksumsDisabled(!c.enableChecksumValidation),
	)

	written, err := c.writeNodes(segmentFile)
	if err != nil {
		return fmt.Errorf("write keys: %w", err)
	}

	// flush buffered, so we can safely seek on underlying writer
	// (skipped when a MemoryWriter is in play — WriteHeader handles that path)
	if c.mw == nil {
		if err := c.bufw.Flush(); err != nil {
			return fmt.Errorf("flush buffered: %w", err)
		}
	}

	dataEnd := segmentindex.HeaderSize + uint64(written)
	version := segmentindex.ChooseHeaderVersion(c.enableChecksumValidation)
	if err := compactor.WriteHeader(c.mw, c.w, c.bufw, segmentFile, c.currentLevel, version,
		0, dataEnd, segmentindex.StrategyRoaringSetRange); err != nil {
		return errors.Wrap(err, "write header")
	}

	if _, err := segmentFile.WriteChecksum(); err != nil {
		return fmt.Errorf("write compactorRoaringSetRange segment checksum: %w", err)
	}

	return nil
}

// init reserves space for the header that will be filled in at the end of Do.
func (c *Compactor) init() error {
	// write a dummy header, we don't know the contents of the actual header yet,
	// we will seek to the beginning and overwrite the actual header at the very
	// end

	if _, err := c.bufw.Write(make([]byte, segmentindex.HeaderSize)); err != nil {
		return errors.Wrap(err, "write empty header")
	}

	return nil
}

// writeNodes merges both cursors into the segment body and returns the number
// of body bytes written (excluding the header).
func (c *Compactor) writeNodes(f *segmentindex.SegmentFile) (int, error) {
	nc := &nodeCompactor{
		left:             c.left,
		right:            c.right,
		bufw:             f.BodyWriter(),
		cleanupDeletions: c.cleanupDeletions,
		emptyBitmap:      sroar.NewBitmap(),
	}

	if err := nc.loopThroughKeys(); err != nil {
		return 0, err
	}

	return nc.written, nil
}

// nodeCompactor is a helper type to improve the code structure of merging
// nodes in a compaction
type nodeCompactor struct {
	left, right SegmentCursor
	bufw        io.Writer
	written     int

	cleanupDeletions bool
	// emptyBitmap is a shared placeholder used wherever "no additions" /
	// "no deletions" must be expressed without allocating a new bitmap.
	emptyBitmap                   *sroar.Bitmap
	// deletionsLeft/Right cache each segment's key-0 deletion bitmap so it can
	// be applied while merging every subsequent key.
	deletionsLeft, deletionsRight *sroar.Bitmap
}

// loopThroughKeys is the heart of the merge: it walks both cursors in key
// order, handling the empty-side fast paths first, then merging key by key.
// Both segments are required to start with key 0 (the non-null bitmap).
func (nc *nodeCompactor) loopThroughKeys() error {
	keyLeft, layerLeft, okLeft := nc.left.First()
	keyRight, layerRight, okRight := nc.right.First()

	if okLeft && keyLeft != 0 {
		return fmt.Errorf("left segment: missing key 0 (non-null bitmap)")
	}
	if okRight && keyRight != 0 {
		return fmt.Errorf("right segment: missing key 0 (non-null bitmap)")
	}

	// both segments empty
	if !okLeft && !okRight {
		return nil
	}

	// left segment empty, take right
	if !okLeft {
		for ; okRight; keyRight, layerRight, okRight = nc.right.Next() {
			if err := nc.writeLayer(keyRight, layerRight); err != nil {
				return fmt.Errorf("right segment: %w", err)
			}
		}
		return nil
	}

	// right segment empty, take left
	if !okRight {
		for ; okLeft; keyLeft, layerLeft, okLeft = nc.left.Next() {
			if err := nc.writeLayer(keyLeft, layerLeft); err != nil {
				return fmt.Errorf("left segment: %w", err)
			}
		}
		return nil
	}

	// both segments, merge
	//
	// bitmaps' cloning is necessary for both types of cursors: mmap and pread
	// (pread cursor use buffers to read entire nodes from file, therefore nodes already read
	// are later overwritten with nodes being read later)
	nc.deletionsLeft = nc.emptyBitmap
	if !layerLeft.Deletions.IsEmpty() {
		nc.deletionsLeft = layerLeft.Deletions.Clone()
	}
	nc.deletionsRight = nc.emptyBitmap
	if !layerRight.Deletions.IsEmpty() {
		nc.deletionsRight = layerRight.Deletions.Clone()
	}

	for okLeft || okRight {
		if okLeft && (!okRight || keyLeft < keyRight) {
			// merge left (key only present on the left)
			merged := nc.mergeLayers(keyLeft, layerLeft.Additions, nc.emptyBitmap)
			if err := nc.writeLayer(keyLeft, merged); err != nil {
				return fmt.Errorf("left segment merge: %w", err)
			}
			keyLeft, layerLeft, okLeft = nc.left.Next()
		} else if okRight && (!okLeft || keyLeft > keyRight) {
			// merge right (key only present on the right)
			merged := nc.mergeLayers(keyRight, nc.emptyBitmap, layerRight.Additions)
			if err := nc.writeLayer(keyRight, merged); err != nil {
				return fmt.Errorf("right segment merge: %w", err)
			}
			keyRight, layerRight, okRight = nc.right.Next()
		} else {
			// merge both (key present on both sides; advance both cursors)
			merged := nc.mergeLayers(keyLeft, layerLeft.Additions, layerRight.Additions)
			if err := nc.writeLayer(keyLeft, merged); err != nil {
				return fmt.Errorf("both segments merge: %w", err)
			}
			keyLeft, layerLeft, okLeft = nc.left.Next()
			keyRight, layerRight, okRight = nc.right.Next()
		}
	}
	return nil
}

// mergeLayers combines the additions of both sides for a single key:
// left additions minus the newer segment's deletions, OR'ed with the right
// additions (latest-takes-presence). Deletion bitmaps are only carried on
// key 0, where both sides' deletions are unioned.
func (nc *nodeCompactor) mergeLayers(key uint8, additionsLeft, additionsRight *sroar.Bitmap,
) roaringset.BitmapLayer {
	// bitmaps' cloning is necessary for both types of cursors: mmap and pread
	// (pread cursor use buffers to read entire nodes from file, therefore nodes already read
	// are later overwritten with nodes being read later)
	additions := additionsLeft.Clone()
	additions.AndNotConc(nc.deletionsRight, concurrency.SROAR_MERGE)
	additions.OrConc(additionsRight, concurrency.SROAR_MERGE)

	var deletions *sroar.Bitmap
	if key == 0 {
		deletions = nc.deletionsLeft.Clone()
		deletions.OrConc(nc.deletionsRight, concurrency.SROAR_MERGE)
	}

	return roaringset.BitmapLayer{Additions: additions, Deletions: deletions}
}

// writeLayer serializes one (possibly cleaned-up) layer as a segment node and
// tracks the number of body bytes written. A layer may be skipped entirely by
// cleanupLayer.
func (nc *nodeCompactor) writeLayer(key uint8, layer roaringset.BitmapLayer) error {
	if cleanLayer, skip := nc.cleanupLayer(key, layer); !skip {
		sn, err := NewSegmentNode(key, cleanLayer.Additions, cleanLayer.Deletions)
		if err != nil {
			return fmt.Errorf("new segment node for key %d: %w", key, err)
		}

		n, err := nc.bufw.Write(sn.ToBuffer())
		if err != nil {
			return fmt.Errorf("write segment node for key %d: %w", key, err)
		}

		nc.written += n
	}
	return nil
}

// cleanupLayer decides whether a merged layer is worth persisting and, if so,
// condenses its bitmaps. Returns (layer, skip). A layer with no additions is
// dropped unless it is key 0 carrying deletions that must survive
// (cleanupDeletions == false).
func (nc *nodeCompactor) cleanupLayer(key uint8, layer roaringset.BitmapLayer) (roaringset.BitmapLayer, bool) {
	var additions, deletions *sroar.Bitmap

	if layer.Additions.IsEmpty() {
		if key != 0 || nc.cleanupDeletions || layer.Deletions.IsEmpty() {
			return roaringset.BitmapLayer{}, true
		}

		additions = nc.emptyBitmap
		deletions = roaringset.Condense(layer.Deletions)
	} else {
		additions = roaringset.Condense(layer.Additions)
		deletions = nil

		if key == 0 {
			if nc.cleanupDeletions || layer.Deletions.IsEmpty() {
				deletions = nc.emptyBitmap
			} else {
				deletions = roaringset.Condense(layer.Deletions)
			}
		}
	}

	return roaringset.BitmapLayer{Additions: additions, Deletions: deletions}, false
}
diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringsetrange/compactor_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringsetrange/compactor_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..5a45348bfef9abf69d19d56ad56de4926bd6dc8d
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringsetrange/compactor_test.go
@@ -0,0 +1,565 @@
+//                           _       _
+// __      _____  __ ___   ___  __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+//  \ V  V /  __/ (_| |\ V /| | (_| | ||  __/
+//   \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+//  Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
//
//  CONTACT: hello@weaviate.io
//

package roaringsetrange

import (
	"fmt"
	"io"
	"os"
	"path/filepath"
	"testing"

	"github.com/weaviate/weaviate/adapters/repos/db/compactor"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/weaviate/weaviate/adapters/repos/db/lsmkv/segmentindex"
	"github.com/weaviate/weaviate/adapters/repos/db/roaringset"
)

// Test_Compactor compacts pairs of in-memory segments and verifies the merged
// node stream, once with cleanupDeletions=false ("[keep]") and once with
// cleanupDeletions=true ("[cleanup]"). Each case is additionally run with and
// without checksums, and through both the in-memory and the buffered-writer
// path (small vs. large maxNewFileSize) whose outputs must be identical.
func Test_Compactor(t *testing.T) {
	type test struct {
		name            string
		left            []byte
		right           []byte
		expectedKeep    []segmentEntry
		expectedCleanup []segmentEntry
		expectedErr     string
	}

	tests := []test{
		{
			// deletion bitmaps on keys other than 0 are never persisted by the
			// compactor — hence the "ignored" markers below
			name: "segments with nothing deleted",
			left: createSegmentsFromEntries(t, []segmentEntry{
				{
					key:       uint8(0),
					additions: []uint64{11, 22, 33},
					deletions: []uint64{111},
				},
				{
					key:       uint8(1),
					additions: []uint64{22},
					deletions: []uint64{222}, // ignored
				},
				{
					key:       uint8(2),
					additions: []uint64{33},
					deletions: []uint64{333}, // ignored
				},
			}),
			right: createSegmentsFromEntries(t, []segmentEntry{
				{
					key:       uint8(0),
					additions: []uint64{55, 66},
					deletions: []uint64{444},
				},
				{
					key:       uint8(1),
					additions: []uint64{55},
					deletions: []uint64{555}, // ignored
				},
				{
					key:       uint8(3),
					additions: []uint64{66},
					deletions: []uint64{666}, // ignored
				},
			}),
			expectedKeep: []segmentEntry{
				{
					key:       uint8(0),
					additions: []uint64{11, 22, 33, 55, 66},
					deletions: []uint64{111, 444},
				},
				{
					key:       uint8(1),
					additions: []uint64{22, 55},
					deletions: nil,
				},
				{
					key:       uint8(2),
					additions: []uint64{33},
					deletions: nil,
				},
				{
					key:       uint8(3),
					additions: []uint64{66},
					deletions: nil,
				},
			},
			expectedCleanup: []segmentEntry{
				{
					key:       uint8(0),
					additions: []uint64{11, 22, 33, 55, 66},
					deletions: []uint64{},
				},
				{
					key:       uint8(1),
					additions: []uint64{22, 55},
					deletions: nil,
				},
				{
					key:       uint8(2),
					additions: []uint64{33},
					deletions: nil,
				},
				{
					key:       uint8(3),
					additions: []uint64{66},
					deletions: nil,
				},
			},
		},
		{
			name: "segments with everything overwritten",
			left: createSegmentsFromEntries(t, []segmentEntry{
				{
					key:       uint8(0),
					additions: []uint64{11, 22, 33, 44},
					deletions: []uint64{111},
				},
				{
					key:       uint8(1),
					additions: []uint64{22},
					deletions: []uint64{},
				},
				{
					key:       uint8(2),
					additions: []uint64{33},
					deletions: []uint64{},
				},
				{
					key:       uint8(3),
					additions: []uint64{44},
					deletions: []uint64{},
				},
			}),
			right: createSegmentsFromEntries(t, []segmentEntry{
				{
					key:       uint8(0),
					additions: []uint64{22, 33, 44, 55},
					deletions: []uint64{11, 22, 33, 44, 666},
				},
				{
					key:       uint8(1),
					additions: []uint64{55},
					deletions: []uint64{},
				},
				{
					key:       uint8(2),
					additions: []uint64{22},
					deletions: []uint64{},
				},
				{
					key:       uint8(3),
					additions: []uint64{33},
					deletions: []uint64{},
				},
				{
					key:       uint8(4),
					additions: []uint64{44},
					deletions: []uint64{},
				},
			}),
			expectedKeep: []segmentEntry{
				{
					key:       uint8(0),
					additions: []uint64{22, 33, 44, 55},
					deletions: []uint64{11, 22, 33, 44, 111, 666},
				},
				{
					key:       uint8(1),
					additions: []uint64{55},
					deletions: nil,
				},
				{
					key:       uint8(2),
					additions: []uint64{22},
					deletions: nil,
				},
				{
					key:       uint8(3),
					additions: []uint64{33},
					deletions: nil,
				},
				{
					key:       uint8(4),
					additions: []uint64{44},
					deletions: nil,
				},
			},
			expectedCleanup: []segmentEntry{
				{
					key:       uint8(0),
					additions: []uint64{22, 33, 44, 55},
					deletions: []uint64{},
				},
				{
					key:       uint8(1),
					additions: []uint64{55},
					deletions: nil,
				},
				{
					key:       uint8(2),
					additions: []uint64{22},
					deletions: nil,
				},
				{
					key:       uint8(3),
					additions: []uint64{33},
					deletions: nil,
				},
				{
					key:       uint8(4),
					additions: []uint64{44},
					deletions: nil,
				},
			},
		},
		{
			// with cleanup enabled a fully-deleted segment compacts to nothing
			name: "segments with everything deleted",
			left: createSegmentsFromEntries(t, []segmentEntry{
				{
					key:       uint8(0),
					additions: []uint64{11, 22, 33, 44},
					deletions: []uint64{111},
				},
				{
					key:       uint8(1),
					additions: []uint64{22},
					deletions: []uint64{},
				},
				{
					key:       uint8(2),
					additions: []uint64{33},
					deletions: []uint64{},
				},
				{
					key:       uint8(3),
					additions: []uint64{44},
					deletions: []uint64{},
				},
			}),
			right: createSegmentsFromEntries(t, []segmentEntry{
				{
					key:       uint8(0),
					additions: []uint64{},
					deletions: []uint64{11, 22, 33, 44},
				},
			}),
			expectedKeep: []segmentEntry{
				{
					key:       uint8(0),
					additions: []uint64{},
					deletions: []uint64{11, 22, 33, 44, 111},
				},
			},
			expectedCleanup: []segmentEntry{},
		},
		{
			name:            "empty both segments",
			left:            []byte{},
			right:           []byte{},
			expectedKeep:    []segmentEntry{},
			expectedCleanup: []segmentEntry{},
		},
		{
			name: "empty right segment",
			left: createSegmentsFromEntries(t, []segmentEntry{
				{
					key:       uint8(0),
					additions: []uint64{11, 22, 33},
					deletions: []uint64{111},
				},
				{
					key:       uint8(1),
					additions: []uint64{22},
					deletions: []uint64{222}, // ignored
				},
				{
					key:       uint8(2),
					additions: []uint64{33},
					deletions: []uint64{333}, // ignored
				},
			}),
			right: []byte{},
			expectedKeep: []segmentEntry{
				{
					key:       uint8(0),
					additions: []uint64{11, 22, 33},
					deletions: []uint64{111},
				},
				{
					key:       uint8(1),
					additions: []uint64{22},
					deletions: nil,
				},
				{
					key:       uint8(2),
					additions: []uint64{33},
					deletions: nil,
				},
			},
			expectedCleanup: []segmentEntry{
				{
					key:       uint8(0),
					additions: []uint64{11, 22, 33},
					deletions: []uint64{},
				},
				{
					key:       uint8(1),
					additions: []uint64{22},
					deletions: nil,
				},
				{
					key:       uint8(2),
					additions: []uint64{33},
					deletions: nil,
				},
			},
		},
		{
			name: "empty left segment",
			left: []byte{},
			right: createSegmentsFromEntries(t, []segmentEntry{
				{
					key:       uint8(0),
					additions: []uint64{11, 22, 33},
					deletions: []uint64{111},
				},
				{
					key:       uint8(1),
					additions: []uint64{22},
					deletions: []uint64{222}, // ignored
				},
				{
					key:       uint8(2),
					additions: []uint64{33},
					deletions: []uint64{333}, // ignored
				},
			}),
			expectedKeep: []segmentEntry{
				{
					key:       uint8(0),
					additions: []uint64{11, 22, 33},
					deletions: []uint64{111},
				},
				{
					key:       uint8(1),
					additions: []uint64{22},
					deletions: nil,
				},
				{
					key:       uint8(2),
					additions: []uint64{33},
					deletions: nil,
				},
			},
			expectedCleanup: []segmentEntry{
				{
					key:       uint8(0),
					additions: []uint64{11, 22, 33},
					deletions: []uint64{},
				},
				{
					key:       uint8(1),
					additions: []uint64{22},
					deletions: nil,
				},
				{
					key:       uint8(2),
					additions: []uint64{33},
					deletions: nil,
				},
			},
		},
		{
			// a segment not starting with key 0 must be rejected
			name: "invalid left segment",
			left: createSegmentsFromEntries(t, []segmentEntry{
				{
					key:       uint8(1),
					additions: []uint64{12345},
					deletions: []uint64{},
				},
			}),
			right: createSegmentsFromEntries(t, []segmentEntry{
				{
					key:       uint8(0),
					additions: []uint64{11, 22, 33},
					deletions: []uint64{111},
				},
				{
					key:       uint8(1),
					additions: []uint64{22},
					deletions: []uint64{222}, // ignored
				},
				{
					key:       uint8(2),
					additions: []uint64{33},
					deletions: []uint64{333}, // ignored
				},
			}),
			expectedErr: "left segment: missing key 0 (non-null bitmap)",
		},
		{
			name: "invalid right segment",
			left: createSegmentsFromEntries(t, []segmentEntry{
				{
					key:       uint8(0),
					additions: []uint64{11, 22, 33},
					deletions: []uint64{111},
				},
				{
					key:       uint8(1),
					additions: []uint64{22},
					deletions: []uint64{222}, // ignored
				},
				{
					key:       uint8(2),
					additions: []uint64{33},
					deletions: []uint64{333}, // ignored
				},
			}),
			right: createSegmentsFromEntries(t, []segmentEntry{
				{
					key:       uint8(1),
					additions: []uint64{12345},
					deletions: []uint64{},
				},
			}),
			expectedErr: "right segment: missing key 0 (non-null bitmap)",
		},
	}

	for _, test := range tests {
		for _, checkSum := range []bool{true, false} {
			// sized so the in-memory writer path is exercised
			maxNewFileSize := int64(len(test.left)+len(test.right)) + segmentindex.HeaderSize
			if checkSum {
				maxNewFileSize += 8 // for checksum
			}

			t.Run("[keep] "+test.name, func(t *testing.T) {
				leftCursor := NewSegmentCursorMmap(test.left)
				rightCursor := NewSegmentCursorMmap(test.right)

				// NOTE(review): the first call's error is deliberately dropped;
				// only the buffered-writer run's error is asserted below
				bytesInMemory, _ := cursorCompactor(t, leftCursor, rightCursor, maxNewFileSize, false, checkSum)
				bytesWriter, err := cursorCompactor(t, leftCursor, rightCursor, compactor.SegmentWriterBufferSize+1, false, checkSum)

				if test.expectedErr == "" {
					require.NoError(t, err)
					// both writer paths must produce byte-identical segments
					require.Equal(t, bytesInMemory, bytesWriter)

					header, err := segmentindex.ParseHeader(bytesInMemory[:segmentindex.HeaderSize])
					require.NoError(t, err)

					cu := NewSegmentCursorMmap(bytesInMemory[segmentindex.HeaderSize:header.IndexStart])

					i := 0
					for k, l, ok := cu.First(); ok; k, l, ok = cu.Next() {
						assert.Equal(t, test.expectedKeep[i].key, k)
						assert.Equal(t, test.expectedKeep[i].additions, l.Additions.ToArray())
						assert.Equal(t, test.expectedKeep[i].deletions, l.Deletions.ToArray())
						i++
					}

					assert.Equal(t, len(test.expectedKeep), i, "all expected keys must have been hit")
				} else {
					assert.ErrorContains(t, err, test.expectedErr)
				}
			})
		}
	}

	for _, test := range tests {
		for _, checkSum := range []bool{true, false} {
			maxNewFileSize := int64(len(test.left)+len(test.right)) + segmentindex.HeaderSize
			if checkSum {
				maxNewFileSize += 8 // for checksum
			}

			t.Run("[cleanup] "+test.name, func(t *testing.T) {
				leftCursor := NewSegmentCursorMmap(test.left)
				rightCursor := NewSegmentCursorMmap(test.right)

				bytesInMemory, _ := cursorCompactor(t, leftCursor, rightCursor, maxNewFileSize, true, checkSum)
				bytesWriter, err := cursorCompactor(t, leftCursor, rightCursor, compactor.SegmentWriterBufferSize+1, true, checkSum)

				if test.expectedErr == "" {
					require.NoError(t, err)
					require.Equal(t, bytesInMemory, bytesWriter)

					header, err := segmentindex.ParseHeader(bytesInMemory[:segmentindex.HeaderSize])
					require.NoError(t, err)

					cu := NewSegmentCursorMmap(bytesInMemory[segmentindex.HeaderSize:header.IndexStart])

					i := 0
					for k, l, ok := cu.First(); ok; k, l, ok = cu.Next() {
						assert.Equal(t, test.expectedCleanup[i].key, k)
						assert.Equal(t, test.expectedCleanup[i].additions, l.Additions.ToArray())
						assert.Equal(t, test.expectedCleanup[i].deletions, l.Deletions.ToArray())
						i++
					}

					assert.Equal(t, len(test.expectedCleanup), i, "all expected keys must have been hit")
				} else {
					assert.ErrorContains(t, err, test.expectedErr)
				}
			})
		}
	}
}

// cursorCompactor runs one full compaction into a temp file and returns the
// resulting segment bytes. On compaction error the file is closed and the
// error returned instead of failing the test, so callers can assert on it.
func cursorCompactor(t *testing.T, leftCursor, rightCursor SegmentCursor, maxNewFileSize int64, cleanup, checkSum bool) ([]byte, error) {
	t.Helper()
	dir := t.TempDir()

	segmentFile := filepath.Join(dir, fmt.Sprintf("result-%v-%v-%v.db", cleanup, checkSum, maxNewFileSize))
	f, err := os.Create(segmentFile)
	require.NoError(t, err)

	c := NewCompactor(f, leftCursor, rightCursor, 5, cleanup, checkSum, maxNewFileSize)
	if err := c.Do(); err != nil {
		require.NoError(t, f.Close())
		return nil, err
	}

	require.NoError(t, f.Close())

	f, err = os.Open(segmentFile)
	require.NoError(t, err)

	segmentBytes, err := io.ReadAll(f)
	require.NoError(t, err)
	require.NoError(t, f.Close())

	return segmentBytes, nil
}

// segmentEntry is the plain-data description of one roaringsetrange node used
// both as test input and as expected output.
type segmentEntry struct {
	key       uint8
	additions []uint64
	deletions []uint64
}

// createSegmentsFromEntries serializes the given entries into a contiguous
// byte slice, as a segment body (without header/index) would look on disk.
func createSegmentsFromEntries(t *testing.T, entries []segmentEntry) []byte {
	out := []byte{}

	for _, entry := range entries {
		add := roaringset.NewBitmap(entry.additions...)
		del := roaringset.NewBitmap(entry.deletions...)
		sn, err := NewSegmentNode(entry.key, add, del)
		require.Nil(t, err)
		out = append(out, sn.ToBuffer()...)
	}

	return out
}
diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringsetrange/memtable.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringsetrange/memtable.go
new file mode 100644
index 0000000000000000000000000000000000000000..ee5c310ed5bdabbfb7457410fe04da8232bdb256
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringsetrange/memtable.go
@@ -0,0 +1,150 @@
+//                           _       _
+// __      _____  __ ___   ___  __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+//  \ V  V /  __/ (_| |\ V /| | (_| | ||  __/
+//   \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+//  Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+//  CONTACT: hello@weaviate.io
+//

package roaringsetrange

import (
	"sync"

	"github.com/sirupsen/logrus"
	"github.com/weaviate/sroar"
	"github.com/weaviate/weaviate/entities/errors"
)

// As docID (acting as value) can have only single value (acting as key) assigned
// every new value replaces the previous one
// (therefore array data types are not supported)
type Memtable struct {
	logger logrus.FieldLogger
	// additions maps docID (value) -> its current key; a value occurs at most once
	additions map[uint64]uint64
	// deletions is the set of docIDs deleted since the last flush
	deletions map[uint64]struct{}
}

// NewMemtable returns an empty Memtable ready for inserts and deletes.
func NewMemtable(logger logrus.FieldLogger) *Memtable {
	return &Memtable{
		logger:    logger,
		additions: make(map[uint64]uint64),
		deletions: make(map[uint64]struct{}),
	}
}

// Insert assigns key to every given value. A later Insert for the same value
// overwrites the earlier assignment (single-value semantics, see type doc).
func (m *Memtable) Insert(key uint64, values []uint64) {
	if len(values) == 0 {
		return
	}

	for _, v := range values {
		m.additions[v] = key
	}
}

// Delete removes every given value from additions and records it as deleted.
// Note that the key argument is not consulted for the removal itself.
func (m *Memtable) Delete(key uint64, values []uint64) {
	if len(values) == 0 {
		return
	}

	for _, v := range values {
		delete(m.additions, v)
		m.deletions[v] = struct{}{}
	}
}

// Clone returns a deep copy of the memtable's addition/deletion maps (the
// logger is shared), so the copy is unaffected by later mutations.
func (m *Memtable) Clone() *Memtable {
	clone := &Memtable{logger: m.logger}
	clone.additions = make(map[uint64]uint64, len(m.additions))
	clone.deletions = make(map[uint64]struct{}, len(m.deletions))

	for k := range m.additions {
		clone.additions[k] = m.additions[k]
	}
	for
 k := range m.deletions {
		clone.deletions[k] = m.deletions[k]
	}

	return clone
}

// Nodes materializes the memtable into per-bit MemtableNode layers: node 0 is
// the non-null bitmap (all touched docIDs), followed by one node per set key
// bit. The per-bit work is fanned out across goroutines via errors.GoWrapper.
func (m *Memtable) Nodes() []*MemtableNode {
	if len(m.additions) == 0 && len(m.deletions) == 0 {
		return []*MemtableNode{}
	}

	nnDeletions := sroar.NewBitmap()
	nnAdditions := sroar.NewBitmap()
	var bitsAdditions [64]*sroar.Bitmap

	// every deleted or (re-)added docID lands in the non-null deletions bitmap
	for v := range m.deletions {
		nnDeletions.Set(v)
	}
	for v := range m.additions {
		nnDeletions.Set(v)
		nnAdditions.Set(v)
	}

	routines := 8
	wg := new(sync.WaitGroup)
	wg.Add(routines - 1)

	for i := 0; i < routines-1; i++ {
		i := i
		errors.GoWrapper(func() {
			for j := 0; j < 64; j += routines {
				bit := i + j
				for value, key := range m.additions {
					if key&(1< value }), noopRelease, nil
// NOTE(review): the patch text is corrupted in the span above — it jumps
// mid-expression (presumably `key&(1<<bit) != 0`) from Memtable.Nodes into
// the middle of MemtableReader.Read's operator switch in memtable_reader.go.
// The remainder of Nodes, the MemtableNode/MemtableReader declarations and
// the head of Read (including the GreaterThan case whose tail is visible
// above) are missing from this chunk and must be recovered from the original
// patch; do not reconstruct them by guesswork.

	case filters.OperatorGreaterThanEqual:
		return r.read(func(k uint64) bool { return k >= value }), noopRelease, nil

	default:
		// TODO move strategies to separate package?
		return roaringset.BitmapLayer{}, noopRelease,
			fmt.Errorf("operator %v not supported for segments of strategy %q", operator.Name(), "roaringsetrange")
	}
}

// read scans the memtable once and builds a layer: additions contains every
// docID whose key satisfies the predicate; deletions contains every docID the
// memtable has touched at all (added or deleted), i.e. the non-null set.
func (r *MemtableReader) read(predicate func(k uint64) bool) roaringset.BitmapLayer {
	additions := sroar.NewBitmap()
	deletions := sroar.NewBitmap()

	for v, k := range r.memtable.additions {
		if predicate(k) {
			additions.Set(v)
		}
		deletions.Set(v)
	}
	for v := range r.memtable.deletions {
		deletions.Set(v)
	}

	return roaringset.BitmapLayer{
		Additions: additions,
		Deletions: deletions,
	}
}
diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringsetrange/memtable_reader_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringsetrange/memtable_reader_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..153a7b8c82136ec005b1157b45977eb32e0b2876
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringsetrange/memtable_reader_test.go
@@ -0,0 +1,163 @@
+//                           _       _
+// __      _____  __ ___   ___  __ _| |_ ___
+//
\ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package roaringsetrange + +import ( + "testing" + + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/weaviate/weaviate/entities/filters" + "golang.org/x/net/context" +) + +func TestMemtableReader(t *testing.T) { + logger, _ := test.NewNullLogger() + + mem := NewMemtable(logger) + mem.Insert(13, []uint64{113, 213}) // ...1101 + mem.Insert(5, []uint64{15, 25}) // ...0101 + mem.Insert(0, []uint64{10, 20}) // ...0000 + mem.Delete(20, []uint64{120, 220}) + + reader := NewMemtableReader(mem) + + type testCase struct { + value uint64 + operator filters.Operator + expectedAdd []uint64 + expectedDel []uint64 + } + + testCases := []testCase{ + { + value: 0, + operator: filters.OperatorGreaterThanEqual, + expectedAdd: []uint64{10, 20, 15, 25, 113, 213}, + expectedDel: []uint64{10, 20, 15, 25, 113, 213, 120, 220}, + }, + { + value: 0, + operator: filters.OperatorGreaterThan, + expectedAdd: []uint64{15, 25, 113, 213}, + expectedDel: []uint64{10, 20, 15, 25, 113, 213, 120, 220}, + }, + { + value: 0, + operator: filters.OperatorLessThanEqual, + expectedAdd: []uint64{10, 20}, + expectedDel: []uint64{10, 20, 15, 25, 113, 213, 120, 220}, + }, + { + value: 0, + operator: filters.OperatorLessThan, + expectedAdd: []uint64{}, + expectedDel: []uint64{10, 20, 15, 25, 113, 213, 120, 220}, + }, + { + value: 0, + operator: filters.OperatorEqual, + expectedAdd: []uint64{10, 20}, + expectedDel: []uint64{10, 20, 15, 25, 113, 213, 120, 220}, + }, + { + value: 0, + operator: filters.OperatorNotEqual, + expectedAdd: []uint64{15, 25, 113, 213}, + expectedDel: []uint64{10, 20, 15, 25, 113, 213, 120, 220}, + }, + + { + value: 5, + operator: filters.OperatorGreaterThanEqual, + expectedAdd: []uint64{15, 25, 113, 213}, + 
expectedDel: []uint64{10, 20, 15, 25, 113, 213, 120, 220}, + }, + { + value: 5, + operator: filters.OperatorGreaterThan, + expectedAdd: []uint64{113, 213}, + expectedDel: []uint64{10, 20, 15, 25, 113, 213, 120, 220}, + }, + { + value: 5, + operator: filters.OperatorLessThanEqual, + expectedAdd: []uint64{10, 20, 15, 25}, + expectedDel: []uint64{10, 20, 15, 25, 113, 213, 120, 220}, + }, + { + value: 5, + operator: filters.OperatorLessThan, + expectedAdd: []uint64{10, 20}, + expectedDel: []uint64{10, 20, 15, 25, 113, 213, 120, 220}, + }, + { + value: 5, + operator: filters.OperatorEqual, + expectedAdd: []uint64{15, 25}, + expectedDel: []uint64{10, 20, 15, 25, 113, 213, 120, 220}, + }, + { + value: 5, + operator: filters.OperatorNotEqual, + expectedAdd: []uint64{10, 20, 113, 213}, + expectedDel: []uint64{10, 20, 15, 25, 113, 213, 120, 220}, + }, + + { + value: 13, + operator: filters.OperatorGreaterThanEqual, + expectedAdd: []uint64{113, 213}, + expectedDel: []uint64{10, 20, 15, 25, 113, 213, 120, 220}, + }, + { + value: 13, + operator: filters.OperatorGreaterThan, + expectedAdd: []uint64{}, + expectedDel: []uint64{10, 20, 15, 25, 113, 213, 120, 220}, + }, + { + value: 13, + operator: filters.OperatorLessThanEqual, + expectedAdd: []uint64{10, 20, 15, 25, 113, 213}, + expectedDel: []uint64{10, 20, 15, 25, 113, 213, 120, 220}, + }, + { + value: 13, + operator: filters.OperatorLessThan, + expectedAdd: []uint64{10, 20, 15, 25}, + expectedDel: []uint64{10, 20, 15, 25, 113, 213, 120, 220}, + }, + { + value: 13, + operator: filters.OperatorEqual, + expectedAdd: []uint64{113, 213}, + expectedDel: []uint64{10, 20, 15, 25, 113, 213, 120, 220}, + }, + { + value: 13, + operator: filters.OperatorNotEqual, + expectedAdd: []uint64{10, 20, 15, 25}, + expectedDel: []uint64{10, 20, 15, 25, 113, 213, 120, 220}, + }, + } + + for _, tc := range testCases { + t.Run("read", func(t *testing.T) { + bm, release, err := reader.Read(context.Background(), tc.value, tc.operator) + assert.NoError(t, 
err) + defer release() + assert.ElementsMatch(t, bm.Additions.ToArray(), tc.expectedAdd) + assert.ElementsMatch(t, bm.Deletions.ToArray(), tc.expectedDel) + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringsetrange/memtable_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringsetrange/memtable_test.go new file mode 100644 index 0000000000000000000000000000000000000000..bd1760275050076fe6edaf5f08f444575f341a13 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringsetrange/memtable_test.go @@ -0,0 +1,324 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package roaringsetrange + +import ( + "bytes" + "encoding/binary" + "math/rand" + "testing" + "time" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestMemtable(t *testing.T) { + logger, _ := test.NewNullLogger() + + t.Run("empty returns no nodes", func(t *testing.T) { + m := NewMemtable(logger) + nodes := m.Nodes() + + assert.Empty(t, nodes) + }) + + t.Run("returns only nodes for set bits - unique inserts", func(t *testing.T) { + m := NewMemtable(logger) + m.Insert(13, []uint64{113, 213}) // ...1101 + m.Insert(5, []uint64{15, 25}) // ...0101 + m.Insert(0, []uint64{10, 20}) // ...0000 + + nodes := m.Nodes() + require.Len(t, nodes, 3+1) + + nodeNN := nodes[0] + assert.Equal(t, uint8(0), nodeNN.Key) + assert.ElementsMatch(t, []uint64{10, 20, 15, 25, 113, 213}, nodeNN.Additions.ToArray()) + assert.ElementsMatch(t, []uint64{10, 20, 15, 25, 113, 213}, nodeNN.Deletions.ToArray()) + + node0 := nodes[1] + assert.Equal(t, uint8(1), node0.Key) + assert.ElementsMatch(t, []uint64{15, 25, 113, 213}, 
node0.Additions.ToArray()) + assert.True(t, node0.Deletions.IsEmpty()) + + node2 := nodes[2] + assert.Equal(t, uint8(3), node2.Key) + assert.ElementsMatch(t, []uint64{15, 25, 113, 213}, node2.Additions.ToArray()) + assert.True(t, node2.Deletions.IsEmpty()) + + node3 := nodes[3] + assert.Equal(t, uint8(4), node3.Key) + assert.ElementsMatch(t, []uint64{113, 213}, node3.Additions.ToArray()) + assert.True(t, node3.Deletions.IsEmpty()) + }) + + t.Run("returns only nodes for set bits - overwriting inserts", func(t *testing.T) { + m := NewMemtable(logger) + m.Insert(13, []uint64{11, 22, 33}) // ...1101 + m.Insert(5, []uint64{11, 22}) // ...0101 + m.Insert(0, []uint64{11}) // ...0000 + + nodes := m.Nodes() + require.Len(t, nodes, 3+1) + + nodeNN := nodes[0] + assert.Equal(t, uint8(0), nodeNN.Key) + assert.ElementsMatch(t, []uint64{11, 22, 33}, nodeNN.Additions.ToArray()) + assert.ElementsMatch(t, []uint64{11, 22, 33}, nodeNN.Deletions.ToArray()) + + node0 := nodes[1] + assert.Equal(t, uint8(1), node0.Key) + assert.ElementsMatch(t, []uint64{22, 33}, node0.Additions.ToArray()) + assert.True(t, node0.Deletions.IsEmpty()) + + node2 := nodes[2] + assert.Equal(t, uint8(3), node2.Key) + assert.ElementsMatch(t, []uint64{22, 33}, node2.Additions.ToArray()) + assert.True(t, node2.Deletions.IsEmpty()) + + node3 := nodes[3] + assert.Equal(t, uint8(4), node3.Key) + assert.ElementsMatch(t, []uint64{33}, node3.Additions.ToArray()) + assert.True(t, node3.Deletions.IsEmpty()) + }) + + t.Run("returns only nodes for set bits - overwriting inserts with deletes", func(t *testing.T) { + m := NewMemtable(logger) + m.Insert(13, []uint64{11, 22, 33}) // ...1101 + m.Delete(5, []uint64{11, 22}) // ...0101 + m.Insert(5, []uint64{11, 22}) // ...0101 + m.Delete(0, []uint64{11}) // ...0000 + m.Insert(0, []uint64{11}) // ...0000 + + nodes := m.Nodes() + require.Len(t, nodes, 3+1) + + nodeNN := nodes[0] + assert.Equal(t, uint8(0), nodeNN.Key) + assert.ElementsMatch(t, []uint64{11, 22, 33}, 
nodeNN.Additions.ToArray()) + assert.ElementsMatch(t, []uint64{11, 22, 33}, nodeNN.Deletions.ToArray()) + + node0 := nodes[1] + assert.Equal(t, uint8(1), node0.Key) + assert.ElementsMatch(t, []uint64{22, 33}, node0.Additions.ToArray()) + assert.True(t, node0.Deletions.IsEmpty()) + + node2 := nodes[2] + assert.Equal(t, uint8(3), node2.Key) + assert.ElementsMatch(t, []uint64{22, 33}, node2.Additions.ToArray()) + assert.True(t, node2.Deletions.IsEmpty()) + + node3 := nodes[3] + assert.Equal(t, uint8(4), node3.Key) + assert.ElementsMatch(t, []uint64{33}, node3.Additions.ToArray()) + assert.True(t, node3.Deletions.IsEmpty()) + }) + + t.Run("delete does not mind key value", func(t *testing.T) { + m := NewMemtable(logger) + m.Delete(13, []uint64{33}) // ...1101 + m.Delete(5, []uint64{22}) // ...0101 + m.Delete(0, []uint64{11}) // ...0000 + + nodes := m.Nodes() + require.Len(t, nodes, 1) + + nodeNN := nodes[0] + assert.Equal(t, uint8(0), nodeNN.Key) + assert.True(t, nodeNN.Additions.IsEmpty()) + assert.ElementsMatch(t, []uint64{11, 22, 33}, nodeNN.Deletions.ToArray()) + }) + + t.Run("deletes all", func(t *testing.T) { + m := NewMemtable(logger) + m.Insert(13, []uint64{33}) // ...1101 + m.Delete(13, []uint64{33}) // ...1101 + m.Insert(5, []uint64{22}) // ...0101 + m.Insert(0, []uint64{11}) // ...0000 + m.Delete(5, []uint64{22}) // ...0101 + m.Delete(0, []uint64{11}) // ...0000 + + nodes := m.Nodes() + require.Len(t, nodes, 1) + + nodeNN := nodes[0] + assert.Equal(t, uint8(0), nodeNN.Key) + assert.True(t, nodeNN.Additions.IsEmpty()) + assert.ElementsMatch(t, []uint64{11, 22, 33}, nodeNN.Deletions.ToArray()) + }) + + t.Run("deletes all but one", func(t *testing.T) { + m := NewMemtable(logger) + m.Insert(13, []uint64{33}) // ...1101 + m.Delete(13, []uint64{33}) // ...1101 + m.Insert(5, []uint64{22}) // ...0101 + m.Insert(0, []uint64{11}) // ...0000 + m.Delete(0, []uint64{11}) // ...0000 + + nodes := m.Nodes() + require.Len(t, nodes, 2+1) + + nodeNN := nodes[0] + 
assert.Equal(t, uint8(0), nodeNN.Key) + assert.ElementsMatch(t, []uint64{22}, nodeNN.Additions.ToArray()) + assert.ElementsMatch(t, []uint64{11, 22, 33}, nodeNN.Deletions.ToArray()) + + node0 := nodes[1] + assert.Equal(t, uint8(1), node0.Key) + assert.ElementsMatch(t, []uint64{22}, node0.Additions.ToArray()) + assert.True(t, node0.Deletions.IsEmpty()) + + node2 := nodes[2] + assert.Equal(t, uint8(3), node2.Key) + assert.ElementsMatch(t, []uint64{22}, node2.Additions.ToArray()) + assert.True(t, node2.Deletions.IsEmpty()) + }) + + t.Run("delete removes values regardless of key being deleted", func(t *testing.T) { + m := NewMemtable(logger) + m.Insert(13, []uint64{33}) // ...00001101 + m.Insert(5, []uint64{22}) // ...00000101 + m.Insert(0, []uint64{11}) // ...00000000 + m.Delete(123, []uint64{11, 22, 33, 44}) // ...01111011 + + nodes := m.Nodes() + require.Len(t, nodes, 1) + + nodeNN := nodes[0] + assert.Equal(t, uint8(0), nodeNN.Key) + assert.True(t, nodeNN.Additions.IsEmpty()) + assert.ElementsMatch(t, []uint64{11, 22, 33, 44}, nodeNN.Deletions.ToArray()) + }) + + t.Run("cloned memtable is not mutated", func(t *testing.T) { + assertSameNodes := func(t *testing.T, expNodes, nodes []*MemtableNode) { + require.NotNil(t, nodes) + require.Len(t, nodes, len(expNodes)) + for i := range expNodes { + assert.Equal(t, expNodes[i].Key, nodes[i].Key) + assert.ElementsMatch(t, expNodes[i].Additions.ToArray(), nodes[i].Additions.ToArray()) + assert.ElementsMatch(t, expNodes[i].Deletions.ToArray(), nodes[i].Deletions.ToArray()) + } + } + + m := NewMemtable(logger) + m.Insert(1, []uint64{11, 21, 31}) + m.Insert(2, []uint64{12, 22, 32}) + m.Insert(3, []uint64{13, 23, 33}) + m.Delete(4, []uint64{14, 24, 34}) + m.Delete(5, []uint64{15, 25, 35}) + mNodes := m.Nodes() + + c := m.Clone() + cNodes := c.Nodes() + + assertSameNodes(t, mNodes, cNodes) + + m.Insert(13, []uint64{113, 213, 313}) + m.Delete(14, []uint64{114, 214, 314}) + mNodes2 := m.Nodes() + cNodes2 := c.Nodes() + + 
assert.NotEqual(t, len(mNodes), len(mNodes2)) + assertSameNodes(t, cNodes, cNodes2) + }) +} + +func BenchmarkMemtableInsert(b *testing.B) { + logger, _ := test.NewNullLogger() + count := uint64(100_000) + keys := make([]uint64, count) + + // generate + for i := range keys { + bytes, err := lexicographicallySortableFloat64(float64(i) / 3) + require.NoError(b, err) + value := binary.BigEndian.Uint64(bytes) + keys[i] = value + } + + // shuffle + r := rand.New(rand.NewSource(time.Now().UnixNano())) + for i := range keys { + j := r.Intn(i + 1) + keys[i], keys[j] = keys[j], keys[i] + } + + val := make([]uint64, 1) + for i := 0; i < b.N; i++ { + m := NewMemtable(logger) + for value := uint64(0); value < count; value++ { + val[0] = value + m.Insert(keys[value], val) + } + } +} + +func BenchmarkMemtableFlatten(b *testing.B) { + logger, _ := test.NewNullLogger() + count := uint64(100_000) + keys := make([]uint64, count) + + // generate + for i := range keys { + bytes, err := lexicographicallySortableFloat64(float64(i) / 3) + require.NoError(b, err) + value := binary.BigEndian.Uint64(bytes) + keys[i] = value + } + + // shuffle + r := rand.New(rand.NewSource(time.Now().UnixNano())) + for i := range keys { + j := r.Intn(i + 1) + keys[i], keys[j] = keys[j], keys[i] + } + + val := make([]uint64, 1) + m := NewMemtable(logger) + for value := uint64(0); value < count; value++ { + val[0] = value + m.Insert(keys[value], val) + } + + for i := 0; i < b.N; i++ { + m.Nodes() + } +} + +func lexicographicallySortableFloat64(in float64) ([]byte, error) { + buf := bytes.NewBuffer(nil) + + err := binary.Write(buf, binary.BigEndian, in) + if err != nil { + return nil, errors.Wrap(err, "serialize float64 value as big endian") + } + + var out []byte + if in >= 0 { + // on positive numbers only flip the sign + out = buf.Bytes() + firstByte := out[0] ^ 0x80 + out = append([]byte{firstByte}, out[1:]...) 
+ } else { + // on negative numbers flip every bit + out = make([]byte, 8) + for i, b := range buf.Bytes() { + out[i] = b ^ 0xFF + } + } + + return out, nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringsetrange/reader.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringsetrange/reader.go new file mode 100644 index 0000000000000000000000000000000000000000..525af14a029c085c4e641df53cf09f6ce53818ac --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringsetrange/reader.go @@ -0,0 +1,156 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package roaringsetrange + +import ( + "context" + "sync" + "time" + + "github.com/sirupsen/logrus" + "github.com/weaviate/sroar" + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/adapters/repos/db/roaringset" + "github.com/weaviate/weaviate/entities/concurrency" + "github.com/weaviate/weaviate/entities/errorcompounder" + "github.com/weaviate/weaviate/entities/errors" + "github.com/weaviate/weaviate/entities/filters" +) + +type InnerReader interface { + Read(ctx context.Context, value uint64, operator filters.Operator) (layer roaringset.BitmapLayer, release func(), err error) +} + +type CombinedReader struct { + logger logrus.FieldLogger + readers []InnerReader + releaseReaders func() + concurrency int +} + +func NewCombinedReader(readers []InnerReader, releaseReaders func(), concurrency int, + logger logrus.FieldLogger, +) *CombinedReader { + return &CombinedReader{ + logger: logger, + readers: readers, + releaseReaders: releaseReaders, + concurrency: concurrency, + } +} + +func (r *CombinedReader) Read(ctx context.Context, value uint64, operator filters.Operator, +) (*sroar.Bitmap, 
func(), error) { + before := time.Now() + count := len(r.readers) + + var subresultsReadSum time.Duration + var mergingSum time.Duration + + defer func() { + took := time.Since(before) + vals := map[string]any{ + "readers": count, + "subresults_read_sum_took": subresultsReadSum, + "subresults_read_sum_took_string": subresultsReadSum.String(), + "merging_sum_took": mergingSum, + "merging_sum_took_string": mergingSum.String(), + "took": took, + "took_string": took.String(), + } + + helpers.AnnotateSlowQueryLogAppend(ctx, "build_allow_list_doc_bitmap_rangeable", vals) + }() + + switch count { + case 0: + return sroar.NewBitmap(), noopRelease, nil + case 1: + t := time.Now() + layer, release, err := r.readers[0].Read(ctx, value, operator) + subresultsReadSum = time.Since(t) + + if err != nil { + return nil, noopRelease, err + } + return layer.Additions, release, nil + } + + lock := new(sync.Mutex) + addReadTime := func(d time.Duration) { + lock.Lock() + subresultsReadSum += d + lock.Unlock() + } + + // all readers but last one. 
it will be processed by current goroutine + responseChans := make([]chan *readerResponse, count-1) + for i := range responseChans { + responseChans[i] = make(chan *readerResponse, 1) + } + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + errors.GoWrapper(func() { + eg, gctx := errors.NewErrorGroupWithContextWrapper(r.logger, ctx) + eg.SetLimit(r.concurrency) + + for i := 1; i < count; i++ { + i := i + eg.Go(func() error { + t := time.Now() + layer, release, err := r.readers[i].Read(gctx, value, operator) + addReadTime(time.Since(t)) + responseChans[i-1] <- &readerResponse{layer, release, err} + return err + }) + } + }, r.logger) + + t := time.Now() + layer, release, err := r.readers[0].Read(ctx, value, operator) + addReadTime(time.Since(t)) + + ec := errorcompounder.New() + ec.Add(err) + + for i := 1; i < count; i++ { + response := <-responseChans[i-1] + ec.Add(response.err) + + if ec.Len() == 0 { + t := time.Now() + layer.Additions.AndNotConc(response.layer.Deletions, concurrency.SROAR_MERGE) + layer.Additions.OrConc(response.layer.Additions, concurrency.SROAR_MERGE) + mergingSum += time.Since(t) + } + response.release() + } + + if ec.Len() > 0 { + release() + return nil, noopRelease, ec.ToError() + } + + return layer.Additions, release, nil +} + +func (r *CombinedReader) Close() { + r.releaseReaders() +} + +type readerResponse struct { + layer roaringset.BitmapLayer + release func() + err error +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringsetrange/reader_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringsetrange/reader_test.go new file mode 100644 index 0000000000000000000000000000000000000000..04fd4917c28d785d6fae722b0efa2a27ec22fe38 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringsetrange/reader_test.go @@ -0,0 +1,397 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// 
\_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package roaringsetrange + +import ( + "context" + "fmt" + "testing" + + "github.com/sirupsen/logrus" + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/sroar" + "github.com/weaviate/weaviate/adapters/repos/db/roaringset" + "github.com/weaviate/weaviate/entities/filters" +) + +func TestCombinedReader(t *testing.T) { + logger, _ := test.NewNullLogger() + mt1, mt2, mt3 := createTestMemtables(logger) + + testCases := []struct { + name string + value uint64 + operator filters.Operator + expected []uint64 + }{ + { + name: "greater than equal 0", + value: 0, + operator: filters.OperatorGreaterThanEqual, + expected: []uint64{10, 20, 14, 24, 15, 25, 113, 213, 117, 217, 119, 219}, + }, + { + name: "greater than 0", + value: 0, + operator: filters.OperatorGreaterThan, + expected: []uint64{14, 24, 15, 25, 113, 213, 117, 217, 119, 219}, + }, + { + name: "less than equal 0", + value: 0, + operator: filters.OperatorLessThanEqual, + expected: []uint64{10, 20}, + }, + { + name: "less than 0", + value: 0, + operator: filters.OperatorLessThan, + expected: []uint64{}, + }, + { + name: "equal 0", + value: 0, + operator: filters.OperatorEqual, + expected: []uint64{10, 20}, + }, + { + name: "not equal 0", + value: 0, + operator: filters.OperatorNotEqual, + expected: []uint64{14, 24, 15, 25, 113, 213, 117, 217, 119, 219}, + }, + + { + name: "greater than equal 4", + value: 4, + operator: filters.OperatorGreaterThanEqual, + expected: []uint64{14, 24, 15, 25, 113, 213, 117, 217, 119, 219}, + }, + { + name: "greater than 4", + value: 4, + operator: filters.OperatorGreaterThan, + expected: []uint64{15, 25, 113, 213, 117, 217, 119, 219}, + }, + { + name: "less than equal 4", + value: 4, + operator: filters.OperatorLessThanEqual, + expected: []uint64{10, 
20, 14, 24}, + }, + { + name: "less than 4", + value: 4, + operator: filters.OperatorLessThan, + expected: []uint64{10, 20}, + }, + { + name: "equal 4", + value: 4, + operator: filters.OperatorEqual, + expected: []uint64{14, 24}, + }, + { + name: "not equal 4", + value: 4, + operator: filters.OperatorNotEqual, + expected: []uint64{10, 20, 15, 25, 113, 213, 117, 217, 119, 219}, + }, + + { + name: "greater than equal 5", + value: 5, + operator: filters.OperatorGreaterThanEqual, + expected: []uint64{15, 25, 113, 213, 117, 217, 119, 219}, + }, + { + name: "greater than 5", + value: 5, + operator: filters.OperatorGreaterThan, + expected: []uint64{113, 213, 117, 217, 119, 219}, + }, + { + name: "less than equal 5", + value: 5, + operator: filters.OperatorLessThanEqual, + expected: []uint64{10, 20, 14, 24, 15, 25}, + }, + { + name: "less than 5", + value: 5, + operator: filters.OperatorLessThan, + expected: []uint64{10, 20, 14, 24}, + }, + { + name: "equal 5", + value: 5, + operator: filters.OperatorEqual, + expected: []uint64{15, 25}, + }, + { + name: "not equal 5", + value: 5, + operator: filters.OperatorNotEqual, + expected: []uint64{10, 20, 14, 24, 113, 213, 117, 217, 119, 219}, + }, + + { + name: "greater than equal 13", + value: 13, + operator: filters.OperatorGreaterThanEqual, + expected: []uint64{113, 213, 117, 217, 119, 219}, + }, + { + name: "greater than 13", + value: 13, + operator: filters.OperatorGreaterThan, + expected: []uint64{117, 217, 119, 219}, + }, + { + name: "less than equal 13", + value: 13, + operator: filters.OperatorLessThanEqual, + expected: []uint64{10, 20, 14, 24, 15, 25, 113, 213}, + }, + { + name: "less than 13", + value: 13, + operator: filters.OperatorLessThan, + expected: []uint64{10, 20, 14, 24, 15, 25}, + }, + { + name: "equal 13", + value: 13, + operator: filters.OperatorEqual, + expected: []uint64{113, 213}, + }, + { + name: "not equal 13", + value: 13, + operator: filters.OperatorNotEqual, + expected: []uint64{10, 20, 14, 24, 15, 
25, 117, 217, 119, 219}, + }, + + { + name: "greater than equal 17", + value: 17, + operator: filters.OperatorGreaterThanEqual, + expected: []uint64{117, 217, 119, 219}, + }, + { + name: "greater than 17", + value: 17, + operator: filters.OperatorGreaterThan, + expected: []uint64{119, 219}, + }, + { + name: "less than equal 17", + value: 17, + operator: filters.OperatorLessThanEqual, + expected: []uint64{10, 20, 14, 24, 15, 25, 113, 213, 117, 217}, + }, + { + name: "less than 17", + value: 17, + operator: filters.OperatorLessThan, + expected: []uint64{10, 20, 14, 24, 15, 25, 113, 213}, + }, + { + name: "equal 17", + value: 17, + operator: filters.OperatorEqual, + expected: []uint64{117, 217}, + }, + { + name: "not equal 17", + value: 17, + operator: filters.OperatorNotEqual, + expected: []uint64{10, 20, 14, 24, 15, 25, 113, 213, 119, 219}, + }, + + { + name: "greater than equal 19", + value: 19, + operator: filters.OperatorGreaterThanEqual, + expected: []uint64{119, 219}, + }, + { + name: "greater than 19", + value: 19, + operator: filters.OperatorGreaterThan, + expected: []uint64{}, + }, + { + name: "less than equal 19", + value: 19, + operator: filters.OperatorLessThanEqual, + expected: []uint64{10, 20, 14, 24, 15, 25, 113, 213, 117, 217, 119, 219}, + }, + { + name: "less than 19", + value: 19, + operator: filters.OperatorLessThan, + expected: []uint64{10, 20, 14, 24, 15, 25, 113, 213, 117, 217}, + }, + { + name: "equal 19", + value: 19, + operator: filters.OperatorEqual, + expected: []uint64{119, 219}, + }, + { + name: "not equal 19", + value: 19, + operator: filters.OperatorNotEqual, + expected: []uint64{10, 20, 14, 24, 15, 25, 113, 213, 117, 217}, + }, + } + + t.Run("segments + memtable readers", func(t *testing.T) { + seg1Reader := NewSegmentReader(NewGaplessSegmentCursor(newFakeSegmentCursor(mt1))) + seg2Reader := NewSegmentReader(NewGaplessSegmentCursor(newFakeSegmentCursor(mt2))) + mtReader := NewMemtableReader(mt3) + + reader := 
NewCombinedReader([]InnerReader{seg1Reader, seg2Reader, mtReader}, func() {}, 4, logger) + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + bm, release, err := reader.Read(context.Background(), tc.value, tc.operator) + assert.NoError(t, err) + defer release() + + assert.NotNil(t, bm) + assert.ElementsMatch(t, bm.ToArray(), tc.expected) + }) + } + }) + + t.Run("segment-in-memory + memtable readers", func(t *testing.T) { + s := NewSegmentInMemory() + s.MergeMemtable(mt1) + s.MergeMemtable(mt2) + + segInMemoReader, release := NewSegmentInMemoryReader(s, roaringset.NewBitmapBufPoolNoop()) + mtReader := NewMemtableReader(mt3) + + reader := NewCombinedReader([]InnerReader{segInMemoReader, mtReader}, release, 4, logger) + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + bm, release, err := reader.Read(context.Background(), tc.value, tc.operator) + assert.NoError(t, err) + defer release() + + assert.NotNil(t, bm) + assert.ElementsMatch(t, bm.ToArray(), tc.expected) + }) + } + }) +} + +func TestCombinedReaderInnerReaders(t *testing.T) { + logger, _ := test.NewNullLogger() + + t.Run("all but 1st inner readers' results are released", func(t *testing.T) { + expected := []uint64{1, 3, 5, 6} + + innerReader1 := newFakeInnerReader(roaringset.NewBitmap(1, 2), nil, nil) + innerReader2 := newFakeInnerReader(roaringset.NewBitmap(3, 4), roaringset.NewBitmap(2), nil) + innerReader3 := newFakeInnerReader(roaringset.NewBitmap(5, 6), roaringset.NewBitmap(4), nil) + reader := NewCombinedReader([]InnerReader{innerReader1, innerReader2, innerReader3}, func() {}, 4, logger) + + bm, release, err := reader.Read(context.Background(), 0, filters.OperatorGreaterThanEqual) + require.NoError(t, err) + + assert.ElementsMatch(t, expected, bm.ToArray()) + assert.Equal(t, 1, innerReader1.InUseCounter()) + assert.Equal(t, 0, innerReader2.InUseCounter()) + assert.Equal(t, 0, innerReader3.InUseCounter()) + release() + assert.Equal(t, 0, 
innerReader1.InUseCounter()) + }) + + t.Run("all inner readers' results are released on error", func(t *testing.T) { + innerReader1 := newFakeInnerReader(nil, nil, fmt.Errorf("error1")) + innerReader2 := newFakeInnerReader(roaringset.NewBitmap(3, 4), roaringset.NewBitmap(2), nil) + innerReader3 := newFakeInnerReader(nil, nil, fmt.Errorf("error3")) + reader := NewCombinedReader([]InnerReader{innerReader1, innerReader2, innerReader3}, func() {}, 4, logger) + + bm, _, err := reader.Read(context.Background(), 0, filters.OperatorGreaterThanEqual) + require.Error(t, err) + + assert.Nil(t, bm) + assert.ErrorContains(t, err, "error1") + assert.ErrorContains(t, err, "error3") + assert.Equal(t, 0, innerReader1.InUseCounter()) + assert.Equal(t, 0, innerReader2.InUseCounter()) + assert.Equal(t, 0, innerReader3.InUseCounter()) + }) +} + +func createTestMemtables(logger logrus.FieldLogger) (*Memtable, *Memtable, *Memtable) { + mt1 := NewMemtable(logger) + mt1.Insert(6, []uint64{16, 26}) // deleted + mt1.Insert(19, []uint64{119, 219}) // 010011 + mt1.Insert(25, []uint64{113, 213}) // overwriten + mt1.Delete(8, []uint64{10, 20}) + + mt2 := NewMemtable(logger) + mt2.Insert(4, []uint64{14, 24}) // 000100 + mt2.Insert(17, []uint64{117, 217}) // 010001 + mt2.Insert(22, []uint64{15, 25}) // overwritten + mt2.Delete(1, []uint64{16, 26}) + + mt3 := NewMemtable(logger) + mt3.Insert(0, []uint64{10, 20}) // 000000 + mt3.Insert(5, []uint64{15, 25}) // 000101 + mt3.Insert(13, []uint64{113, 213}) // 001101 + mt3.Delete(21, []uint64{121, 221}) + + // 0 -> 10, 20 + // 4 -> 14, 24 + // 5 -> 15, 25 + // 13 -> 113, 213 + // 17 -> 117, 217 + // 19 -> 119, 219 + + return mt1, mt2, mt3 +} + +type fakeInnerReader struct { + inUseCounter int + additions *sroar.Bitmap + deletions *sroar.Bitmap + err error +} + +func newFakeInnerReader(additions, deletions *sroar.Bitmap, err error) *fakeInnerReader { + return &fakeInnerReader{ + inUseCounter: 0, + additions: additions, + deletions: deletions, + err: err, 
+ } +} + +func (r *fakeInnerReader) Read(ctx context.Context, value uint64, operator filters.Operator, +) (layer roaringset.BitmapLayer, release func(), err error) { + r.inUseCounter++ + return roaringset.BitmapLayer{Additions: r.additions, Deletions: r.deletions}, + func() { r.inUseCounter-- }, r.err +} + +func (r *fakeInnerReader) InUseCounter() int { + return r.inUseCounter +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringsetrange/segment_cursor.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringsetrange/segment_cursor.go new file mode 100644 index 0000000000000000000000000000000000000000..94cec7c268372cd02b615cbee402e72d353ca0f7 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringsetrange/segment_cursor.go @@ -0,0 +1,225 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package roaringsetrange + +import ( + "bufio" + "encoding/binary" + "errors" + "fmt" + "io" + + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv/segmentindex" + "github.com/weaviate/weaviate/adapters/repos/db/roaringset" +) + +type SegmentCursor interface { + First() (uint8, roaringset.BitmapLayer, bool) + Next() (uint8, roaringset.BitmapLayer, bool) +} + +// A SegmentCursorMmap iterates over all key-value pairs in a single disk segment. +// You can start at the beginning using [*SegmentCursorMmap.First] and move forward +// using [*SegmentCursorMmap.Next] +type SegmentCursorMmap struct { + data []byte + nextOffset uint64 +} + +// NewSegmentCursorMmap creates a cursor for a single disk segment. 
Make sure that +// the data buf is already sliced correctly to start at the payload, as calling +// [*SegmentCursorMmap.First] will start reading at offset 0 relative to the passed +// in buffer. Similarly, the buffer may only contain payloads, as the buffer end +// is used to determine if more keys can be found. +// +// Therefore if the payload is part of a longer continuous buffer, the cursor +// should be initialized with data[payloadStartPos:payloadEndPos] +func NewSegmentCursorMmap(data []byte) *SegmentCursorMmap { + return &SegmentCursorMmap{data: data, nextOffset: 0} +} + +func (c *SegmentCursorMmap) First() (uint8, roaringset.BitmapLayer, bool) { + c.nextOffset = 0 + return c.Next() +} + +func (c *SegmentCursorMmap) Next() (uint8, roaringset.BitmapLayer, bool) { + if c.nextOffset >= uint64(len(c.data)) { + return 0, roaringset.BitmapLayer{}, false + } + + sn := NewSegmentNodeFromBuffer(c.data[c.nextOffset:]) + c.nextOffset += sn.Len() + + return sn.Key(), roaringset.BitmapLayer{ + Additions: sn.Additions(), + Deletions: sn.Deletions(), + }, true +} + +// ================================================================================ + +// A SegmentCursor iterates over all key-value pairs in a single disk segment. +// You can start at the beginning using [*SegmentCursorPread.First] and move forward +// using [*SegmentCursorPread.Next] +type SegmentCursorPread struct { + readSeeker io.ReadSeeker + reader *bufio.Reader + lenBuf []byte + nodeBufs [][]byte + nodeBufPos int + nodeBufMinSize int +} + +// NewSegmentCursorPread creates a cursor for a single disk segment. Make sure that +// the reader has offset = 0 set correctly to start at the payload, as calling +// [*SegmentCursorPread.First] will start reading at offset 0. +// Similarly, the reader may only read payload, as the EOF +// is used to determine if more keys can be found. +// +// bufferCount tells how many exclusive payload buffers should be used to return +// expected data. 
Set multiple buffers if data returned by First/Next will not be used +// before following call will be made, not to overwrite previously fetched values. +// (e.g. count 3 means, 3 buffers will be used internally and following calls to First/Next +// will return data in buffers: 1, 2, 3, 1, 2, 3, ...) +func NewSegmentCursorPread(readSeeker io.ReadSeeker, bufferCount int) *SegmentCursorPread { + readSeeker.Seek(0, io.SeekStart) + return &SegmentCursorPread{ + readSeeker: readSeeker, + reader: bufio.NewReaderSize(readSeeker, 10*1024*1024), + lenBuf: make([]byte, 8), + nodeBufs: make([][]byte, bufferCount), + nodeBufPos: 0, + nodeBufMinSize: 0, + } +} + +func (c *SegmentCursorPread) First() (uint8, roaringset.BitmapLayer, bool) { + c.readSeeker.Seek(0, io.SeekStart) + c.reader.Reset(c.readSeeker) + return c.read(true) +} + +func (c *SegmentCursorPread) Next() (uint8, roaringset.BitmapLayer, bool) { + return c.read(false) +} + +func (c *SegmentCursorPread) read(isFirst bool) (uint8, roaringset.BitmapLayer, bool) { + n, err := io.ReadFull(c.reader, c.lenBuf) + if err == io.EOF || onlyChecksumRemaining(n, err) { + return 0, roaringset.BitmapLayer{}, false + } + if err != nil { + panic(fmt.Errorf("SegmentCursorReader::Next reading node length: %w", err)) + } + + nodeLen := binary.LittleEndian.Uint64(c.lenBuf) + nodeBuf := c.getNodeBuf(int(nodeLen)) + + _, err = io.ReadFull(c.reader, nodeBuf[8:]) + if err != nil { + panic(fmt.Errorf("SegmentCursorReader::Next reading node: %w", err)) + } + + copy(nodeBuf, c.lenBuf) + sn := NewSegmentNodeFromBuffer(nodeBuf) + + deletions := sn.Deletions() + if isFirst { + c.updateNodeBufMinSize(int(nodeLen) - len(deletions.ToBuffer())) + } + + return sn.Key(), roaringset.BitmapLayer{ + Additions: sn.Additions(), + Deletions: deletions, + }, true +} + +func (c *SegmentCursorPread) getNodeBuf(size int) []byte { + pos := c.nodeBufPos + c.nodeBufPos = (c.nodeBufPos + 1) % len(c.nodeBufs) + + if cap(c.nodeBufs[pos]) < size { + newSize := 
c.nodeBufMinSize + if newSize < size { + newSize = size + } + c.nodeBufs[pos] = make([]byte, newSize) + } + return c.nodeBufs[pos][:size] +} + +// First node's additions (non-null) contains all ids present in the segment. +// By setting minimum buffer size to maximum size in use, it is ensured, that +// existing buffer will fit data of following nodes +func (c *SegmentCursorPread) updateNodeBufMinSize(size int) { + if c.nodeBufMinSize < size { + c.nodeBufMinSize = size + } +} + +// ================================================================================ + +type GaplessSegmentCursor struct { + cursor SegmentCursor + + started bool + key uint8 + readKey uint8 + readLayer roaringset.BitmapLayer + readOk bool +} + +func NewGaplessSegmentCursor(cursor SegmentCursor) *GaplessSegmentCursor { + return &GaplessSegmentCursor{cursor: cursor, started: false, key: 0} +} + +func (c *GaplessSegmentCursor) First() (uint8, roaringset.BitmapLayer, bool) { + c.started = true + + c.readKey, c.readLayer, c.readOk = c.cursor.First() + + c.key = 1 + if c.readOk && c.readKey == 0 { + return c.readKey, c.readLayer, c.readOk + } + return 0, roaringset.BitmapLayer{}, true +} + +func (c *GaplessSegmentCursor) Next() (uint8, roaringset.BitmapLayer, bool) { + if !c.started { + return c.First() + } + + if c.key >= 65 { + return 0, roaringset.BitmapLayer{}, false + } + + for c.readOk && c.readKey < c.key { + c.readKey, c.readLayer, c.readOk = c.cursor.Next() + } + + currKey := c.key + c.key++ + if c.readOk && c.readKey == currKey { + return currKey, c.readLayer, true + } + return currKey, roaringset.BitmapLayer{}, true +} + +// onlyChecksumRemaining determines if the only remaining segment contents +// are the checksum. Segment file checksums were introduced with +// https://github.com/weaviate/weaviate/pull/6620. 
+func onlyChecksumRemaining(n int, err error) bool { + return errors.Is(err, io.ErrUnexpectedEOF) && + n == segmentindex.ChecksumSize +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringsetrange/segment_cursor_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringsetrange/segment_cursor_test.go new file mode 100644 index 0000000000000000000000000000000000000000..cf09974b3014fc299664a1a55816f53a43959a74 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringsetrange/segment_cursor_test.go @@ -0,0 +1,232 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package roaringsetrange + +import ( + "bytes" + "testing" + + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/repos/db/roaringset" +) + +func TestSegmentCursorMmap(t *testing.T) { + seg := createDummySegment(t, 5) + + t.Run("starting from beginning", func(t *testing.T) { + c := NewSegmentCursorMmap(seg) + key, layer, ok := c.First() + require.True(t, ok) + assert.Equal(t, uint8(0), key) + assert.Equal(t, []uint64{0, 1}, layer.Additions.ToArray()) + assert.Equal(t, []uint64{2, 3}, layer.Deletions.ToArray()) + }) + + t.Run("starting from beginning, page through all", func(t *testing.T) { + c := NewSegmentCursorMmap(seg) + i := uint64(0) + for key, layer, ok := c.First(); ok; key, layer, ok = c.Next() { + assert.Equal(t, uint8(i), key) + assert.Equal(t, []uint64{i * 4, i*4 + 1}, layer.Additions.ToArray()) + + if i == 0 { + assert.Equal(t, []uint64{2, 3}, layer.Deletions.ToArray()) + } else { + assert.True(t, layer.Deletions.IsEmpty()) + } + i++ + } + + assert.Equal(t, uint64(5), i) + }) + + t.Run("no 
first, page through all", func(t *testing.T) { + c := NewSegmentCursorMmap(seg) + i := uint64(0) + for key, layer, ok := c.Next(); ok; key, layer, ok = c.Next() { + assert.Equal(t, uint8(i), key) + assert.Equal(t, []uint64{i * 4, i*4 + 1}, layer.Additions.ToArray()) + + if i == 0 { + assert.Equal(t, []uint64{2, 3}, layer.Deletions.ToArray()) + } else { + assert.True(t, layer.Deletions.IsEmpty()) + } + i++ + } + + assert.Equal(t, uint64(5), i) + }) +} + +func TestSegmentCursorPread(t *testing.T) { + seg := createDummySegment(t, 5) + readSeeker := bytes.NewReader(seg) + + t.Run("starting from beginning", func(t *testing.T) { + c := NewSegmentCursorPread(readSeeker, 1) + key, layer, ok := c.First() + require.True(t, ok) + assert.Equal(t, uint8(0), key) + assert.Equal(t, []uint64{0, 1}, layer.Additions.ToArray()) + assert.Equal(t, []uint64{2, 3}, layer.Deletions.ToArray()) + }) + + t.Run("starting from beginning, page through all", func(t *testing.T) { + c := NewSegmentCursorPread(readSeeker, 1) + i := uint64(0) + for key, layer, ok := c.First(); ok; key, layer, ok = c.Next() { + assert.Equal(t, uint8(i), key) + assert.Equal(t, []uint64{i * 4, i*4 + 1}, layer.Additions.ToArray()) + + if i == 0 { + assert.Equal(t, []uint64{2, 3}, layer.Deletions.ToArray()) + } else { + assert.True(t, layer.Deletions.IsEmpty()) + } + i++ + } + + assert.Equal(t, uint64(5), i) + }) + + t.Run("no first, page through all", func(t *testing.T) { + c := NewSegmentCursorPread(readSeeker, 1) + i := uint64(0) + for key, layer, ok := c.Next(); ok; key, layer, ok = c.Next() { + assert.Equal(t, uint8(i), key) + assert.Equal(t, []uint64{i * 4, i*4 + 1}, layer.Additions.ToArray()) + + if i == 0 { + assert.Equal(t, []uint64{2, 3}, layer.Deletions.ToArray()) + } else { + assert.True(t, layer.Deletions.IsEmpty()) + } + i++ + } + + assert.Equal(t, uint64(5), i) + }) +} + +func createDummySegment(t *testing.T, count uint64) []byte { + out := []byte{} + + for i := uint64(0); i < count; i++ { + key := 
uint8(i) + add := roaringset.NewBitmap(i*4, i*4+1) + del := roaringset.NewBitmap(i*4+2, i*4+3) // ignored for key != 0 + sn, err := NewSegmentNode(key, add, del) + require.Nil(t, err) + + out = append(out, sn.ToBuffer()...) + } + + return out +} + +func TestGaplessSegmentCursor(t *testing.T) { + logger, _ := test.NewNullLogger() + + t.Run("with empty SegmentCursor", func(t *testing.T) { + cur := &GaplessSegmentCursor{cursor: newFakeSegmentCursor(NewMemtable(logger))} + + k, v, ok := cur.First() + require.Equal(t, uint8(0), k) + require.True(t, ok) + assert.Nil(t, v.Additions) + assert.Nil(t, v.Deletions) + + for i := uint8(1); i < 65; i++ { + k, v, ok = cur.Next() + require.Equal(t, i, k) + require.True(t, ok) + assert.Nil(t, v.Additions) + assert.Nil(t, v.Deletions) + } + + k, v, ok = cur.Next() + require.Equal(t, uint8(0), k) + require.False(t, ok) + assert.Nil(t, v.Additions) + assert.Nil(t, v.Deletions) + }) + + t.Run("with populated SegmentCursor", func(t *testing.T) { + mem := NewMemtable(logger) + mem.Insert(0, []uint64{10, 20}) // 0000 + mem.Insert(5, []uint64{15, 25}) // 0101 + mem.Insert(13, []uint64{113, 213}) // 1101 + cur := &GaplessSegmentCursor{cursor: newFakeSegmentCursor(mem)} + + k, v, ok := cur.First() + require.Equal(t, uint8(0), k) + require.True(t, ok) + assert.ElementsMatch(t, []uint64{10, 20, 15, 25, 113, 213}, v.Additions.ToArray()) + assert.ElementsMatch(t, []uint64{10, 20, 15, 25, 113, 213}, v.Deletions.ToArray()) + + expected := map[uint8][]uint64{ + 1: {15, 25, 113, 213}, + 3: {15, 25, 113, 213}, + 4: {113, 213}, + } + + for i := uint8(1); i < 65; i++ { + k, v, ok := cur.Next() + require.Equal(t, i, k) + require.True(t, ok) + + if _, ok := expected[i]; ok { + assert.ElementsMatch(t, expected[i], v.Additions.ToArray()) + } else { + assert.Nil(t, v.Additions) + } + assert.Nil(t, v.Deletions) + } + + k, v, ok = cur.Next() + require.Equal(t, uint8(0), k) + require.False(t, ok) + assert.Nil(t, v.Additions) + assert.Nil(t, v.Deletions) + }) 
+} + +type fakeSegmentCursor struct { + nodes []*MemtableNode + nextPos int +} + +func newFakeSegmentCursor(memtable *Memtable) *fakeSegmentCursor { + return &fakeSegmentCursor{nodes: memtable.Nodes()} +} + +func (c *fakeSegmentCursor) First() (uint8, roaringset.BitmapLayer, bool) { + c.nextPos = 0 + return c.Next() +} + +func (c *fakeSegmentCursor) Next() (uint8, roaringset.BitmapLayer, bool) { + if c.nextPos >= len(c.nodes) { + return 0, roaringset.BitmapLayer{}, false + } + + mn := c.nodes[c.nextPos] + c.nextPos++ + + return mn.Key, roaringset.BitmapLayer{ + Additions: mn.Additions, + Deletions: mn.Deletions, + }, true +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringsetrange/segment_in_memory.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringsetrange/segment_in_memory.go new file mode 100644 index 0000000000000000000000000000000000000000..200fb1266d8fc149e3a5166e06ba1d3a3c20cf7b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringsetrange/segment_in_memory.go @@ -0,0 +1,274 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package roaringsetrange + +import ( + "context" + "fmt" + "math" + "sync" + + "github.com/weaviate/sroar" + "github.com/weaviate/weaviate/adapters/repos/db/roaringset" + "github.com/weaviate/weaviate/entities/concurrency" + "github.com/weaviate/weaviate/entities/filters" +) + +type SegmentInMemory struct { + lock *sync.RWMutex + bitmaps rangeBitmaps +} + +func NewSegmentInMemory() *SegmentInMemory { + s := &SegmentInMemory{ + lock: new(sync.RWMutex), + } + for key := range s.bitmaps { + s.bitmaps[key] = sroar.NewBitmap() + } + return s +} + +func (s *SegmentInMemory) MergeSegmentByCursor(cursor SegmentCursor) error { + key, layer, ok := cursor.First() + if !ok { + // empty segment, nothing to merge + return nil + } + if key != 0 { + return fmt.Errorf("invalid first key of merged segment") + } + + s.lock.Lock() + defer s.lock.Unlock() + + if deletions := layer.Deletions; !deletions.IsEmpty() { + for key := range s.bitmaps { + s.bitmaps[key].AndNotConc(deletions, concurrency.SROAR_MERGE) + } + } + for ; ok; key, layer, ok = cursor.Next() { + s.bitmaps[key].OrConc(layer.Additions, concurrency.SROAR_MERGE) + } + return nil +} + +func (s *SegmentInMemory) MergeMemtable(memtable *Memtable) error { + nodes := memtable.Nodes() + if len(nodes) == 0 { + // empty memtable, nothing to merge + return nil + } + + s.lock.Lock() + defer s.lock.Unlock() + + if deletions := nodes[0].Deletions; !deletions.IsEmpty() { + for key := range s.bitmaps { + s.bitmaps[key].AndNotConc(deletions, concurrency.SROAR_MERGE) + } + } + for _, node := range nodes { + s.bitmaps[node.Key].OrConc(node.Additions, concurrency.SROAR_MERGE) + } + return nil +} + +func (s *SegmentInMemory) Size() int { + size := 0 + for i := range s.bitmaps { + size += s.bitmaps[i].LenInBytes() + } + return size +} + +// ----------------------------------------------------------------------------- + +type segmentInMemoryReader struct { + bitmaps rangeBitmaps + bufPool 
roaringset.BitmapBufPool +} + +func NewSegmentInMemoryReader(s *SegmentInMemory, bufPool roaringset.BitmapBufPool, +) (reader *segmentInMemoryReader, release func()) { + // TODO aliszka:roaringrange optimize locking? + s.lock.RLock() + return &segmentInMemoryReader{ + bitmaps: s.bitmaps, + bufPool: bufPool, + }, s.lock.RUnlock +} + +func (r *segmentInMemoryReader) Read(ctx context.Context, value uint64, operator filters.Operator, +) (roaringset.BitmapLayer, func(), error) { + if err := ctx.Err(); err != nil { + return roaringset.BitmapLayer{}, noopRelease, err + } + + switch operator { + case filters.OperatorEqual: + bm, release := r.readEqual(value) + return bm, release, nil + + case filters.OperatorNotEqual: + bm, release := r.readNotEqual(value) + return bm, release, nil + + case filters.OperatorLessThan: + bm, release := r.readLessThan(value) + return bm, release, nil + + case filters.OperatorLessThanEqual: + bm, release := r.readLessThanEqual(value) + return bm, release, nil + + case filters.OperatorGreaterThan: + bm, release := r.readGreaterThan(value) + return bm, release, nil + + case filters.OperatorGreaterThanEqual: + bm, release := r.readGreaterThanEqual(value) + return bm, release, nil + + default: + // TODO move strategies to separate package? 
+ return roaringset.BitmapLayer{}, noopRelease, + fmt.Errorf("operator %v not supported for segment-in-memory of strategy %q", operator.Name(), "roaringsetrange") + } +} + +func (r *segmentInMemoryReader) readEqual(value uint64) (roaringset.BitmapLayer, func()) { + if value == 0 { + return r.readLessThanEqual(value) + } + if value == math.MaxUint64 { + return r.readGreaterThanEqual(value) + } + + eq, eqRelease := r.mergeBetween(value, value+1) + return roaringset.BitmapLayer{Additions: eq}, eqRelease +} + +func (r *segmentInMemoryReader) readNotEqual(value uint64) (roaringset.BitmapLayer, func()) { + if value == 0 { + return r.readGreaterThan(value) + } + if value == math.MaxUint64 { + return r.readLessThan(value) + } + + eq, eqRelease := r.mergeBetween(value, value+1) + defer eqRelease() + + neq, neqRelease := r.bufPool.CloneToBuf(r.bitmaps[0]) + neq.AndNotConc(eq, concurrency.SROAR_MERGE) + return roaringset.BitmapLayer{Additions: neq}, neqRelease +} + +func (r *segmentInMemoryReader) readLessThan(value uint64) (roaringset.BitmapLayer, func()) { + if value == 0 { + // no value is < 0 + return roaringset.BitmapLayer{Additions: sroar.NewBitmap()}, noopRelease + } + + gte, gteRelease := r.mergeGreaterThanEqual(value) + defer gteRelease() + + lt, ltRelease := r.bufPool.CloneToBuf(r.bitmaps[0]) + lt.AndNotConc(gte, concurrency.SROAR_MERGE) + return roaringset.BitmapLayer{Additions: lt}, ltRelease +} + +func (r *segmentInMemoryReader) readLessThanEqual(value uint64) (roaringset.BitmapLayer, func()) { + if value == math.MaxUint64 { + all, allRelease := r.bufPool.CloneToBuf(r.bitmaps[0]) + // all values are <= max uint64 + return roaringset.BitmapLayer{Additions: all}, allRelease + } + + gte1, gte1Release := r.mergeGreaterThanEqual(value + 1) + defer gte1Release() + + lte, lteRelease := r.bufPool.CloneToBuf(r.bitmaps[0]) + lte.AndNotConc(gte1, concurrency.SROAR_MERGE) + return roaringset.BitmapLayer{Additions: lte}, lteRelease +} + +func (r *segmentInMemoryReader) 
readGreaterThan(value uint64) (roaringset.BitmapLayer, func()) { + if value == math.MaxUint64 { + // no value is > max uint64 + return roaringset.BitmapLayer{Additions: sroar.NewBitmap()}, noopRelease + } + + gte1, gte1Release := r.mergeGreaterThanEqual(value + 1) + return roaringset.BitmapLayer{Additions: gte1}, gte1Release +} + +func (r *segmentInMemoryReader) readGreaterThanEqual(value uint64) (roaringset.BitmapLayer, func()) { + if value == 0 { + all, allRelease := r.bufPool.CloneToBuf(r.bitmaps[0]) + // all values are >= 0 + return roaringset.BitmapLayer{Additions: all}, allRelease + } + + gte, gteRelease := r.mergeGreaterThanEqual(value) + return roaringset.BitmapLayer{Additions: gte}, gteRelease +} + +func (r *segmentInMemoryReader) mergeGreaterThanEqual(value uint64) (*sroar.Bitmap, func()) { + result, release := r.bufPool.CloneToBuf(r.bitmaps[0]) + ANDed := false + + for bit := 1; bit < len(r.bitmaps); bit++ { + if value&(1<<(bit-1)) != 0 { + result.AndConc(r.bitmaps[bit], concurrency.SROAR_MERGE) + ANDed = true + } else if ANDed { + result.OrConc(r.bitmaps[bit], concurrency.SROAR_MERGE) + } + } + return result, release +} + +func (r *segmentInMemoryReader) mergeBetween(valueMinInc, valueMaxExc uint64) (*sroar.Bitmap, func()) { + resultMin, releaseMin := r.bufPool.CloneToBuf(r.bitmaps[0]) + resultMax, releaseMax := r.bufPool.CloneToBuf(r.bitmaps[0]) + defer releaseMax() + ANDedMin := false + ANDedMax := false + + for bit := 1; bit < len(r.bitmaps); bit++ { + var b uint64 = 1 << (bit - 1) + + if valueMinInc&b != 0 { + resultMin.AndConc(r.bitmaps[bit], concurrency.SROAR_MERGE) + ANDedMin = true + } else if ANDedMin { + resultMin.OrConc(r.bitmaps[bit], concurrency.SROAR_MERGE) + } + + if valueMaxExc&b != 0 { + resultMax.AndConc(r.bitmaps[bit], concurrency.SROAR_MERGE) + ANDedMax = true + } else if ANDedMax { + resultMax.OrConc(r.bitmaps[bit], concurrency.SROAR_MERGE) + } + } + + return resultMin.AndNotConc(resultMax, concurrency.SROAR_MERGE), releaseMin +} + 
+// ----------------------------------------------------------------------------- + +type rangeBitmaps [65]*sroar.Bitmap + +var noopRelease = func() {} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringsetrange/segment_in_memory_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringsetrange/segment_in_memory_test.go new file mode 100644 index 0000000000000000000000000000000000000000..651182be1cf0fa399ae607c083784c914643ff46 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringsetrange/segment_in_memory_test.go @@ -0,0 +1,409 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package roaringsetrange + +import ( + "context" + "testing" + + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/sroar" + "github.com/weaviate/weaviate/adapters/repos/db/roaringset" + "github.com/weaviate/weaviate/entities/filters" +) + +func TestSegmentInMemory(t *testing.T) { + t.Run("bitmaps are initialized and empty", func(t *testing.T) { + s := NewSegmentInMemory() + + for i := range s.bitmaps { + assert.NotNil(t, s.bitmaps[i]) + assert.True(t, s.bitmaps[i].IsEmpty()) + } + }) + + t.Run("size is sum of bitmap sizes", func(t *testing.T) { + bmSize := sroar.NewBitmap().LenInBytes() + + s := NewSegmentInMemory() + assert.Equal(t, bmSize*65, s.Size()) + }) + + t.Run("merging", func(t *testing.T) { + logger, _ := test.NewNullLogger() + + mt1, mt2, mt3 := createTestMemtables(logger) + expectedElemsByBit := map[int][]uint64{ + 0: {10, 20, 14, 24, 15, 25, 113, 213, 117, 217, 119, 219}, + 1: {119, 219, 117, 217, 15, 25, 113, 213}, + 2: {119, 219}, + 3: {14, 24, 15, 25, 113, 213}, + 4: {113, 213}, + 
5: {119, 219, 117, 217}, + } + + t.Run("segments", func(t *testing.T) { + cur1 := newFakeSegmentCursor(mt1) + cur2 := newFakeSegmentCursor(mt2) + cur3 := newFakeSegmentCursor(mt3) + + s := NewSegmentInMemory() + s.MergeSegmentByCursor(cur1) + s.MergeSegmentByCursor(cur2) + s.MergeSegmentByCursor(cur3) + + assertElemsByBit(t, s, expectedElemsByBit) + }) + + t.Run("memtables", func(t *testing.T) { + s := NewSegmentInMemory() + s.MergeMemtable(mt1) + s.MergeMemtable(mt2) + s.MergeMemtable(mt3) + + assertElemsByBit(t, s, expectedElemsByBit) + }) + + t.Run("segments + memtable", func(t *testing.T) { + cur1 := newFakeSegmentCursor(mt1) + cur2 := newFakeSegmentCursor(mt2) + + s := NewSegmentInMemory() + s.MergeSegmentByCursor(cur1) + s.MergeSegmentByCursor(cur2) + s.MergeMemtable(mt3) + + assertElemsByBit(t, s, expectedElemsByBit) + }) + }) +} + +func TestSegmentInMemoryReader(t *testing.T) { + logger, _ := test.NewNullLogger() + mt1, mt2, mt3 := createTestMemtables(logger) + + s := NewSegmentInMemory() + s.MergeMemtable(mt1) + s.MergeMemtable(mt2) + s.MergeMemtable(mt3) + + bufPool := roaringset.NewBitmapBufPoolNoop() + reader, release := NewSegmentInMemoryReader(s, bufPool) + defer release() + + t.Run("read valid operators", func(t *testing.T) { + testCases := []struct { + name string + value uint64 + operator filters.Operator + expected []uint64 + }{ + { + name: "equal 0", + value: 0, + operator: filters.OperatorEqual, + expected: []uint64{10, 20}, + }, + { + name: "equal 13", + value: 13, + operator: filters.OperatorEqual, + expected: []uint64{113, 213}, + }, + { + name: "equal 8", + value: 8, + operator: filters.OperatorEqual, + expected: []uint64{}, + }, + { + name: "not equal 0", + value: 0, + operator: filters.OperatorNotEqual, + expected: []uint64{14, 24, 15, 25, 113, 213, 117, 217, 119, 219}, + }, + { + name: "not equal 13", + value: 13, + operator: filters.OperatorNotEqual, + expected: []uint64{10, 20, 14, 24, 15, 25, 117, 217, 119, 219}, + }, + { + name: "not 
equal 8", + value: 8, + operator: filters.OperatorNotEqual, + expected: []uint64{10, 20, 14, 24, 15, 25, 113, 213, 117, 217, 119, 219}, + }, + { + name: "greater than equal 0", + value: 0, + operator: filters.OperatorGreaterThanEqual, + expected: []uint64{10, 20, 14, 24, 15, 25, 113, 213, 117, 217, 119, 219}, + }, + { + name: "greater than equal 13", + value: 13, + operator: filters.OperatorGreaterThanEqual, + expected: []uint64{113, 213, 117, 217, 119, 219}, + }, + { + name: "greater than 0", + value: 0, + operator: filters.OperatorGreaterThan, + expected: []uint64{14, 24, 15, 25, 113, 213, 117, 217, 119, 219}, + }, + { + name: "greater than 13", + value: 13, + operator: filters.OperatorGreaterThan, + expected: []uint64{117, 217, 119, 219}, + }, + { + name: "less than equal 0", + value: 0, + operator: filters.OperatorLessThanEqual, + expected: []uint64{10, 20}, + }, + { + name: "less than equal 13", + value: 13, + operator: filters.OperatorLessThanEqual, + expected: []uint64{10, 20, 14, 24, 15, 25, 113, 213}, + }, + { + name: "less than 0", + value: 0, + operator: filters.OperatorLessThan, + expected: []uint64{}, + }, + { + name: "less than 13", + value: 13, + operator: filters.OperatorLessThan, + expected: []uint64{10, 20, 14, 24, 15, 25}, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + layer, release, err := reader.Read(context.Background(), tc.value, tc.operator) + require.NoError(t, err) + defer release() + + assert.ElementsMatch(t, tc.expected, layer.Additions.ToArray()) + assert.Nil(t, layer.Deletions) + }) + } + }) + + t.Run("read invalid opeators", func(t *testing.T) { + testCases := []struct { + name string + operator filters.Operator + }{ + { + name: "like", + operator: filters.OperatorLike, + }, + { + name: "is null", + operator: filters.OperatorIsNull, + }, + { + name: "and", + operator: filters.OperatorAnd, + }, + { + name: "or", + operator: filters.OperatorOr, + }, + { + name: "within geo range", + operator: 
filters.OperatorWithinGeoRange, + }, + { + name: "contains any", + operator: filters.ContainsAny, + }, + { + name: "contains all", + operator: filters.ContainsAll, + }, + { + name: "contains none", + operator: filters.ContainsNone, + }, + { + name: "not", + operator: filters.OperatorNot, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + layer, _, err := reader.Read(context.Background(), 0, tc.operator) + assert.ErrorContains(t, err, "not supported for segment-in-memory") + assert.Nil(t, layer.Additions) + assert.Nil(t, layer.Deletions) + }) + } + }) + + t.Run("read expired context", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + cancel() + + layer, _, err := reader.Read(ctx, 0, filters.OperatorGreaterThanEqual) + assert.ErrorContains(t, err, ctx.Err().Error()) + assert.Nil(t, layer.Additions) + assert.Nil(t, layer.Deletions) + }) +} + +func TestSegmentInMemoryReaderBufPool(t *testing.T) { + logger, _ := test.NewNullLogger() + mt1, mt2, mt3 := createTestMemtables(logger) + + s := NewSegmentInMemory() + s.MergeMemtable(mt1) + s.MergeMemtable(mt2) + s.MergeMemtable(mt3) + + bufPool := newBitmapBufPoolWithCounter() + reader, release := NewSegmentInMemoryReader(s, bufPool) + defer release() + + t.Run("all but one bufs are returned to the pull on read", func(t *testing.T) { + testCases := []struct { + name string + value uint64 + operator filters.Operator + }{ + { + name: "equal 0", + value: 0, + operator: filters.OperatorEqual, + }, + { + name: "equal 13", + value: 13, + operator: filters.OperatorEqual, + }, + { + name: "equal 8", + value: 8, + operator: filters.OperatorEqual, + }, + { + name: "not equal 0", + value: 0, + operator: filters.OperatorNotEqual, + }, + { + name: "not equal 13", + value: 13, + operator: filters.OperatorNotEqual, + }, + { + name: "not equal 8", + value: 8, + operator: filters.OperatorNotEqual, + }, + { + name: "greater than equal 0", + value: 0, + operator: 
filters.OperatorGreaterThanEqual, + }, + { + name: "greater than equal 13", + value: 13, + operator: filters.OperatorGreaterThanEqual, + }, + { + name: "greater than 0", + value: 0, + operator: filters.OperatorGreaterThan, + }, + { + name: "greater than 13", + value: 13, + operator: filters.OperatorGreaterThan, + }, + { + name: "less than equal 0", + value: 0, + operator: filters.OperatorLessThanEqual, + }, + { + name: "less than equal 13", + value: 13, + operator: filters.OperatorLessThanEqual, + }, + { + name: "less than 0", + value: 0, + operator: filters.OperatorLessThan, + }, + { + name: "less than 13", + value: 13, + operator: filters.OperatorLessThan, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + _, release, err := reader.Read(context.Background(), tc.value, tc.operator) + require.NoError(t, err) + + assert.GreaterOrEqual(t, 1, bufPool.InUseCounter()) + release() + assert.Equal(t, 0, bufPool.InUseCounter()) + }) + } + }) +} + +func assertElemsByBit(t *testing.T, s *SegmentInMemory, expectedElemsByBit map[int][]uint64) { + for bit := 0; bit < 65; bit++ { + if elems, ok := expectedElemsByBit[bit]; ok { + assert.ElementsMatch(t, elems, s.bitmaps[bit].ToArray()) + } else { + assert.True(t, s.bitmaps[bit].IsEmpty()) + } + } +} + +type bitmapBufPoolWithCounter struct { + inUseCounter int +} + +func newBitmapBufPoolWithCounter() *bitmapBufPoolWithCounter { + return &bitmapBufPoolWithCounter{inUseCounter: 0} +} + +func (p *bitmapBufPoolWithCounter) Get(minCap int) (buf []byte, put func()) { + p.inUseCounter++ + return make([]byte, 0, minCap), func() { p.inUseCounter-- } +} + +func (p *bitmapBufPoolWithCounter) CloneToBuf(bm *sroar.Bitmap) (cloned *sroar.Bitmap, put func()) { + buf, put := p.Get(bm.LenInBytes()) + cloned = bm.CloneToBuf(buf) + return +} + +func (p *bitmapBufPoolWithCounter) InUseCounter() int { + return p.inUseCounter +} diff --git 
a/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringsetrange/segment_node.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringsetrange/segment_node.go new file mode 100644 index 0000000000000000000000000000000000000000..e48517fb2280f9115aa30faf53e8a620d7a2df82 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringsetrange/segment_node.go @@ -0,0 +1,146 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package roaringsetrange + +import ( + "encoding/binary" + + "github.com/weaviate/sroar" + "github.com/weaviate/weaviate/usecases/byteops" +) + +// SegmentNode stores one Key-Value pair in +// the LSM Segment. It uses a single []byte internally. As a result there is +// no decode step required at runtime. Instead you can use +// +// - [*SegmentNode.Key] +// - [*SegmentNode.Additions] +// - [*SegmentNode.Deletions] +// +// to access the contents. Those helpers in turn do not require a decoding +// step. The accessor methods that return Roaring Bitmaps only point to +// existing memory. +// +// This makes the SegmentNode very fast to access at query time, even when it +// contains a large amount of data. +// +// The internal structure of the data is: +// +// byte begin-start | description +// --------------------|----------------------------------------------------- +// 0:8 | uint64 indicating the total length of the node, +// | this is used in cursors to identify the next node. 
+// 8:9 | key +// 9:17 | uint64 length indicator for additions sraor bitmap (x) +// 17:(17+x) | additions bitmap +// (17+x):(25+x) | uint64 length indicator for deletions sroar bitmap (y) +// (25+x):(25+x+y) | deletions bitmap +// | deletion indicator and bitmaps are used only for key == 0 +type SegmentNode struct { + data []byte + rw byteops.ReadWriter +} + +// Len indicates the total length of the [SegmentNode]. When reading multiple +// segments back-2-back, such as in a cursor situation, the offset of element +// (n+1) is the offset of element n + Len() +func (sn *SegmentNode) Len() uint64 { + return binary.LittleEndian.Uint64(sn.data[0:8]) +} + +func (sn *SegmentNode) Key() uint8 { + sn.rw.MoveBufferToAbsolutePosition(8) + return sn.rw.ReadUint8() +} + +// Additions returns the additions roaring bitmap with shared state. Only use +// this method if you can guarantee that you will only use it while holding a +// maintenance lock or can otherwise be sure that no compaction can occur. +func (sn *SegmentNode) Additions() *sroar.Bitmap { + sn.rw.MoveBufferToAbsolutePosition(9) + return sroar.FromBuffer(sn.rw.ReadBytesFromBufferWithUint64LengthIndicator()) +} + +// Deletions returns the deletions roaring bitmap with shared state. Only use +// this method if you can guarantee that you will only use it while holding a +// maintenance lock or can otherwise be sure that no compaction can occur. 
+func (sn *SegmentNode) Deletions() *sroar.Bitmap { + sn.rw.MoveBufferToAbsolutePosition(8) + if key := sn.rw.ReadUint8(); key != 0 { + return nil + } + sn.rw.DiscardBytesFromBufferWithUint64LengthIndicator() + return sroar.FromBuffer(sn.rw.ReadBytesFromBufferWithUint64LengthIndicator()) +} + +func NewSegmentNode(key uint8, additions, deletions *sroar.Bitmap) (*SegmentNode, error) { + additionsBuf := additions.ToBuffer() + var deletionsBuf []byte + + // total len + key + length indicators + payload + expectedSize := 8 + 1 + 8 + len(additionsBuf) + + if key == 0 { + deletionsBuf = deletions.ToBuffer() + expectedSize += 8 + len(deletionsBuf) + } + + data := make([]byte, expectedSize) + rw := byteops.NewReadWriter(data) + + // reserve the first 8 bytes for the offset, which will be written at the very end + rw.MoveBufferPositionForward(8) + rw.CopyBytesToBuffer([]byte{key}) + + if err := rw.CopyBytesToBufferWithUint64LengthIndicator(additionsBuf); err != nil { + return nil, err + } + + if key == 0 { + if err := rw.CopyBytesToBufferWithUint64LengthIndicator(deletionsBuf); err != nil { + return nil, err + } + } + + offset := rw.Position + rw.MoveBufferToAbsolutePosition(0) + rw.WriteUint64(uint64(offset)) + + return &SegmentNode{ + data: data, + rw: rw, + }, nil +} + +// ToBuffer returns the internal buffer without copying data. Only use this, +// when you can be sure that it's safe to share the data, or create your own +// copy. +// +// It truncates the buffer at is own length, in case it was initialized with a +// long buffer that only had a beginning offset, but no end. Such a situation +// may occur with cursors. If we then returned the whole buffer and don't know +// what the caller plans on doing with the data, we risk passing around too +// much memory. Truncating at the length prevents this and has no other +// negative effects. 
+func (sn *SegmentNode) ToBuffer() []byte { + return sn.data[:sn.Len()] +} + +// NewSegmentNodeFromBuffer creates a new segment node by using the underlying +// buffer without copying data. Only use this when you can be sure that it's +// safe to share the data or create your own copy. +func NewSegmentNodeFromBuffer(buf []byte) *SegmentNode { + return &SegmentNode{ + data: buf, + rw: byteops.NewReadWriter(buf), + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringsetrange/segment_node_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringsetrange/segment_node_test.go new file mode 100644 index 0000000000000000000000000000000000000000..9e56e9fafce11fde369909b32727564a37011771 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringsetrange/segment_node_test.go @@ -0,0 +1,133 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package roaringsetrange + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/repos/db/roaringset" +) + +func TestSegmentNode_WithDeletions(t *testing.T) { + key := uint8(0) + additions := []uint64{1, 2, 3, 4, 6} + deletions := []uint64{5, 7} + + sn, err := NewSegmentNode(key, roaringset.NewBitmap(additions...), roaringset.NewBitmap(deletions...)) + require.Nil(t, err) + buf := sn.ToBuffer() + assert.Equal(t, sn.Len(), uint64(len(buf))) + assert.Equal(t, key, sn.Key()) + assert.ElementsMatch(t, additions, sn.Additions().ToArray()) + assert.ElementsMatch(t, deletions, sn.Deletions().ToArray()) + + snBuf := NewSegmentNodeFromBuffer(buf) + assert.Equal(t, snBuf.Len(), uint64(len(buf))) + assert.Equal(t, key, snBuf.Key()) + assert.ElementsMatch(t, additions, snBuf.Additions().ToArray()) + assert.ElementsMatch(t, deletions, snBuf.Deletions().ToArray()) +} + +func TestSegmentNode_WithoutDeletions(t *testing.T) { + key := uint8(63) + additions := []uint64{1, 2, 3, 4, 6} + deletions := []uint64{5, 7} // ignored + + sn, err := NewSegmentNode(key, roaringset.NewBitmap(additions...), roaringset.NewBitmap(deletions...)) + require.Nil(t, err) + buf := sn.ToBuffer() + assert.Equal(t, sn.Len(), uint64(len(buf))) + assert.Equal(t, key, sn.Key()) + assert.ElementsMatch(t, additions, sn.Additions().ToArray()) + assert.True(t, sn.Deletions().IsEmpty()) + + snBuf := NewSegmentNodeFromBuffer(buf) + assert.Equal(t, snBuf.Len(), uint64(len(buf))) + assert.Equal(t, key, snBuf.Key()) + assert.ElementsMatch(t, additions, snBuf.Additions().ToArray()) + assert.True(t, snBuf.Deletions().IsEmpty()) +} + +func TestSegmentNode_WithDeletions_InitializingFromBufferTooLarge(t *testing.T) { + key := uint8(0) + additions := []uint64{1, 2, 3, 4, 6} + deletions := []uint64{5, 7} + + sn, err := NewSegmentNode(key, roaringset.NewBitmap(additions...), 
roaringset.NewBitmap(deletions...)) + require.Nil(t, err) + buf := sn.ToBuffer() + assert.Equal(t, sn.Len(), uint64(len(buf))) + + bufTooLarge := make([]byte, 3*len(buf)) + copy(bufTooLarge, buf) + + snBuf := NewSegmentNodeFromBuffer(bufTooLarge) + // assert that the buffer self reports the useful length, not the length of + // the initialization buffer + assert.Equal(t, snBuf.Len(), uint64(len(buf))) + // assert that ToBuffer() returns a buffer that is no longer than the useful + // length + assert.Equal(t, len(buf), len(snBuf.ToBuffer())) + + assert.Equal(t, key, snBuf.Key()) + assert.ElementsMatch(t, additions, snBuf.Additions().ToArray()) + assert.ElementsMatch(t, deletions, snBuf.Deletions().ToArray()) +} + +func TestSegmentNode_WithoutDeletions_InitializingFromBufferTooLarge(t *testing.T) { + key := uint8(63) + additions := []uint64{1, 2, 3, 4, 6} + deletions := []uint64{5, 7} // ignored + + sn, err := NewSegmentNode(key, roaringset.NewBitmap(additions...), roaringset.NewBitmap(deletions...)) + require.Nil(t, err) + buf := sn.ToBuffer() + assert.Equal(t, sn.Len(), uint64(len(buf))) + + bufTooLarge := make([]byte, 3*len(buf)) + copy(bufTooLarge, buf) + + snBuf := NewSegmentNodeFromBuffer(bufTooLarge) + // assert that the buffer self reports the useful length, not the length of + // the initialization buffer + assert.Equal(t, snBuf.Len(), uint64(len(buf))) + // assert that ToBuffer() returns a buffer that is no longer than the useful + // length + assert.Equal(t, len(buf), len(snBuf.ToBuffer())) + + assert.Equal(t, key, snBuf.Key()) + assert.ElementsMatch(t, additions, snBuf.Additions().ToArray()) + assert.True(t, snBuf.Deletions().IsEmpty()) +} + +func TestSegmentNode_DeletionsNotStoredForNon0Key(t *testing.T) { + key1 := uint8(0) + key2 := uint8(15) + key3 := uint8(63) + additions := roaringset.NewBitmap(1, 2, 3, 4, 6) + deletions := roaringset.NewBitmap(5, 7) + + sn1, err := NewSegmentNode(key1, additions, deletions) + require.Nil(t, err) + sn2, err := 
NewSegmentNode(key2, additions, deletions) + require.Nil(t, err) + sn3, err := NewSegmentNode(key3, additions, deletions) + require.Nil(t, err) + + assert.Greater(t, sn1.Len(), sn2.Len()) + assert.Equal(t, sn2.Len(), sn3.Len()) + assert.False(t, sn1.Deletions().IsEmpty()) + assert.True(t, sn2.Deletions().IsEmpty()) + assert.True(t, sn3.Deletions().IsEmpty()) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringsetrange/segment_reader.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringsetrange/segment_reader.go new file mode 100644 index 0000000000000000000000000000000000000000..4b05bad3f7c4ce46fcf13ff625f30673bcdc0bf8 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringsetrange/segment_reader.go @@ -0,0 +1,349 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package roaringsetrange + +import ( + "context" + "fmt" + "math" + + "github.com/weaviate/sroar" + "github.com/weaviate/weaviate/adapters/repos/db/roaringset" + "github.com/weaviate/weaviate/entities/errors" + "github.com/weaviate/weaviate/entities/filters" +) + +type SegmentReader struct { + cursor SegmentCursor + concurrency int +} + +func NewSegmentReader(cursor *GaplessSegmentCursor) *SegmentReader { + return NewSegmentReaderConcurrent(cursor, 1) +} + +// TODO aliszka:roaringrange add buf pool? 
+func NewSegmentReaderConcurrent(cursor *GaplessSegmentCursor, concurrency int) *SegmentReader { + return &SegmentReader{ + cursor: cursor, + concurrency: concurrency, + } +} + +func (r *SegmentReader) Read(ctx context.Context, value uint64, operator filters.Operator, +) (roaringset.BitmapLayer, func(), error) { + if err := ctx.Err(); err != nil { + return roaringset.BitmapLayer{}, noopRelease, err + } + + switch operator { + case filters.OperatorEqual: + bm, err := r.readEqual(ctx, value) + return bm, noopRelease, err + + case filters.OperatorNotEqual: + bm, err := r.readNotEqual(ctx, value) + return bm, noopRelease, err + + case filters.OperatorLessThan: + bm, err := r.readLessThan(ctx, value) + return bm, noopRelease, err + + case filters.OperatorLessThanEqual: + bm, err := r.readLessThanEqual(ctx, value) + return bm, noopRelease, err + + case filters.OperatorGreaterThan: + bm, err := r.readGreaterThan(ctx, value) + return bm, noopRelease, err + + case filters.OperatorGreaterThanEqual: + bm, err := r.readGreaterThanEqual(ctx, value) + return bm, noopRelease, err + + default: + // TODO move strategies to separate package? 
+ return roaringset.BitmapLayer{}, noopRelease, + fmt.Errorf("operator %v not supported for segments of strategy %q", operator.Name(), "roaringsetrange") + } +} + +func (r *SegmentReader) firstLayer() (roaringset.BitmapLayer, bool) { + // bitmaps' cloning is necessary for both types of cursors: mmap and pread + // (pread cursor use buffers to read entire nodes from file, therefore nodes already read + // are later overwritten with nodes being read later) + _, layer, ok := r.cursor.First() + if !ok { + return roaringset.BitmapLayer{ + Additions: sroar.NewBitmap(), + Deletions: sroar.NewBitmap(), + }, false + } + + var deletions *sroar.Bitmap + if layer.Deletions == nil { + deletions = sroar.NewBitmap() + } else { + deletions = layer.Deletions.Clone() + } + + if layer.Additions.IsEmpty() { + return roaringset.BitmapLayer{ + Additions: sroar.NewBitmap(), + Deletions: deletions, + }, false + } + return roaringset.BitmapLayer{ + Additions: layer.Additions.Clone(), + Deletions: deletions, + }, true +} + +func (r *SegmentReader) readEqual(ctx context.Context, value uint64, +) (roaringset.BitmapLayer, error) { + if value == 0 { + return r.readLessThanEqual(ctx, value) + } + if value == math.MaxUint64 { + return r.readGreaterThanEqual(ctx, value) + } + + firstLayer, ok := r.firstLayer() + if !ok { + return firstLayer, nil + } + + eq, err := r.mergeBetween(ctx, value, value+1, firstLayer.Additions) + if err != nil { + return roaringset.BitmapLayer{}, err + } + + return roaringset.BitmapLayer{ + Additions: eq, + Deletions: firstLayer.Deletions, + }, nil +} + +func (r *SegmentReader) readNotEqual(ctx context.Context, value uint64, +) (roaringset.BitmapLayer, error) { + if value == 0 { + return r.readGreaterThan(ctx, value) + } + if value == math.MaxUint64 { + return r.readLessThan(ctx, value) + } + + firstLayer, ok := r.firstLayer() + if !ok { + return firstLayer, nil + } + + neq := firstLayer.Additions.Clone() + eq, err := r.mergeBetween(ctx, value, value+1, 
firstLayer.Additions) + if err != nil { + return roaringset.BitmapLayer{}, err + } + + neq.AndNotConc(eq, r.concurrency) + return roaringset.BitmapLayer{ + Additions: neq, + Deletions: firstLayer.Deletions, + }, nil +} + +func (r *SegmentReader) readLessThan(ctx context.Context, value uint64, +) (roaringset.BitmapLayer, error) { + firstLayer, ok := r.firstLayer() + if !ok { + return firstLayer, nil + } + + if value == 0 { + // no value is < 0 + return roaringset.BitmapLayer{ + Additions: sroar.NewBitmap(), + Deletions: firstLayer.Deletions, + }, nil + } + + lt := firstLayer.Additions.Clone() + gte, err := r.mergeGreaterThanEqual(ctx, value, firstLayer.Additions) + if err != nil { + return roaringset.BitmapLayer{}, err + } + + lt.AndNotConc(gte, r.concurrency) + return roaringset.BitmapLayer{ + Additions: lt, + Deletions: firstLayer.Deletions, + }, nil +} + +func (r *SegmentReader) readLessThanEqual(ctx context.Context, value uint64, +) (roaringset.BitmapLayer, error) { + firstLayer, ok := r.firstLayer() + if !ok { + return firstLayer, nil + } + + if value == math.MaxUint64 { + // all values are <= max uint64 + return firstLayer, nil + } + + lte := firstLayer.Additions.Clone() + gte1, err := r.mergeGreaterThanEqual(ctx, value+1, firstLayer.Additions) + if err != nil { + return roaringset.BitmapLayer{}, err + } + + lte.AndNotConc(gte1, r.concurrency) + return roaringset.BitmapLayer{ + Additions: lte, + Deletions: firstLayer.Deletions, + }, nil +} + +func (r *SegmentReader) readGreaterThan(ctx context.Context, value uint64, +) (roaringset.BitmapLayer, error) { + firstLayer, ok := r.firstLayer() + if !ok { + return firstLayer, nil + } + + if value == math.MaxUint64 { + // no value is > max uint64 + return roaringset.BitmapLayer{ + Additions: sroar.NewBitmap(), + Deletions: firstLayer.Deletions, + }, nil + } + + gte1, err := r.mergeGreaterThanEqual(ctx, value+1, firstLayer.Additions) + if err != nil { + return roaringset.BitmapLayer{}, err + } + + return 
roaringset.BitmapLayer{ + Additions: gte1, + Deletions: firstLayer.Deletions, + }, nil +} + +func (r *SegmentReader) readGreaterThanEqual(ctx context.Context, value uint64, +) (roaringset.BitmapLayer, error) { + firstLayer, ok := r.firstLayer() + if !ok { + return firstLayer, nil + } + + if value == 0 { + // all values are >= 0 + return firstLayer, nil + } + + gte, err := r.mergeGreaterThanEqual(ctx, value, firstLayer.Additions) + if err != nil { + return roaringset.BitmapLayer{}, err + } + + return roaringset.BitmapLayer{ + Additions: gte, + Deletions: firstLayer.Deletions, + }, nil +} + +func (r *SegmentReader) mergeGreaterThanEqual(ctx context.Context, value uint64, + all *sroar.Bitmap, +) (*sroar.Bitmap, error) { + ANDed := false + result := all + entriesCh := make(chan *cursorEntry) + + errors.GoWrapper(func() { + defer close(entriesCh) + for bit, layer, ok := r.cursor.Next(); ok; bit, layer, ok = r.cursor.Next() { + if ctx.Err() != nil { + break + } + entriesCh <- &cursorEntry{bit: bit, layer: layer} + } + }, nil) + + for entry := range entriesCh { + bit := entry.bit + layer := entry.layer + + if value&(1<<(bit-1)) != 0 { + ANDed = true + result.AndConc(layer.Additions, r.concurrency) + } else if ANDed { + result.OrConc(layer.Additions, r.concurrency) + } + } + + if ctx.Err() != nil { + return nil, ctx.Err() + } + + return result, nil +} + +func (r *SegmentReader) mergeBetween(ctx context.Context, valueMinInc, valueMaxExc uint64, + all *sroar.Bitmap, +) (*sroar.Bitmap, error) { + ANDedMin := false + ANDedMax := false + resultMin := all.Clone() + resultMax := all + entriesCh := make(chan *cursorEntry) + + errors.GoWrapper(func() { + defer close(entriesCh) + for bit, layer, ok := r.cursor.Next(); ok; bit, layer, ok = r.cursor.Next() { + if ctx.Err() != nil { + break + } + entriesCh <- &cursorEntry{bit: bit, layer: layer} + } + }, nil) + + for entry := range entriesCh { + bit := entry.bit + layer := entry.layer + + var b uint64 = 1 << (bit - 1) + + if 
valueMinInc&b != 0 { + ANDedMin = true + resultMin.AndConc(layer.Additions, r.concurrency) + } else if ANDedMin { + resultMin.OrConc(layer.Additions, r.concurrency) + } + + if valueMaxExc&b != 0 { + ANDedMax = true + resultMax.AndConc(layer.Additions, r.concurrency) + } else if ANDedMax { + resultMax.OrConc(layer.Additions, r.concurrency) + } + } + + resultMin.AndNotConc(resultMax, r.concurrency) + + return resultMin, nil +} + +type cursorEntry struct { + bit uint8 + layer roaringset.BitmapLayer +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringsetrange/segment_reader_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringsetrange/segment_reader_test.go new file mode 100644 index 0000000000000000000000000000000000000000..4619490dfcb50414428d0b5c93c5ad04604991ae --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/roaringsetrange/segment_reader_test.go @@ -0,0 +1,164 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package roaringsetrange + +import ( + "testing" + + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/weaviate/weaviate/entities/filters" + "golang.org/x/net/context" +) + +func TestSegmentReader(t *testing.T) { + logger, _ := test.NewNullLogger() + + mem := NewMemtable(logger) + mem.Insert(13, []uint64{113, 213}) // ...1101 + mem.Insert(5, []uint64{15, 25}) // ...0101 + mem.Insert(0, []uint64{10, 20}) // ...0000 + mem.Delete(20, []uint64{120, 220}) + + cursor := NewGaplessSegmentCursor(newFakeSegmentCursor(mem)) + reader := NewSegmentReader(cursor) + + type testCase struct { + value uint64 + operator filters.Operator + expectedAdd []uint64 + expectedDel []uint64 + } + + testCases := []testCase{ + { + value: 0, + operator: filters.OperatorGreaterThanEqual, + expectedAdd: []uint64{10, 20, 15, 25, 113, 213}, + expectedDel: []uint64{10, 20, 15, 25, 113, 213, 120, 220}, + }, + { + value: 0, + operator: filters.OperatorGreaterThan, + expectedAdd: []uint64{15, 25, 113, 213}, + expectedDel: []uint64{10, 20, 15, 25, 113, 213, 120, 220}, + }, + { + value: 0, + operator: filters.OperatorLessThanEqual, + expectedAdd: []uint64{10, 20}, + expectedDel: []uint64{10, 20, 15, 25, 113, 213, 120, 220}, + }, + { + value: 0, + operator: filters.OperatorLessThan, + expectedAdd: []uint64{}, + expectedDel: []uint64{10, 20, 15, 25, 113, 213, 120, 220}, + }, + { + value: 0, + operator: filters.OperatorEqual, + expectedAdd: []uint64{10, 20}, + expectedDel: []uint64{10, 20, 15, 25, 113, 213, 120, 220}, + }, + { + value: 0, + operator: filters.OperatorNotEqual, + expectedAdd: []uint64{15, 25, 113, 213}, + expectedDel: []uint64{10, 20, 15, 25, 113, 213, 120, 220}, + }, + + { + value: 5, + operator: filters.OperatorGreaterThanEqual, + expectedAdd: []uint64{15, 25, 113, 213}, + expectedDel: []uint64{10, 20, 15, 25, 113, 213, 120, 220}, + }, + { + value: 5, + operator: filters.OperatorGreaterThan, + expectedAdd: 
[]uint64{113, 213}, + expectedDel: []uint64{10, 20, 15, 25, 113, 213, 120, 220}, + }, + { + value: 5, + operator: filters.OperatorLessThanEqual, + expectedAdd: []uint64{10, 20, 15, 25}, + expectedDel: []uint64{10, 20, 15, 25, 113, 213, 120, 220}, + }, + { + value: 5, + operator: filters.OperatorLessThan, + expectedAdd: []uint64{10, 20}, + expectedDel: []uint64{10, 20, 15, 25, 113, 213, 120, 220}, + }, + { + value: 5, + operator: filters.OperatorEqual, + expectedAdd: []uint64{15, 25}, + expectedDel: []uint64{10, 20, 15, 25, 113, 213, 120, 220}, + }, + { + value: 5, + operator: filters.OperatorNotEqual, + expectedAdd: []uint64{10, 20, 113, 213}, + expectedDel: []uint64{10, 20, 15, 25, 113, 213, 120, 220}, + }, + + { + value: 13, + operator: filters.OperatorGreaterThanEqual, + expectedAdd: []uint64{113, 213}, + expectedDel: []uint64{10, 20, 15, 25, 113, 213, 120, 220}, + }, + { + value: 13, + operator: filters.OperatorGreaterThan, + expectedAdd: []uint64{}, + expectedDel: []uint64{10, 20, 15, 25, 113, 213, 120, 220}, + }, + { + value: 13, + operator: filters.OperatorLessThanEqual, + expectedAdd: []uint64{10, 20, 15, 25, 113, 213}, + expectedDel: []uint64{10, 20, 15, 25, 113, 213, 120, 220}, + }, + { + value: 13, + operator: filters.OperatorLessThan, + expectedAdd: []uint64{10, 20, 15, 25}, + expectedDel: []uint64{10, 20, 15, 25, 113, 213, 120, 220}, + }, + { + value: 13, + operator: filters.OperatorEqual, + expectedAdd: []uint64{113, 213}, + expectedDel: []uint64{10, 20, 15, 25, 113, 213, 120, 220}, + }, + { + value: 13, + operator: filters.OperatorNotEqual, + expectedAdd: []uint64{10, 20, 15, 25}, + expectedDel: []uint64{10, 20, 15, 25, 113, 213, 120, 220}, + }, + } + + for _, tc := range testCases { + t.Run("read", func(t *testing.T) { + bm, release, err := reader.Read(context.Background(), tc.value, tc.operator) + assert.NoError(t, err) + defer release() + assert.ElementsMatch(t, bm.Additions.ToArray(), tc.expectedAdd) + assert.ElementsMatch(t, 
bm.Deletions.ToArray(), tc.expectedDel) + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/sorter/basic_comparators.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/sorter/basic_comparators.go new file mode 100644 index 0000000000000000000000000000000000000000..be01c98bad0b0b4b08299dd4a1dfcbd080648de9 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/sorter/basic_comparators.go @@ -0,0 +1,382 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package sorter + +import ( + "strings" + "time" + + "github.com/weaviate/weaviate/entities/schema" +) + +type basicComparatorProvider struct{} + +func (bcp *basicComparatorProvider) provide(dataType schema.DataType, order string) basicComparator { + switch dataType { + case schema.DataTypeBlob: + return newStringComparator(order) + case schema.DataTypeText: + return newStringComparator(order) + case schema.DataTypeTextArray: + return newStringArrayComparator(order) + case schema.DataTypeNumber, schema.DataTypeInt: + return newFloat64Comparator(order) + case schema.DataTypeNumberArray, schema.DataTypeIntArray: + return newFloat64ArrayComparator(order) + case schema.DataTypeDate: + return newDateComparator(order) + case schema.DataTypeDateArray: + return newDateArrayComparator(order) + case schema.DataTypeBoolean: + return newBoolComparator(order) + case schema.DataTypeBooleanArray: + return newBoolArrayComparator(order) + case schema.DataTypePhoneNumber: + return newFloat64ArrayComparator(order) + case schema.DataTypeGeoCoordinates: + return newFloat64ArrayComparator(order) + default: + return newAnyComparator(order) + } +} + +type basicComparator interface { + compare(a, b interface{}) int +} + +type stringComparator struct { 
// stringComparator orders *string values case-insensitively; nil sorts
// according to the configured order.
type stringComparator struct {
	lessValue int
}

func newStringComparator(order string) *stringComparator {
	return &stringComparator{lessValue(order)}
}

func (sc *stringComparator) compare(a, b interface{}) int {
	left, right := sc.untypedNil(a), sc.untypedNil(b)
	if left == nil || right == nil {
		return handleNils(left == nil, right == nil, sc.lessValue)
	}
	return sc.compareStrings(*(left.(*string)), *(right.(*string)))
}

func (sc *stringComparator) compareStrings(a, b string) int {
	switch {
	case strings.EqualFold(a, b):
		return 0
	case strings.ToLower(a) < strings.ToLower(b):
		return sc.lessValue
	default:
		return -sc.lessValue
	}
}

// untypedNil normalizes a typed-nil *string into an untyped nil interface.
func (sc *stringComparator) untypedNil(x interface{}) interface{} {
	if p, ok := x.(*string); ok && p == nil {
		return nil
	}
	return x
}

// stringArrayComparator orders *[]string element-wise; ties break on length.
type stringArrayComparator struct {
	sc *stringComparator
	ic *intComparator
}

func newStringArrayComparator(order string) *stringArrayComparator {
	return &stringArrayComparator{newStringComparator(order), newIntComparator(order)}
}

func (sac *stringArrayComparator) compare(a, b interface{}) int {
	left, right := sac.untypedNil(a), sac.untypedNil(b)
	if left == nil || right == nil {
		return handleNils(left == nil, right == nil, sac.sc.lessValue)
	}
	av, bv := *(left.(*[]string)), *(right.(*[]string))
	for i := 0; i < len(av) && i < len(bv); i++ {
		if res := sac.sc.compareStrings(av[i], bv[i]); res != 0 {
			return res
		}
	}
	return sac.ic.compareInts(len(av), len(bv))
}

func (sac *stringArrayComparator) untypedNil(x interface{}) interface{} {
	if p, ok := x.(*[]string); ok && p == nil {
		return nil
	}
	return x
}

// float64Comparator orders *float64 values; nil sorts according to order.
type float64Comparator struct {
	lessValue int
}

func newFloat64Comparator(order string) *float64Comparator {
	return &float64Comparator{lessValue(order)}
}

func (fc *float64Comparator) compare(a, b interface{}) int {
	left, right := fc.untypedNil(a), fc.untypedNil(b)
	if left == nil || right == nil {
		return handleNils(left == nil, right == nil, fc.lessValue)
	}
	return fc.compareFloats64(*(left.(*float64)), *(right.(*float64)))
}

func (fc *float64Comparator) compareFloats64(a, b float64) int {
	switch {
	case a == b:
		return 0
	case a < b:
		return fc.lessValue
	default:
		return -fc.lessValue
	}
}

func (fc *float64Comparator) untypedNil(x interface{}) interface{} {
	if p, ok := x.(*float64); ok && p == nil {
		return nil
	}
	return x
}

// float64ArrayComparator orders *[]float64 element-wise; ties break on length.
type float64ArrayComparator struct {
	fc *float64Comparator
	ic *intComparator
}

func newFloat64ArrayComparator(order string) *float64ArrayComparator {
	return &float64ArrayComparator{newFloat64Comparator(order), newIntComparator(order)}
}

func (fac *float64ArrayComparator) compare(a, b interface{}) int {
	left, right := fac.untypedNil(a), fac.untypedNil(b)
	if left == nil || right == nil {
		return handleNils(left == nil, right == nil, fac.fc.lessValue)
	}
	av, bv := *(left.(*[]float64)), *(right.(*[]float64))
	for i := 0; i < len(av) && i < len(bv); i++ {
		if res := fac.fc.compareFloats64(av[i], bv[i]); res != 0 {
			return res
		}
	}
	return fac.ic.compareInts(len(av), len(bv))
}

func (fac *float64ArrayComparator) untypedNil(x interface{}) interface{} {
	if p, ok := x.(*[]float64); ok && p == nil {
		return nil
	}
	return x
}

// dateComparator orders *time.Time values; nil sorts according to order.
type dateComparator struct {
	lessValue int
}

func newDateComparator(order string) *dateComparator {
	return &dateComparator{lessValue(order)}
}

func (dc *dateComparator) compare(a, b interface{}) int {
	left, right := dc.untypedNil(a), dc.untypedNil(b)
	if left == nil || right == nil {
		return handleNils(left == nil, right == nil, dc.lessValue)
	}
	return dc.compareDates(*(left.(*time.Time)), *(right.(*time.Time)))
}

func (dc *dateComparator) compareDates(a, b time.Time) int {
	switch {
	case a.Equal(b):
		return 0
	case a.Before(b):
		return dc.lessValue
	default:
		return -dc.lessValue
	}
}

func (dc *dateComparator) untypedNil(x interface{}) interface{} {
	if p, ok := x.(*time.Time); ok && p == nil {
		return nil
	}
	return x
}

// dateArrayComparator orders *[]time.Time element-wise; ties break on length.
type dateArrayComparator struct {
	dc *dateComparator
	ic *intComparator
}

func newDateArrayComparator(order string) *dateArrayComparator {
	return &dateArrayComparator{newDateComparator(order), newIntComparator(order)}
}

func (dac *dateArrayComparator) compare(a, b interface{}) int {
	left, right := dac.untypedNil(a), dac.untypedNil(b)
	if left == nil || right == nil {
		return handleNils(left == nil, right == nil, dac.dc.lessValue)
	}
	av, bv := *(left.(*[]time.Time)), *(right.(*[]time.Time))
	for i := 0; i < len(av) && i < len(bv); i++ {
		if res := dac.dc.compareDates(av[i], bv[i]); res != 0 {
			return res
		}
	}
	return dac.ic.compareInts(len(av), len(bv))
}

func (dac *dateArrayComparator) untypedNil(x interface{}) interface{} {
	if p, ok := x.(*[]time.Time); ok && p == nil {
		return nil
	}
	return x
}

// boolComparator orders *bool values with false before true (asc).
type boolComparator struct {
	lessValue int
}

func newBoolComparator(order string) *boolComparator {
	return &boolComparator{lessValue(order)}
}

func (bc *boolComparator) compare(a, b interface{}) int {
	left, right := bc.untypedNil(a), bc.untypedNil(b)
	if left == nil || right == nil {
		return handleNils(left == nil, right == nil, bc.lessValue)
	}
	return bc.compareBools(*(left.(*bool)), *(right.(*bool)))
}

func (bc *boolComparator) compareBools(a, b bool) int {
	if a == b {
		return 0
	}
	if !a { // false sorts as the "lesser" value
		return bc.lessValue
	}
	return -bc.lessValue
}

func (bc *boolComparator) untypedNil(x interface{}) interface{} {
	if p, ok := x.(*bool); ok && p == nil {
		return nil
	}
	return x
}

// boolArrayComparator orders *[]bool element-wise; ties break on length.
type boolArrayComparator struct {
	bc *boolComparator
	ic *intComparator
}

func newBoolArrayComparator(order string) *boolArrayComparator {
	return &boolArrayComparator{newBoolComparator(order), newIntComparator(order)}
}

func (bac *boolArrayComparator) compare(a, b interface{}) int {
	left, right := bac.untypedNil(a), bac.untypedNil(b)
	if left == nil || right == nil {
		return handleNils(left == nil, right == nil, bac.bc.lessValue)
	}
	av, bv := *(left.(*[]bool)), *(right.(*[]bool))
	for i := 0; i < len(av) && i < len(bv); i++ {
		if res := bac.bc.compareBools(av[i], bv[i]); res != 0 {
			return res
		}
	}
	return bac.ic.compareInts(len(av), len(bv))
}

func (bac *boolArrayComparator) untypedNil(x interface{}) interface{} {
	if p, ok := x.(*[]bool); ok && p == nil {
		return nil
	}
	return x
}

// intComparator orders *int values; also used by the array comparators to
// break length ties.
type intComparator struct {
	lessValue int
}

func newIntComparator(order string) *intComparator {
	return &intComparator{lessValue(order)}
}

func (ic *intComparator) compare(a, b interface{}) int {
	left, right := ic.untypedNil(a), ic.untypedNil(b)
	if left == nil || right == nil {
		return handleNils(left == nil, right == nil, ic.lessValue)
	}
	return ic.compareInts(*(left.(*int)), *(right.(*int)))
}

func (ic *intComparator) compareInts(a, b int) int {
	switch {
	case a == b:
		return 0
	case a < b:
		return ic.lessValue
	default:
		return -ic.lessValue
	}
}

func (ic *intComparator) untypedNil(x interface{}) interface{} {
	if p, ok := x.(*int); ok && p == nil {
		return nil
	}
	return x
}

// anyComparator only distinguishes nil from non-nil; two non-nil values of
// unknown type always compare equal.
type anyComparator struct {
	lessValue int
}

func newAnyComparator(order string) *anyComparator {
	return &anyComparator{lessValue(order)}
}

func (ac *anyComparator) compare(a, b interface{}) int {
	if a == nil || b == nil {
		return handleNils(a == nil, b == nil, ac.lessValue)
	}
	return 0
}

// handleNils resolves a comparison where at least one side is nil: nil sorts
// as the "lesser" value for the configured order.
func handleNils(aNil, bNil bool, lessValue int) int {
	switch {
	case aNil && bNil:
		return 0
	case aNil:
		return lessValue
	default:
		return -lessValue
	}
}

// lessValue maps a sort order to the integer returned for "a sorts before b":
// -1 for ascending (default), 1 for descending.
func lessValue(order string) int {
	if order == "desc" {
		return 1
	}
	return -1
}
(_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package sorter + +import ( + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestBasicComparator_String(t *testing.T) { + Orange := "Orange" + orange := "orange" + apple := "apple" + + t.Run("strings asc", func(t *testing.T) { + comp := newStringComparator("asc") + + params := []struct { + a *string + b *string + expected int + }{ + {&Orange, &orange, 0}, + {&orange, &orange, 0}, + {&apple, &apple, 0}, + {&orange, &apple, 1}, + {&apple, &orange, -1}, + {nil, &apple, -1}, + {&orange, nil, 1}, + {nil, nil, 0}, + } + + for i, p := range params { + t.Run(fmt.Sprintf("data #%d", i), func(t *testing.T) { + assert.Equal(t, p.expected, comp.compare(p.a, p.b)) + }) + } + }) + + t.Run("strings desc", func(t *testing.T) { + comp := newStringComparator("desc") + + params := []struct { + a *string + b *string + expected int + }{ + {&Orange, &orange, 0}, + {&orange, &orange, 0}, + {&apple, &apple, 0}, + {&orange, &apple, -1}, + {&apple, &orange, 1}, + {nil, &apple, 1}, + {&orange, nil, -1}, + {nil, nil, 0}, + } + + for i, p := range params { + t.Run(fmt.Sprintf("data #%d", i), func(t *testing.T) { + assert.Equal(t, p.expected, comp.compare(p.a, p.b)) + }) + } + }) +} + +func TestBasicComparator_StringArray(t *testing.T) { + o_b_a := []string{"orange", "banana", "apple"} + p := []string{"pear"} + o_a := []string{"orange", "apple"} + o_b := []string{"orange", "banana"} + + t.Run("strings array asc", func(t *testing.T) { + comp := newStringArrayComparator("asc") + + params := []struct { + a *[]string + b *[]string + expected int + }{ + {&o_b_a, &o_b_a, 0}, + {&p, &p, 0}, + {&o_a, &o_a, 0}, + {&o_b, &o_b, 0}, + {&o_a, &o_b, -1}, + {&o_b, &o_b_a, -1}, + {&p, &o_b_a, 1}, + {nil, &o_a, -1}, + {&p, nil, 1}, + {nil, nil, 0}, + } + + for i, p := range params { + 
t.Run(fmt.Sprintf("data #%d", i), func(t *testing.T) { + assert.Equal(t, p.expected, comp.compare(p.a, p.b)) + }) + } + }) + + t.Run("strings array desc", func(t *testing.T) { + comp := newStringArrayComparator("desc") + + params := []struct { + a *[]string + b *[]string + expected int + }{ + {&o_b_a, &o_b_a, 0}, + {&p, &p, 0}, + {&o_a, &o_a, 0}, + {&o_b, &o_b, 0}, + {&o_a, &o_b, 1}, + {&o_b, &o_b_a, 1}, + {&p, &o_b_a, -1}, + {nil, &o_a, 1}, + {&p, nil, -1}, + {nil, nil, 0}, + } + + for i, p := range params { + t.Run(fmt.Sprintf("data #%d", i), func(t *testing.T) { + assert.Equal(t, p.expected, comp.compare(p.a, p.b)) + }) + } + }) +} + +func TestBasicComparator_Float64(t *testing.T) { + f_10 := -10.0 + f100 := 100.0 + f0 := 0.0 + + t.Run("floats asc", func(t *testing.T) { + comp := newFloat64Comparator("asc") + + params := []struct { + a *float64 + b *float64 + expected int + }{ + {&f_10, &f_10, 0}, + {&f100, &f100, 0}, + {&f0, &f0, 0}, + {&f100, &f_10, 1}, + {&f0, &f100, -1}, + {nil, &f_10, -1}, + {&f0, nil, 1}, + {nil, nil, 0}, + } + + for i, p := range params { + t.Run(fmt.Sprintf("data #%d", i), func(t *testing.T) { + assert.Equal(t, p.expected, comp.compare(p.a, p.b)) + }) + } + }) + + t.Run("floats desc", func(t *testing.T) { + comp := newFloat64Comparator("desc") + + params := []struct { + a *float64 + b *float64 + expected int + }{ + {&f_10, &f_10, 0}, + {&f100, &f100, 0}, + {&f0, &f0, 0}, + {&f100, &f_10, -1}, + {&f0, &f100, 1}, + {nil, &f_10, 1}, + {&f0, nil, -1}, + {nil, nil, 0}, + } + + for i, p := range params { + t.Run(fmt.Sprintf("data #%d", i), func(t *testing.T) { + assert.Equal(t, p.expected, comp.compare(p.a, p.b)) + }) + } + }) +} + +func TestBasicComparator_Float64Array(t *testing.T) { + f_3_2_1 := []float64{3, 2, 1} + f_4 := []float64{4} + f_3_1 := []float64{3, 1} + f_3_2 := []float64{3, 2} + + t.Run("floats array asc", func(t *testing.T) { + comp := newFloat64ArrayComparator("asc") + + params := []struct { + a *[]float64 + b *[]float64 + 
expected int + }{ + {&f_3_2_1, &f_3_2_1, 0}, + {&f_4, &f_4, 0}, + {&f_3_1, &f_3_1, 0}, + {&f_3_2, &f_3_2, 0}, + {&f_3_1, &f_3_2, -1}, + {&f_3_2, &f_3_2_1, -1}, + {&f_4, &f_3_2_1, 1}, + {nil, &f_3_1, -1}, + {&f_4, nil, 1}, + {nil, nil, 0}, + } + + for i, p := range params { + t.Run(fmt.Sprintf("data #%d", i), func(t *testing.T) { + assert.Equal(t, p.expected, comp.compare(p.a, p.b)) + }) + } + }) + + t.Run("floats array desc", func(t *testing.T) { + comp := newFloat64ArrayComparator("desc") + + params := []struct { + a *[]float64 + b *[]float64 + expected int + }{ + {&f_3_2_1, &f_3_2_1, 0}, + {&f_4, &f_4, 0}, + {&f_3_1, &f_3_1, 0}, + {&f_3_2, &f_3_2, 0}, + {&f_3_1, &f_3_2, 1}, + {&f_3_2, &f_3_2_1, 1}, + {&f_4, &f_3_2_1, -1}, + {nil, &f_3_1, 1}, + {&f_4, nil, -1}, + {nil, nil, 0}, + } + + for i, p := range params { + t.Run(fmt.Sprintf("data #%d", i), func(t *testing.T) { + assert.Equal(t, p.expected, comp.compare(p.a, p.b)) + }) + } + }) +} + +func TestBasicComparator_Date(t *testing.T) { + t1 := time.Now() + t2 := time.Now().Add(time.Second) + t3 := time.Now().Add(2 * time.Second) + + t.Run("dates asc", func(t *testing.T) { + comp := newDateComparator("asc") + + params := []struct { + a *time.Time + b *time.Time + expected int + }{ + {&t1, &t1, 0}, + {&t3, &t3, 0}, + {&t2, &t2, 0}, + {&t3, &t1, 1}, + {&t2, &t3, -1}, + {nil, &t1, -1}, + {&t2, nil, 1}, + {nil, nil, 0}, + } + + for i, p := range params { + t.Run(fmt.Sprintf("data #%d", i), func(t *testing.T) { + assert.Equal(t, p.expected, comp.compare(p.a, p.b)) + }) + } + }) + + t.Run("dates desc", func(t *testing.T) { + comp := newDateComparator("desc") + + params := []struct { + a *time.Time + b *time.Time + expected int + }{ + {&t1, &t1, 0}, + {&t3, &t3, 0}, + {&t2, &t2, 0}, + {&t3, &t1, -1}, + {&t2, &t3, 1}, + {nil, &t1, 1}, + {&t2, nil, -1}, + {nil, nil, 0}, + } + + for i, p := range params { + t.Run(fmt.Sprintf("data #%d", i), func(t *testing.T) { + assert.Equal(t, p.expected, comp.compare(p.a, p.b)) + }) + } + 
}) +} + +func TestBasicComparator_DateArray(t *testing.T) { + t1 := time.Now() + t2 := time.Now().Add(time.Second) + t3 := time.Now().Add(2 * time.Second) + t4 := time.Now().Add(3 * time.Second) + + t_3_2_1 := []time.Time{t3, t2, t1} + t_4 := []time.Time{t4} + t_3_1 := []time.Time{t3, t1} + t_3_2 := []time.Time{t3, t2} + + t.Run("dates array asc", func(t *testing.T) { + comp := newDateArrayComparator("asc") + + params := []struct { + a *[]time.Time + b *[]time.Time + expected int + }{ + {&t_3_2_1, &t_3_2_1, 0}, + {&t_4, &t_4, 0}, + {&t_3_1, &t_3_1, 0}, + {&t_3_2, &t_3_2, 0}, + {&t_3_1, &t_3_2, -1}, + {&t_3_2, &t_3_2_1, -1}, + {&t_4, &t_3_2_1, 1}, + {nil, &t_3_1, -1}, + {&t_4, nil, 1}, + {nil, nil, 0}, + } + + for i, p := range params { + t.Run(fmt.Sprintf("data #%d", i), func(t *testing.T) { + assert.Equal(t, p.expected, comp.compare(p.a, p.b)) + }) + } + }) + + t.Run("dates array desc", func(t *testing.T) { + comp := newDateArrayComparator("desc") + + params := []struct { + a *[]time.Time + b *[]time.Time + expected int + }{ + {&t_3_2_1, &t_3_2_1, 0}, + {&t_4, &t_4, 0}, + {&t_3_1, &t_3_1, 0}, + {&t_3_2, &t_3_2, 0}, + {&t_3_1, &t_3_2, 1}, + {&t_3_2, &t_3_2_1, 1}, + {&t_4, &t_3_2_1, -1}, + {nil, &t_3_1, 1}, + {&t_4, nil, -1}, + {nil, nil, 0}, + } + + for i, p := range params { + t.Run(fmt.Sprintf("data #%d", i), func(t *testing.T) { + assert.Equal(t, p.expected, comp.compare(p.a, p.b)) + }) + } + }) +} + +func TestBasicComparator_Bool(t *testing.T) { + fa := false + tr := true + + t.Run("bools asc", func(t *testing.T) { + comp := newBoolComparator("asc") + + params := []struct { + a *bool + b *bool + expected int + }{ + {&fa, &fa, 0}, + {&tr, &tr, 0}, + {&fa, &tr, -1}, + {nil, &fa, -1}, + {&tr, nil, 1}, + {nil, nil, 0}, + } + + for i, p := range params { + t.Run(fmt.Sprintf("data #%d", i), func(t *testing.T) { + assert.Equal(t, p.expected, comp.compare(p.a, p.b)) + }) + } + }) + + t.Run("bools desc", func(t *testing.T) { + comp := newBoolComparator("desc") + + 
params := []struct { + a *bool + b *bool + expected int + }{ + {&fa, &fa, 0}, + {&tr, &tr, 0}, + {&fa, &tr, 1}, + {nil, &fa, 1}, + {&tr, nil, -1}, + {nil, nil, 0}, + } + + for i, p := range params { + t.Run(fmt.Sprintf("data #%d", i), func(t *testing.T) { + assert.Equal(t, p.expected, comp.compare(p.a, p.b)) + }) + } + }) +} + +func TestBasicComparator_BoolArray(t *testing.T) { + fa_tr_fa := []bool{false, true, false} + tr := []bool{true} + fa_fa := []bool{false, false} + fa_tr := []bool{false, true} + + t.Run("bools array asc", func(t *testing.T) { + comp := newBoolArrayComparator("asc") + + params := []struct { + a *[]bool + b *[]bool + expected int + }{ + {&fa_tr_fa, &fa_tr_fa, 0}, + {&tr, &tr, 0}, + {&fa_fa, &fa_fa, 0}, + {&fa_tr, &fa_tr, 0}, + {&fa_fa, &fa_tr, -1}, + {&fa_tr, &fa_tr_fa, -1}, + {&tr, &fa_tr_fa, 1}, + {nil, &fa_fa, -1}, + {&tr, nil, 1}, + {nil, nil, 0}, + } + + for i, p := range params { + t.Run(fmt.Sprintf("data #%d", i), func(t *testing.T) { + assert.Equal(t, p.expected, comp.compare(p.a, p.b)) + }) + } + }) + + t.Run("bools array desc", func(t *testing.T) { + comp := newBoolArrayComparator("desc") + + params := []struct { + a *[]bool + b *[]bool + expected int + }{ + {&fa_tr_fa, &fa_tr_fa, 0}, + {&tr, &tr, 0}, + {&fa_fa, &fa_fa, 0}, + {&fa_tr, &fa_tr, 0}, + {&fa_fa, &fa_tr, 1}, + {&fa_tr, &fa_tr_fa, 1}, + {&tr, &fa_tr_fa, -1}, + {nil, &fa_fa, 1}, + {&tr, nil, -1}, + {nil, nil, 0}, + } + + for i, p := range params { + t.Run(fmt.Sprintf("data #%d", i), func(t *testing.T) { + assert.Equal(t, p.expected, comp.compare(p.a, p.b)) + }) + } + }) +} + +func TestBasicComparator_Int(t *testing.T) { + i_10 := -10 + i100 := 100 + i0 := 0 + + t.Run("ints asc", func(t *testing.T) { + comp := newIntComparator("asc") + + params := []struct { + a *int + b *int + expected int + }{ + {&i_10, &i_10, 0}, + {&i100, &i100, 0}, + {&i0, &i0, 0}, + {&i100, &i_10, 1}, + {&i0, &i100, -1}, + {nil, &i_10, -1}, + {&i0, nil, 1}, + {nil, nil, 0}, + } + + for i, p := range 
params { + t.Run(fmt.Sprintf("data #%d", i), func(t *testing.T) { + assert.Equal(t, p.expected, comp.compare(p.a, p.b)) + }) + } + }) + + t.Run("ints desc", func(t *testing.T) { + comp := newIntComparator("desc") + + params := []struct { + a *int + b *int + expected int + }{ + {&i_10, &i_10, 0}, + {&i100, &i100, 0}, + {&i0, &i0, 0}, + {&i100, &i_10, -1}, + {&i0, &i100, 1}, + {nil, &i_10, 1}, + {&i0, nil, -1}, + {nil, nil, 0}, + } + + for i, p := range params { + t.Run(fmt.Sprintf("data #%d", i), func(t *testing.T) { + assert.Equal(t, p.expected, comp.compare(p.a, p.b)) + }) + } + }) +} + +func TestBasicComparator_Any(t *testing.T) { + in := -10 + fl := 100.0 + st := "string" + ti := time.Now() + bo := true + an := struct{}{} + + t.Run("any asc", func(t *testing.T) { + comp := newAnyComparator("asc") + + params := []struct { + a interface{} + b interface{} + expected int + }{ + {&in, &in, 0}, + {&fl, &fl, 0}, + {&st, &st, 0}, + {&ti, &ti, 0}, + {&bo, &bo, 0}, + {&an, &an, 0}, + {&in, &fl, 0}, + {&fl, &st, 0}, + {&st, &ti, 0}, + {&ti, &bo, 0}, + {&bo, &an, 0}, + {&an, &in, 0}, + {nil, &in, -1}, + {nil, &fl, -1}, + {nil, &st, -1}, + {&ti, nil, 1}, + {&bo, nil, 1}, + {&an, nil, 1}, + {nil, nil, 0}, + } + + for i, p := range params { + t.Run(fmt.Sprintf("data #%d", i), func(t *testing.T) { + assert.Equal(t, p.expected, comp.compare(p.a, p.b)) + }) + } + }) + + t.Run("any desc", func(t *testing.T) { + comp := newAnyComparator("desc") + + params := []struct { + a interface{} + b interface{} + expected int + }{ + {&in, &in, 0}, + {&fl, &fl, 0}, + {&st, &st, 0}, + {&ti, &ti, 0}, + {&bo, &bo, 0}, + {&an, &an, 0}, + {&in, &fl, 0}, + {&fl, &st, 0}, + {&st, &ti, 0}, + {&ti, &bo, 0}, + {&bo, &an, 0}, + {&an, &in, 0}, + {nil, &in, 1}, + {nil, &fl, 1}, + {nil, &st, 1}, + {&ti, nil, -1}, + {&bo, nil, -1}, + {&an, nil, -1}, + {nil, nil, 0}, + } + + for i, p := range params { + t.Run(fmt.Sprintf("data #%d", i), func(t *testing.T) { + assert.Equal(t, p.expected, comp.compare(p.a, 
p.b)) + }) + } + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/sorter/comparable_creator.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/sorter/comparable_creator.go new file mode 100644 index 0000000000000000000000000000000000000000..9b0f03338b4d243a4fdec9825f09edcc43401cdf --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/sorter/comparable_creator.go @@ -0,0 +1,70 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package sorter + +import "github.com/weaviate/weaviate/entities/storobj" + +type comparable struct { + docID uint64 + // all property values that will be used for comparison + // most important 1st, least important last + values []interface{} + // additional payload to hold data related to object, not used in sorting process + payload interface{} +} + +type comparableCreator struct { + extractor *comparableValueExtractor + propNames []string +} + +func newComparableCreator(extractor *comparableValueExtractor, propNames []string) *comparableCreator { + return &comparableCreator{extractor, propNames} +} + +func (c *comparableCreator) createFromBytes(docID uint64, objData []byte) *comparable { + return c.createFromBytesWithPayload(docID, objData, nil) +} + +func (c *comparableCreator) createFromBytesWithPayload(docID uint64, objData []byte, payload interface{}) *comparable { + values := make([]interface{}, len(c.propNames)) + for level, propName := range c.propNames { + values[level] = c.extractor.extractFromBytes(objData, propName) + } + return &comparable{docID, values, payload} +} + +func (c *comparableCreator) createFromObjectWithPayload(object *storobj.Object, payload interface{}) *comparable { + values := make([]interface{}, len(c.propNames)) + for 
level, propName := range c.propNames { + values[level] = c.extractor.extractFromObject(object, propName) + } + return &comparable{object.DocID, values, payload} +} + +func (c *comparableCreator) extractDocIDs(comparables []*comparable) []uint64 { + docIDs := make([]uint64, len(comparables)) + for i, comparable := range comparables { + docIDs[i] = comparable.docID + } + return docIDs +} + +func (c *comparableCreator) extractPayloads(comparables []*comparable, + consume func(i int, docID uint64, payload interface{}) (stop bool), +) { + for i, comparable := range comparables { + if consume(i, comparable.docID, comparable.payload) { + break + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/sorter/comparable_sorter.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/sorter/comparable_sorter.go new file mode 100644 index 0000000000000000000000000000000000000000..2bfb6b2e5745e83de2f2219516466fbcd8e562c1 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/sorter/comparable_sorter.go @@ -0,0 +1,111 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package sorter + +import ( + "sort" +) + +type comparabeSorter interface { + addComparable(el *comparable) (added bool) + getSorted() []*comparable +} + +// sort elements while adding new one, no following sorting is needed +// if limit applied (>0), only that many elements is in the result +type insertSorter struct { + comparator *comparator + limit int + comparables []*comparable +} + +func newInsertSorter(comparator *comparator, limit int) comparabeSorter { + comparables := make([]*comparable, 0, limit) + return &insertSorter{comparator, limit, comparables} +} + +func (is *insertSorter) addComparable(el *comparable) bool { + count := len(is.comparables) + // insert if there is no limit or limit not reached yet + if is.limit == 0 || count < is.limit { + is.insert(el) + return true + } + // limit reached - compare with last element and insert if "smaller" + // last element can be removed + if is.comparator.compare(el, is.comparables[count-1]) == -1 { + is.comparables = is.comparables[:count-1] + is.insert(el) + return true + } + return false +} + +func (is *insertSorter) insert(el *comparable) { + count := len(is.comparables) + pos := is.findPosition(el, 0, count) + if pos == count { + is.comparables = append(is.comparables, el) + return + } + is.comparables = append(is.comparables[:pos+1], is.comparables[pos:]...) 
+ is.comparables[pos] = el +} + +func (is *insertSorter) findPosition(el *comparable, startInc, endExc int) int { + if startInc == endExc { + return startInc + } + + middle := startInc + (endExc-startInc)/2 + if is.comparator.compare(el, is.comparables[middle]) != -1 { + return is.findPosition(el, middle+1, endExc) + } + return is.findPosition(el, startInc, middle) +} + +func (is *insertSorter) getSorted() []*comparable { + return is.comparables +} + +// implementation of sort.Interface +// sorting is performed in getSorted() method +type defaultSorter struct { + comparator *comparator + comparables []*comparable +} + +func newDefaultSorter(comparator *comparator, cap int) comparabeSorter { + return &defaultSorter{comparator, make([]*comparable, 0, cap)} +} + +func (ds *defaultSorter) addComparable(el *comparable) bool { + ds.comparables = append(ds.comparables, el) + return true +} + +func (ds *defaultSorter) getSorted() []*comparable { + sort.Sort(ds) + return ds.comparables +} + +func (ds *defaultSorter) Len() int { + return len(ds.comparables) +} + +func (ds *defaultSorter) Swap(i, j int) { + ds.comparables[i], ds.comparables[j] = ds.comparables[j], ds.comparables[i] +} + +func (ds *defaultSorter) Less(i, j int) bool { + return ds.comparator.compare(ds.comparables[i], ds.comparables[j]) == -1 +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/sorter/comparable_value_extractor.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/sorter/comparable_value_extractor.go new file mode 100644 index 0000000000000000000000000000000000000000..a9f3b84032f60b885879d5eca881290e699a1a4d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/sorter/comparable_value_extractor.go @@ -0,0 +1,212 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. 
All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package sorter + +import ( + "encoding/json" + "strconv" + "time" + + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/storobj" +) + +type comparableValueExtractor struct { + dataTypesHelper *dataTypesHelper +} + +func newComparableValueExtractor(dataTypesHelper *dataTypesHelper) *comparableValueExtractor { + return &comparableValueExtractor{dataTypesHelper} +} + +func (e *comparableValueExtractor) extractFromBytes(objData []byte, propName string) interface{} { + value, success, _ := storobj.ParseAndExtractProperty(objData, propName) + // in case the property does not exist for the object return nil + if len(value) == 0 { + return nil + } + if success { + switch e.dataTypesHelper.getType(propName) { + case schema.DataTypeBlob: + return &value[0] + case schema.DataTypeText: + return &value[0] + case schema.DataTypeTextArray: + return &value + case schema.DataTypeDate: + d := e.mustExtractDates(value[:1])[0] + return &d + case schema.DataTypeDateArray: + da := e.mustExtractDates(value) + return &da + case schema.DataTypeNumber, schema.DataTypeInt: + n := e.mustExtractNumbers(value[:1])[0] + return &n + case schema.DataTypeNumberArray, schema.DataTypeIntArray: + na := e.mustExtractNumbers(value) + return &na + case schema.DataTypeBoolean: + b := e.mustExtractBools(value[:1])[0] + return &b + case schema.DataTypeBooleanArray: + ba := e.mustExtractBools(value) + return &ba + case schema.DataTypePhoneNumber: + fa := e.toFloatArrayFromPhoneNumber(e.mustExtractPhoneNumber(value)) + return &fa + case schema.DataTypeGeoCoordinates: + fa := e.toFloatArrayFromGeoCoordinates(e.mustExtractGeoCoordinates(value)) + return &fa + default: + return nil + } + } + return nil +} + +func (e *comparableValueExtractor) extractFromObject(object *storobj.Object, propName string) interface{} { + 
if propName == filters.InternalPropID || propName == filters.InternalPropBackwardsCompatID { + id := object.ID().String() + return &id + } + if propName == filters.InternalPropCreationTimeUnix { + ts := float64(object.CreationTimeUnix()) + return &ts + } + if propName == filters.InternalPropLastUpdateTimeUnix { + ts := float64(object.LastUpdateTimeUnix()) + return &ts + } + + propertiesMap, ok := object.Properties().(map[string]interface{}) + if !ok { + return nil + } + value, ok := propertiesMap[propName] + if !ok { + return nil + } + + switch e.dataTypesHelper.getType(propName) { + case schema.DataTypeBlob: + s := value.(string) + return &s + case schema.DataTypeText: + s := value.(string) + return &s + case schema.DataTypeTextArray: + sa := value.([]string) + return &sa + case schema.DataTypeDate: + d := e.mustExtractDates([]string{value.(string)})[0] + return &d + case schema.DataTypeDateArray: + da := e.mustExtractDates(value.([]string)) + return &da + case schema.DataTypeNumber, schema.DataTypeInt: + n := value.(float64) + return &n + case schema.DataTypeNumberArray, schema.DataTypeIntArray: + na := value.([]float64) + return &na + case schema.DataTypeBoolean: + b := value.(bool) + return &b + case schema.DataTypeBooleanArray: + ba := value.([]bool) + return &ba + case schema.DataTypePhoneNumber: + fa := e.toFloatArrayFromPhoneNumber(value.(*models.PhoneNumber)) + return &fa + case schema.DataTypeGeoCoordinates: + fa := e.toFloatArrayFromGeoCoordinates(value.(*models.GeoCoordinates)) + return &fa + default: + return nil + } +} + +func (e *comparableValueExtractor) mustExtractNumbers(value []string) []float64 { + numbers := make([]float64, len(value)) + for i := range value { + number, err := strconv.ParseFloat(value[i], 64) + if err != nil { + panic("sorter: not a number") + } + numbers[i] = number + } + return numbers +} + +func (e *comparableValueExtractor) mustExtractBools(value []string) []bool { + bools := make([]bool, len(value)) + for i := range value 
{ + switch value[i] { + case "true": + bools[i] = true + case "false": + bools[i] = false + default: + panic("sorter: not a bool") + } + } + return bools +} + +func (e *comparableValueExtractor) mustExtractDates(value []string) []time.Time { + dates := make([]time.Time, len(value)) + for i := range value { + date, err := time.Parse(time.RFC3339, value[i]) + if err != nil { + panic("sorter: not a date") + } + dates[i] = date + } + return dates +} + +func (e *comparableValueExtractor) mustExtractPhoneNumber(value []string) *models.PhoneNumber { + if len(value) == 1 { + var phoneNumber *models.PhoneNumber + if err := json.Unmarshal([]byte(value[0]), &phoneNumber); err == nil { + return phoneNumber + } + } + panic("sorter: not a phone number") +} + +func (e *comparableValueExtractor) mustExtractGeoCoordinates(value []string) *models.GeoCoordinates { + if len(value) == 1 { + var geoCoordinates *models.GeoCoordinates + if err := json.Unmarshal([]byte(value[0]), &geoCoordinates); err == nil { + return geoCoordinates + } + } + panic("sorter: not a geo coordinates") +} + +func (e *comparableValueExtractor) toFloatArrayFromPhoneNumber(value *models.PhoneNumber) []float64 { + return []float64{float64(value.CountryCode), float64(value.National)} +} + +func (e *comparableValueExtractor) toFloatArrayFromGeoCoordinates(value *models.GeoCoordinates) []float64 { + fa := make([]float64, 2) + if value.Longitude != nil { + fa[0] = float64(*value.Longitude) + } + if value.Latitude != nil { + fa[1] = float64(*value.Latitude) + } + return fa +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/sorter/comparable_value_extractor_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/sorter/comparable_value_extractor_test.go new file mode 100644 index 0000000000000000000000000000000000000000..09c507b523dfcfc6dab53fe0374fe1c9176558b8 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/sorter/comparable_value_extractor_test.go @@ -0,0 +1,137 
@@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package sorter + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestComparableValueExtractor(t *testing.T) { + schema := getMyFavoriteClassSchemaForTests() + class := schema.GetClass(testClassName) + helper := newDataTypesHelper(class) + extractor := newComparableValueExtractor(helper) + object := createMyFavoriteClassObject() + + params := []struct { + propName string + expected interface{} + }{ + { + "id", + ptrString("73f2eb5f-5abf-447a-81ca-74b1dd168247"), + }, + { + "_creationTimeUnix", + ptrFloat64(900000000001), + }, + { + "_lastUpdateTimeUnix", + ptrFloat64(900000000002), + }, + { + "textProp", + ptrString("text"), + }, + { + "textPropArray", + ptrStringArray("text", "text"), + }, + { + "intProp", + ptrFloat64(100), + }, + { + "numberProp", + ptrFloat64(17), + }, + { + "intPropArray", + ptrFloat64Array(10, 20, 30), + }, + { + "numberPropArray", + ptrFloat64Array(1, 2, 3), + }, + { + "boolProp", + ptrBool(true), + }, + { + "boolPropArray", + ptrBoolArray(true, false, true), + }, + { + "dateProp", + ptrTime("1980-01-01T00:00:00+02:00"), + }, + { + "datePropArray", + ptrTimeArray("1980-01-01T00:00:00+02:00"), + }, + { + "phoneProp", + ptrFloat64Array(49, 1000000), + }, + { + "geoProp", + ptrFloat64Array(1, 2), + }, + { + "emptyStringProp", + nil, + }, + { + "emptyBoolProp", + nil, + }, + { + "emptyNumberProp", + nil, + }, + { + "emptyIntProp", + nil, + }, + { + "crefProp", + nil, + }, + { + "nonExistentProp", + nil, + }, + } + + t.Run("extract comparable values from binary", func(t *testing.T) { + objData, err := object.MarshalBinary() + require.Nil(t, err) + + for _, p := range params { + 
t.Run(fmt.Sprintf("data %s", p.propName), func(t *testing.T) { + assert.Equal(t, p.expected, extractor.extractFromBytes(objData, p.propName)) + }) + } + }) + + t.Run("extract comparable values from object", func(t *testing.T) { + for _, p := range params { + t.Run(fmt.Sprintf("data %s", p.propName), func(t *testing.T) { + assert.Equal(t, p.expected, extractor.extractFromObject(object, p.propName)) + }) + } + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/sorter/comparator.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/sorter/comparator.go new file mode 100644 index 0000000000000000000000000000000000000000..a3e7a921e9c02abff499bab9f59dbeea45c8ffd2 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/sorter/comparator.go @@ -0,0 +1,35 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package sorter + +type comparator struct { + comparators []basicComparator +} + +func newComparator(dataTypesHelper *dataTypesHelper, propNames []string, orders []string) *comparator { + provider := &basicComparatorProvider{} + comparators := make([]basicComparator, len(propNames)) + for level, propName := range propNames { + dataType := dataTypesHelper.getType(propName) + comparators[level] = provider.provide(dataType, orders[level]) + } + return &comparator{comparators} +} + +func (c *comparator) compare(a, b *comparable) int { + for level, comparator := range c.comparators { + if res := comparator.compare(a.values[level], b.values[level]); res != 0 { + return res + } + } + return 0 +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/sorter/datatypes_helper.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/sorter/datatypes_helper.go new file mode 100644 index 0000000000000000000000000000000000000000..f9be719d99446e9078cfeaf5db73cf367b0e3c50 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/sorter/datatypes_helper.go @@ -0,0 +1,69 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package sorter + +import ( + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" +) + +type dataTypesHelper struct { + class *models.Class + dataTypes map[string][]string +} + +func newDataTypesHelper(class *models.Class) *dataTypesHelper { + return &dataTypesHelper{class, make(map[string][]string)} +} + +func (h *dataTypesHelper) getStrings(propName string) []string { + if dataType, ok := h.dataTypes[propName]; ok { + return dataType + } + + h.dataTypes[propName] = h.find(propName) + return h.dataTypes[propName] +} + +func (h *dataTypesHelper) find(propName string) []string { + if propName == filters.InternalPropID || propName == filters.InternalPropBackwardsCompatID { + return schema.DataTypeText.PropString() + } + if propName == filters.InternalPropCreationTimeUnix || propName == filters.InternalPropLastUpdateTimeUnix { + return []string{string(schema.DataTypeInt)} + } + for _, property := range h.class.Properties { + if property.Name == propName { + return property.DataType + } + } + return nil +} + +func (h *dataTypesHelper) getType(propName string) schema.DataType { + strings := h.getStrings(propName) + if len(strings) > 0 { + return schema.DataType(strings[0]) + } + return "" +} + +func (h *dataTypesHelper) hasFilterableIndex(propName string) bool { + for _, property := range h.class.Properties { + if property.Name == propName { + return property.IndexFilterable != nil && *property.IndexFilterable + } + } + + return false +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/sorter/datatypes_helper_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/sorter/datatypes_helper_test.go new file mode 100644 index 0000000000000000000000000000000000000000..00aea639c6520343be25ed240370383bb7183eb0 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/sorter/datatypes_helper_test.go 
@@ -0,0 +1,80 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package sorter + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/weaviate/weaviate/entities/schema" +) + +func TestDataTypesHelper(t *testing.T) { + sch := getMyFavoriteClassSchemaForTests() + class := sch.GetClass(testClassName) + helper := newDataTypesHelper(class) + + t.Run("get data types as strings", func(t *testing.T) { + params := []struct { + propName string + expected []string + }{ + {"textProp", []string{string(schema.DataTypeText)}}, + {"textPropArray", []string{string(schema.DataTypeTextArray)}}, + {"intProp", []string{string(schema.DataTypeInt)}}, + {"numberProp", []string{string(schema.DataTypeNumber)}}, + {"intPropArray", []string{string(schema.DataTypeIntArray)}}, + {"numberPropArray", []string{string(schema.DataTypeNumberArray)}}, + {"boolProp", []string{string(schema.DataTypeBoolean)}}, + {"boolPropArray", []string{string(schema.DataTypeBooleanArray)}}, + {"dateProp", []string{string(schema.DataTypeDate)}}, + {"datePropArray", []string{string(schema.DataTypeDateArray)}}, + {"phoneProp", []string{string(schema.DataTypePhoneNumber)}}, + {"geoProp", []string{string(schema.DataTypeGeoCoordinates)}}, + {"crefProp", []string{string(schema.DataTypeCRef)}}, + } + + for i, p := range params { + t.Run(fmt.Sprintf("data #%d", i), func(t *testing.T) { + assert.Equal(t, p.expected, helper.getStrings(p.propName)) + }) + } + }) + + t.Run("get data types as type", func(t *testing.T) { + params := []struct { + propName string + expected schema.DataType + }{ + {"textProp", schema.DataTypeText}, + {"textPropArray", schema.DataTypeTextArray}, + {"intProp", schema.DataTypeInt}, + {"numberProp", schema.DataTypeNumber}, + 
{"intPropArray", schema.DataTypeIntArray}, + {"numberPropArray", schema.DataTypeNumberArray}, + {"boolProp", schema.DataTypeBoolean}, + {"boolPropArray", schema.DataTypeBooleanArray}, + {"dateProp", schema.DataTypeDate}, + {"datePropArray", schema.DataTypeDateArray}, + {"phoneProp", schema.DataTypePhoneNumber}, + {"geoProp", schema.DataTypeGeoCoordinates}, + {"crefProp", schema.DataTypeCRef}, + } + + for i, p := range params { + t.Run(fmt.Sprintf("data #%d", i), func(t *testing.T) { + assert.Equal(t, p.expected, helper.getType(p.propName)) + }) + } + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/sorter/fakes_for_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/sorter/fakes_for_test.go new file mode 100644 index 0000000000000000000000000000000000000000..9084767205b6528e1adb04abffd244f076ac587b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/sorter/fakes_for_test.go @@ -0,0 +1,412 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package sorter + +import ( + "time" + + "github.com/go-openapi/strfmt" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/storobj" +) + +const testClassName = "MyFavoriteClass" + +func getMyFavoriteClassSchemaForTests() schema.Schema { + return schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{ + { + Class: testClassName, + Properties: []*models.Property{ + { + Name: "textProp", + DataType: []string{string(schema.DataTypeText)}, + }, + { + Name: "textPropArray", + DataType: []string{string(schema.DataTypeTextArray)}, + }, + { + Name: "intProp", + DataType: []string{string(schema.DataTypeInt)}, + }, + { + Name: "numberProp", + DataType: []string{string(schema.DataTypeNumber)}, + }, + { + Name: "intPropArray", + DataType: []string{string(schema.DataTypeIntArray)}, + }, + { + Name: "numberPropArray", + DataType: []string{string(schema.DataTypeNumberArray)}, + }, + { + Name: "boolProp", + DataType: []string{string(schema.DataTypeBoolean)}, + }, + { + Name: "boolPropArray", + DataType: []string{string(schema.DataTypeBooleanArray)}, + }, + { + Name: "dateProp", + DataType: []string{string(schema.DataTypeDate)}, + }, + { + Name: "datePropArray", + DataType: []string{string(schema.DataTypeDateArray)}, + }, + { + Name: "phoneProp", + DataType: []string{string(schema.DataTypePhoneNumber)}, + }, + { + Name: "geoProp", + DataType: []string{string(schema.DataTypeGeoCoordinates)}, + }, + { + Name: "emptyStringProp", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + { + Name: "emptyBoolProp", + DataType: []string{string(schema.DataTypeBoolean)}, + }, + { + Name: "emptyNumberProp", + DataType: []string{string(schema.DataTypeNumber)}, + }, + { + Name: "emptyIntProp", + DataType: []string{string(schema.DataTypeInt)}, + }, + { + Name: "crefProp", + DataType: 
[]string{string(schema.DataTypeCRef)}, + }, + }, + }, + }, + }, + } +} + +func createMyFavoriteClassObject() *storobj.Object { + return storobj.FromObject( + &models.Object{ + Class: testClassName, + CreationTimeUnix: 900000000001, + LastUpdateTimeUnix: 900000000002, + ID: strfmt.UUID("73f2eb5f-5abf-447a-81ca-74b1dd168247"), + Properties: map[string]interface{}{ + "textProp": "text", + "textPropArray": []string{"text", "text"}, + "intProp": float64(100), + "numberProp": float64(17), + "intPropArray": []float64{10, 20, 30}, + "numberPropArray": []float64{1, 2, 3}, + "boolProp": true, + "boolPropArray": []bool{true, false, true}, + "dateProp": "1980-01-01T00:00:00+02:00", + "datePropArray": []string{"1980-01-01T00:00:00+02:00"}, + "phoneProp": &models.PhoneNumber{ + CountryCode: 49, + DefaultCountry: "DE", + Input: "0171 1000000", + Valid: true, + InternationalFormatted: "+49 171 1000000", + National: 1000000, + NationalFormatted: "0171 1000000", + }, + "geoProp": &models.GeoCoordinates{ + Longitude: ptrFloat32(1), + Latitude: ptrFloat32(2), + }, + }, + }, + []float32{1, 2, 0.7}, + nil, + nil, + ) +} + +func sorterCitySchema() *schema.Schema { + return &schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{ + { + Class: "City", + Properties: []*models.Property{ + { + Name: "name", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + { + Name: "country", + DataType: []string{string(schema.DataTypeText)}, + }, + { + Name: "population", + DataType: []string{string(schema.DataTypeInt)}, + }, + { + Name: "cityArea", + DataType: []string{string(schema.DataTypeNumber)}, + }, + { + Name: "cityRights", + DataType: []string{string(schema.DataTypeDate)}, + }, + { + Name: "timezones", + DataType: schema.DataTypeTextArray.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + { + Name: "timezonesUTC", + DataType: []string{string(schema.DataTypeTextArray)}, + }, + { + Name: "isCapital", + 
DataType: []string{string(schema.DataTypeBoolean)}, + }, + { + Name: "isCapitalArray", + DataType: []string{string(schema.DataTypeBooleanArray)}, + }, + { + Name: "favoriteNumbers", + DataType: []string{string(schema.DataTypeNumberArray)}, + }, + { + Name: "favoriteInts", + DataType: []string{string(schema.DataTypeIntArray)}, + }, + { + Name: "favoriteDates", + DataType: []string{string(schema.DataTypeDateArray)}, + }, + { + Name: "phoneNumber", + DataType: []string{string(schema.DataTypePhoneNumber)}, + }, + { + Name: "location", + DataType: []string{string(schema.DataTypeGeoCoordinates)}, + }, + }, + }, + }, + }, + } +} + +func sorterCitySchemaDistances() []float32 { + return []float32{0.1, 0.0, 0.2, 0.3, 0.4, 0.0} +} + +func sorterCitySchemaObjects() []*storobj.Object { + return []*storobj.Object{cityWroclaw, cityNil2, cityBerlin, cityNewYork, cityAmsterdam, cityNil} +} + +var ( + cityWroclaw = &storobj.Object{ + Object: models.Object{ + Class: "City", + ID: strfmt.UUID("f10018a7-ad67-4774-a9ac-86a04df51cb6"), + CreationTimeUnix: 9000000006, + LastUpdateTimeUnix: 9100000006, + Properties: map[string]interface{}{ + "name": "Wroclaw", + "country": "Poland", + "population": float64(641928), + "cityArea": float64(292.23), + "cityRights": "1214-01-01T00:00:00+02:00", + "timezones": []string{"CET", "CEST"}, + "timezonesUTC": []string{"UTC+1", "UTC+2"}, + "isCapital": false, + "isCapitalArray": []bool{false, false}, + "favoriteNumbers": []float64{0, 0, 0}, + "favoriteInts": []float64{0, 0, 0}, + "favoriteDates": []string{"1214-01-01T00:00:00+02:00", "1214-01-01T00:00:00+02:00"}, + "phoneNumber": &models.PhoneNumber{ + CountryCode: 0, + National: 400500600, + }, + "location": &models.GeoCoordinates{ + Latitude: ptrFloat32(51.11), + Longitude: ptrFloat32(17.022222), + }, + }, + }, + } + cityBerlin = &storobj.Object{ + Object: models.Object{ + Class: "City", + ID: strfmt.UUID("b06bb8a7-ad67-4774-a9ac-86a04df51cb6"), + CreationTimeUnix: 9000000002, + LastUpdateTimeUnix: 
9100000002, + Properties: map[string]interface{}{ + "name": "Berlin", + "country": "Germany", + "population": float64(3664088), + "cityArea": float64(891.95), + "cityRights": "1400-01-01T00:00:00+02:00", + "timezones": []string{"CET", "CEST"}, + "timezonesUTC": []string{"UTC+1", "UTC+2"}, + "isCapital": true, + "isCapitalArray": []bool{false, false, true}, + "favoriteNumbers": []float64{0, 10, 1}, + "favoriteInts": []float64{0, 10, 1}, + "favoriteDates": []string{"1400-01-01T00:00:00+02:00"}, + "phoneNumber": &models.PhoneNumber{ + CountryCode: 33, + National: 400500610, + }, + "location": &models.GeoCoordinates{ + Latitude: ptrFloat32(52.518611), + Longitude: ptrFloat32(13.408333), + }, + }, + }, + } + cityNewYork = &storobj.Object{ + Object: models.Object{ + Class: "City", + ID: strfmt.UUID("e06bb8a7-ad67-4774-a9ac-86a04df51cb6"), + CreationTimeUnix: 9000000003, + LastUpdateTimeUnix: 9100000003, + Properties: map[string]interface{}{ + "name": "New York", + "country": "USA", + "population": float64(8336817), + "cityArea": float64(1223.59), + "cityRights": "1653-01-01T00:00:00+02:00", + "timezones": []string{"EST", "EDT"}, + "timezonesUTC": []string{"UTC-5", "UTC-4"}, + "isCapital": false, + "isCapitalArray": []bool{true, true, true}, + "favoriteNumbers": []float64{-100000.23, -8.909}, + "favoriteInts": []float64{-100000, -8}, + "favoriteDates": []string{"1400-01-01T00:00:00+02:00", "1653-01-01T00:00:00+02:00"}, + "phoneNumber": &models.PhoneNumber{ + CountryCode: 33, + National: 400500609, + }, + "location": &models.GeoCoordinates{ + Latitude: ptrFloat32(40.716667), + Longitude: ptrFloat32(-74), + }, + }, + }, + } + cityAmsterdam = &storobj.Object{ + Object: models.Object{ + Class: "City", + ID: strfmt.UUID("a06bb8a7-ad67-4774-a9ac-86a04df51cb6"), + CreationTimeUnix: 9000000001, + LastUpdateTimeUnix: 9100000001, + Properties: map[string]interface{}{ + "name": "Amsterdam", + "country": "The Netherlands", + "population": float64(905234), + "cityArea": 
float64(219.32), + "cityRights": "1100-01-01T00:00:00+02:00", + "timezones": []string{"CET", "CEST"}, + "timezonesUTC": []string{"UTC+1", "UTC+2"}, + "isCapital": true, + "isCapitalArray": []bool{true}, + "favoriteNumbers": []float64{1, 2, 3, 4, 5, 6, 8.8, 9.9}, + "favoriteInts": []float64{1, 2, 3, 4, 5, 6, 8, 9}, + "favoriteDates": []string{"1100-01-01T00:00:00+02:00"}, + "phoneNumber": &models.PhoneNumber{ + CountryCode: 33, + National: 400500602, + }, + "location": &models.GeoCoordinates{ + Latitude: ptrFloat32(52.366667), + Longitude: ptrFloat32(4.9), + }, + }, + }, + } + cityNil = &storobj.Object{ + Object: models.Object{ + Class: "City", + ID: strfmt.UUID("f00018a7-ad67-4774-a9ac-86a04df51cb6"), + CreationTimeUnix: 9000000004, + LastUpdateTimeUnix: 9100000004, + Properties: map[string]interface{}{ + "name": "Nil", + }, + }, + } + cityNil2 = &storobj.Object{ + Object: models.Object{ + Class: "City", + ID: strfmt.UUID("f00028a7-ad67-4774-a9ac-86a04df51cb6"), + CreationTimeUnix: 9000000005, + LastUpdateTimeUnix: 9100000005, + Properties: map[string]interface{}{ + "name": "Nil2", + }, + }, + } +) + +func ptrString(s string) *string { + return &s +} + +func ptrStringArray(sa ...string) *[]string { + return &sa +} + +func ptrFloat32(f float32) *float32 { + return &f +} + +func ptrFloat64(f float64) *float64 { + return &f +} + +func ptrFloat64Array(fa ...float64) *[]float64 { + return &fa +} + +func ptrBool(b bool) *bool { + return &b +} + +func ptrBoolArray(ba ...bool) *[]bool { + return &ba +} + +func ptrTime(s string) *time.Time { + t, _ := time.Parse(time.RFC3339, s) + return &t +} + +func ptrTimeArray(sa ...string) *[]time.Time { + res := make([]time.Time, len(sa)) + for i := range sa { + t, _ := time.Parse(time.RFC3339, sa[i]) + res[i] = t + } + return &res +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/sorter/inverted_sorter.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/sorter/inverted_sorter.go new file mode 100644 index 
0000000000000000000000000000000000000000..7e81a80135d876f6e38cfb323dc65cfca4539858 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/sorter/inverted_sorter.go @@ -0,0 +1,439 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package sorter + +import ( + "bytes" + "context" + "fmt" + "slices" + "time" + + "github.com/weaviate/sroar" + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv" + "github.com/weaviate/weaviate/entities/filters" +) + +type invertedSorter struct { + store *lsmkv.Store + dataTypesHelper *dataTypesHelper +} + +// NewInvertedSorter constructs the specialised sorter that walks an +// inverted-index bucket whose *value* layout is a roaring bitmap of docIDs. +// +// Compared with the generic objects-bucket sorter this variant can avoid +// deserialising whole objects and instead streams the docIDs that already +// appear in the required byte order. It wins whenever: +// +// - The *first* ORDER-BY key is filterable (hence indexed) **and** uses a +// byte-order-preserving encoding (date, int, number). +// +// - Additional sort keys are optional. Whenever two objects tie on the +// current key, the sorter **recursively** invokes itself on the tied set +// with the remaining keys, guaranteeing full lexicographic ordering without +// resorting to an in-memory N log N sort. +// +// No locks are taken; the sorter assumes the underlying lsmkv.Store provides +// its usual read-concurrency guarantees. 
+func NewInvertedSorter(store *lsmkv.Store, dataTypesHelper *dataTypesHelper) *invertedSorter { + return &invertedSorter{ + store: store, + dataTypesHelper: dataTypesHelper, + } +} + +// SortDocIDs returns at most *limit* document IDs ordered lexicographically by +// the sequence of sort clauses in *sort*. All candidate IDs come from *ids*, +// a pre-filtered allow-list. +// +// ## Fast path vs. Slow path +// +// - **Fast path (ASC)** – a forward cursor walk on the roaring-set bucket of +// the first sort key. Because the inverted index is already ordered +// `(value, docID)`, the first *limit* matches are immediately correct. +// +// - **Slow path (DESC)** – instead of a full reverse scan we probe the key +// space at coarse quantiles (see `quantileKeysForDescSort`) and walk only the +// tail window that can contain the last *limit* rows. Each chunk is reversed +// locally so ties remain docID-ascending; the whole result slice is reversed +// once at the end. +// +// ## Handling multi-column ORDER BY +// +// When several rows tie on the current key **and** further sort clauses +// remain, the sorter +// +// 1. Builds an `AllowList` for just the tied IDs. +// 2. Recursively calls `sortDocIDsWithNesting` with the remaining clauses. +// 3. Splices the recursively ordered IDs back into the output slice. +// +// This yields full lexicographic ordering without an in-memory *N log N* sort +// and keeps memory usage ≤ *limit*. +// +// ## Early exit & complexity +// +// - Each recursion level stops scanning as soon as *limit* IDs are finalised. +// - **Best case** (few ties, small limit): *O(rowsHit)*. +// - **Worst case** (deep recursion with many ties): *O(rowsHit × depth)*, +// where *depth* ≤ `len(sort)`. +// +// A concise trace (rows scanned, seeks, recursion depth, elapsed time) is +// appended to the slow-query log stored in `ctx`, allowing operators to review +// plan quality under production load. 
+func (is *invertedSorter) SortDocIDs( + ctx context.Context, + limit int, + sort []filters.Sort, + ids helpers.AllowList, +) ([]uint64, error) { + return is.sortDocIDsWithNesting(ctx, limit, sort, ids, 0) +} + +// sortDocIDsWithNesting is the entrypoint into a (potentially recursive) sort, +// the nesting indicator is purely used for the query slow log annotation +// +// on each entry, we make a new decision on whether to go down the ASC fast +// path or the DESC windowed walk +func (is *invertedSorter) sortDocIDsWithNesting( + ctx context.Context, + limit int, + sort []filters.Sort, + ids helpers.AllowList, + nesting int, +) ([]uint64, error) { + if len(sort) < 1 { + // this should never happen, the query planner should already have chosen + // another strategy + return nil, fmt.Errorf("no sort clause provided, expected at least one sort clause") + } + + propNames, orders, err := extractPropNamesAndOrders(sort) + if err != nil { + return nil, err + } + + bucket := is.store.Bucket(helpers.BucketFromPropNameLSM(propNames[0])) + if bucket.Strategy() != lsmkv.StrategyRoaringSet { + // this should never happen, the query planner should already have chosen + // another strategy + return nil, fmt.Errorf("expected roaring set bucket for property %s, got %s", + propNames[0], bucket.Strategy()) + } + + switch orders[0] { + case "asc": + return is.sortRoaringSetASC(ctx, bucket, limit, sort, ids, nesting) + case "desc": + return is.sortRoaringSetDESC(ctx, bucket, limit, sort, ids, nesting) + default: + return nil, fmt.Errorf("unsupported sort order %s", orders[0]) + } +} + +// sortRoaringSetASC walks the roaring set bucket in ascending order, i.e. the +// fast path. +// +// If only a single clause is provided, it will exist as soon as the limit is +// reached. If a second clause is provided, it will make sure to finish reading +// each row, then start a nested tie-breaker sort, to sort the duplicate IDs. 
+func (is *invertedSorter) sortRoaringSetASC(ctx context.Context, bucket *lsmkv.Bucket, + limit int, sort []filters.Sort, ids helpers.AllowList, nesting int, +) ([]uint64, error) { + startTime := time.Now() + hasMoreNesting := len(sort) > 1 + cursor := bucket.CursorRoaringSet() + defer cursor.Close() + + foundIDs := make([]uint64, 0, limit) + rowsEvaluated := 0 + defer is.annotateASC(ctx, nesting, &rowsEvaluated, &foundIDs, startTime) + + for k, v := cursor.First(); k != nil; k, v = cursor.Next() { + rowsEvaluated++ + + forbidEarlyExit := hasMoreNesting + idsFoundInRow, earlyExit := is.extractDocIDsFromBitmap(ctx, limit, ids, v, len(foundIDs), forbidEarlyExit) + if earlyExit { + foundIDs = append(foundIDs, idsFoundInRow...) + return foundIDs, nil + } + + if len(idsFoundInRow) > 1 && hasMoreNesting { + var err error + idsFoundInRow, err = is.startNestedSort(ctx, idsFoundInRow, sort[1:], nesting) + if err != nil { + return nil, err + } + } + + foundIDs = append(foundIDs, idsFoundInRow...) + if len(foundIDs) >= limit { + foundIDs = foundIDs[:limit] + break + } + } + + return foundIDs, nil +} + +// sortRoaringSetDESC uses quantile-keys to estimate a window where the +// matching IDs are contained. It will start with the last/highest window as +// the index is in ASC order, but we are interested in DESC values. If it +// cannot find enough IDs, it will move to the previous window. In the worst +// case (perfect negative correlation) this will lead to a full scan of the +// inverted index bucket. +// +// A DESC search can never exit a window early, it always needs to read the +// full window because the best matches will always be at the end of the +// window. The minimum read is always an entire window. However, it uses +// quantile keys to estimate good windows and should not use more than 1 or 2 +// windows in most cases. +// +// If more than one sort clause is provided, it will start a secondary (nested) +// sort for all IDs that share the same value. 
+func (is *invertedSorter) sortRoaringSetDESC( + ctx context.Context, + bucket *lsmkv.Bucket, + limit int, + sort []filters.Sort, + ids helpers.AllowList, + nesting int, +) ([]uint64, error) { + startTime := time.Now() + hasMoreNesting := len(sort) > 1 + qks := is.quantileKeysForDescSort(ctx, limit, ids, bucket, nesting) + + foundIDs := make([]uint64, 0, limit) + seeksRequired := 0 + idCountBeforeCutoff := 0 + rowsEvaluated := 0 + whenComplete := is.annotateDESC(ctx, nesting, len(qks), startTime, &rowsEvaluated, &idCountBeforeCutoff, &seeksRequired) + defer whenComplete() + + for qkIndex := len(qks) - 1; qkIndex >= 0; qkIndex-- { + seeksRequired++ + startKey, endKey := cursorKeysForDESCWindow(qks, qkIndex) + + idsInWindow, rowsInWindow, err := is.processDESCWindow(ctx, bucket, + startKey, endKey, ids, limit, nesting, hasMoreNesting, sort) + if err != nil { + return nil, fmt.Errorf("process descending window: %w", err) + } + + rowsEvaluated += rowsInWindow + + // prepend ids from window, the full list will be reversed at the end + foundIDs = append(idsInWindow, foundIDs...) 
+ if len(foundIDs) >= limit { + // we have enough ids, no need to continue + break + } + } + + // the inverted index is in ASC order meaning our best matches are at the + // very end of the slice, we need to reverse it before applying the cut-off + slices.Reverse(foundIDs) + idCountBeforeCutoff = len(foundIDs) + if len(foundIDs) > limit { + foundIDs = foundIDs[:limit] + } + return foundIDs, nil +} + +func cursorKeysForDESCWindow(qks [][]byte, qkIndex int) ([]byte, []byte) { + startKey := qks[qkIndex] + + var endKey []byte + if qkIndex < len(qks)-1 { + endKey = qks[qkIndex+1] + } + + return startKey, endKey +} + +// within a single window the logic is almost the same as the ASC case, except +// that there can never be an early exit (the best matches are at the end of +// the window) +func (is *invertedSorter) processDESCWindow( + ctx context.Context, + bucket *lsmkv.Bucket, + startKey []byte, + endKey []byte, + ids helpers.AllowList, + limit int, + nesting int, + hasMoreNesting bool, + sort []filters.Sort, +) ([]uint64, int, error) { + rowsEvaluated := 0 + idsFoundInWindow := make([]uint64, 0, limit) + cursor := bucket.CursorRoaringSet() + defer cursor.Close() + + for k, v := cursor.Seek(startKey); k != nil; k, v = cursor.Next() { + if endKey != nil && bytes.Compare(k, endKey) >= 0 { + break + } + + rowsEvaluated++ + + forbidEarlyExit := true // early exit is never possible on DESC order + idsFoundInRow, _ := is.extractDocIDsFromBitmap(ctx, limit, ids, v, 0, forbidEarlyExit) + if len(idsFoundInRow) > 1 && hasMoreNesting { + var err error + idsFoundInRow, err = is.startNestedSort(ctx, idsFoundInRow, sort[1:], nesting) + if err != nil { + return nil, rowsEvaluated, err + } + } + + // we need to reverse the ids found in this chunk, because the inverted + // index is in ASC order, so we will reverse the full list at the end. + // However, for tie-breaker reasons we need to make sure that IDs with + // identical values appear in docID-ASC order. 
If we don't reverse them + // right now, they would end up in DESC order in the final list + slices.Reverse(idsFoundInRow) + idsFoundInWindow = append(idsFoundInWindow, idsFoundInRow...) + } + + return idsFoundInWindow, rowsEvaluated, nil +} + +// extractDocIDsFromBitmap extracts all the docIDs from the current row's +// bitmap that match the specified (pre-filtered) allow-list bitmap. If no +// allow-list is provided, an unfiltered sort is assumed and every bitmap entry +// is considered a match. +func (is *invertedSorter) extractDocIDsFromBitmap( + ctx context.Context, + limit int, + ids helpers.AllowList, + bm *sroar.Bitmap, + totalCountBefore int, + forbidEarlyExit bool, +) (found []uint64, earlyExit bool) { + found = make([]uint64, 0, bm.GetCardinality()) + it := bm.NewIterator() + for i := 0; i < bm.GetCardinality(); i++ { + id := it.Next() + if ids == nil || ids.Contains(id) { + found = append(found, id) + // we can early exit if the search generally permits it (e.g. ASC) where + // the natural order is already doc_id ASC *and* there is no secondary + // sort clause. However, if there is a secondary clause we need to make + // sure we have read the full row. 
The same is true on DESC where we can + // never perform an early exit + // + // If an early exit is allowed the exit condition is exceeding the limit + // through the ids found in this row as well as the starting offset + if !forbidEarlyExit && totalCountBefore+len(found) >= limit { + earlyExit = true + return + } + } + } + + earlyExit = false + return +} + +func (is *invertedSorter) startNestedSort( + ctx context.Context, + ids []uint64, + remainingSort []filters.Sort, + nesting int, +) ([]uint64, error) { + // we have identical ids and we have more than one sort clause, so we + // need to start a sub-query to sort them + helpers.AnnotateSlowQueryLogAppend(ctx, "sort_query_planner", + helpers.SprintfWithNesting(nesting, "start sub-query for %d ids with identical value", len(ids))) + + sortedIDs, err := is.sortDocIDsWithNesting(ctx, len(ids), remainingSort, helpers.NewAllowList(ids...), nesting+1) + if err != nil { + return nil, fmt.Errorf("failed to sort ids with identical value: %w", err) + } + + return sortedIDs, nil +} + +func (is *invertedSorter) quantileKeysForDescSort(ctx context.Context, limit int, + ids helpers.AllowList, invertedBucket *lsmkv.Bucket, nesting int, +) [][]byte { + ob := is.store.Bucket(helpers.ObjectsBucketLSM) + totalCount := ob.CountAsync() + zeroByte := [][]byte{{0x00}} + + if totalCount == 0 { + // no objects, likely no disk segments yet, force a full index scan + return zeroByte + } + var matchRate float64 + if ids == nil { + // no allow-list, so we assume all IDs match + matchRate = 1.0 + } else { + matchRate = float64(ids.Len()) / float64(totalCount) + } + + estimatedRowsHit := max(1, int(float64(limit)/matchRate*2)) // safety factor of 20 + if estimatedRowsHit > totalCount { + helpers.AnnotateSlowQueryLogAppend(ctx, "sort_query_planner", + helpers.SprintfWithNesting(nesting, "estimated rows hit (%d) is greater than total count (%d), "+ + "force a full index scan", estimatedRowsHit, totalCount)) + // full scan, just return zero byte 
(effectively same as cursor.First()) + return zeroByte + } + + neededQuantiles := totalCount / estimatedRowsHit + quantiles := invertedBucket.QuantileKeys(neededQuantiles) + if len(quantiles) == 0 { + // no quantiles found, this can happen if there are no disk segments, but + // there could still be memtables, force a full scan + helpers.AnnotateSlowQueryLogAppend(ctx, "sort_query_planner", + "no quantiles found, force a full index scan") + return zeroByte + } + + return append(zeroByte, quantiles...) +} + +func (is *invertedSorter) annotateASC( + ctx context.Context, + nesting int, + rowsEvaluated *int, + foundIDs *[]uint64, + startTime time.Time, +) { + helpers.AnnotateSlowQueryLogAppend(ctx, "sort_query_planner", + helpers.SprintfWithNesting(nesting, "evaluated %d rows, found %d ids in %s", + *rowsEvaluated, len(*foundIDs), time.Since(startTime))) +} + +func (is *invertedSorter) annotateDESC( + ctx context.Context, + nesting int, + qksLen int, + startTime time.Time, + rowsEvaluated, idCountBeforeCutoff, seeksRequired *int, +) func() { + helpers.AnnotateSlowQueryLogAppend(ctx, "sort_query_planner", + helpers.SprintfWithNesting(nesting, "identified %d quantile keys for descending sort", qksLen)) + + return func() { + helpers.AnnotateSlowQueryLogAppend(ctx, "sort_query_planner", + helpers.SprintfWithNesting(nesting, "evaluated %d rows, found %d ids, "+ + "actual match ratio is %.2f, seeks required: %d (took %s)", + *rowsEvaluated, *idCountBeforeCutoff, float64(*idCountBeforeCutoff)/float64(*rowsEvaluated), + *seeksRequired, time.Since(startTime))) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/sorter/inverted_sorter_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/sorter/inverted_sorter_test.go new file mode 100644 index 0000000000000000000000000000000000000000..545c2642a6cd238b2f41927e7fc8ae2b1d5e5447 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/sorter/inverted_sorter_test.go @@ -0,0 +1,462 
@@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package sorter + +import ( + "context" + "fmt" + "math/rand" + "reflect" + "runtime" + "slices" + "sort" + "strings" + "testing" + "time" + + "github.com/go-openapi/strfmt" + "github.com/google/uuid" + "github.com/sirupsen/logrus" + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv" + "github.com/weaviate/weaviate/entities/cyclemanager" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/inverted" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/storobj" + "github.com/weaviate/weaviate/test/helper" +) + +// this test is invoked through inverted_sorter_race_test.go and +// inverted_sorter_no_race_test.go respectively +func TestInvertedSorter(t *testing.T) { + forceFlush := []bool{false, true} + propNames := []string{"int", "int2", "number", "date"} + limits := []int{1, 2, 5, 10, 100, 373, 500, 1000, 2000} + order := []string{"asc", "desc"} + objectCounts := []int{87, 100, 133, 500, 1000, 10000} + matchers := []func(t *testing.T, count int) helpers.AllowList{ + matchAllBitmap, + matchEveryOtherBitmap, + match10PercentBitmap, + matchRandomBitmap, + matchSingleBitmap, + nilBitmap, + } + + if helper.RaceDetectorEnabled { + t.Log("race detector is on, reduce scope of test to avoid timeouts") + propNames = []string{"int"} + limits = []int{5} + order = []string{"asc", "desc"} + objectCounts = []int{500} + matchers = matchers[:1] + } else { + t.Log("race detector is off, run full 
test suite") + } + + for _, objectCount := range objectCounts { + var ( + dirName = t.TempDir() + logger, _ = test.NewNullLogger() + ctx = context.Background() + ) + + t.Run(fmt.Sprintf("object count %d", objectCount), func(t *testing.T) { + store := createStoreAndInitWithObjects(t, ctx, objectCount, dirName, logger) + defer store.Shutdown(ctx) + + props := generateRandomProps(objectCount) + dummyInvertedIndex(t, ctx, store, props) + + for _, flush := range forceFlush { + t.Run(fmt.Sprintf("force flush %t", flush), func(t *testing.T) { + if flush { + err := store.Bucket(helpers.ObjectsBucketLSM).FlushAndSwitch() + require.Nil(t, err) + + for _, propName := range propNames { + err := store.Bucket(helpers.BucketFromPropNameLSM(propName)).FlushAndSwitch() + require.Nil(t, err) + } + } + + for _, propName := range propNames { + for _, limit := range limits { + for _, ord := range order { + for _, matcher := range matchers { + fullFuncName := runtime.FuncForPC(reflect.ValueOf(matcher).Pointer()).Name() + parts := strings.Split(fullFuncName, ".") + matcherStr := parts[len(parts)-1] + + t.Run(fmt.Sprintf("prop=%s, order=%s limit=%d matcher %s", propName, ord, limit, matcherStr), func(t *testing.T) { + sortParams := []filters.Sort{{Path: []string{propName}, Order: ord}} + assertSorting(t, ctx, store, props, objectCount, limit, sortParams, matcher) + }) + } + } + } + } + }) + } + }) + } +} + +func TestInvertedSorterMultiOrder(t *testing.T) { + sortPlans := [][]filters.Sort{ + {{Path: []string{"int"}, Order: "desc"}, {Path: []string{"number"}, Order: "desc"}}, + {{Path: []string{"int"}, Order: "desc"}, {Path: []string{"number"}, Order: "asc"}}, + {{Path: []string{"int"}, Order: "asc"}, {Path: []string{"number"}, Order: "asc"}}, + {{Path: []string{"int"}, Order: "asc"}, {Path: []string{"number"}, Order: "desc"}}, + {{Path: []string{"int"}, Order: "asc"}, {Path: []string{"int2"}, Order: "desc"}, {Path: []string{"number"}, Order: "desc"}}, + } + + forceFlush := []bool{false, 
true} + limits := []int{1, 2, 5, 10, 100, 500, 1000, 2000} + objectCounts := []int{87, 100, 133, 500, 1000, 2000} + + matchers := []func(t *testing.T, count int) helpers.AllowList{ + matchAllBitmap, + matchEveryOtherBitmap, + match10PercentBitmap, + matchRandomBitmap, + matchSingleBitmap, + nilBitmap, + } + + if helper.RaceDetectorEnabled { + t.Log("race detector is on, reduce scope of test to avoid timeouts") + limits = []int{5} + objectCounts = []int{500} + matchers = matchers[:1] + } else { + t.Log("race detector is off, run full test suite") + } + + for _, objectCount := range objectCounts { + t.Run(fmt.Sprintf("object count %d", objectCount), func(t *testing.T) { + var ( + dirName = t.TempDir() + logger, _ = test.NewNullLogger() + ctx = context.Background() + ) + + store := createStoreAndInitWithObjects(t, ctx, objectCount, dirName, logger) + defer store.Shutdown(ctx) + + props := generateRandomProps(objectCount) + dummyInvertedIndex(t, ctx, store, props) + + for _, flush := range forceFlush { + t.Run(fmt.Sprintf("force flush %t", flush), func(t *testing.T) { + if flush { + err := store.Bucket(helpers.ObjectsBucketLSM).FlushAndSwitch() + require.Nil(t, err) + + for _, propName := range []string{"int", "number", "date"} { + err := store.Bucket(helpers.BucketFromPropNameLSM(propName)).FlushAndSwitch() + require.Nil(t, err) + } + } + + for _, sortParam := range sortPlans { + sortPlanStrings := make([]string, 0, len(sortParam)) + for _, sp := range sortParam { + sortPlanStrings = append(sortPlanStrings, fmt.Sprintf("%s %s", sp.Path[0], sp.Order)) + } + sortPlanString := strings.Join(sortPlanStrings, " -> ") + for _, limit := range limits { + for _, matcher := range matchers { + fullFuncName := runtime.FuncForPC(reflect.ValueOf(matcher).Pointer()).Name() + parts := strings.Split(fullFuncName, ".") + matcherStr := parts[len(parts)-1] + + t.Run(fmt.Sprintf("sort=%s limit=%d matcher=%s", sortPlanString, limit, matcherStr), func(t *testing.T) { + assertSorting(t, ctx, 
store, props, objectCount, limit, sortParam, matcher) + }) + } + } + } + }) + } + }) + } +} + +func createStoreAndInitWithObjects(t *testing.T, ctx context.Context, objectCount int, + dirName string, logger logrus.FieldLogger, +) *lsmkv.Store { + store, err := lsmkv.New(dirName, dirName, logger, nil, + cyclemanager.NewCallbackGroupNoop(), + cyclemanager.NewCallbackGroupNoop(), + cyclemanager.NewCallbackGroupNoop()) + require.Nil(t, err) + + err = store.CreateOrLoadBucket(ctx, helpers.ObjectsBucketLSM) + require.Nil(t, err) + + objectsB := store.Bucket(helpers.ObjectsBucketLSM) + for i := 0; i < objectCount; i++ { + objBytes, docID := createDummyObject(t, i) + objectsB.Put([]byte(fmt.Sprintf("%08d", docID)), objBytes) + require.Nil(t, err) + } + + for _, propName := range []string{"int", "int2", "number", "date"} { + err = store.CreateOrLoadBucket(ctx, helpers.BucketFromPropNameLSM(propName), + lsmkv.WithStrategy(lsmkv.StrategyRoaringSet)) + require.Nil(t, err) + } + + return store +} + +func assertSorting(t *testing.T, ctx context.Context, store *lsmkv.Store, + props []dummyProps, objectCount int, limit int, sortParams []filters.Sort, + matcher func(t *testing.T, count int) helpers.AllowList, +) { + ctx = helpers.InitSlowQueryDetails(ctx) + sorter := NewInvertedSorter(store, newDataTypesHelper(dummyClass())) + bm := matcher(t, objectCount) + + actual, err := sorter.SortDocIDs(ctx, limit, sortParams, bm) + require.Nil(t, err) + + sortedProps := filterAndSortControl(t, props, bm, sortParams) + + expectedLength := min(len(sortedProps), limit) + assert.Len(t, actual, expectedLength) + + for i, docID := range actual { + assert.Equal(t, int(sortedProps[i].docID), int(docID)) + } + + // enable below for debugging + // sl := helpers.ExtractSlowQueryDetails(ctx) + // t.Log(sl) +} + +func createDummyObject(t *testing.T, i int) ([]byte, uint64) { + docID := uint64(i) + // we will never read those objects, so we don't actually have to store any + // props, empty objects are 
fine + obj := storobj.New(docID) + obj.SetID(strfmt.UUID(uuid.New().String())) + objBytes, err := obj.MarshalBinary() + require.Nil(t, err) + + return objBytes, docID +} + +type dummyProps struct { + docID uint64 + int int64 + int2 int64 + number float64 + date time.Time +} + +func generateRandomProps(count int) []dummyProps { + props := make([]dummyProps, count) + for i := 0; i < count; i++ { + props[i] = dummyProps{ + docID: uint64(i), + int: rand.Int63n(10), // few values, many collisions + int2: rand.Int63n(100), // still some collisions, but not as many + number: rand.Float64() * 100, // many values, few collisions + date: time.Now().Add(time.Duration(i) * time.Hour), // guaranteed to be unique + } + } + return props +} + +func dummyInvertedIndex(t *testing.T, ctx context.Context, store *lsmkv.Store, props []dummyProps) { + for _, propName := range []string{"int", "int2", "number", "date"} { + bucket := store.Bucket(helpers.BucketFromPropNameLSM(propName)) + for _, p := range props { + var key []byte + var err error + switch propName { + case "int": + key, err = inverted.LexicographicallySortableInt64(p.int) + require.Nil(t, err) + case "int2": + key, err = inverted.LexicographicallySortableInt64(p.int2) + require.Nil(t, err) + case "number": + key, err = inverted.LexicographicallySortableFloat64(p.number) + require.Nil(t, err) + case "date": + key, err = inverted.LexicographicallySortableInt64(p.date.UnixNano()) + require.Nil(t, err) + } + err = bucket.RoaringSetAddOne(key, p.docID) + require.Nil(t, err) + } + } +} + +func dummyClass() *models.Class { + f := false + t := true + return &models.Class{ + Class: "DummyClass", + Properties: []*models.Property{ + { + Name: "int", + DataType: []string{string(schema.DataTypeInt)}, + IndexInverted: &t, + }, + { + Name: "int2", + DataType: []string{string(schema.DataTypeInt)}, + IndexInverted: &t, + }, + { + Name: "number", + DataType: []string{string(schema.DataTypeNumber)}, + IndexInverted: &t, + }, + { + Name: 
"date", + DataType: []string{string(schema.DataTypeDate)}, + IndexInverted: &t, + }, + { + Name: "int_not_indexed", + DataType: []string{string(schema.DataTypeInt)}, + IndexInverted: &f, + }, + { + Name: "int_corrupt_index", + DataType: []string{string(schema.DataTypeInt)}, + IndexInverted: &t, + }, + { + Name: "text", + DataType: []string{string(schema.DataTypeText)}, + IndexInverted: &t, + }, + }, + } +} + +func filterAndSortControl(t *testing.T, input []dummyProps, bm helpers.AllowList, + sortParams []filters.Sort, +) []dummyProps { + // sort props as control, always first by doc id, then by the prop, + // this way we have a consistent tie breaker + sortedProps := make([]dummyProps, 0, len(input)) + for _, p := range input { + if bm == nil || bm.Contains(p.docID) { + sortedProps = append(sortedProps, p) + } + } + + sort.Slice(sortedProps, func(i, j int) bool { + return sortedProps[i].docID < sortedProps[j].docID + }) + + for sortIndex := len(sortParams) - 1; sortIndex >= 0; sortIndex-- { + var sortFn func(i, j int) bool + sortParam := sortParams[sortIndex] + switch sortParam.Path[0] { + case "int": + sortFn = func(i, j int) bool { + if sortParam.Order == "desc" { + return sortedProps[j].int < sortedProps[i].int + } + return sortedProps[i].int < sortedProps[j].int + } + case "int2": + sortFn = func(i, j int) bool { + if sortParam.Order == "desc" { + return sortedProps[j].int2 < sortedProps[i].int2 + } + return sortedProps[i].int2 < sortedProps[j].int2 + } + case "number": + sortFn = func(i, j int) bool { + if sortParam.Order == "desc" { + return sortedProps[i].number > sortedProps[j].number + } + return sortedProps[i].number < sortedProps[j].number + } + case "date": + sortFn = func(i, j int) bool { + if sortParam.Order == "desc" { + return sortedProps[i].date.After(sortedProps[j].date) + } + return sortedProps[i].date.Before(sortedProps[j].date) + } + } + sort.SliceStable(sortedProps, sortFn) + } + + return sortedProps +} + +func matchAllBitmap(t *testing.T, 
count int) helpers.AllowList { + ids := make([]uint64, count) + for i := 0; i < count; i++ { + ids[i] = uint64(i) + } + return helpers.NewAllowList(ids...) +} + +func matchEveryOtherBitmap(t *testing.T, count int) helpers.AllowList { + ids := make([]uint64, count/2) + for i := 0; i < count; i += 2 { + if i/2 >= len(ids)-1 { + break + } + ids[i/2] = uint64(i) + } + return helpers.NewAllowList(ids...) +} + +func match10PercentBitmap(t *testing.T, count int) helpers.AllowList { + ids := make([]uint64, count/10) + for i := 0; i < count; i += 10 { + if i/10 >= len(ids)-1 { + break + } + + ids[i/10] = uint64(i) + } + return helpers.NewAllowList(ids...) +} + +func matchRandomBitmap(t *testing.T, count int) helpers.AllowList { + ids := make([]uint64, 0, count) + for len(ids) < count/2 { + id := uint64(rand.Intn(count)) + if !slices.Contains(ids, id) { + ids = append(ids, id) + } + } + return helpers.NewAllowList(ids...) +} + +func matchSingleBitmap(t *testing.T, count int) helpers.AllowList { + id := uint64(rand.Intn(count)) + return helpers.NewAllowList(id) +} + +func nilBitmap(t *testing.T, count int) helpers.AllowList { + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/sorter/lsm_sorter.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/sorter/lsm_sorter.go new file mode 100644 index 0000000000000000000000000000000000000000..395f5a7de67852848ee7295bd15de39cb769e10f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/sorter/lsm_sorter.go @@ -0,0 +1,218 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+//
+//  CONTACT: hello@weaviate.io
+//
+
+package sorter
+
+import (
+	"context"
+	"encoding/binary"
+	"fmt"
+	"time"
+
+	"github.com/pkg/errors"
+	"github.com/weaviate/weaviate/adapters/repos/db/helpers"
+	"github.com/weaviate/weaviate/adapters/repos/db/lsmkv"
+	"github.com/weaviate/weaviate/entities/filters"
+	"github.com/weaviate/weaviate/entities/models"
+	"github.com/weaviate/weaviate/entities/schema"
+	"github.com/weaviate/weaviate/entities/storobj"
+	"github.com/weaviate/weaviate/usecases/config/runtime"
+)
+
+// LSMSorter sorts documents stored in an LSM store by one or more sort
+// criteria and returns their doc IDs in sorted order.
+type LSMSorter interface {
+	// Sort sorts all documents of the objects bucket and returns at most
+	// limit doc IDs.
+	Sort(ctx context.Context, limit int, sort []filters.Sort) ([]uint64, error)
+	// SortDocIDs behaves like Sort, but only considers doc IDs contained in
+	// the given allow list.
+	SortDocIDs(ctx context.Context, limit int, sort []filters.Sort, ids helpers.AllowList) ([]uint64, error)
+	// SortDocIDsAndDists sorts the given doc IDs and keeps the dists slice
+	// aligned with the sorted result.
+	SortDocIDsAndDists(ctx context.Context, limit int, sort []filters.Sort,
+		ids []uint64, dists []float32) ([]uint64, []float32, error)
+}
+
+// lsmSorter implements LSMSorter on top of an lsmkv.Store.
+type lsmSorter struct {
+	bucket                 *lsmkv.Bucket
+	dataTypesHelper        *dataTypesHelper
+	valueExtractor         *comparableValueExtractor
+	store                  *lsmkv.Store
+	invertedSorterDisabled *runtime.DynamicValue[bool]
+}
+
+// NewLSMSorter constructs an LSMSorter for the given class. fn resolves the
+// class definition by name. It fails if either the objects bucket or the
+// class cannot be found.
+func NewLSMSorter(store *lsmkv.Store, fn func(string) *models.Class, className schema.ClassName,
+	invertedDisabled *runtime.DynamicValue[bool],
+) (LSMSorter, error) {
+	bucket := store.Bucket(helpers.ObjectsBucketLSM)
+	if bucket == nil {
+		return nil, fmt.Errorf("lsm sorter - bucket %s for class %s not found", helpers.ObjectsBucketLSM, className)
+	}
+
+	class := fn(className.String())
+	if class == nil {
+		return nil, fmt.Errorf("lsm sorter - class %s not found", className)
+	}
+	dataTypesHelper := newDataTypesHelper(class)
+	comparableValuesExtractor := newComparableValueExtractor(dataTypesHelper)
+
+	return &lsmSorter{bucket, dataTypesHelper, comparableValuesExtractor, store, invertedDisabled}, nil
+}
+
+// Sort sorts all documents of the objects bucket. A cost-based query planner
+// decides per call whether to delegate to the inverted-index strategy or to
+// sort in memory over the objects bucket.
+func (s *lsmSorter) Sort(ctx context.Context, limit int, sort []filters.Sort) ([]uint64, error) {
+	queryPlanner := NewQueryPlanner(s.store, s.dataTypesHelper, s.invertedSorterDisabled)
+
+	useInverted, err := queryPlanner.Do(ctx, nil, limit, sort)
+	if err != nil {
+		return nil, fmt.Errorf("plan sort query: %w", err)
+	}
+
+	startTime := time.Now()
+	helpers.AnnotateSlowQueryLogAppend(ctx, "sort_query_planner", "START EXECUTING")
+	defer func() {
+		helpers.AnnotateSlowQueryLogAppend(ctx, "sort_query_planner",
+			fmt.Sprintf("COMPLETED EXECUTING in %s", time.Since(startTime)))
+	}()
+
+	if useInverted {
+		is := NewInvertedSorter(s.store, s.dataTypesHelper)
+		return is.SortDocIDs(ctx, limit, sort, nil)
+	}
+
+	helper, err := s.createHelper(sort, validateLimit(limit, s.bucket.Count()))
+	if err != nil {
+		return nil, err
+	}
+	return helper.getSorted(ctx)
+}
+
+// SortDocIDs behaves like Sort, but restricts the result to doc IDs in ids.
+// The allow list size (ids.Len()) is used to validate the limit.
+func (s *lsmSorter) SortDocIDs(ctx context.Context, limit int, sort []filters.Sort, ids helpers.AllowList) ([]uint64, error) {
+	queryPlanner := NewQueryPlanner(s.store, s.dataTypesHelper, s.invertedSorterDisabled)
+	useInverted, err := queryPlanner.Do(ctx, ids, limit, sort)
+	if err != nil {
+		return nil, fmt.Errorf("plan sort query: %w", err)
+	}
+
+	startTime := time.Now()
+	helpers.AnnotateSlowQueryLogAppend(ctx, "sort_query_planner", "START EXECUTING")
+	defer func() {
+		helpers.AnnotateSlowQueryLogAppend(ctx, "sort_query_planner",
+			fmt.Sprintf("COMPLETED EXECUTING in %s", time.Since(startTime)))
+	}()
+
+	if useInverted {
+		is := NewInvertedSorter(s.store, s.dataTypesHelper)
+		return is.SortDocIDs(ctx, limit, sort, ids)
+	}
+
+	helper, err := s.createHelper(sort, validateLimit(limit, ids.Len()))
+	if err != nil {
+		return nil, err
+	}
+	return helper.getSortedDocIDs(ctx, ids)
+}
+
+// SortDocIDsAndDists sorts the given doc IDs while keeping dists aligned
+// with the sorted result. It always uses the objects-bucket strategy; no
+// query planning is involved.
+func (s *lsmSorter) SortDocIDsAndDists(ctx context.Context, limit int, sort []filters.Sort,
+	ids []uint64, dists []float32,
+) ([]uint64, []float32, error) {
+	helper, err := s.createHelper(sort, validateLimit(limit, len(ids)))
+	if err != nil {
+		return nil, nil, err
+	}
+	return helper.getSortedDocIDsAndDistances(ctx, ids, dists)
+}
+
+// createHelper wires up the comparator and comparable creator for the given
+// sort criteria.
+func (s *lsmSorter) createHelper(sort []filters.Sort, limit int) (*lsmSorterHelper, error) {
+	propNames, orders, err := extractPropNamesAndOrders(sort)
+	if err != nil {
+		return nil, err
+	}
+
+	comparator := newComparator(s.dataTypesHelper, propNames, orders)
+	creator := newComparableCreator(s.valueExtractor, propNames)
+	return newLsmSorterHelper(s.bucket, comparator, creator, limit), nil
+}
+
+// lsmSorterHelper performs the actual objects-bucket based sorting.
+type lsmSorterHelper struct {
+	bucket     *lsmkv.Bucket
+	comparator *comparator
+	creator    *comparableCreator
+	limit      int
+}
+
+func newLsmSorterHelper(bucket *lsmkv.Bucket, comparator *comparator,
+	creator *comparableCreator, limit int,
+) *lsmSorterHelper {
+	return &lsmSorterHelper{bucket, comparator, creator, limit}
+}
+
+// getSorted scans the entire objects bucket with a cursor and keeps the
+// top-limit entries according to the comparator.
+// NOTE(review): ctx is accepted but never checked for cancellation inside
+// the loop, so a long scan cannot be aborted early — confirm intent.
+func (h *lsmSorterHelper) getSorted(ctx context.Context) ([]uint64, error) {
+	cursor := h.bucket.Cursor()
+	defer cursor.Close()
+
+	sorter := newInsertSorter(h.comparator, h.limit)
+
+	for k, objData := cursor.First(); k != nil; k, objData = cursor.Next() {
+		docID, _, err := storobj.DocIDAndTimeFromBinary(objData)
+		if err != nil {
+			return nil, errors.Wrapf(err, "lsm sorter - could not get doc id")
+		}
+		comparable := h.creator.createFromBytes(docID, objData)
+		sorter.addComparable(comparable)
+	}
+
+	return h.creator.extractDocIDs(sorter.getSorted()), nil
+}
+
+// getSortedDocIDs looks up each allowed doc ID via the bucket's secondary
+// index 0 and keeps the top-limit entries. Doc IDs without a stored object
+// are silently skipped (presumably deleted in the meantime — TODO confirm).
+func (h *lsmSorterHelper) getSortedDocIDs(ctx context.Context, docIDs helpers.AllowList) ([]uint64, error) {
+	sorter := newInsertSorter(h.comparator, h.limit)
+	docIDBytes := make([]byte, 8)
+	it := docIDs.Iterator()
+
+	for docID, ok := it.Next(); ok; docID, ok = it.Next() {
+		binary.LittleEndian.PutUint64(docIDBytes, docID)
+		objData, err := h.bucket.GetBySecondary(0, docIDBytes)
+		if err != nil {
+			return nil, errors.Wrapf(err, "lsm sorter - could not get obj by doc id %d", docID)
+		}
+		if objData == nil {
+			continue
+		}
+
+		comparable := h.creator.createFromBytes(docID, objData)
+		sorter.addComparable(comparable)
+	}
+
+	return h.creator.extractDocIDs(sorter.getSorted()), nil
+}
+
+// getSortedDocIDsAndDistances works like getSortedDocIDs, but carries each
+// entry's distance through the sort as a payload so that the returned
+// distances stay aligned with the returned doc IDs.
+func (h *lsmSorterHelper) getSortedDocIDsAndDistances(ctx context.Context, docIDs []uint64,
+	distances []float32,
+) ([]uint64, []float32, error) {
+	sorter := newInsertSorter(h.comparator, h.limit)
+	docIDBytes := make([]byte, 8)
+
+	for i, docID := range docIDs {
+		binary.LittleEndian.PutUint64(docIDBytes, docID)
+		objData, err := h.bucket.GetBySecondary(0, docIDBytes)
+		if err != nil {
+			return nil, nil, errors.Wrapf(err, "lsm sorter - could not get obj by doc id %d", docID)
+		}
+		if objData == nil {
+			continue
+		}
+
+		comparable := h.creator.createFromBytesWithPayload(docID, objData, distances[i])
+		sorter.addComparable(comparable)
+	}
+
+	sorted := sorter.getSorted()
+	sortedDistances := make([]float32, len(sorted))
+	consume := func(i int, _ uint64, payload interface{}) bool {
+		sortedDistances[i] = payload.(float32)
+		return false
+	}
+	h.creator.extractPayloads(sorted, consume)
+
+	return h.creator.extractDocIDs(sorted), sortedDistances, nil
+}
diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/sorter/objects_sorter.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/sorter/objects_sorter.go
new file mode 100644
index 0000000000000000000000000000000000000000..12a8914efa5ad2404a12ced644fcf908ec0f361e
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/sorter/objects_sorter.go
@@ -0,0 +1,109 @@
+//                           _       _
+// __      _____  __ ___   ___  __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+//  \ V  V /  __/ (_| |\ V /| | (_| | ||  __/
+//   \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+//  Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+//  CONTACT: hello@weaviate.io
+//
+
+package sorter
+
+import (
+	"github.com/weaviate/weaviate/entities/filters"
+	"github.com/weaviate/weaviate/entities/models"
+	"github.com/weaviate/weaviate/entities/storobj"
+)
+
+// Sorter sorts already-loaded objects (and their optional distances) in
+// memory according to the given sort criteria.
+type Sorter interface {
+	Sort(objects []*storobj.Object, distances []float32,
+		limit int, sort []filters.Sort) ([]*storobj.Object, []float32, error)
+}
+
+// objectsSorter implements Sorter. The class definition — resolved via
+// readOnlyClass from the first object's class name — supplies the data types
+// of the sorted properties.
+type objectsSorter struct {
+	readOnlyClass func(string) *models.Class
+}
+
+// NewObjectsSorter creates a Sorter that resolves class definitions through
+// the given lookup function.
+func NewObjectsSorter(fn func(string) *models.Class) *objectsSorter {
+	return &objectsSorter{readOnlyClass: fn}
+}
+
+// Sort sorts objects (and, if non-empty, the parallel scores slice) by the
+// given criteria, truncated to limit (0 means no truncation beyond len).
+// Both input slices are reordered in place; the returned slices alias them.
+// An empty input is returned unchanged.
+func (s objectsSorter) Sort(objects []*storobj.Object,
+	scores []float32, limit int, sort []filters.Sort,
+) ([]*storobj.Object, []float32, error) {
+	count := len(objects)
+	if count == 0 {
+		return objects, scores, nil
+	}
+
+	limit = validateLimit(limit, count)
+	propNames, orders, err := extractPropNamesAndOrders(sort)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// all objects are assumed to belong to the same class as the first one
+	class := s.readOnlyClass(objects[0].Class().String())
+	dataTypesHelper := newDataTypesHelper(class)
+	valueExtractor := newComparableValueExtractor(dataTypesHelper)
+	comparator := newComparator(dataTypesHelper, propNames, orders)
+	creator := newComparableCreator(valueExtractor, propNames)
+
+	return newObjectsSorterHelper(comparator, creator, limit).
+		sort(objects, scores)
+}
+
+// objectsSorterHelper holds the pieces needed for one sort run.
+type objectsSorterHelper struct {
+	comparator *comparator
+	creator    *comparableCreator
+	limit      int
+}
+
+func newObjectsSorterHelper(comparator *comparator, creator *comparableCreator, limit int) *objectsSorterHelper {
+	return &objectsSorterHelper{comparator, creator, limit}
+}
+
+// sort performs the actual in-memory sort. Each object (and its distance,
+// when distances are present) travels through the sorter as a payload; the
+// consume callback then writes the sorted order back into the INPUT slices
+// in place, stopping after the first `slice` elements.
+func (h *objectsSorterHelper) sort(objects []*storobj.Object, distances []float32) ([]*storobj.Object, []float32, error) {
+	withDistances := len(distances) > 0
+	count := len(objects)
+	sorter := newDefaultSorter(h.comparator, count)
+
+	for i := range objects {
+		payload := objectDistancePayload{o: objects[i]}
+		if withDistances {
+			payload.d = distances[i]
+		}
+		comparable := h.creator.createFromObjectWithPayload(objects[i], payload)
+		sorter.addComparable(comparable)
+	}
+
+	// slice is the number of leading elements to overwrite and return;
+	// a limit of 0 means "return everything"
+	slice := h.limit
+	if slice == 0 {
+		slice = count
+	}
+
+	sorted := sorter.getSorted()
+	consume := func(i int, _ uint64, payload interface{}) bool {
+		if i >= slice {
+			// returning true stops extraction once the limit is reached
+			return true
+		}
+		p := payload.(objectDistancePayload)
+		objects[i] = p.o
+		if withDistances {
+			distances[i] = p.d
+		}
+		return false
+	}
+	h.creator.extractPayloads(sorted, consume)
+
+	if withDistances {
+		return objects[:slice], distances[:slice], nil
+	}
+	return objects[:slice], distances, nil
+}
+
+// objectDistancePayload pairs an object with its (optional) distance so both
+// survive the sort together.
+type objectDistancePayload struct {
+	o *storobj.Object
+	d float32
+}
diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/sorter/objects_sorter_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/sorter/objects_sorter_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..44ecde21a03c5fe6560bcadca4328d04e3bba8b9
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/sorter/objects_sorter_test.go
@@ -0,0 +1,416 @@
+//                           _       _
+// __      _____  __ ___   ___  __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+//  \ V  V /  __/ (_| |\ V /| | (_| | ||  __/
+//   \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+//  Copyright © 2016 - 2025 Weaviate B.V.
All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package sorter + +import ( + "reflect" + "testing" + + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/storobj" +) + +func TestObjectsSorter(t *testing.T) { + tests := []struct { + name string + sort []filters.Sort + limit int + wantObjs []*storobj.Object + wantDists []float32 + }{ + { + name: "sort by string asc", + sort: sort1("name", "asc"), + limit: 3, + wantObjs: []*storobj.Object{cityAmsterdam, cityBerlin, cityNewYork, cityNil, cityNil2, cityWroclaw}, + wantDists: []float32{0.4, 0.2, 0.3, 0.0, 0.0, 0.1}, + }, + { + name: "sort by string desc", + sort: sort1("name", "desc"), + limit: 4, + wantObjs: []*storobj.Object{cityWroclaw, cityNil2, cityNil, cityNewYork, cityBerlin, cityAmsterdam}, + wantDists: []float32{0.1, 0.0, 0.0, 0.3, 0.2, 0.4}, + }, + { + name: "sort by text asc", + sort: sort1("country", "asc"), + limit: 5, + wantObjs: []*storobj.Object{cityNil2, cityNil, cityBerlin, cityWroclaw, cityAmsterdam, cityNewYork}, + wantDists: []float32{0.0, 0.0, 0.2, 0.1, 0.4, 0.3}, + }, + { + name: "sort by text desc", + sort: sort1("country", "desc"), + limit: 3, + wantObjs: []*storobj.Object{cityNewYork, cityAmsterdam, cityWroclaw, cityBerlin, cityNil2, cityNil}, + wantDists: []float32{0.3, 0.4, 0.1, 0.2, 0.0, 0.0}, + }, + { + name: "sort by int asc", + sort: sort1("population", "asc"), + limit: 4, + wantObjs: []*storobj.Object{cityNil2, cityNil, cityWroclaw, cityAmsterdam, cityBerlin, cityNewYork}, + wantDists: []float32{0.0, 0.0, 0.1, 0.4, 0.2, 0.3}, + }, + { + name: "sort by int desc", + sort: sort1("population", "desc"), + limit: 5, + wantObjs: []*storobj.Object{cityNewYork, cityBerlin, cityAmsterdam, cityWroclaw, cityNil2, cityNil}, + wantDists: []float32{0.3, 0.2, 0.4, 0.1, 0.0, 0.0}, + }, + { + name: "sort by number asc", + sort: sort1("cityArea", "asc"), + limit: 3, + wantObjs: []*storobj.Object{cityNil2, cityNil, 
cityAmsterdam, cityWroclaw, cityBerlin, cityNewYork}, + wantDists: []float32{0.0, 0.0, 0.4, 0.1, 0.2, 0.3}, + }, + { + name: "sort by number desc", + sort: sort1("cityArea", "desc"), + limit: 4, + wantObjs: []*storobj.Object{cityNewYork, cityBerlin, cityWroclaw, cityAmsterdam, cityNil2, cityNil}, + wantDists: []float32{0.3, 0.2, 0.1, 0.4, 0.0, 0.0}, + }, + { + name: "sort by date asc", + sort: sort1("cityRights", "asc"), + limit: 5, + wantObjs: []*storobj.Object{cityNil2, cityNil, cityAmsterdam, cityWroclaw, cityBerlin, cityNewYork}, + wantDists: []float32{0.0, 0.0, 0.4, 0.1, 0.2, 0.3}, + }, + { + name: "sort by date desc", + sort: sort1("cityRights", "desc"), + limit: 3, + wantObjs: []*storobj.Object{cityNewYork, cityBerlin, cityWroclaw, cityAmsterdam, cityNil2, cityNil}, + wantDists: []float32{0.3, 0.2, 0.1, 0.4, 0.0, 0.0}, + }, + { + name: "sort by string array asc", + sort: sort1("timezones", "asc"), + limit: 4, + wantObjs: []*storobj.Object{cityNil2, cityNil, cityWroclaw, cityBerlin, cityAmsterdam, cityNewYork}, + wantDists: []float32{0.0, 0.0, 0.1, 0.2, 0.4, 0.3}, + }, + { + name: "sort by string array desc", + sort: sort1("timezones", "desc"), + limit: 5, + wantObjs: []*storobj.Object{cityNewYork, cityWroclaw, cityBerlin, cityAmsterdam, cityNil2, cityNil}, + wantDists: []float32{0.3, 0.1, 0.2, 0.4, 0.0, 0.0}, + }, + { + name: "sort by text array asc", + sort: sort1("timezonesUTC", "asc"), + limit: 3, + wantObjs: []*storobj.Object{cityNil2, cityNil, cityWroclaw, cityBerlin, cityAmsterdam, cityNewYork}, + wantDists: []float32{0.0, 0.0, 0.1, 0.2, 0.4, 0.3}, + }, + { + name: "sort by text array desc", + sort: sort1("timezonesUTC", "desc"), + limit: 4, + wantObjs: []*storobj.Object{cityNewYork, cityWroclaw, cityBerlin, cityAmsterdam, cityNil2, cityNil}, + wantDists: []float32{0.3, 0.1, 0.2, 0.4, 0.0, 0.0}, + }, + { + name: "sort by bool asc", + sort: sort1("isCapital", "asc"), + limit: 5, + wantObjs: []*storobj.Object{cityNil2, cityNil, cityWroclaw, cityNewYork, 
cityBerlin, cityAmsterdam}, + wantDists: []float32{0.0, 0.0, 0.1, 0.3, 0.2, 0.4}, + }, + { + name: "sort by bool desc", + sort: sort1("isCapital", "desc"), + limit: 3, + wantObjs: []*storobj.Object{cityBerlin, cityAmsterdam, cityWroclaw, cityNewYork, cityNil2, cityNil}, + wantDists: []float32{0.2, 0.4, 0.1, 0.3, 0.0, 0.0}, + }, + { + name: "sort by bool array asc", + sort: sort1("isCapitalArray", "asc"), + limit: 4, + wantObjs: []*storobj.Object{cityNil2, cityNil, cityWroclaw, cityBerlin, cityAmsterdam, cityNewYork}, + wantDists: []float32{0.0, 0.0, 0.1, 0.2, 0.4, 0.3}, + }, + { + name: "sort by bool array desc", + sort: sort1("isCapitalArray", "desc"), + limit: 5, + wantObjs: []*storobj.Object{cityNewYork, cityAmsterdam, cityBerlin, cityWroclaw, cityNil2, cityNil}, + wantDists: []float32{0.3, 0.4, 0.2, 0.1, 0.0, 0.0}, + }, + { + name: "sort by number array asc", + sort: sort1("favoriteNumbers", "asc"), + limit: 3, + wantObjs: []*storobj.Object{cityNil2, cityNil, cityNewYork, cityWroclaw, cityBerlin, cityAmsterdam}, + wantDists: []float32{0.0, 0.0, 0.3, 0.1, 0.2, 0.4}, + }, + { + name: "sort by number array desc", + sort: sort1("favoriteNumbers", "desc"), + limit: 4, + wantObjs: []*storobj.Object{cityAmsterdam, cityBerlin, cityWroclaw, cityNewYork, cityNil2, cityNil}, + wantDists: []float32{0.4, 0.2, 0.1, 0.3, 0.0, 0.0}, + }, + { + name: "sort by int array asc", + sort: sort1("favoriteInts", "asc"), + limit: 5, + wantObjs: []*storobj.Object{cityNil2, cityNil, cityNewYork, cityWroclaw, cityBerlin, cityAmsterdam}, + wantDists: []float32{0.0, 0.0, 0.3, 0.1, 0.2, 0.4}, + }, + { + name: "sort by int array desc", + sort: sort1("favoriteInts", "desc"), + limit: 3, + wantObjs: []*storobj.Object{cityAmsterdam, cityBerlin, cityWroclaw, cityNewYork, cityNil2, cityNil}, + wantDists: []float32{0.4, 0.2, 0.1, 0.3, 0.0, 0.0}, + }, + { + name: "sort by date array asc", + sort: sort1("favoriteDates", "asc"), + limit: 4, + wantObjs: []*storobj.Object{cityNil2, cityNil, 
cityAmsterdam, cityWroclaw, cityBerlin, cityNewYork}, + wantDists: []float32{0.0, 0.0, 0.4, 0.1, 0.2, 0.3}, + }, + { + name: "sort by date array desc", + sort: sort1("favoriteDates", "desc"), + limit: 5, + wantObjs: []*storobj.Object{cityNewYork, cityBerlin, cityWroclaw, cityAmsterdam, cityNil2, cityNil}, + wantDists: []float32{0.3, 0.2, 0.1, 0.4, 0.0, 0.0}, + }, + { + name: "sort by phoneNumber asc", + sort: sort1("phoneNumber", "asc"), + limit: 3, + wantObjs: []*storobj.Object{cityNil2, cityNil, cityWroclaw, cityAmsterdam, cityNewYork, cityBerlin}, + wantDists: []float32{0.0, 0.0, 0.1, 0.4, 0.3, 0.2}, + }, + { + name: "sort by phoneNumber desc", + sort: sort1("phoneNumber", "desc"), + limit: 4, + wantObjs: []*storobj.Object{cityBerlin, cityNewYork, cityAmsterdam, cityWroclaw, cityNil2, cityNil}, + wantDists: []float32{0.2, 0.3, 0.4, 0.1, 0.0, 0.0}, + }, + { + name: "sort by location asc", + sort: sort1("location", "asc"), + limit: 5, + wantObjs: []*storobj.Object{cityNil2, cityNil, cityNewYork, cityAmsterdam, cityBerlin, cityWroclaw}, + wantDists: []float32{0.0, 0.0, 0.3, 0.4, 0.2, 0.1}, + }, + { + name: "sort by location desc", + sort: sort1("location", "desc"), + limit: 3, + wantObjs: []*storobj.Object{cityWroclaw, cityBerlin, cityAmsterdam, cityNewYork, cityNil2, cityNil}, + wantDists: []float32{0.1, 0.2, 0.4, 0.3, 0.0, 0.0}, + }, + { + name: "sort by special id property asc", + sort: sort1("id", "asc"), + limit: 4, + wantObjs: []*storobj.Object{cityAmsterdam, cityBerlin, cityNewYork, cityNil, cityNil2, cityWroclaw}, + wantDists: []float32{0.4, 0.2, 0.3, 0.0, 0.0, 0.1}, + }, + { + name: "sort by special id property desc", + sort: sort1("id", "desc"), + limit: 5, + wantObjs: []*storobj.Object{cityWroclaw, cityNil2, cityNil, cityNewYork, cityBerlin, cityAmsterdam}, + wantDists: []float32{0.1, 0.0, 0.0, 0.3, 0.2, 0.4}, + }, + { + name: "sort by special _id property asc", + sort: sort1("_id", "asc"), + limit: 3, + wantObjs: []*storobj.Object{cityAmsterdam, 
cityBerlin, cityNewYork, cityNil, cityNil2, cityWroclaw}, + wantDists: []float32{0.4, 0.2, 0.3, 0.0, 0.0, 0.1}, + }, + { + name: "sort by special _id property desc", + sort: sort1("_id", "desc"), + limit: 4, + wantObjs: []*storobj.Object{cityWroclaw, cityNil2, cityNil, cityNewYork, cityBerlin, cityAmsterdam}, + wantDists: []float32{0.1, 0.0, 0.0, 0.3, 0.2, 0.4}, + }, + { + name: "sort by special _creationTimeUnix property asc", + sort: sort1("_creationTimeUnix", "asc"), + limit: 5, + wantObjs: []*storobj.Object{cityAmsterdam, cityBerlin, cityNewYork, cityNil, cityNil2, cityWroclaw}, + wantDists: []float32{0.4, 0.2, 0.3, 0.0, 0.0, 0.1}, + }, + { + name: "sort by special _creationTimeUnix property desc", + sort: sort1("_creationTimeUnix", "desc"), + limit: 3, + wantObjs: []*storobj.Object{cityWroclaw, cityNil2, cityNil, cityNewYork, cityBerlin, cityAmsterdam}, + wantDists: []float32{0.1, 0.0, 0.0, 0.3, 0.2, 0.4}, + }, + { + name: "sort by special _lastUpdateTimeUnix property asc", + sort: sort1("_lastUpdateTimeUnix", "asc"), + limit: 4, + wantObjs: []*storobj.Object{cityAmsterdam, cityBerlin, cityNewYork, cityNil, cityNil2, cityWroclaw}, + wantDists: []float32{0.4, 0.2, 0.3, 0.0, 0.0, 0.1}, + }, + { + name: "sort by special _lastUpdateTimeUnix property desc", + sort: sort1("_lastUpdateTimeUnix", "desc"), + limit: 5, + wantObjs: []*storobj.Object{cityWroclaw, cityNil2, cityNil, cityNewYork, cityBerlin, cityAmsterdam}, + wantDists: []float32{0.1, 0.0, 0.0, 0.3, 0.2, 0.4}, + }, + { + name: "sort by isCapital asc & name asc", + sort: sort2("isCapital", "asc", "name", "asc"), + limit: 3, + wantObjs: []*storobj.Object{cityNil, cityNil2, cityNewYork, cityWroclaw, cityAmsterdam, cityBerlin}, + wantDists: []float32{0.0, 0.0, 0.3, 0.1, 0.4, 0.2}, + }, + { + name: "sort by isCapital desc & name asc", + sort: sort2("isCapital", "desc", "name", "asc"), + limit: 4, + wantObjs: []*storobj.Object{cityAmsterdam, cityBerlin, cityNewYork, cityWroclaw, cityNil, cityNil2}, + wantDists: 
[]float32{0.4, 0.2, 0.3, 0.1, 0.0, 0.0}, + }, + { + name: "sort by timezones desc & name desc", + sort: sort2("timezones", "desc", "name", "desc"), + limit: 5, + wantObjs: []*storobj.Object{cityNewYork, cityWroclaw, cityBerlin, cityAmsterdam, cityNil2, cityNil}, + wantDists: []float32{0.3, 0.1, 0.2, 0.4, 0.0, 0.0}, + }, + { + name: "sort by timezones desc & name asc", + sort: sort2("timezones", "desc", "name", "asc"), + limit: 3, + wantObjs: []*storobj.Object{cityNewYork, cityAmsterdam, cityBerlin, cityWroclaw, cityNil, cityNil2}, + wantDists: []float32{0.3, 0.4, 0.2, 0.1, 0.0, 0.0}, + }, + { + name: "sort by timezonesUTC asc & timezones desc & isCapital asc & population asc", + sort: sort4("timezonesUTC", "asc", "timezones", "desc", "isCapital", "asc", "population", "asc"), + limit: 4, + wantObjs: []*storobj.Object{cityNil2, cityNil, cityWroclaw, cityAmsterdam, cityBerlin, cityNewYork}, + wantDists: []float32{0.0, 0.0, 0.1, 0.4, 0.2, 0.3}, + }, + { + name: "sort by timezonesUTC asc & timezones desc & isCapital desc & population asc", + sort: sort4("timezonesUTC", "asc", "timezones", "desc", "isCapital", "desc", "population", "asc"), + limit: 5, + wantObjs: []*storobj.Object{cityNil2, cityNil, cityAmsterdam, cityBerlin, cityWroclaw, cityNewYork}, + wantDists: []float32{0.0, 0.0, 0.4, 0.2, 0.1, 0.3}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Run("with distance", func(t *testing.T) { + sorter := NewObjectsSorter(sorterCitySchema().GetClass) + gotObjs, gotDists, err := sorter.Sort(sorterCitySchemaObjects(), sorterCitySchemaDistances(), 0, tt.sort) + + require.Nil(t, err) + + if !reflect.DeepEqual(gotObjs, tt.wantObjs) { + t.Fatalf("objects got = %v, want %v", + extractCityNames(gotObjs), extractCityNames(tt.wantObjs)) + } + if !reflect.DeepEqual(gotDists, tt.wantDists) { + t.Fatalf("distances got = %v, want %v", + gotDists, tt.wantDists) + } + }) + + t.Run("without distance", func(t *testing.T) { + sorter := 
NewObjectsSorter(sorterCitySchema().GetClass) + gotObjs, gotDists, err := sorter.Sort(sorterCitySchemaObjects(), nil, 0, tt.sort) + + require.Nil(t, err) + + if !reflect.DeepEqual(gotObjs, tt.wantObjs) { + t.Fatalf("objects got = %v, want %v", + extractCityNames(gotObjs), extractCityNames(tt.wantObjs)) + } + if gotDists != nil { + t.Fatalf("distances got = %v, want nil", + gotDists) + } + }) + + t.Run("with limit", func(t *testing.T) { + sorter := NewObjectsSorter(sorterCitySchema().GetClass) + gotObjs, gotDists, err := sorter.Sort(sorterCitySchemaObjects(), sorterCitySchemaDistances(), tt.limit, tt.sort) + + require.Nil(t, err) + + if !reflect.DeepEqual(gotObjs, tt.wantObjs[:tt.limit]) { + t.Fatalf("objects got = %v, want %v", + extractCityNames(gotObjs), extractCityNames(tt.wantObjs)) + } + if !reflect.DeepEqual(gotDists, tt.wantDists[:tt.limit]) { + t.Fatalf("distances got = %v, want %v", + gotDists, tt.wantDists) + } + }) + }) + } +} + +func createSort(property, order string) filters.Sort { + return filters.Sort{Path: []string{property}, Order: order} +} + +func sort1(property, order string) []filters.Sort { + return []filters.Sort{createSort(property, order)} +} + +func sort2(property1, order1, property2, order2 string) []filters.Sort { + return []filters.Sort{ + createSort(property1, order1), + createSort(property2, order2), + } +} + +func sort4(property1, order1, property2, order2, property3, order3, property4, order4 string) []filters.Sort { + return []filters.Sort{ + createSort(property1, order1), + createSort(property2, order2), + createSort(property3, order3), + createSort(property4, order4), + } +} + +func extractCityNames(in []*storobj.Object) []string { + out := make([]string, len(in)) + for i := range in { + if asMap, ok := in[i].Properties().(map[string]interface{}); ok { + for k, v := range asMap { + if k == "name" { + out[i] = v.(string) + } + } + } + } + return out +} diff --git 
a/platform/dbops/binaries/weaviate-src/adapters/repos/db/sorter/query_planner.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/sorter/query_planner.go
new file mode 100644
index 0000000000000000000000000000000000000000..0b1e234b49b347b7919155abe5a12d6d44d3a42d
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/sorter/query_planner.go
@@ -0,0 +1,254 @@
+//                           _       _
+// __      _____  __ ___   ___  __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+//  \ V  V /  __/ (_| |\ V /| | (_| | ||  __/
+//   \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+//  Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+//  CONTACT: hello@weaviate.io
+//
+
+package sorter
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/weaviate/weaviate/adapters/repos/db/helpers"
+	"github.com/weaviate/weaviate/adapters/repos/db/lsmkv"
+	"github.com/weaviate/weaviate/entities/filters"
+	"github.com/weaviate/weaviate/entities/schema"
+	"github.com/weaviate/weaviate/usecases/config/runtime"
+)
+
+const (
+	// FixedCostIndexSeek models a segment-index lookup.
+	// Will involve some random disk access
+	FixedCostIndexSeek = 200
+
+	// FixedCostRowRead models sequentially reading a single small row.
+	FixedCostRowRead = 100
+
+	// FixedCostJSONUnmarshal models extracting a prop value from a stored
+	// object via JSON unmarshalling.
+	FixedCostJSONUnmarshal = 500
+
+	// FixedCostObjectsBucketRow is roughly made up of the cost of identifying
+	// the correct key in the segment index (which is multiple IOPS), then
+	// reading the whole row, which contains the full object (including the
+	// vector). We are applying a 10x read penalty compared to inverted index
+	// rows which tend to be fairly tiny. Last but not least, we need to do some
+	// JSON marshalling to extract the desired prop value
+	FixedCostObjectsBucketRow = FixedCostIndexSeek + 10*FixedCostRowRead + FixedCostJSONUnmarshal
+
+	// FixedCostInvertedBucketRow is the cost of reading the inverted index,
+	// since it is already sorted in ascending order, we can skip the index and
+	// start a cursor which has no cost other than reading the payload in each
+	// iteration.
+	FixedCostInvertedBucketRow = FixedCostRowRead
+)
+
+// queryPlanner decides per query which sort strategy to use.
+type queryPlanner struct {
+	store            *lsmkv.Store
+	dataTypesHelper  *dataTypesHelper
+	invertedDisabled *runtime.DynamicValue[bool]
+}
+
+// NewQueryPlanner wires a lightweight cost-based planner around an lsmkv.Store.
+// The planner’s only job is to decide—per user query—whether it is cheaper to
+// satisfy an ORDER-BY clause by
+//
+//  1. streaming rows directly from the objects bucket and sorting in memory, or
+//  2. exploiting the fact that an inverted-index bucket is already ordered
+//     lexicographically on the property being sorted.
+//
+// Both the store and the DataTypesHelper are passed by reference and may be
+// shared across many concurrent planners; the function does not assume
+// ownership or perform any locking.
+func NewQueryPlanner(store *lsmkv.Store, dataTypesHelper *dataTypesHelper,
+	invertedDisabled *runtime.DynamicValue[bool],
+) *queryPlanner {
+	return &queryPlanner{
+		store:            store,
+		dataTypesHelper:  dataTypesHelper,
+		invertedDisabled: invertedDisabled,
+	}
+}
+
+// EstimateCosts returns two independent cost estimates—(objectsCost,
+// invertedCost)—for producing the first *limit* rows of a sorted result:
+//
+//   - objectsCost  – walk the objects bucket, deserialize the full object,
+//     extract the sort value, keep the top-N in memory.
+//
+//   - invertedCost – open a cursor on the inverted-index bucket that is
+//     already sorted by the required key and stream until N
+//     matches have been found (with extra work if DESC order
+//     forces a reverse scan).
+//
+// Costs are *dimensionless units* calibrated to roughly match the relative
+// latency of random I/O, sequential I/O and JSON unmarshalling on a modern
+// NVMe or similar system. The estimate deliberately:
+//
+//   - Discounts the first *limit* object fetches because the query will need
+//     those pages anyway, hence they are almost always in cache.
+//
+//   - Inflates random seeks (≃ FixedCostIndexSeek) so that HDD-backed
+//     deployments or other IOPS-constrained systems (e.g. EBS volumes)
+//     still pick the right plan.
+//
+//   - Ignores filter/sort correlation: the result is slightly pessimistic when
+//     the predicate is negatively correlated with the sort key, optimistic when
+//     positively correlated. That simplification keeps cost evaluation O(1) and
+//     good-enough in practice.
+//
+// The method appends a human-readable trace of its arithmetic to the slow-query
+// log embedded in *ctx* so that operators can review the decision later.
+func (s *queryPlanner) EstimateCosts(ctx context.Context, ids helpers.AllowList, limit int,
+	sort []filters.Sort,
+) (float64, float64) {
+	totalObjects := s.store.Bucket(helpers.ObjectsBucketLSM).CountAsync()
+	var matches int
+	if ids == nil {
+		matches = totalObjects
+	} else {
+		matches = ids.Len()
+	}
+	var filterMatchRatio float64
+	if totalObjects == 0 {
+		// this can happen when there are no disk segments yet, just assume a 100% match ratio
+		filterMatchRatio = 1.0
+	} else {
+		filterMatchRatio = float64(matches) / float64(totalObjects)
+	}
+
+	helpers.AnnotateSlowQueryLogAppend(ctx, "sort_query_planner",
+		fmt.Sprintf("matches=%d, limit=%d, filterMatchRatio=%.2f",
			matches, limit, filterMatchRatio))
+
+	// The rationale for the discount is that we will eventually read n=limit
+	// objects from the object store anyway, so pages are extremely likely to be
+	// cached. Therefore we can discount the disk interaction substantially.
+	//
+	// This effectively means for queries where matches<=limit, the objects
+	// bucket strategy is a lot more attractive
+	costObjectsDiscounted := float64(min(limit, matches) * (0.3*(FixedCostRowRead+FixedCostIndexSeek) + FixedCostJSONUnmarshal))
+	costObjectsBucketUndiscounted := float64(FixedCostObjectsBucketRow * max(matches-limit, 0))
+	costObjectsBucket := costObjectsDiscounted + costObjectsBucketUndiscounted
+	helpers.AnnotateSlowQueryLogAppend(ctx, "sort_query_planner",
+		fmt.Sprintf("estimated costs for objects bucket strategy: %.2f (discounted: %.2f, undiscounted: %.2f)",
			costObjectsBucket, costObjectsDiscounted, costObjectsBucketUndiscounted))
+
+	costInvertedBucket := float64(FixedCostInvertedBucketRow*limit) / filterMatchRatio
+
+	if sort[0].Order == "desc" {
+		// If the sort order is descending, we need to guess an entrypoint and then
+		// read from there. This requires a safety margin and a seek
+		//
+		// Note: We are not estimating filter correlation at this point, so in the
+		// worst case (perfect negative correlation) the cost could be much higher
+		initial := costInvertedBucket
+		costInvertedBucket *= 2
+		costInvertedBucket += FixedCostIndexSeek
+		helpers.AnnotateSlowQueryLogAppend(ctx, "sort_query_planner",
+			fmt.Sprintf("sort order is 'desc', inverted bucket cost adjusted from %.2f to %.2f",
				initial, costInvertedBucket))
+	}
+
+	helpers.AnnotateSlowQueryLogAppend(ctx, "sort_query_planner",
+		fmt.Sprintf("estimated costs for inverted bucket strategy: %.2f", costInvertedBucket))
+	return costObjectsBucket, costInvertedBucket
+}
+
+// Do is the public entry point that turns a query description into a binary
+// plan choice. It returns *useInverted == true* when **all** of the following
+// hold:
+//
+//   - The inverted-index strategy is cheaper according to EstimateCosts.
+//
+//   - The key’s logical type (date, int, number) preserves the same ordering
+//     at the byte level that the inverted index uses.
+//
+//   - The LSM bucket for that property is present; if not, the method
+//     opportunistically reminds the caller that marking the field as
+//     filterable would unlock the faster plan.
+//
+// When any pre-condition fails, Do silently falls back to the objects-bucket
+// scan and records the reason in the slow-query trace.
+func (s *queryPlanner) Do(ctx context.Context, ids helpers.AllowList, limit int, sort []filters.Sort) (useInverted bool, err error) {
+	startTime := time.Now()
+	helpers.AnnotateSlowQueryLogAppend(ctx, "sort_query_planner", "START PLANNING")
+	defer func() {
+		helpers.AnnotateSlowQueryLogAppend(ctx, "sort_query_planner",
+			fmt.Sprintf("COMPLETED PLANNING in %s", time.Since(startTime)))
+	}()
+
+	costObjectsBucket, costInvertedBucket := s.EstimateCosts(ctx, ids, limit, sort)
+
+	useInverted = false
+	if costInvertedBucket > costObjectsBucket {
+		// inverted strategy is more expensive, no need to plan further, just use
+		// objects strategy
+		return
+	}
+
+	if len(sort) > 1 {
+		helpers.AnnotateSlowQueryLogAppend(ctx, "sort_query_planner",
+			fmt.Sprintf("inverted strategy has lower cost, but query has multiple (%d) "+
				"sort criteria which is currently not supported, "+
				"fallback to objects bucket strategy", len(sort)))
+		return
+	}
+
+	propNames, _, err := extractPropNamesAndOrders(sort)
+	if err != nil {
+		return
+	}
+
+	dt := s.dataTypesHelper.getType(propNames[0])
+	switch dt {
+	case schema.DataTypeDate, schema.DataTypeInt, schema.DataTypeNumber:
+		// supported data type
+	default:
+		helpers.AnnotateSlowQueryLogAppend(ctx, "sort_query_planner",
+			fmt.Sprintf("inverted strategy has lower cost, but data type '%s' of property '%s' "+
				"is currently not supported, falling back to objects bucket strategy", dt, propNames[0]))
+		return
+	}
+
+	// both costs are already float64, no conversion needed; guard against a
+	// zero denominator (possible when limit == 0), which would otherwise
+	// propagate +Inf into the log messages below
+	costSavings := 0.0
+	if costInvertedBucket > 0 {
+		costSavings = costObjectsBucket / costInvertedBucket
+	}
+
+	if s.store.Bucket(helpers.BucketFromPropNameLSM(propNames[0])) == nil {
+		// no bucket found could mean that property is not indexed or some
+		// unexpected error
+		if !s.dataTypesHelper.hasFilterableIndex(propNames[0]) {
+			helpers.AnnotateSlowQueryLogAppend(ctx, "sort_query_planner",
+				fmt.Sprintf("property '%s' is not indexed (filterable), the query planner "+
					"predicts an estimated cost savings of %.2fx, consider indexing this property, "+
					"falling back to objects bucket strategy", propNames[0], costSavings))
+			return
+		}
+
+		// the prop is indexed, but no bucket found, this is unexpected, but we can
+		// still fall back to the slower strategy
+		helpers.AnnotateSlowQueryLogAppend(ctx, "sort_query_planner",
+			fmt.Sprintf("unexpected: property '%s' is indexed (filterable), but no bucket found, "+
				"falling back to objects bucket strategy", propNames[0]))
+		return
+	}
+
+	if s.invertedDisabled.Get() {
+		helpers.AnnotateSlowQueryLogAppend(ctx, "sort_query_planner",
+			fmt.Sprintf("the query planner predicts an estimated cost savings of %.2fx, "+
				"however the inverted sorter is globally disabled using a feature flag",
				costSavings))
+		return
+	}
+
+	helpers.AnnotateSlowQueryLogAppend(ctx, "sort_query_planner",
+		fmt.Sprintf("predicted cost savings of %.2fx for inverted sorter on property '%s'",
			costSavings, propNames[0]))
+	useInverted = true
+	return
+}
diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/sorter/query_planner_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/sorter/query_planner_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..b599baf0e8835f103cc0a5b45e309284d90e40ab
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/sorter/query_planner_test.go
@@ -0,0 +1,236 @@
+//                           _       _
+// __      _____  __ ___   ___  __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+//  \ V  V /  __/ (_| |\ V /| | (_| | ||  __/
+//   \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+//  Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+// +// CONTACT: hello@weaviate.io +// + +package sorter + +import ( + "context" + "fmt" + "testing" + + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv" + "github.com/weaviate/weaviate/entities/cyclemanager" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/usecases/config/runtime" +) + +func TestQueryPlanner(t *testing.T) { + type testCase struct { + name string + objectCount int + matchCount int + nilBitmap bool + limit int + sort []filters.Sort + shouldChooseInverted bool + disabled bool + } + + testCases := []testCase{ + { + name: "fewer matches than limit", + objectCount: 1000, + matchCount: 50, + limit: 100, + sort: []filters.Sort{ + { + Path: []string{"int"}, + Order: "asc", + }, + }, + // with fewer than limit matches, we need to read every object from the + // object store anyway. 
cheaper to sort using the object store + shouldChooseInverted: false, + }, + { + name: "high match ratio", + objectCount: 1000, + matchCount: 800, + limit: 100, + sort: []filters.Sort{ + { + Path: []string{"int"}, + Order: "asc", + }, + }, + shouldChooseInverted: true, + }, + { + name: "low match ratio, but high absolute count", + objectCount: 10000, + matchCount: 800, + limit: 10, + sort: []filters.Sort{ + { + Path: []string{"int"}, + Order: "asc", + }, + }, + shouldChooseInverted: true, + }, + { + name: "prop is not indexed", + objectCount: 1000, + matchCount: 800, + limit: 100, + sort: []filters.Sort{ + { + Path: []string{"int_not_indexed"}, + Order: "asc", + }, + }, + shouldChooseInverted: false, + }, + { + name: "prop is indexed, but missing", + objectCount: 1000, + matchCount: 800, + limit: 100, + sort: []filters.Sort{ + { + Path: []string{"int_corrupt_index"}, + Order: "asc", + }, + }, + // possibly corrupt inverted index, fall back to objects bucket strategy + shouldChooseInverted: false, + }, + { + name: "more than one sort arg – not supported yet", + objectCount: 1000, + matchCount: 800, + limit: 100, + sort: []filters.Sort{ + { + Path: []string{"int"}, + Order: "desc", + }, + { + Path: []string{"number"}, + Order: "asc", + }, + }, + shouldChooseInverted: false, + }, + { + name: "prop is not a supported type", + objectCount: 1000, + matchCount: 800, + limit: 100, + sort: []filters.Sort{ + { + Path: []string{"text"}, + Order: "asc", + }, + }, + shouldChooseInverted: false, + }, + { + name: "nil bitmap, i.e. unfiltered search", + objectCount: 1000, + nilBitmap: true, + limit: 100, + sort: []filters.Sort{ + { + Path: []string{"int"}, + Order: "asc", + }, + }, + shouldChooseInverted: true, + }, + { + name: "nil bitmap, i.e. 
unfiltered search - unsupported prop", + objectCount: 1000, + nilBitmap: true, + limit: 100, + sort: []filters.Sort{ + { + Path: []string{"text"}, + Order: "asc", + }, + }, + shouldChooseInverted: false, + }, + { + name: "high match ratio, but globally disabled", + objectCount: 1000, + matchCount: 800, + limit: 100, + disabled: true, + sort: []filters.Sort{ + { + Path: []string{"int"}, + Order: "asc", + }, + }, + shouldChooseInverted: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + var ( + dirName = t.TempDir() + logger, _ = test.NewNullLogger() + ctx = context.Background() + ) + store, err := lsmkv.New(dirName, dirName, logger, nil, + cyclemanager.NewCallbackGroupNoop(), + cyclemanager.NewCallbackGroupNoop(), + cyclemanager.NewCallbackGroupNoop()) + require.Nil(t, err) + defer store.Shutdown(ctx) + + err = store.CreateOrLoadBucket(ctx, helpers.ObjectsBucketLSM, lsmkv.WithCalcCountNetAdditions(true)) + require.Nil(t, err) + + objectsB := store.Bucket(helpers.ObjectsBucketLSM) + for i := 0; i < tc.objectCount; i++ { + objBytes, docID := createDummyObject(t, i) + objectsB.Put([]byte(fmt.Sprintf("%08d", docID)), objBytes) + require.Nil(t, err) + } + + for _, propName := range []string{"int", "number", "date"} { + err = store.CreateOrLoadBucket(ctx, helpers.BucketFromPropNameLSM(propName), + lsmkv.WithStrategy(lsmkv.StrategyRoaringSet)) + require.Nil(t, err) + } + + require.Nil(t, objectsB.FlushAndSwitch()) + + var bm helpers.AllowList + if !tc.nilBitmap { + bm = allowlistWithExactMatchCount(t, tc.matchCount) + } + var disabled *runtime.DynamicValue[bool] + if tc.disabled { + disabled = runtime.NewDynamicValue[bool](true) + } + qp := NewQueryPlanner(store, newDataTypesHelper(dummyClass()), disabled) + shouldUseInverted, err := qp.Do(ctx, bm, tc.limit, tc.sort) + require.Nil(t, err) + assert.Equal(t, tc.shouldChooseInverted, shouldUseInverted) + }) + } +} + +func allowlistWithExactMatchCount(t *testing.T, count int) 
helpers.AllowList { + ids := make([]uint64, 0, count) + for i := 0; i < count; i++ { + ids = append(ids, uint64(i)) + } + return helpers.NewAllowList(ids...) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/sorter/utils.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/sorter/utils.go new file mode 100644 index 0000000000000000000000000000000000000000..291896706d56e17ad64319ccfa06b6a31c3c6d9f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/sorter/utils.go @@ -0,0 +1,44 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package sorter + +import ( + "github.com/pkg/errors" + "github.com/weaviate/weaviate/entities/filters" +) + +func extractPropNamesAndOrders(sort []filters.Sort) ([]string, []string, error) { + propNames := make([]string, len(sort)) + orders := make([]string, len(sort)) + + for i, srt := range sort { + if len(srt.Path) == 0 { + return nil, nil, errors.New("path parameter cannot be empty") + } + if len(srt.Path) > 1 { + return nil, nil, errors.New("sorting by reference not supported, path must have exactly one argument") + } + propNames[i] = srt.Path[0] + orders[i] = srt.Order + } + return propNames, orders, nil +} + +func validateLimit(limit, elementsCount int) int { + if limit > elementsCount { + return elementsCount + } + if limit < 0 { + return 0 + } + return limit +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/cache/cache.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/cache/cache.go new file mode 100644 index 0000000000000000000000000000000000000000..b53cbb77f8a4f9f93845d7cd6833fa6bf654a4ad --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/cache/cache.go @@ -0,0 +1,49 @@ 
+// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package cache + +import ( + "context" + "time" +) + +const DefaultDeletionInterval = 3 * time.Second + +type MultiCache[T any] interface { + PreloadMulti(docID uint64, ids []uint64, vecs [][]T) + PreloadPassage(id uint64, docID uint64, relativeID uint64, vec []T) + GetDoc(ctx context.Context, docID uint64) ([][]float32, error) + GetKeys(id uint64) (uint64, uint64) + SetKeys(id uint64, docID uint64, relativeID uint64) +} + +type Cache[T any] interface { + MultiCache[T] + Get(ctx context.Context, id uint64) ([]T, error) + MultiGet(ctx context.Context, ids []uint64) ([][]T, []error) + GetAllInCurrentLock(ctx context.Context, id uint64, out [][]T, errs []error) ([][]T, []error, uint64, uint64) + PageSize() uint64 + Len() int32 + CountVectors() int64 + Delete(ctx context.Context, id uint64) + Preload(id uint64, vec []T) + PreloadNoLock(id uint64, vec []T) + SetSizeAndGrowNoLock(id uint64) + Prefetch(id uint64) + Grow(size uint64) + Drop() + UpdateMaxSize(size int64) + CopyMaxSize() int64 + All() [][]T + LockAll() + UnlockAll() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/cache/prefetch_amd64.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/cache/prefetch_amd64.go new file mode 100644 index 0000000000000000000000000000000000000000..0fc0f08d24ab846c15acb657f63a269fb28ef248 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/cache/prefetch_amd64.go @@ -0,0 +1,18 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package cache + +import "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer/asm" + +func init() { + prefetchFunc = asm.Prefetch +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/cache/sharded_lock_cache.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/cache/sharded_lock_cache.go new file mode 100644 index 0000000000000000000000000000000000000000..55f5d9285fb25f30f51677f4f4cbba1a1f346df2 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/cache/sharded_lock_cache.go @@ -0,0 +1,810 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package cache + +import ( + "context" + "sync" + "sync/atomic" + "time" + "unsafe" + + enterrors "github.com/weaviate/weaviate/entities/errors" + "github.com/weaviate/weaviate/usecases/memwatch" + + "github.com/sirupsen/logrus" + "github.com/weaviate/weaviate/adapters/repos/db/vector/common" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer" +) + +type shardedLockCache[T float32 | byte | uint64] struct { + shardedLocks *common.ShardedRWLocks + cache [][]T + vectorForID common.VectorForID[T] + multipleVectorForDocID common.VectorForID[[]float32] + normalizeOnRead bool + maxSize int64 + count int64 + cancel chan bool + logger logrus.FieldLogger + deletionInterval time.Duration + allocChecker memwatch.AllocChecker + + // The maintenanceLock makes sure that only one maintenance operation, such + // as growing the cache or clearing the cache happens at the same time. 
+ maintenanceLock sync.RWMutex +} + +const ( + InitialSize = 1000 + RelativeInitialSize = 100 + MinimumIndexGrowthDelta = 2000 + MinimumRelativeGrowthDelta = 20 + indexGrowthRate = 1.25 + defaultCacheMaxSize = 1e12 +) + +func NewShardedFloat32LockCache(vecForID common.VectorForID[float32], multiVecForID common.VectorForID[[]float32], maxSize int, pageSize uint64, + logger logrus.FieldLogger, normalizeOnRead bool, deletionInterval time.Duration, + allocChecker memwatch.AllocChecker, +) Cache[float32] { + vc := &shardedLockCache[float32]{ + vectorForID: func(ctx context.Context, id uint64) ([]float32, error) { + vec, err := vecForID(ctx, id) + if err != nil { + return nil, err + } + if normalizeOnRead { + vec = distancer.Normalize(vec) + } + return vec, nil + }, + multipleVectorForDocID: multiVecForID, + cache: make([][]float32, InitialSize), + normalizeOnRead: normalizeOnRead, + count: 0, + maxSize: int64(maxSize), + cancel: make(chan bool), + logger: logger, + shardedLocks: common.NewShardedRWLocksWithPageSize(pageSize), + maintenanceLock: sync.RWMutex{}, + deletionInterval: deletionInterval, + allocChecker: allocChecker, + } + + vc.watchForDeletion() + return vc +} + +func NewShardedByteLockCache(vecForID common.VectorForID[byte], maxSize int, pageSize uint64, + logger logrus.FieldLogger, deletionInterval time.Duration, + allocChecker memwatch.AllocChecker, +) Cache[byte] { + vc := &shardedLockCache[byte]{ + vectorForID: vecForID, + cache: make([][]byte, InitialSize), + normalizeOnRead: false, + count: 0, + maxSize: int64(maxSize), + cancel: make(chan bool), + logger: logger, + shardedLocks: common.NewShardedRWLocksWithPageSize(pageSize), + maintenanceLock: sync.RWMutex{}, + deletionInterval: deletionInterval, + allocChecker: allocChecker, + } + + vc.watchForDeletion() + return vc +} + +func NewShardedUInt64LockCache(vecForID common.VectorForID[uint64], maxSize int, pageSize uint64, + logger logrus.FieldLogger, deletionInterval time.Duration, + allocChecker 
memwatch.AllocChecker, +) Cache[uint64] { + vc := &shardedLockCache[uint64]{ + vectorForID: vecForID, + cache: make([][]uint64, InitialSize), + normalizeOnRead: false, + count: 0, + maxSize: int64(maxSize), + cancel: make(chan bool), + logger: logger, + shardedLocks: common.NewShardedRWLocksWithPageSize(pageSize), + maintenanceLock: sync.RWMutex{}, + deletionInterval: deletionInterval, + allocChecker: allocChecker, + } + + vc.watchForDeletion() + return vc +} + +func (s *shardedLockCache[T]) All() [][]T { + return s.cache +} + +func (s *shardedLockCache[T]) Get(ctx context.Context, id uint64) ([]T, error) { + s.shardedLocks.RLock(id) + vec := s.cache[id] + s.shardedLocks.RUnlock(id) + + if vec != nil { + return vec, nil + } + + return s.handleCacheMiss(ctx, id) +} + +func (s *shardedLockCache[T]) Delete(ctx context.Context, id uint64) { + s.shardedLocks.Lock(id) + defer s.shardedLocks.Unlock(id) + + if int(id) >= len(s.cache) || s.cache[id] == nil { + return + } + + s.cache[id] = nil + atomic.AddInt64(&s.count, -1) +} + +func (s *shardedLockCache[T]) handleCacheMiss(ctx context.Context, id uint64) ([]T, error) { + if s.allocChecker != nil { + // we don't really know the exact size here, but we don't have to be + // accurate. If mem pressure is this high, we basically want to prevent any + // new permanent heap alloc. If we underestimate the size of a vector a + // bit, we might allow one more vector than we can really fit. But the one + // after would fail then. Given that vectors are typically somewhat small + // (in the single-digit KBs), it doesn't matter much, as long as we stop + // allowing vectors when we're out of memory. + estimatedSize := int64(1024) + if err := s.allocChecker.CheckAlloc(estimatedSize); err != nil { + s.logger.WithFields(logrus.Fields{ + "action": "vector_cache_miss", + "event": "vector_load_skipped_oom", + "doc_id": id, + }).WithError(err). 
+ Warnf("cannot load vector into cache due to memory pressure") + return nil, err + } + } + + vec, err := s.vectorForID(ctx, id) + if err != nil { + return nil, err + } + + atomic.AddInt64(&s.count, 1) + + if vec != nil { + s.shardedLocks.Lock(id) + s.cache[id] = vec + s.shardedLocks.Unlock(id) + } + + return vec, nil +} + +func (s *shardedLockCache[T]) MultiGet(ctx context.Context, ids []uint64) ([][]T, []error) { + out := make([][]T, len(ids)) + errs := make([]error, len(ids)) + + for i, id := range ids { + s.shardedLocks.RLock(id) + vec := s.cache[id] + s.shardedLocks.RUnlock(id) + + if vec == nil { + vecFromDisk, err := s.handleCacheMiss(ctx, id) + errs[i] = err + vec = vecFromDisk + } + + out[i] = vec + } + + return out, errs +} + +func (s *shardedLockCache[T]) GetAllInCurrentLock(ctx context.Context, id uint64, out [][]T, errs []error) ([][]T, []error, uint64, uint64) { + start := (id / s.shardedLocks.PageSize) * s.shardedLocks.PageSize + end := start + s.shardedLocks.PageSize + cacheMiss := false + + if end > uint64(len(s.cache)) { + end = uint64(len(s.cache)) + } + + s.shardedLocks.RLock(start) + for i := start; i < end; i++ { + vec := s.cache[i] + if vec == nil { + cacheMiss = true + } + out[i-start] = vec + } + s.shardedLocks.RUnlock(start) + + // We don't expect cache misses in general as the default cache size is very large (1e12). + // Until the vector index cache is improved to handle nil vectors better, it makes sense here + // to exclude handling cache misses unless the cache size has been altered. 
+ if cacheMiss && atomic.LoadInt64(&s.maxSize) != defaultCacheMaxSize { + for i := start; i < end; i++ { + if out[i-start] == nil { + vecFromDisk, err := s.handleCacheMiss(ctx, i) + errs[i-start] = err + out[i-start] = vecFromDisk + } + } + } + + return out, errs, start, end +} + +func (s *shardedLockCache[T]) PageSize() uint64 { + return s.shardedLocks.PageSize +} + +var prefetchFunc func(in uintptr) = func(in uintptr) { + // do nothing on default arch + // this function will be overridden for amd64 +} + +func (s *shardedLockCache[T]) LockAll() { + s.shardedLocks.LockAll() +} + +func (s *shardedLockCache[T]) UnlockAll() { + s.shardedLocks.UnlockAll() +} + +func (s *shardedLockCache[T]) Prefetch(id uint64) { + s.shardedLocks.RLock(id) + defer s.shardedLocks.RUnlock(id) + + prefetchFunc(uintptr(unsafe.Pointer(&s.cache[id]))) +} + +func (s *shardedLockCache[T]) Preload(id uint64, vec []T) { + s.shardedLocks.Lock(id) + defer s.shardedLocks.Unlock(id) + + atomic.AddInt64(&s.count, 1) + s.cache[id] = vec +} + +func (s *shardedLockCache[T]) PreloadNoLock(id uint64, vec []T) { + s.cache[id] = vec +} + +func (s *shardedLockCache[T]) SetSizeAndGrowNoLock(size uint64) { + atomic.StoreInt64(&s.count, int64(size)) + + if size < uint64(len(s.cache)) { + return + } + newSize := size + MinimumIndexGrowthDelta + newCache := make([][]T, newSize) + copy(newCache, s.cache) + s.cache = newCache +} + +func (s *shardedLockCache[T]) Grow(node uint64) { + s.maintenanceLock.RLock() + if node < uint64(len(s.cache)) { + s.maintenanceLock.RUnlock() + return + } + s.maintenanceLock.RUnlock() + + s.maintenanceLock.Lock() + defer s.maintenanceLock.Unlock() + + // make sure cache still needs growing + // (it could have grown while waiting for maintenance lock) + if node < uint64(len(s.cache)) { + return + } + + s.shardedLocks.LockAll() + defer s.shardedLocks.UnlockAll() + + newSize := node + MinimumIndexGrowthDelta + newCache := make([][]T, newSize) + copy(newCache, s.cache) + s.cache = newCache 
+} + +func (s *shardedLockCache[T]) Len() int32 { + s.maintenanceLock.RLock() + defer s.maintenanceLock.RUnlock() + + return int32(len(s.cache)) +} + +func (s *shardedLockCache[T]) CountVectors() int64 { + return atomic.LoadInt64(&s.count) +} + +func (s *shardedLockCache[T]) Drop() { + s.deleteAllVectors() + if s.deletionInterval != 0 { + s.cancel <- true + } +} + +func (s *shardedLockCache[T]) deleteAllVectors() { + s.shardedLocks.LockAll() + defer s.shardedLocks.UnlockAll() + + s.logger.WithField("action", "hnsw_delete_vector_cache"). + Debug("deleting full vector cache") + for i := range s.cache { + s.cache[i] = nil + } + + atomic.StoreInt64(&s.count, 0) +} + +func (s *shardedLockCache[T]) watchForDeletion() { + if s.deletionInterval != 0 { + f := func() { + t := time.NewTicker(s.deletionInterval) + defer t.Stop() + for { + select { + case <-s.cancel: + return + case <-t.C: + s.replaceIfFull() + } + } + } + enterrors.GoWrapper(f, s.logger) + } +} + +func (s *shardedLockCache[T]) replaceIfFull() { + if atomic.LoadInt64(&s.count) >= atomic.LoadInt64(&s.maxSize) { + s.deleteAllVectors() + } +} + +func (s *shardedLockCache[T]) UpdateMaxSize(size int64) { + atomic.StoreInt64(&s.maxSize, size) +} + +func (s *shardedLockCache[T]) CopyMaxSize() int64 { + sizeCopy := atomic.LoadInt64(&s.maxSize) + return sizeCopy +} + +func (s *shardedLockCache[T]) GetKeys(id uint64) (uint64, uint64) { + panic("not implemented") +} + +func (s *shardedLockCache[T]) PreloadMulti(docID uint64, ids []uint64, vecs [][]T) { + panic("not implemented") +} + +func (s *shardedLockCache[T]) SetKeys(id uint64, docID uint64, relativeID uint64) { + panic("not implemented") +} + +func (s *shardedLockCache[T]) PreloadPassage(id uint64, docID uint64, relativeID uint64, vec []T) { + panic("not implemented") +} + +// noopCache can be helpful in debugging situations, where we want to +// explicitly pass through each vectorForID call to the underlying vectorForID +// function without caching in between. 
+type noopCache struct { + vectorForID common.VectorForID[float32] +} + +func NewNoopCache(vecForID common.VectorForID[float32], maxSize int, + logger logrus.FieldLogger, +) *noopCache { + return &noopCache{vectorForID: vecForID} +} + +type CacheKeys struct { + DocID uint64 + RelativeID uint64 +} + +type shardedMultipleLockCache[T float32 | uint64 | byte] struct { + shardedLocks *common.ShardedRWLocks + cache [][]T + multipleVectorForID common.MultipleVectorForID[T] + multipleVectorForDocID common.VectorForID[[]float32] + normalizeOnRead bool + maxSize int64 + count int64 + ctx context.Context + cancelFn func() + logger logrus.FieldLogger + deletionInterval time.Duration + allocChecker memwatch.AllocChecker + vectorDocID []CacheKeys + + // The maintenanceLock makes sure that only one maintenance operation, such + // as growing the cache or clearing the cache happens at the same time. + maintenanceLock sync.RWMutex +} + +func NewShardedMultiFloat32LockCache(multipleVecForID common.VectorForID[[]float32], maxSize int, + logger logrus.FieldLogger, normalizeOnRead bool, deletionInterval time.Duration, + allocChecker memwatch.AllocChecker, +) Cache[float32] { + multipleVecForIDValue := func(ctx context.Context, id uint64, relativeID uint64) ([]float32, error) { + vecs, err := multipleVecForID(ctx, id) + if err != nil { + return nil, err + } + vec := vecs[relativeID] + if normalizeOnRead { + vec = distancer.Normalize(vec) + } + return vec, nil + } + + cache := make([][]float32, InitialSize) + + vc := &shardedMultipleLockCache[float32]{ + multipleVectorForID: multipleVecForIDValue, + multipleVectorForDocID: multipleVecForID, + cache: cache, + normalizeOnRead: normalizeOnRead, + count: 0, + maxSize: int64(maxSize), + logger: logger, + shardedLocks: common.NewDefaultShardedRWLocks(), + maintenanceLock: sync.RWMutex{}, + deletionInterval: deletionInterval, + allocChecker: allocChecker, + vectorDocID: make([]CacheKeys, InitialSize), + } + + vc.ctx, vc.cancelFn = 
context.WithCancel(context.Background()) + + vc.watchForDeletion() + return vc +} + +func NewShardedMultiUInt64LockCache(multipleVecForID common.VectorForID[uint64], maxSize int, + logger logrus.FieldLogger, deletionInterval time.Duration, + allocChecker memwatch.AllocChecker, +) Cache[uint64] { + multipleVecForIDValue := func(ctx context.Context, id uint64, relativeID uint64) ([]uint64, error) { + vec, err := multipleVecForID(ctx, id) + if err != nil { + return nil, err + } + return vec, nil + } + + cache := make([][]uint64, InitialSize) + + vc := &shardedMultipleLockCache[uint64]{ + multipleVectorForID: multipleVecForIDValue, + cache: cache, + count: 0, + maxSize: int64(maxSize), + logger: logger, + shardedLocks: common.NewDefaultShardedRWLocks(), + maintenanceLock: sync.RWMutex{}, + deletionInterval: deletionInterval, + allocChecker: allocChecker, + vectorDocID: make([]CacheKeys, InitialSize), + } + + vc.ctx, vc.cancelFn = context.WithCancel(context.Background()) + + vc.watchForDeletion() + return vc +} + +func NewShardedMultiByteLockCache(multipleVecForID common.VectorForID[byte], maxSize int, + logger logrus.FieldLogger, deletionInterval time.Duration, + allocChecker memwatch.AllocChecker, +) Cache[byte] { + multipleVecForIDValue := func(ctx context.Context, id uint64, relativeID uint64) ([]byte, error) { + vec, err := multipleVecForID(ctx, id) + if err != nil { + return nil, err + } + return vec, nil + } + + cache := make([][]byte, InitialSize) + + vc := &shardedMultipleLockCache[byte]{ + multipleVectorForID: multipleVecForIDValue, + cache: cache, + count: 0, + maxSize: int64(maxSize), + logger: logger, + shardedLocks: common.NewDefaultShardedRWLocks(), + maintenanceLock: sync.RWMutex{}, + deletionInterval: deletionInterval, + allocChecker: allocChecker, + vectorDocID: make([]CacheKeys, InitialSize), + } + + vc.ctx, vc.cancelFn = context.WithCancel(context.Background()) + + vc.watchForDeletion() + return vc +} + +func (s *shardedMultipleLockCache[T]) All() 
[][]T { + return s.cache +} + +func (s *shardedMultipleLockCache[T]) GetKeys(id uint64) (uint64, uint64) { + s.shardedLocks.RLock(id) + keys := s.vectorDocID[id] + s.shardedLocks.RUnlock(id) + return keys.DocID, keys.RelativeID +} + +func (s *shardedMultipleLockCache[T]) SetKeys(id uint64, docID uint64, relativeID uint64) { + s.shardedLocks.Lock(id) + defer s.shardedLocks.Unlock(id) + + s.vectorDocID[id] = CacheKeys{DocID: docID, RelativeID: relativeID} +} + +func (s *shardedMultipleLockCache[T]) GetKeysNoLock(id uint64) (uint64, uint64) { + keys := s.vectorDocID[id] + return keys.DocID, keys.RelativeID +} + +func (s *shardedMultipleLockCache[T]) Get(ctx context.Context, id uint64) ([]T, error) { + s.shardedLocks.RLock(id) + vec := s.cache[id] + s.shardedLocks.RUnlock(id) + + if len(vec) == 0 { + docID, relativeID := s.GetKeys(id) + return s.handleMultipleCacheMiss(ctx, id, docID, relativeID) + } + + return vec, nil +} + +func (s *shardedLockCache[T]) GetDoc(ctx context.Context, docID uint64) ([][]float32, error) { + return s.multipleVectorForDocID(ctx, docID) +} + +func (s *shardedMultipleLockCache[T]) GetDoc(ctx context.Context, docID uint64) ([][]float32, error) { + return s.multipleVectorForDocID(ctx, docID) +} + +func (s *shardedMultipleLockCache[T]) MultiGet(ctx context.Context, ids []uint64) ([][]T, []error) { + out := make([][]T, len(ids)) + errs := make([]error, len(ids)) + + for i, id := range ids { + + s.shardedLocks.RLock(id) + vec := s.cache[id] + s.shardedLocks.RUnlock(id) + if len(vec) == 0 { + docID, relativeID := s.GetKeys(id) + vec, errs[i] = s.handleMultipleCacheMiss(ctx, id, docID, relativeID) + } + + out[i] = vec + } + + return out, errs +} + +func (s *shardedMultipleLockCache[T]) Delete(ctx context.Context, id uint64) { + s.shardedLocks.Lock(id) + defer s.shardedLocks.Unlock(id) + + if int(id) >= len(s.cache) || len(s.cache[id]) == 0 { + return + } + + s.cache[id] = nil + s.vectorDocID[id] = CacheKeys{} + atomic.AddInt64(&s.count, -1) +} + 
+func (s *shardedMultipleLockCache[T]) handleMultipleCacheMiss(ctx context.Context, id uint64, docID uint64, relativeID uint64) ([]T, error) { + if s.allocChecker != nil { + // we don't really know the exact size here, but we don't have to be + // accurate. If mem pressure is this high, we basically want to prevent any + // new permanent heap alloc. If we underestimate the size of a vector a + // bit, we might allow one more vector than we can really fit. But the one + // after would fail then. Given that vectors are typically somewhat small + // (in the single-digit KBs), it doesn't matter much, as long as we stop + // allowing vectors when we're out of memory. + estimatedSize := int64(1024) + if err := s.allocChecker.CheckAlloc(estimatedSize); err != nil { + s.logger.WithFields(logrus.Fields{ + "action": "vector_cache_miss", + "event": "vector_load_skipped_oom", + "doc_id": docID, + "vec_id": relativeID, + }).WithError(err). + Warnf("cannot load vector into cache due to memory pressure") + return nil, err + } + } + + vec, err := s.multipleVectorForID(ctx, docID, relativeID) + if err != nil { + return nil, err + } + + atomic.AddInt64(&s.count, 1) + if len(vec) != 0 { + s.shardedLocks.Lock(id) + s.cache[id] = vec + s.shardedLocks.Unlock(id) + } + + return vec, nil +} + +func (s *shardedMultipleLockCache[T]) LockAll() { + s.shardedLocks.LockAll() +} + +func (s *shardedMultipleLockCache[T]) UnlockAll() { + s.shardedLocks.UnlockAll() +} + +func (s *shardedMultipleLockCache[T]) Prefetch(id uint64) { + s.shardedLocks.RLock(id) + defer s.shardedLocks.RUnlock(id) + + prefetchFunc(uintptr(unsafe.Pointer(&s.cache[id]))) +} + +func (s *shardedMultipleLockCache[T]) PreloadMulti(docID uint64, ids []uint64, vecs [][]T) { + atomic.AddInt64(&s.count, int64(len(ids))) + for i, id := range ids { + s.shardedLocks.Lock(id) + s.cache[id] = vecs[i] + s.vectorDocID[id] = CacheKeys{DocID: docID, RelativeID: uint64(i)} + s.shardedLocks.Unlock(id) + } +} + +func (s 
*shardedMultipleLockCache[T]) PreloadPassage(id uint64, docID uint64, relativeID uint64, vec []T) { + s.shardedLocks.Lock(id) + defer s.shardedLocks.Unlock(id) + + s.cache[id] = vec + s.vectorDocID[id] = CacheKeys{DocID: docID, RelativeID: relativeID} + atomic.AddInt64(&s.count, int64(1)) +} + +func (s *shardedMultipleLockCache[T]) Preload(docID uint64, vec []T) { + panic("not implemented") +} + +func (s *shardedMultipleLockCache[T]) PreloadNoLock(docID uint64, vec []T) { + panic("not implemented") +} + +func (s *shardedMultipleLockCache[T]) SetSizeAndGrowNoLock(size uint64) { + panic("not implemented") +} + +func (s *shardedMultipleLockCache[T]) Grow(node uint64) { + s.maintenanceLock.RLock() + if node < uint64(len(s.vectorDocID)) { + s.maintenanceLock.RUnlock() + return + } + s.maintenanceLock.RUnlock() + + s.maintenanceLock.Lock() + defer s.maintenanceLock.Unlock() + + // make sure cache still needs growing + // (it could have grown while waiting for maintenance lock) + if node < uint64(len(s.vectorDocID)) { + return + } + + s.shardedLocks.LockAll() + defer s.shardedLocks.UnlockAll() + + newSizeVector := node + MinimumIndexGrowthDelta + newVectorDocID := make([]CacheKeys, newSizeVector) + copy(newVectorDocID, s.vectorDocID) + s.vectorDocID = newVectorDocID + newCache := make([][]T, newSizeVector) + copy(newCache, s.cache) + s.cache = newCache +} + +func (s *shardedMultipleLockCache[T]) Len() int32 { + s.maintenanceLock.RLock() + defer s.maintenanceLock.RUnlock() + + return int32(len(s.cache)) +} + +func (s *shardedMultipleLockCache[T]) CountVectors() int64 { + return atomic.LoadInt64(&s.count) +} + +func (s *shardedMultipleLockCache[T]) Drop() { + s.deleteAllVectors() + if s.deletionInterval != 0 { + s.cancelFn() + } +} + +func (s *shardedMultipleLockCache[T]) deleteAllVectors() { + s.shardedLocks.LockAll() + defer s.shardedLocks.UnlockAll() + + s.logger.WithField("action", "hnsw_delete_vector_cache"). 
+ Debug("deleting full vector cache") + for i := range s.cache { + s.cache[i] = nil + s.vectorDocID[i] = CacheKeys{} + } + + atomic.StoreInt64(&s.count, 0) +} + +func (s *shardedMultipleLockCache[T]) watchForDeletion() { + if s.deletionInterval != 0 { + f := func() { + t := time.NewTicker(s.deletionInterval) + defer t.Stop() + for { + select { + case <-s.ctx.Done(): + return + case <-t.C: + s.replaceIfFull() + } + } + } + enterrors.GoWrapper(f, s.logger) + } +} + +func (s *shardedMultipleLockCache[T]) replaceIfFull() { + if atomic.LoadInt64(&s.count) >= atomic.LoadInt64(&s.maxSize) { + s.deleteAllVectors() + } +} + +func (s *shardedMultipleLockCache[T]) UpdateMaxSize(size int64) { + atomic.StoreInt64(&s.maxSize, size) +} + +func (s *shardedMultipleLockCache[T]) CopyMaxSize() int64 { + sizeCopy := atomic.LoadInt64(&s.maxSize) + return sizeCopy +} + +func (s *shardedMultipleLockCache[T]) GetAllInCurrentLock(ctx context.Context, id uint64, out [][]T, errs []error) ([][]T, []error, uint64, uint64) { + panic("not implemented") +} + +func (s *shardedMultipleLockCache[T]) PageSize() uint64 { + panic("not implemented") +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/cache/sharded_lock_cache_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/cache/sharded_lock_cache_test.go new file mode 100644 index 0000000000000000000000000000000000000000..4f88e56364c5450c69cf757939e9c15023304b5c --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/cache/sharded_lock_cache_test.go @@ -0,0 +1,360 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package cache + +import ( + "context" + "errors" + "math/rand" + "sync" + "testing" + "time" + + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/weaviate/weaviate/adapters/repos/db/vector/common" +) + +func TestVectorCacheGrowth(t *testing.T) { + logger, _ := test.NewNullLogger() + var vecForId common.VectorForID[float32] = nil + id := 100_000 + expectedCount := int64(0) + + vectorCache := NewShardedFloat32LockCache(vecForId, nil, 1_000_000, 1, logger, false, time.Duration(10_000), nil) + initialSize := vectorCache.Len() + assert.Less(t, int(initialSize), id) + assert.Equal(t, expectedCount, vectorCache.CountVectors()) + + vectorCache.Grow(uint64(id)) + size1stGrow := vectorCache.Len() + assert.Greater(t, int(size1stGrow), id) + assert.Equal(t, expectedCount, vectorCache.CountVectors()) + + vectorCache.Grow(uint64(id)) + size2ndGrow := vectorCache.Len() + assert.Equal(t, size1stGrow, size2ndGrow) + assert.Equal(t, expectedCount, vectorCache.CountVectors()) +} + +func TestCache_ParallelGrowth(t *testing.T) { + // no asserts + // ensures there is no "index out of range" panic on get + + logger, _ := test.NewNullLogger() + var vecForId common.VectorForID[float32] = func(context.Context, uint64) ([]float32, error) { return nil, nil } + vectorCache := NewShardedFloat32LockCache(vecForId, nil, 1_000_000, 1, logger, false, time.Second, nil) + + r := rand.New(rand.NewSource(time.Now().UnixNano())) + count := 10_000 + maxNode := 100_000 + + wg := new(sync.WaitGroup) + wg.Add(count) + for i := 0; i < count; i++ { + node := uint64(r.Intn(maxNode)) + go func(node uint64) { + defer wg.Done() + + vectorCache.Grow(node) + vectorCache.Get(context.Background(), node) + }(node) + } + + wg.Wait() +} + +func TestCacheCleanup(t *testing.T) { + logger, _ := test.NewNullLogger() + var vecForId common.VectorForID[float32] = nil + + maxSize := 10 + batchSize := maxSize - 1 + deletionInterval := 200 * 
time.Millisecond // overwrite default deletionInterval of 3s + sleepDuration := deletionInterval + 100*time.Millisecond + + t.Run("count is not reset on unnecessary deletion", func(t *testing.T) { + vectorCache := NewShardedFloat32LockCache(vecForId, nil, maxSize, 1, logger, false, deletionInterval, nil) + shardedLockCache, ok := vectorCache.(*shardedLockCache[float32]) + assert.True(t, ok) + + for i := 0; i < batchSize; i++ { + shardedLockCache.Preload(uint64(i), []float32{float32(i), float32(i)}) + } + time.Sleep(sleepDuration) // wait for deletion to fire + + assert.Equal(t, batchSize, int(shardedLockCache.CountVectors())) + assert.Equal(t, batchSize, countCached(shardedLockCache)) + + shardedLockCache.Drop() + + assert.Equal(t, 0, int(shardedLockCache.count)) + assert.Equal(t, 0, countCached(shardedLockCache)) + }) + + t.Run("deletion clears cache and counter when maxSize exceeded", func(t *testing.T) { + vectorCache := NewShardedFloat32LockCache(vecForId, nil, maxSize, 1, logger, false, deletionInterval, nil) + shardedLockCache, ok := vectorCache.(*shardedLockCache[float32]) + assert.True(t, ok) + + for b := 0; b < 2; b++ { + for i := 0; i < batchSize; i++ { + id := b*batchSize + i + shardedLockCache.Preload(uint64(id), []float32{float32(id), float32(id)}) + } + time.Sleep(sleepDuration) // wait for deletion to fire, 2nd should clean the cache + } + + assert.Equal(t, 0, int(shardedLockCache.CountVectors())) + assert.Equal(t, 0, countCached(shardedLockCache)) + + shardedLockCache.Drop() + }) +} + +func countCached(c *shardedLockCache[float32]) int { + c.shardedLocks.LockAll() + defer c.shardedLocks.UnlockAll() + + count := 0 + for _, vec := range c.cache { + if vec != nil { + count++ + } + } + return count +} + +func TestGetAllInCurrentLock(t *testing.T) { + logger, _ := test.NewNullLogger() + pageSize := uint64(10) + maxSize := 1000 + + t.Run("fully cached page", func(t *testing.T) { + // Setup a cache with some pre-loaded vectors + vectorCache := 
NewShardedFloat32LockCache(nil, nil, maxSize, pageSize, logger, false, 0, nil) + cache := vectorCache.(*shardedLockCache[float32]) + + // Preload vectors for a full page + for i := uint64(0); i < pageSize; i++ { + cache.Preload(i, []float32{float32(i)}) + } + + // Test retrieving the full page + out := make([][]float32, pageSize) + errs := make([]error, pageSize) + resultOut, resultErrs, start, end := cache.GetAllInCurrentLock(context.Background(), 5, out, errs) + + assert.Equal(t, uint64(0), start) + assert.Equal(t, pageSize, end) + assert.Equal(t, pageSize, uint64(len(resultOut))) + assert.Equal(t, pageSize, uint64(len(resultErrs))) + + // Verify all vectors are present and correct + for i := uint64(0); i < pageSize; i++ { + assert.NotNil(t, resultOut[i]) + assert.Equal(t, []float32{float32(i)}, resultOut[i]) + assert.Nil(t, resultErrs[i]) + } + }) + + t.Run("partially cached page", func(t *testing.T) { + // Setup mock vector retrieval function + vecForID := func(ctx context.Context, id uint64) ([]float32, error) { + return []float32{float32(id * 100)}, nil + } + + vectorCache := NewShardedFloat32LockCache(vecForID, nil, maxSize, pageSize, logger, false, 0, nil) + cache := vectorCache.(*shardedLockCache[float32]) + + // Preload only some vectors + for i := uint64(0); i < pageSize/2; i++ { + cache.Preload(i, []float32{float32(i)}) + } + + out := make([][]float32, pageSize) + errs := make([]error, pageSize) + resultOut, resultErrs, start, end := cache.GetAllInCurrentLock(context.Background(), 5, out, errs) + + assert.Equal(t, uint64(0), start) + assert.Equal(t, pageSize, end) + + // Verify cached vectors + for i := uint64(0); i < pageSize/2; i++ { + assert.NotNil(t, resultOut[i]) + assert.Equal(t, []float32{float32(i)}, resultOut[i]) + assert.Nil(t, resultErrs[i]) + } + + // Verify vectors loaded from storage + for i := pageSize / 2; i < pageSize; i++ { + assert.NotNil(t, resultOut[i]) + assert.Equal(t, []float32{float32(i * 100)}, resultOut[i]) + assert.Nil(t, 
resultErrs[i]) + } + }) + + t.Run("page beyond cache size", func(t *testing.T) { + vectorCache := NewShardedFloat32LockCache(nil, nil, maxSize, pageSize, logger, false, 0, nil) + cache := vectorCache.(*shardedLockCache[float32]) + + // Request vectors beyond current cache size + beyondCacheID := uint64(len(cache.cache) + int(pageSize)) + out := make([][]float32, pageSize) + errs := make([]error, pageSize) + resultOut, resultErrs, start, end := cache.GetAllInCurrentLock(context.Background(), beyondCacheID, out, errs) + + // Verify we get the last complete page + expectedStart := (beyondCacheID / pageSize) * pageSize + expectedEnd := uint64(len(cache.cache)) + assert.Equal(t, expectedStart, start) + assert.Equal(t, expectedEnd, end) + + // All vectors should be nil since they're beyond cache size + for i := uint64(0); i < uint64(len(resultOut)); i++ { + assert.Nil(t, resultOut[i]) + assert.Nil(t, resultErrs[i]) + } + }) + + t.Run("error handling from storage", func(t *testing.T) { + expectedErr := errors.New("storage error") + vecForID := func(ctx context.Context, id uint64) ([]float32, error) { + return nil, expectedErr + } + + vectorCache := NewShardedFloat32LockCache(vecForID, nil, maxSize, pageSize, logger, false, 0, nil) + cache := vectorCache.(*shardedLockCache[float32]) + + out := make([][]float32, pageSize) + errs := make([]error, pageSize) + resultOut, resultErrs, start, end := cache.GetAllInCurrentLock(context.Background(), 5, out, errs) + + assert.Equal(t, uint64(0), start) + assert.Equal(t, pageSize, end) + + // All vectors should be nil and have errors + for i := uint64(0); i < pageSize; i++ { + assert.Nil(t, resultOut[i]) + assert.Equal(t, expectedErr, resultErrs[i]) + } + }) +} + +func TestMultiVectorCacheGrowth(t *testing.T) { + logger, _ := test.NewNullLogger() + var multivecForId common.VectorForID[[]float32] = nil + id := 100_000 + expectedCount := int64(0) + + vectorCache := NewShardedMultiFloat32LockCache(multivecForId, 1_000_000, logger, false, 
time.Duration(10_000), nil) + initialSize := vectorCache.Len() + assert.Less(t, int(initialSize), id) + assert.Equal(t, expectedCount, vectorCache.CountVectors()) + + vectorCache.Grow(uint64(id)) + size1stGrow := vectorCache.Len() + assert.Greater(t, int(size1stGrow), id) + assert.Equal(t, expectedCount, vectorCache.CountVectors()) + + vectorCache.Grow(uint64(id)) + size2ndGrow := vectorCache.Len() + assert.Equal(t, size1stGrow, size2ndGrow) + assert.Equal(t, expectedCount, vectorCache.CountVectors()) +} + +func TestMultiCache_ParallelGrowth(t *testing.T) { + // no asserts + // ensures there is no "index out of range" panic on get + + logger, _ := test.NewNullLogger() + var multivecForId common.VectorForID[[]float32] = func(context.Context, uint64) ([][]float32, error) { return nil, nil } + vectorCache := NewShardedMultiFloat32LockCache(multivecForId, 1_000_000, logger, false, time.Second, nil) + + r := rand.New(rand.NewSource(time.Now().UnixNano())) + count := 10_000 + maxNode := 100_000 + + wg := new(sync.WaitGroup) + wg.Add(count) + for i := 0; i < count; i++ { + node := uint64(r.Intn(maxNode)) + go func(node uint64) { + defer wg.Done() + + vectorCache.Grow(node) + }(node) + } + + wg.Wait() +} + +func TestMultiCacheCleanup(t *testing.T) { + logger, _ := test.NewNullLogger() + var multivecForId common.VectorForID[[]float32] = nil + + maxSize := 10 + batchSize := maxSize - 1 + deletionInterval := 200 * time.Millisecond // overwrite default deletionInterval of 3s + sleepDuration := deletionInterval + 100*time.Millisecond + + t.Run("count is not reset on unnecessary deletion", func(t *testing.T) { + vectorCache := NewShardedMultiFloat32LockCache(multivecForId, maxSize, logger, false, deletionInterval, nil) + shardedLockCache, ok := vectorCache.(*shardedMultipleLockCache[float32]) + assert.True(t, ok) + + for i := 0; i < batchSize; i++ { + shardedLockCache.PreloadMulti(uint64(i), []uint64{uint64(i)}, [][]float32{{float32(i), float32(i)}}) + } + 
time.Sleep(sleepDuration) // wait for deletion to fire + + assert.Equal(t, batchSize, int(shardedLockCache.CountVectors())) + assert.Equal(t, batchSize, countMultiCached(shardedLockCache)) + + shardedLockCache.Drop() + + assert.Equal(t, 0, int(shardedLockCache.count)) + assert.Equal(t, 0, countMultiCached(shardedLockCache)) + }) + + t.Run("deletion clears cache and counter when maxSize exceeded", func(t *testing.T) { + vectorCache := NewShardedMultiFloat32LockCache(multivecForId, maxSize, logger, false, deletionInterval, nil) + shardedLockCache, ok := vectorCache.(*shardedMultipleLockCache[float32]) + assert.True(t, ok) + + for b := 0; b < 2; b++ { + for i := 0; i < batchSize; i++ { + id := b*batchSize + i + shardedLockCache.PreloadMulti(uint64(id), []uint64{uint64(id)}, [][]float32{{float32(id), float32(id)}}) + } + time.Sleep(sleepDuration) // wait for deletion to fire, 2nd should clean the cache + } + + assert.Equal(t, 0, int(shardedLockCache.CountVectors())) + assert.Equal(t, 0, countMultiCached(shardedLockCache)) + + shardedLockCache.Drop() + }) +} + +func countMultiCached(c *shardedMultipleLockCache[float32]) int { + c.shardedLocks.LockAll() + defer c.shardedLocks.UnlockAll() + + count := 0 + for _, vec := range c.cache { + if vec != nil { + count++ + } + } + return count +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/common/index_stats.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/common/index_stats.go new file mode 100644 index 0000000000000000000000000000000000000000..8c3f27af98aeae4d5b2f3143b16366eb5a92b970 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/common/index_stats.go @@ -0,0 +1,33 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package common + +type IndexType string + +const ( + IndexTypeHNSW = "hnsw" + IndexTypeFlat = "flat" + IndexTypeNoop = "noop" + IndexTypeDynamic = "dynamic" +) + +type IndexStats interface { + IndexType() IndexType +} + +func (i IndexType) String() string { + return string(i) +} + +func IsDynamic(indexType IndexType) bool { + return indexType == IndexTypeDynamic +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/common/index_stats_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/common/index_stats_test.go new file mode 100644 index 0000000000000000000000000000000000000000..70198dd4b54307cad0876bca8e542583ace0b406 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/common/index_stats_test.go @@ -0,0 +1,53 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package common + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestDynamicIndexDetection(t *testing.T) { + // Test the core logic for detecting dynamic indexes + t.Run("test dynamic index type detection", func(t *testing.T) { + // Test that dynamic indexes are correctly identified + indexType := IndexTypeDynamic + isDynamic := IsDynamic(IndexType(indexType)) + + assert.True(t, isDynamic, "Dynamic index should be detected as dynamic") + }) + + t.Run("test non-dynamic index type detection", func(t *testing.T) { + // Test that regular indexes are not detected as dynamic + indexType := IndexTypeHNSW + isDynamic := IsDynamic(IndexType(indexType)) + + assert.False(t, isDynamic, "HNSW index should not be detected as dynamic") + }) + + t.Run("test flat index type detection", func(t *testing.T) { + // Test that flat indexes are not detected as dynamic + indexType := IndexTypeFlat + isDynamic := IsDynamic(IndexType(indexType)) + + assert.False(t, isDynamic, "Flat index should not be detected as dynamic") + }) + + t.Run("test empty index type", func(t *testing.T) { + // Test that empty index type is not detected as dynamic + indexType := IndexTypeNoop + isDynamic := IsDynamic(IndexType(indexType)) + + assert.False(t, isDynamic, "Empty index type should not be detected as dynamic") + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/common/iterator.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/common/iterator.go new file mode 100644 index 0000000000000000000000000000000000000000..4a11984bccda65a98ebea8ba9e5d56ae3ed5ea01 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/common/iterator.go @@ -0,0 +1,64 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. 
All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package common + +import ( + "math/rand" +) + +// Iterator interface defines the methods for sampling elements. +type Iterator interface { + Next() (int, error) + IsDone() bool +} + +// SparseFisherYatesIterator implements the Iterator interface using the Sparse Fisher-Yates algorithm. +type SparseFisherYatesIterator struct { + size int + swapHistory map[int]int + currentPosition int +} + +// NewSparseFisherYatesIterator creates a new SparseFisherYatesIterator with the given size. +func NewSparseFisherYatesIterator(size int) *SparseFisherYatesIterator { + return &SparseFisherYatesIterator{ + size: size, + swapHistory: make(map[int]int), + currentPosition: 0, + } +} + +// Next returns the next sampled index using the Sparse Fisher-Yates algorithm. +func (s *SparseFisherYatesIterator) Next() *int { + // Sparse Fisher Yates sampling algorithm to choose random element + if s.currentPosition >= s.size { + return nil + } + randIndex := rand.Intn(s.size - s.currentPosition) + chosenIndex, ok := s.swapHistory[randIndex] + if !ok { + chosenIndex = randIndex + } + currentIndex, ok := s.swapHistory[s.size-s.currentPosition-1] + if !ok { + currentIndex = s.size - s.currentPosition - 1 + } + s.swapHistory[randIndex] = currentIndex + delete(s.swapHistory, s.size-s.currentPosition-1) + s.currentPosition++ + return &chosenIndex +} + +// IsDone checks if all elements have been sampled. 
+func (s *SparseFisherYatesIterator) IsDone() bool { + return s.currentPosition >= s.size +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/common/iterator_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/common/iterator_test.go new file mode 100644 index 0000000000000000000000000000000000000000..0248c8a13ec373d9353ed9666a6190d188718022 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/common/iterator_test.go @@ -0,0 +1,44 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package common + +import "testing" + +func Test_SparseFisherYatesIterator(t *testing.T) { + // Test the SparseFisherYatesSampler + size := 10 + sampler := NewSparseFisherYatesIterator(size) + sampledIndices := make(map[int]bool) + + for i := 0; i < size; i++ { + index := sampler.Next() + if index == nil { + t.Fatalf("expected index, got nil at iteration %d", i) + } + if *index < 0 || *index >= size { + t.Fatalf("index out of bounds: got %d, expected between 0 and %d", *index, size-1) + } + if _, exists := sampledIndices[*index]; exists { + t.Fatalf("duplicate index: %d", *index) + } + sampledIndices[*index] = true + } + + if !sampler.IsDone() { + t.Fatalf("expected sampler to be done after %d iterations", size) + } + + // Ensure that all indices were sampled + if len(sampledIndices) != size { + t.Fatalf("expected %d unique indices, got %d", size, len(sampledIndices)) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/common/paged_cache.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/common/paged_cache.go new file mode 100644 index 0000000000000000000000000000000000000000..c505a38c07937db580049c783defab8c3a4cc5fd --- /dev/null +++ 
b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/common/paged_cache.go @@ -0,0 +1,100 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package common + +// PagedCache is a cache that stores elements in pages of a fixed size. +// It is optimized for cases where the cache is sparse and the number of elements is not known in advance. +// The cache will grow as needed and will reuse pages that have been freed. +type PagedCache[T any] struct { + cache [][]*T + pageSize int + freePages [][]*T +} + +// NewPagedCache creates a new PagedCache with the given page size. +// The cache will start with 10 pages. +func NewPagedCache[T any](pageSize int) *PagedCache[T] { + return NewPagedCacheWith[T](pageSize, 10) +} + +// NewPagedCacheWith creates a new PagedCache with the given page size and initial number of pages. +func NewPagedCacheWith[T any](pageSize int, initialPages int) *PagedCache[T] { + return &PagedCache[T]{ + pageSize: pageSize, + cache: make([][]*T, initialPages), + } +} + +// Get returns the element at the given index. +// If the element is not in the cache, it will return nil. +func (p *PagedCache[T]) Get(id int) *T { + pageID := id / p.pageSize + slotID := id % p.pageSize + + if p.cache[pageID] == nil { + return nil + } + + return p.cache[pageID][slotID] +} + +// Set sets the element at the given index. +// If the page does not exist, it will be created. 
+func (p *PagedCache[T]) Set(id int, value *T) { + pageID := id / p.pageSize + slotID := id % p.pageSize + + if pageID >= len(p.cache) { + p.grow(pageID) + } + + if p.cache[pageID] == nil { + p.cache[pageID] = p.getPage() + } + + p.cache[pageID][slotID] = value +} + +func (p *PagedCache[T]) grow(page int) { + newSize := max(page+10, len(p.cache)*2) + newCache := make([][]*T, newSize) + copy(newCache, p.cache) + p.cache = newCache +} + +func (p *PagedCache[T]) getPage() []*T { + if len(p.freePages) > 0 { + lastIndex := len(p.freePages) - 1 + page := p.freePages[lastIndex] + p.freePages = p.freePages[:lastIndex] + return page + } + + return make([]*T, p.pageSize) +} + +// Reset clears the cache and frees all pages. +// Free pages are reused when new pages are needed. +func (p *PagedCache[T]) Reset() { + for i := range p.cache { + if p.cache[i] != nil { + clear(p.cache[i]) + p.freePages = append(p.freePages, p.cache[i]) + p.cache[i] = nil + } + } +} + +// Cap returns the current capacity of the cache. +func (p *PagedCache[T]) Cap() int { + return len(p.cache) * p.pageSize +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/common/paged_cache_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/common/paged_cache_test.go new file mode 100644 index 0000000000000000000000000000000000000000..25b6011a92c436e39fbddad0145bf0b0d937fcb9 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/common/paged_cache_test.go @@ -0,0 +1,129 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package common + +import ( + "math/rand" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestPagedCache(t *testing.T) { + cache := NewPagedCacheWith[int](10, 2) + require.Len(t, cache.cache, 2, "wrong initial number of pages") + + setN := func(n int) { + for i := 0; i < n; i++ { + cache.Set(i, &i) + } + } + + checkN := func(n int) { + for i := 0; i < n; i++ { + v := cache.Get(i) + if *v != i { + t.Errorf("expected %d, got %d", i, *v) + } + } + } + + setN(10) + checkN(10) + + setN(1000) + checkN(1000) + + cache.Reset() + + setN(1000) + checkN(1000) + + cache.Reset() + + setN(100) + require.Equal(t, 10, *cache.Get(10)) + require.Nil(t, cache.Get(140)) + + cache.Reset() + for i := 0; i < 100; i += 2 { + cache.Set(i, &i) + } + for i := 0; i < 100; i += 2 { + require.Equal(t, i, *cache.Get(i)) + } + for i := 1; i < 100; i += 2 { + require.Nil(t, cache.Get(i)) + } +} + +func BenchmarkPagedCache(b *testing.B) { + pageSize := 512 + keys := make([]int, 10000) + values := make([]int, 10000) + for i := range 10000 { + keys[i] = int(rand.Int31n(500_000_000)) + values[i] = rand.Int() + } + + b.Run("PagedCache/Set", func(b *testing.B) { + cache := NewPagedCacheWith[int](pageSize, 10) + + for i := 0; i < b.N; i++ { + cache.Reset() + + for j := 0; j < 10000; j++ { + cache.Set(keys[j], &values[j]) + } + } + }) + + b.Run("FlatCache/Set", func(b *testing.B) { + cache := make([]*int, 500_000_000) + + for i := 0; i < b.N; i++ { + clear(cache) + + for j := 0; j < 10000; j++ { + cache[keys[j]] = &values[j] + } + } + }) + + b.Run("PagedCache/Get", func(b *testing.B) { + cache := NewPagedCacheWith[int](pageSize, 10) + + for j := 0; j < 10000; j++ { + cache.Set(keys[j], &values[j]) + } + + for i := 0; i < b.N; i++ { + for j := 0; j < 10000; j++ { + _ = cache.Get(keys[j]) + } + } + }) + + b.Run("FlatCache/Get", func(b *testing.B) { + cache := make([]*int, 500_000_000) + + for j := 0; j < 10000; j++ { + cache[keys[j]] = &values[j] + } 
+ + for i := 0; i < b.N; i++ { + for j := 0; j < 10000; j++ { + _ = cache[keys[j]] + } + } + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/common/query_vector_distancer.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/common/query_vector_distancer.go new file mode 100644 index 0000000000000000000000000000000000000000..28e5ef46fcaaae4451ad1eb0bbc198d9031daf0b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/common/query_vector_distancer.go @@ -0,0 +1,27 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package common + +type QueryVectorDistancer struct { + DistanceFunc func(uint64) (float32, error) + CloseFunc func() +} + +func (q *QueryVectorDistancer) DistanceToNode(nodeID uint64) (float32, error) { + return q.DistanceFunc(nodeID) +} + +func (q *QueryVectorDistancer) Close() { + if q.CloseFunc != nil { + q.CloseFunc() + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/common/search_by_dist_params.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/common/search_by_dist_params.go new file mode 100644 index 0000000000000000000000000000000000000000..44c90b4866a3d747ddf1e00d9788d6c93ad2449a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/common/search_by_dist_params.go @@ -0,0 +1,83 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package common + +const ( + // DefaultSearchByDistInitialLimit : + // the initial limit of 100 here is an + // arbitrary decision, and can be tuned + // as needed + DefaultSearchByDistInitialLimit = 100 + + // DefaultSearchByDistLimitMultiplier : + // the decision to increase the limit in + // multiples of 10 here is an arbitrary + // decision, and can be tuned as needed + DefaultSearchByDistLimitMultiplier = 10 +) + +type SearchByDistParams struct { + offset int + limit int + totalLimit int + maximumSearchLimit int64 +} + +func NewSearchByDistParams( + offset int, + limit int, + totalLimit int, + maximumSearchLimit int64, +) *SearchByDistParams { + return &SearchByDistParams{ + offset: offset, + limit: limit, + totalLimit: totalLimit, + maximumSearchLimit: maximumSearchLimit, + } +} + +func (params *SearchByDistParams) TotalLimit() int { + return params.totalLimit +} + +func (params *SearchByDistParams) MaximumSearchLimit() int64 { + return params.maximumSearchLimit +} + +func (params *SearchByDistParams) OffsetCapacity(ids []uint64) int { + if l := len(ids); l < params.offset { + return l + } + return params.offset +} + +func (params *SearchByDistParams) TotalLimitCapacity(ids []uint64) int { + if l := len(ids); l < params.totalLimit { + return l + } + return params.totalLimit +} + +func (params *SearchByDistParams) Iterate() { + params.offset = params.totalLimit + params.limit *= DefaultSearchByDistLimitMultiplier + params.totalLimit = params.offset + params.limit +} + +func (params *SearchByDistParams) MaxLimitReached() bool { + if params.maximumSearchLimit < 0 { + return false + } + + return int64(params.totalLimit) > params.maximumSearchLimit +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/common/sharded_locks.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/common/sharded_locks.go new file mode 100644 index 
0000000000000000000000000000000000000000..83888943e49c5b8f7336a3bf8d585e413663a292 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/common/sharded_locks.go @@ -0,0 +1,185 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package common + +import "sync" + +const ( + DefaultShardedLocksCount = 512 + DefaultPageSize = 1 +) + +type ShardedLocks struct { + // sharded locks + shards []sync.Mutex + // number of locks + count uint64 + PageSize uint64 +} + +func NewDefaultShardedLocks() *ShardedLocks { + return NewShardedLocks(DefaultShardedLocksCount) +} + +func NewShardedLocks(count uint64) *ShardedLocks { + if count < 2 { + count = 2 + } + + return &ShardedLocks{ + shards: make([]sync.Mutex, count), + count: count, + PageSize: DefaultPageSize, + } +} + +func NewShardedLocksWithPageSize(pageSize uint64) *ShardedLocks { + return &ShardedLocks{ + shards: make([]sync.Mutex, DefaultShardedLocksCount), + count: DefaultShardedLocksCount, + PageSize: pageSize, + } +} + +func (sl *ShardedLocks) LockAll() { + for i := uint64(0); i < sl.count; i++ { + sl.shards[i].Lock() + } +} + +func (sl *ShardedLocks) UnlockAll() { + for i := int(sl.count) - 1; i >= 0; i-- { + sl.shards[i].Unlock() + } +} + +func (sl *ShardedLocks) LockedAll(callback func()) { + sl.LockAll() + defer sl.UnlockAll() + + callback() +} + +func (sl *ShardedLocks) Lock(id uint64) { + sl.shards[(id/sl.PageSize)%sl.count].Lock() +} + +func (sl *ShardedLocks) Unlock(id uint64) { + sl.shards[(id/sl.PageSize)%sl.count].Unlock() +} + +func (sl *ShardedLocks) Locked(id uint64, callback func()) { + sl.Lock(id) + defer sl.Unlock(id) + + callback() +} + +type ShardedRWLocks struct { + // sharded locks + shards []sync.RWMutex + // number of locks + count 
uint64 + PageSize uint64 +} + +func NewDefaultShardedRWLocks() *ShardedRWLocks { + return NewShardedRWLocks(DefaultShardedLocksCount) +} + +func NewShardedRWLocks(count uint64) *ShardedRWLocks { + if count < 2 { + count = 2 + } + + return &ShardedRWLocks{ + shards: make([]sync.RWMutex, count), + count: count, + PageSize: DefaultPageSize, + } +} + +func NewShardedRWLocksWithPageSize(pageSize uint64) *ShardedRWLocks { + return &ShardedRWLocks{ + shards: make([]sync.RWMutex, DefaultShardedLocksCount), + count: DefaultShardedLocksCount, + PageSize: pageSize, + } +} + +func (sl *ShardedRWLocks) LockAll() { + for i := uint64(0); i < sl.count; i++ { + sl.shards[i].Lock() + } +} + +func (sl *ShardedRWLocks) UnlockAll() { + for i := int(sl.count) - 1; i >= 0; i-- { + sl.shards[i].Unlock() + } +} + +func (sl *ShardedRWLocks) LockedAll(callback func()) { + sl.LockAll() + defer sl.UnlockAll() + + callback() +} + +func (sl *ShardedRWLocks) Lock(id uint64) { + sl.shards[(id/sl.PageSize)%sl.count].Lock() +} + +func (sl *ShardedRWLocks) Unlock(id uint64) { + sl.shards[(id/sl.PageSize)%sl.count].Unlock() +} + +func (sl *ShardedRWLocks) Locked(id uint64, callback func()) { + sl.Lock(id) + defer sl.Unlock(id) + + callback() +} + +func (sl *ShardedRWLocks) RLockAll() { + for i := uint64(0); i < sl.count; i++ { + sl.shards[i].RLock() + } +} + +func (sl *ShardedRWLocks) RUnlockAll() { + for i := int(sl.count) - 1; i >= 0; i-- { + sl.shards[i].RUnlock() + } +} + +func (sl *ShardedRWLocks) RLockedAll(callback func()) { + sl.RLockAll() + defer sl.RUnlockAll() + + callback() +} + +func (sl *ShardedRWLocks) RLock(id uint64) { + sl.shards[(id/sl.PageSize)%sl.count].RLock() +} + +func (sl *ShardedRWLocks) RUnlock(id uint64) { + sl.shards[(id/sl.PageSize)%sl.count].RUnlock() +} + +func (sl *ShardedRWLocks) RLocked(id uint64, callback func()) { + sl.RLock(id) + defer sl.RUnlock(id) + + callback() +} diff --git 
a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/common/sharded_locks_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/common/sharded_locks_test.go new file mode 100644 index 0000000000000000000000000000000000000000..5d93a1958c7180be2a8f6a53e22b9ed6fc8b712e --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/common/sharded_locks_test.go @@ -0,0 +1,610 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package common + +import ( + "sync" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestShardedLocks_ParallelLocksAll(t *testing.T) { + // no asserts + // ensures parallel LockAll does not fall into deadlock + count := 10 + sl := NewDefaultShardedLocks() + + wg := new(sync.WaitGroup) + wg.Add(count) + for i := 0; i < count; i++ { + go func() { + defer wg.Done() + sl.LockAll() + sl.UnlockAll() + }() + } + wg.Wait() +} + +func TestShardedLocks_MixedLocks(t *testing.T) { + // no asserts + // ensures parallel LockAll + RLockAll + Lock + RLock does not fall into deadlock + count := 1000 + sl := NewShardedLocks(10) + + wg := new(sync.WaitGroup) + wg.Add(count) + for i := 0; i < count; i++ { + go func(i int) { + defer wg.Done() + id := uint64(i) + if i%5 == 0 { + sl.LockAll() + sl.UnlockAll() + } else { + sl.Lock(id) + sl.Unlock(id) + } + }(i) + } + wg.Wait() +} + +func TestShardedLocks(t *testing.T) { + t.Run("Lock", func(t *testing.T) { + t.Parallel() + m := NewShardedLocks(5) + + m.Lock(1) + + ch := make(chan struct{}) + go func() { + time.Sleep(50 * time.Millisecond) + m.Unlock(1) + + close(ch) + }() + + m.Lock(1) + + select { + case <-ch: + case <-time.After(1 * time.Second): + require.Fail(t, "should be unlocked") + } + + m.Unlock(1) + }) + 
+ t.Run("Lock blocks LockAll", func(t *testing.T) { + t.Parallel() + m := NewShardedLocks(5) + + m.Lock(1) + + ch := make(chan struct{}) + go func() { + time.Sleep(50 * time.Millisecond) + m.Unlock(1) + + close(ch) + }() + + m.LockAll() + + select { + case <-ch: + case <-time.After(1 * time.Second): + require.Fail(t, "should be unlocked") + } + + m.UnlockAll() + }) + + t.Run("LockAll blocks Lock", func(t *testing.T) { + t.Parallel() + m := NewShardedLocks(5) + + m.LockAll() + + ch := make(chan struct{}) + go func() { + time.Sleep(50 * time.Millisecond) + m.UnlockAll() + + close(ch) + }() + + m.Lock(1) + + select { + case <-ch: + case <-time.After(1 * time.Second): + require.Fail(t, "should be unlocked") + } + + m.Unlock(1) + }) + + t.Run("LockAll blocks LockAll", func(t *testing.T) { + t.Parallel() + m := NewShardedLocks(5) + + m.LockAll() + + ch := make(chan struct{}) + go func() { + time.Sleep(50 * time.Millisecond) + m.UnlockAll() + + close(ch) + }() + + m.LockAll() + + select { + case <-ch: + case <-time.After(1 * time.Second): + require.Fail(t, "should be unlocked") + } + + m.UnlockAll() + }) + + t.Run("UnlockAll releases all locks", func(t *testing.T) { + t.Parallel() + m := NewShardedLocks(5) + + m.LockAll() + m.UnlockAll() + + m.Lock(1) + m.Unlock(1) + }) + + t.Run("unlock should wake up next waiting lock", func(t *testing.T) { + t.Parallel() + m := NewShardedLocks(2) + + m.Lock(1) + + ch1 := make(chan struct{}) + ch2 := make(chan struct{}) + + go func() { + defer close(ch1) + + m.Lock(1) + }() + + go func() { + defer close(ch2) + + time.Sleep(100 * time.Millisecond) + m.Lock(1) + }() + + time.Sleep(10 * time.Millisecond) + m.Unlock(1) + + <-ch1 + + m.Unlock(1) + + <-ch2 + + m.Unlock(1) + }) +} + +func TestShardedRWLocks_ParallelLocksAll(t *testing.T) { + // no asserts + // ensures parallel LockAll does not fall into deadlock + count := 10 + sl := NewDefaultShardedRWLocks() + + wg := new(sync.WaitGroup) + wg.Add(count) + for i := 0; i < count; i++ { + go 
func() { + defer wg.Done() + sl.LockAll() + sl.UnlockAll() + }() + } + wg.Wait() +} + +func TestShardedRWLocks_ParallelRLocksAll(t *testing.T) { + // no asserts + // ensures parallel RLockAll does not fall into deadlock + count := 10 + sl := NewDefaultShardedRWLocks() + + wg := new(sync.WaitGroup) + wg.Add(count) + for i := 0; i < count; i++ { + go func() { + defer wg.Done() + sl.RLockAll() + sl.RUnlockAll() + }() + } + wg.Wait() +} + +func TestShardedRWLocks_ParallelLocksAllAndRLocksAll(t *testing.T) { + // no asserts + // ensures parallel LockAll + RLockAll does not fall into deadlock + count := 50 + sl := NewDefaultShardedRWLocks() + + wg := new(sync.WaitGroup) + wg.Add(count) + for i := 0; i < count; i++ { + go func(i int) { + defer wg.Done() + if i%2 == 0 { + sl.LockAll() + sl.UnlockAll() + } else { + sl.RLockAll() + sl.RUnlockAll() + } + }(i) + } + wg.Wait() +} + +func TestShardedRWLocks_MixedLocks(t *testing.T) { + // no asserts + // ensures parallel LockAll + RLockAll + Lock + RLock does not fall into deadlock + count := 1000 + sl := NewShardedRWLocks(10) + + wg := new(sync.WaitGroup) + wg.Add(count) + for i := 0; i < count; i++ { + go func(i int) { + defer wg.Done() + id := uint64(i) + if i%5 == 0 { + if i%2 == 0 { + sl.LockAll() + sl.UnlockAll() + } else { + sl.RLockAll() + sl.RUnlockAll() + } + } else { + if i%2 == 0 { + sl.Lock(id) + sl.Unlock(id) + } else { + sl.RLock(id) + sl.RUnlock(id) + } + } + }(i) + } + wg.Wait() +} + +func TestShardedRWLocks(t *testing.T) { + t.Run("RLock", func(t *testing.T) { + t.Parallel() + m := NewShardedRWLocks(5) + + m.RLock(1) + m.RLock(1) + + m.RUnlock(1) + m.RUnlock(1) + }) + + t.Run("Lock", func(t *testing.T) { + t.Parallel() + m := NewShardedRWLocks(5) + + m.Lock(1) + + ch := make(chan struct{}) + go func() { + time.Sleep(50 * time.Millisecond) + m.Unlock(1) + + close(ch) + }() + + m.Lock(1) + + select { + case <-ch: + case <-time.After(1 * time.Second): + require.Fail(t, "should be unlocked") + } + + m.Unlock(1) + 
}) + + t.Run("RLock blocks Lock", func(t *testing.T) { + t.Parallel() + m := NewShardedRWLocks(5) + + m.RLock(1) + + ch := make(chan struct{}) + go func() { + time.Sleep(50 * time.Millisecond) + m.RUnlock(1) + + close(ch) + }() + + m.Lock(1) + + select { + case <-ch: + case <-time.After(1 * time.Second): + require.Fail(t, "should be unlocked") + } + + m.Unlock(1) + }) + + t.Run("Lock blocks RLock", func(t *testing.T) { + t.Parallel() + m := NewShardedRWLocks(5) + + m.Lock(1) + + ch := make(chan struct{}) + go func() { + time.Sleep(50 * time.Millisecond) + m.Unlock(1) + + close(ch) + }() + + m.RLock(1) + + select { + case <-ch: + case <-time.After(1 * time.Second): + require.Fail(t, "should be unlocked") + } + + m.RUnlock(1) + }) + + t.Run("Lock blocks LockAll", func(t *testing.T) { + t.Parallel() + m := NewShardedRWLocks(5) + + m.Lock(1) + + ch := make(chan struct{}) + go func() { + time.Sleep(50 * time.Millisecond) + m.Unlock(1) + + close(ch) + }() + + m.LockAll() + + select { + case <-ch: + case <-time.After(1 * time.Second): + require.Fail(t, "should be unlocked") + } + + m.UnlockAll() + }) + + t.Run("LockAll blocks Lock", func(t *testing.T) { + t.Parallel() + m := NewShardedRWLocks(5) + + m.LockAll() + + ch := make(chan struct{}) + go func() { + time.Sleep(50 * time.Millisecond) + m.UnlockAll() + + close(ch) + }() + + m.Lock(1) + + select { + case <-ch: + case <-time.After(1 * time.Second): + require.Fail(t, "should be unlocked") + } + + m.Unlock(1) + }) + + t.Run("LockAll blocks RLock", func(t *testing.T) { + t.Parallel() + m := NewShardedRWLocks(5) + + m.LockAll() + + ch := make(chan struct{}) + go func() { + time.Sleep(50 * time.Millisecond) + m.UnlockAll() + + close(ch) + }() + + m.RLock(1) + + select { + case <-ch: + case <-time.After(1 * time.Second): + require.Fail(t, "should be unlocked") + } + + m.RUnlock(1) + }) + + t.Run("LockAll blocks LockAll", func(t *testing.T) { + t.Parallel() + m := NewShardedRWLocks(5) + + m.LockAll() + + ch := make(chan 
struct{}) + go func() { + time.Sleep(50 * time.Millisecond) + m.UnlockAll() + + close(ch) + }() + + m.LockAll() + + select { + case <-ch: + case <-time.After(1 * time.Second): + require.Fail(t, "should be unlocked") + } + + m.UnlockAll() + }) + + t.Run("UnlockAll releases all locks", func(t *testing.T) { + t.Parallel() + m := NewShardedRWLocks(5) + + m.LockAll() + m.UnlockAll() + + m.Lock(1) + m.Unlock(1) + + m.RLock(1) + m.RUnlock(1) + }) + + t.Run("RLockAll blocks Lock", func(t *testing.T) { + t.Parallel() + m := NewShardedRWLocks(5) + + m.RLockAll() + + ch := make(chan struct{}) + go func() { + time.Sleep(50 * time.Millisecond) + m.RUnlockAll() + + close(ch) + }() + + m.Lock(1) + + select { + case <-ch: + case <-time.After(1 * time.Second): + require.Fail(t, "should be unlocked") + } + + m.Unlock(1) + }) + + t.Run("RLockAll doesn't block/unblock RLock", func(t *testing.T) { + t.Parallel() + m := NewShardedRWLocks(5) + + m.RLockAll() + m.RLock(1) + + m.RUnlockAll() + m.RUnlock(1) + }) + + t.Run("RLockAll blocks LockAll", func(t *testing.T) { + t.Parallel() + m := NewShardedRWLocks(5) + + m.RLockAll() + + ch := make(chan struct{}) + go func() { + time.Sleep(50 * time.Millisecond) + m.RUnlockAll() + + close(ch) + }() + + m.LockAll() + + select { + case <-ch: + case <-time.After(1 * time.Second): + require.Fail(t, "should be unlocked") + } + + m.UnlockAll() + }) + + t.Run("RLockAll doesn't block RLockAll", func(t *testing.T) { + t.Parallel() + m := NewShardedRWLocks(5) + + m.RLockAll() + m.RLockAll() + + m.RUnlockAll() + m.RUnlockAll() + }) + + t.Run("unlock should wake up next waiting lock", func(t *testing.T) { + t.Parallel() + m := NewShardedRWLocks(2) + + m.RLock(1) + + ch1 := make(chan struct{}) + ch2 := make(chan struct{}) + + go func() { + defer close(ch1) + + m.Lock(1) + }() + + go func() { + defer close(ch2) + + time.Sleep(100 * time.Millisecond) + m.Lock(1) + }() + + time.Sleep(10 * time.Millisecond) + m.RUnlock(1) + + <-ch1 + + m.Unlock(1) + + <-ch2 + + 
m.Unlock(1) + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/common/shared_gauge.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/common/shared_gauge.go new file mode 100644 index 0000000000000000000000000000000000000000..cdf9638d57574142f54717cf1882c790f17530e9 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/common/shared_gauge.go @@ -0,0 +1,67 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package common + +import ( + "sync" +) + +// SharedGauge is a thread-safe gauge that can be shared between multiple goroutines. +// It is used to track the number of running tasks, and allows to wait until all tasks are done. +type SharedGauge struct { + count int64 + cond *sync.Cond +} + +func NewSharedGauge() *SharedGauge { + return &SharedGauge{ + cond: sync.NewCond(&sync.Mutex{}), + } +} + +func (sc *SharedGauge) Incr() { + sc.cond.L.Lock() + defer sc.cond.L.Unlock() + + sc.count++ +} + +func (sc *SharedGauge) Decr() { + sc.cond.L.Lock() + defer sc.cond.L.Unlock() + + if sc.count == 0 { + panic("illegal gauge state: count cannot be negative") + } + + sc.count-- + + if sc.count == 0 { + sc.cond.Broadcast() + } +} + +func (sc *SharedGauge) Count() int64 { + sc.cond.L.Lock() + defer sc.cond.L.Unlock() + + return sc.count +} + +func (sc *SharedGauge) Wait() { + sc.cond.L.Lock() + defer sc.cond.L.Unlock() + + for sc.count != 0 { + sc.cond.Wait() + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/common/vector_id.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/common/vector_id.go new file mode 100644 index 0000000000000000000000000000000000000000..e5f041cfc2090b57cfd2dc81237026dc7ddef473 --- /dev/null +++ 
b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/common/vector_id.go @@ -0,0 +1,231 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package common + +import ( + "context" + "errors" + "fmt" + "sync" + + "github.com/weaviate/weaviate/adapters/repos/db/priorityqueue" + "github.com/weaviate/weaviate/entities/dto" +) + +var ErrWrongDimensions = errors.New("vector dimensions do not match the index dimensions") + +type VectorIndex interface { + AddBatch(ctx context.Context, ids []uint64, vector [][]float32) error + ValidateBeforeInsert(vector []float32) error +} + +type VectorIndexMulti interface { + AddMultiBatch(ctx context.Context, docIds []uint64, vectors [][][]float32) error + ValidateMultiBeforeInsert(vector [][]float32) error +} + +type VectorRecord interface { + Len() int + Validate(vectorIndex VectorIndex) error +} + +func AddVectorsToIndex(ctx context.Context, vectors []VectorRecord, vectorIndex VectorIndex) error { + // ensure the vector is not empty + if len(vectors) == 0 { + return errors.New("empty vectors") + } + switch vectors[0].(type) { + case *Vector[[]float32]: + ids := make([]uint64, len(vectors)) + vecs := make([][]float32, len(vectors)) + for i, v := range vectors { + ids[i] = v.(*Vector[[]float32]).ID + vecs[i] = v.(*Vector[[]float32]).Vector + } + return vectorIndex.AddBatch(ctx, ids, vecs) + case *Vector[[][]float32]: + ids := make([]uint64, len(vectors)) + vecs := make([][][]float32, len(vectors)) + for i, v := range vectors { + ids[i] = v.(*Vector[[][]float32]).ID + vecs[i] = v.(*Vector[[][]float32]).Vector + } + return vectorIndex.(VectorIndexMulti).AddMultiBatch(ctx, ids, vecs) + default: + return fmt.Errorf("unexpected vector type %T", vectors[0]) + } +} + +type Vector[T dto.Embedding] 
struct { + ID uint64 + Vector T +} + +func (v *Vector[T]) Len() int { + switch any(v.Vector).(type) { + case []float32: + return len(v.Vector) + case [][]float32: + vec := any(v.Vector).([][]float32) + if len(vec) > 0 { + return len(vec[0]) + } + return 0 + default: + return 0 + } +} + +func (v *Vector[T]) Validate(vectorIndex VectorIndex) error { + // ensure the vector is not empty + if len(v.Vector) == 0 { + return errors.New("empty vector") + } + // delegate the validation to the index + switch any(v.Vector).(type) { + case []float32: + return vectorIndex.ValidateBeforeInsert(any(v.Vector).([]float32)) + case [][]float32: + return vectorIndex.(VectorIndexMulti).ValidateMultiBeforeInsert(any(v.Vector).([][]float32)) + default: + return fmt.Errorf("unexpected vector type %T", v.Vector) + } +} + +type VectorSlice struct { + Slice []float32 + Mem []float32 + Buff8 []byte + Buff []byte +} + +type VectorUint64Slice struct { + Slice []uint64 +} + +type ( + VectorForID[T []float32 | []uint64 | float32 | byte | uint64] func(ctx context.Context, id uint64) ([]T, error) + MultipleVectorForID[T float32 | uint64 | byte] func(ctx context.Context, id uint64, relativeID uint64) ([]T, error) + TempVectorForID[T []float32 | float32] func(ctx context.Context, id uint64, container *VectorSlice) ([]T, error) + MultiVectorForID func(ctx context.Context, ids []uint64) ([][]float32, []error) +) + +type TargetVectorForID[T []float32 | float32 | byte | uint64] struct { + TargetVector string + VectorForIDThunk func(ctx context.Context, id uint64, targetVector string) ([]T, error) +} + +func (t TargetVectorForID[T]) VectorForID(ctx context.Context, id uint64) ([]T, error) { + return t.VectorForIDThunk(ctx, id, t.TargetVector) +} + +type TargetTempVectorForID[T []float32 | float32] struct { + TargetVector string + TempVectorForIDThunk func(ctx context.Context, id uint64, container *VectorSlice, targetVector string) ([]T, error) +} + +func (t TargetTempVectorForID[T]) TempVectorForID(ctx 
context.Context, id uint64, container *VectorSlice) ([]T, error) { + return t.TempVectorForIDThunk(ctx, id, container, t.TargetVector) +} + +type TempVectorUint64Pool struct { + pool *sync.Pool +} + +func NewTempUint64VectorsPool() *TempVectorUint64Pool { + return &TempVectorUint64Pool{ + pool: &sync.Pool{ + New: func() interface{} { + return &VectorUint64Slice{ + Slice: nil, + } + }, + }, + } +} + +func (pool *TempVectorUint64Pool) Get(capacity int) *VectorUint64Slice { + container := pool.pool.Get().(*VectorUint64Slice) + if cap(container.Slice) >= capacity { + container.Slice = container.Slice[:capacity] + } else { + container.Slice = make([]uint64, capacity) + } + return container +} + +func (pool *TempVectorUint64Pool) Put(container *VectorUint64Slice) { + pool.pool.Put(container) +} + +type TempVectorsPool struct { + pool *sync.Pool +} + +func NewTempVectorsPool() *TempVectorsPool { + return &TempVectorsPool{ + pool: &sync.Pool{ + New: func() interface{} { + return &VectorSlice{ + Mem: nil, + Buff8: make([]byte, 8), + Buff: nil, + Slice: nil, + } + }, + }, + } +} + +func (pool *TempVectorsPool) Get(capacity int) *VectorSlice { + container := pool.pool.Get().(*VectorSlice) + if len(container.Slice) >= capacity { + container.Slice = container.Mem[:capacity] + } else { + container.Mem = make([]float32, capacity) + container.Slice = container.Mem[:capacity] + } + return container +} + +func (pool *TempVectorsPool) Put(container *VectorSlice) { + pool.pool.Put(container) +} + +type PqMaxPool struct { + pool *sync.Pool +} + +func NewPqMaxPool(defaultCap int) *PqMaxPool { + return &PqMaxPool{ + pool: &sync.Pool{ + New: func() interface{} { + return priorityqueue.NewMax[any](defaultCap) + }, + }, + } +} + +func (pqh *PqMaxPool) GetMax(capacity int) *priorityqueue.Queue[any] { + pq := pqh.pool.Get().(*priorityqueue.Queue[any]) + if pq.Cap() < capacity { + pq.ResetCap(capacity) + } else { + pq.Reset() + } + + return pq +} + +func (pqh *PqMaxPool) Put(pq 
*priorityqueue.Queue[any]) { + pqh.pool.Put(pq) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/common/vector_util.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/common/vector_util.go new file mode 100644 index 0000000000000000000000000000000000000000..789f827d619ae1184af43a64028e3a7a2a89364e --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/common/vector_util.go @@ -0,0 +1,52 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package common + +func VectorsEqual(vecA, vecB []float32) bool { + return vectorsEqual(vecA, vecB, func(valueA, valueB float32) bool { + return valueA == valueB + }) +} + +func MultiVectorsEqual(vecA, vecB [][]float32) bool { + return vectorsEqual(vecA, vecB, VectorsEqual) +} + +// vectorsEqual verifies whether provided vectors are the same +// It considers nil vector as equal to vector of len = 0. 
+func vectorsEqual[T []C, C float32 | []float32](vecA, vecB T, valuesEqual func(valueA, valueB C) bool) bool { + if lena, lenb := len(vecA), len(vecB); lena != lenb { + return false + } else if lena == 0 { + return true + } + + for i := range vecA { + if !valuesEqual(vecA[i], vecB[i]) { + return false + } + } + return true +} + +func CalculateOptimalSegments(dims int) int { + if dims >= 2048 && dims%8 == 0 { + return dims / 8 + } else if dims >= 768 && dims%6 == 0 { + return dims / 6 + } else if dims >= 256 && dims%4 == 0 { + return dims / 4 + } else if dims%2 == 0 { + return dims / 2 + } + return dims +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/common/vector_util_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/common/vector_util_test.go new file mode 100644 index 0000000000000000000000000000000000000000..e49ce8d21f965daa57d3b16d8c418ca9e26b8051 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/common/vector_util_test.go @@ -0,0 +1,221 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package common + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestVectorUtil_Equal(t *testing.T) { + type testCase struct { + vecA []float32 + vecB []float32 + expectedEqual bool + } + + testCases := []testCase{ + { + vecA: nil, + vecB: nil, + expectedEqual: true, + }, + { + vecA: nil, + vecB: []float32{}, + expectedEqual: true, + }, + { + vecA: []float32{}, + vecB: nil, + expectedEqual: true, + }, + { + vecA: []float32{}, + vecB: []float32{}, + expectedEqual: true, + }, + { + vecA: []float32{1, 2, 3}, + vecB: []float32{1., 2., 3.}, + expectedEqual: true, + }, + { + vecA: []float32{1, 2, 3, 4}, + vecB: []float32{1., 2., 3.}, + expectedEqual: false, + }, + { + vecA: []float32{1, 2, 3}, + vecB: []float32{1., 2., 3., 4.}, + expectedEqual: false, + }, + { + vecA: []float32{}, + vecB: []float32{1., 2., 3.}, + expectedEqual: false, + }, + { + vecA: []float32{1, 2, 3}, + vecB: []float32{}, + expectedEqual: false, + }, + { + vecA: []float32{1, 2, 3}, + vecB: []float32{2, 3, 4}, + expectedEqual: false, + }, + } + + for i, tc := range testCases { + t.Run(fmt.Sprintf("#%d", i+1), func(t *testing.T) { + assert.Equal(t, tc.expectedEqual, VectorsEqual(tc.vecA, tc.vecB)) + }) + } +} + +func TestMultiVectorUtil_Equal(t *testing.T) { + type testCase struct { + vecA [][]float32 + vecB [][]float32 + expectedEqual bool + } + + testCases := []testCase{ + { + vecA: nil, + vecB: nil, + expectedEqual: true, + }, + { + vecA: nil, + vecB: [][]float32{}, + expectedEqual: true, + }, + { + vecA: [][]float32{}, + vecB: nil, + expectedEqual: true, + }, + { + vecA: [][]float32{}, + vecB: [][]float32{}, + expectedEqual: true, + }, + { + vecA: [][]float32{{1, 2, 3}}, + vecB: [][]float32{{1., 2., 3.}}, + expectedEqual: true, + }, + { + vecA: [][]float32{{1, 2, 3, 4}}, + vecB: [][]float32{{1., 2., 3.}}, + expectedEqual: false, + }, + { + vecA: [][]float32{{1, 2, 3}}, + vecB: [][]float32{{1., 2., 3., 4.}}, + expectedEqual: 
false, + }, + { + vecA: [][]float32{}, + vecB: [][]float32{{1., 2., 3.}}, + expectedEqual: false, + }, + { + vecA: [][]float32{{1, 2, 3}}, + vecB: [][]float32{}, + expectedEqual: false, + }, + { + vecA: [][]float32{{1, 2, 3}, {11, 22, 33}, {111, 222, 333}}, + vecB: [][]float32{{1, 2, 3}, {11, 22, 33}}, + expectedEqual: false, + }, + { + vecA: [][]float32{{1, 2, 3}, {11, 22, 33}}, + vecB: [][]float32{{1, 2, 3}, {11, 22, 33}, {111, 222, 333}}, + expectedEqual: false, + }, + { + vecA: [][]float32{{1, 2, 3}, {11, 22, 33}, {111, 222, 333}}, + vecB: [][]float32{{1, 2, 3}, {11, 22, 33}, {111, 222, 333}}, + expectedEqual: true, + }, + { + vecA: [][]float32{{1, 2, 3}, {11, 22, 33}, {111, 222, 333}}, + vecB: [][]float32{{11, 22, 33}, {111, 222, 333}, {1, 2, 3}}, + expectedEqual: false, + }, + } + + for i, tc := range testCases { + t.Run(fmt.Sprintf("#%d", i+1), func(t *testing.T) { + assert.Equal(t, tc.expectedEqual, MultiVectorsEqual(tc.vecA, tc.vecB)) + }) + } +} + +func Test_CalculateOptimalSegments(t *testing.T) { + type testCase struct { + dimensions int + expectedSegments int + } + + for _, tc := range []testCase{ + { + dimensions: 2048, + expectedSegments: 256, + }, + { + dimensions: 1536, + expectedSegments: 256, + }, + { + dimensions: 768, + expectedSegments: 128, + }, + { + dimensions: 512, + expectedSegments: 128, + }, + { + dimensions: 256, + expectedSegments: 64, + }, + { + dimensions: 125, + expectedSegments: 125, + }, + { + dimensions: 64, + expectedSegments: 32, + }, + { + dimensions: 27, + expectedSegments: 27, + }, + { + dimensions: 19, + expectedSegments: 19, + }, + { + dimensions: 2, + expectedSegments: 1, + }, + } { + segments := CalculateOptimalSegments(tc.dimensions) + assert.Equal(t, tc.expectedSegments, segments) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/binary_quantization.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/binary_quantization.go new file 
mode 100644 index 0000000000000000000000000000000000000000..8285c82f00d0fa45e3a8cda0e5a7c76e4bafed79 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/binary_quantization.go @@ -0,0 +1,73 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package compressionhelpers + +import ( + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer" +) + +type BinaryQuantizer struct { + distancer distancer.Provider +} + +func NewBinaryQuantizer(distancer distancer.Provider) BinaryQuantizer { + return BinaryQuantizer{ + distancer: distancer, + } +} + +func (bq BinaryQuantizer) Encode(vec []float32) []uint64 { + len := len(vec) + blocks := (len + 63) >> 6 // ceil(len / 64) + code := make([]uint64, blocks) + i := 0 + for b := range blocks { + var bits uint64 + for bit := uint64(1); bit != 0; bit <<= 1 { + if vec[i] < 0 { + bits |= bit + } + i++ + if i == len { + break + } + } + code[b] = bits + } + return code +} + +func (bq BinaryQuantizer) DistanceBetweenCompressedVectors(x, y []uint64) (float32, error) { + return distancer.HammingBitwise(x, y) +} + +type BQStats struct{} + +func (b BQStats) CompressionType() string { + return "bq" +} + +func (b BQStats) CompressionRatio(_ int) float64 { + // BQ compression: original size = dimensions * 4 bytes (float32) + // compressed size = ceil(dimensions / 64) * 8 bytes (uint64) + // For practical vector dimensions, the ratio approaches 32 + // For 64 dimensions: (64 * 4) / (1 * 8) = 32x + // For 128 dimensions: (128 * 4) / (2 * 8) = 32x + // For 1536 dimensions: (1536 * 4) / (24 * 8) = 32x + // For 1600 dimensions: (1600 * 4) / (25 * 8) = 32x + // The ratio is essentially constant at ~32x compression + return 32.0 +} + +func (bq 
*BinaryQuantizer) Stats() CompressionStats { + return BQStats{} +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/binary_quantization_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/binary_quantization_test.go new file mode 100644 index 0000000000000000000000000000000000000000..1424458e2ee16b5092adc44fc1ecdfeb324004bd --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/binary_quantization_test.go @@ -0,0 +1,190 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package compressionhelpers_test + +import ( + "fmt" + "math" + "math/rand" + "sync" + "testing" + "time" + + "github.com/sirupsen/logrus/hooks/test" + + "github.com/stretchr/testify/assert" + "github.com/weaviate/weaviate/adapters/repos/db/priorityqueue" + "github.com/weaviate/weaviate/adapters/repos/db/vector/compressionhelpers" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer" + testinghelpers "github.com/weaviate/weaviate/adapters/repos/db/vector/testinghelpers" +) + +var logger, _ = test.NewNullLogger() + +func TestBinaryQuantizerRecall(t *testing.T) { + k := 10 + distanceProvider := distancer.NewCosineDistanceProvider() + vectors, queryVecs := testinghelpers.RandomVecsFixedSeed(10_000, 100, 1536) + compressionhelpers.Concurrently(logger, uint64(len(vectors)), func(i uint64) { + vectors[i] = distancer.Normalize(vectors[i]) + }) + compressionhelpers.Concurrently(logger, uint64(len(queryVecs)), func(i uint64) { + queryVecs[i] = distancer.Normalize(queryVecs[i]) + }) + bq := compressionhelpers.NewBinaryQuantizer(nil) + + codes := make([][]uint64, len(vectors)) + compressionhelpers.Concurrently(logger, 
uint64(len(vectors)), func(i uint64) { + codes[i] = bq.Encode(vectors[i]) + }) + neighbors := make([][]uint64, len(queryVecs)) + compressionhelpers.Concurrently(logger, uint64(len(queryVecs)), func(i uint64) { + neighbors[i], _ = testinghelpers.BruteForce(logger, vectors, queryVecs[i], k, func(f1, f2 []float32) float32 { + d, _ := distanceProvider.SingleDist(f1, f2) + return d + }) + }) + correctedK := 200 + hits := uint64(0) + mutex := sync.Mutex{} + duration := time.Duration(0) + compressionhelpers.Concurrently(logger, uint64(len(queryVecs)), func(i uint64) { + before := time.Now() + query := bq.Encode(queryVecs[i]) + heap := priorityqueue.NewMax[any](correctedK) + for j := range codes { + d, _ := bq.DistanceBetweenCompressedVectors(codes[j], query) + if heap.Len() < correctedK || heap.Top().Dist > d { + if heap.Len() == correctedK { + heap.Pop() + } + heap.Insert(uint64(j), d) + } + } + ids := make([]uint64, correctedK) + for j := range ids { + ids[j] = heap.Pop().ID + } + mutex.Lock() + duration += time.Since(before) + hits += testinghelpers.MatchesInLists(neighbors[i][:k], ids) + mutex.Unlock() + }) + recall := float32(hits) / float32(k*len(queryVecs)) + latency := float32(duration.Microseconds()) / float32(len(queryVecs)) + fmt.Println(recall, latency) + assert.True(t, recall > 0.7) + + // Currently BQ does not expose any stats so just check struct exists + _, ok := bq.Stats().(compressionhelpers.BQStats) + assert.True(t, ok) +} + +func TestBinaryQuantizerChecksSize(t *testing.T) { + bq := compressionhelpers.NewBinaryQuantizer(nil) + _, err := bq.DistanceBetweenCompressedVectors(make([]uint64, 3), make([]uint64, 4)) + assert.NotNil(t, err) +} + +func extractBit(code []uint64, idx int) bool { + return code[idx/64]&(uint64(1)<<(idx%64)) != 0 +} + +func TestBinaryQuantizerBitAssignmenFixedValues(t *testing.T) { + test_bits := []struct { + value float32 + quantized_value bool + }{ + {-1.0, true}, + {1.0, false}, + {0.0, false}, + {float32(math.NaN()), false}, + 
{float32(math.Inf(1)), false}, + {float32(math.Inf(-1)), true}, + } + + bq := compressionhelpers.NewBinaryQuantizer(nil) + for _, b := range test_bits { + y := []float32{b.value} + code := bq.Encode(y) + assert.True(t, extractBit(code, 0) == b.quantized_value, + "Unexpected quantized value: %f should quantize to %t", + b.value, b.quantized_value) + } +} + +func TestBinaryQuantizerBitAssignmenRandomVector(t *testing.T) { + const seed = 42 + r := rand.New(rand.NewSource(seed)) + x := make([]float32, 1000) + for i := range len(x) { + x[i] = 2.0*r.Float32() - 1.0 + } + + bq := compressionhelpers.NewBinaryQuantizer(nil) + code := bq.Encode(x) + for i, v := range x { + assert.True(t, extractBit(code, i) == (v < 0), + "Unexpected quantized value: %f should quantize to %t", v, v < 0) + } + + for i := 1000; i < 1024; i++ { + assert.True(t, extractBit(code, i) == false, + "Remaining bits should be set to zero.") + } +} + +// Verify that using bit shifts produces the same results as the previous +// approach of computing powers of floats and converting to uint64. +func TestBinaryQuantizerBitShiftBackwardsCompatibility(t *testing.T) { + for j := range 64 { + pow_bit := uint64(math.Pow(2, float64(j%64))) + shift_bit := uint64(1) << (j % 64) + assert.True(t, pow_bit == shift_bit) + } +} + +func BenchmarkBinaryQuantization(b *testing.B) { + bq := compressionhelpers.NewBinaryQuantizer(nil) + const seed = 42 + r := rand.New(rand.NewSource(seed)) + for _, d := range []int{32, 64, 100, 256, 500, 1024, 4096} { + x := make([]float32, d) + for i := range d { + x[i] = 2.0*r.Float32() - 1.0 + } + b.Run(fmt.Sprintf("BinaryQuantization-dim-%d", d), func(b *testing.B) { + for i := 0; i < b.N; i++ { + // Even though we do not use the output of bq.Encode() the call + // does not seem to be optimized away by the compiler. + // TODO: Use b.Loop() instead when we move to Go 1.24. 
+ bq.Encode(x) + } + }) + } +} + +func BenchmarkBQDistance(b *testing.B) { + dimensions := []int{64, 128, 256, 512, 1024, 1536, 2048} + for _, dim := range dimensions { + quantizer := compressionhelpers.NewBinaryQuantizer(distancer.NewDotProductProvider()) + q, x := correlatedVectors(dim, 0.5) + cx := quantizer.Encode(x) + distancer := quantizer.NewDistancer(q) + b.Run(fmt.Sprintf("d%d", dim), func(b *testing.B) { + for b.Loop() { + distancer.Distance(cx) + } + b.ReportMetric((float64(b.N)/1e6)/float64(b.Elapsed().Seconds()), "m.ops/sec") + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/binary_rotational_quantization.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/binary_rotational_quantization.go new file mode 100644 index 0000000000000000000000000000000000000000..464b488cd9f302e475967cf17484ccf61e6ae13b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/binary_rotational_quantization.go @@ -0,0 +1,511 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package compressionhelpers + +import ( + "encoding/binary" + "fmt" + "math" + "math/bits" + "math/rand/v2" + "strings" + + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer" +) + +const ( + minCodeBits = 256 + rotationRounds = 3 +) + +type BinaryRotationalQuantizer struct { + inputDim uint32 + rotation *FastRotation + distancer distancer.Provider + rounding []float32 + l2 float32 + cos float32 +} + +func NewBinaryRotationalQuantizer(inputDim int, seed uint64, distancer distancer.Provider) *BinaryRotationalQuantizer { + // Pad the input if it is low-dimensional. 
+ if inputDim < minCodeBits { + inputDim = minCodeBits + } + // For the rotated point to look fully random we need 4 or 5 rotational + // rounds, but since we only care about the sign of the entries and not the + // complete distribution, it seems like 3 rounds suffice. + rotation := NewFastRotation(inputDim, rotationRounds, seed) + + cos, l2, err := distancerIndicatorsAndError(distancer) + if err != nil { + return nil + } + // Randomized rounding for the query quantization to make the estimator + // unbiased. It may produce better recall to not use randomized rounding + // since adding the random noise increases the quantization error. With + // 8-bit RQ we are not using randomized rounding. + rounding := make([]float32, rotation.OutputDim) + rng := rand.New(rand.NewPCG(seed, 0x4f8ebf70e130707f)) + for i := range rounding { + rounding[i] = rng.Float32() + } + + rq := &BinaryRotationalQuantizer{ + inputDim: uint32(inputDim), + rotation: rotation, + distancer: distancer, + rounding: rounding, + l2: l2, + cos: cos, + } + return rq +} + +func RestoreBinaryRotationalQuantizer(inputDim int, outputDim int, rounds int, swaps [][]Swap, signs [][]float32, rounding []float32, distancer distancer.Provider) (*BinaryRotationalQuantizer, error) { + cos, l2, err := distancerIndicatorsAndError(distancer) + if err != nil { + return nil, err + } + rq := &BinaryRotationalQuantizer{ + inputDim: uint32(inputDim), + rotation: RestoreFastRotation(outputDim, rounds, swaps, signs), + distancer: distancer, + rounding: rounding, + cos: cos, + l2: l2, + } + return rq, nil +} + +func putFloat32Upper(v uint64, x float32) uint64 { + const upper32 uint64 = ((1 << 32) - 1) << 32 + return (v &^ upper32) | uint64(math.Float32bits(x))<<32 +} + +func getFloat32Upper(v uint64) float32 { + return math.Float32frombits(uint32(v >> 32)) +} + +func putFloat32Lower(v uint64, x float32) uint64 { + const lower32 uint64 = (1 << 32) - 1 + return (v &^ lower32) | uint64(math.Float32bits(x)) +} + +func 
getFloat32Lower(v uint64) float32 { + return math.Float32frombits(uint32(v)) +} + +// RaBitQ 1-bit code. Instead of normalizing explicitly prior to rotating and +// quantizing we can just bake the normalization factor and adjustment of the +// estimator into "step". Suppose that x is a randomly rotated vector. Then we +// quantize the ith entry of x by taking its sign: +// quantized x_i = step * sign(x_i) = (/(sum_i |x_i|)) sign(x_i). +// See the first RaBitQ paper for details https://arxiv.org/abs/2405.12497. +type RQOneBitCode []uint64 + +// /(sum_i |x_i|) +func (c RQOneBitCode) Step() float32 { + return getFloat32Lower(c[0]) +} + +func (c RQOneBitCode) setStep(x float32) { + c[0] = putFloat32Lower(c[0], x) +} + +// Euclidean norm squared. +func (c RQOneBitCode) SquaredNorm() float32 { + return getFloat32Upper(c[0]) +} + +// Convenience function. It may be more performant to store the norm instead of +// the squared norm since we use it in DistanceBetweenCompressedVectors. +func (c RQOneBitCode) Norm() float32 { + return float32(math.Sqrt(float64(c.SquaredNorm()))) +} + +func (c RQOneBitCode) setSquaredNorm(x float32) { + c[0] = putFloat32Upper(c[0], x) +} + +const oneBitFieldWords = 1 + +func (c RQOneBitCode) Bits() []uint64 { + return c[oneBitFieldWords:] +} + +func (c RQOneBitCode) Dimension() int { + return 64 * (len(c) - oneBitFieldWords) +} + +func NewRQOneBitCode(d int) RQOneBitCode { + return make([]uint64, oneBitFieldWords+d/64) +} + +func (c RQOneBitCode) String() string { + return fmt.Sprintf("RQOneBitCode{Step: %.4f, SquaredNorm: %.4f, Bits[0]: %064b", + c.Step(), c.SquaredNorm(), c.Bits()[0]) +} + +func (rq *BinaryRotationalQuantizer) Encode(x []float32) []uint64 { + rx := rq.rotation.Rotate(x) + d := len(rx) + code := NewRQOneBitCode(d) + blocks := d / 64 + var l2NormSquared float32 + var l1Norm float32 + i := 0 + for b := range blocks { + var bits uint64 + for bit := uint64(1); bit != 0; bit <<= 1 { + if rx[i] > 0 { + bits |= bit + l1Norm += rx[i] 
+ } else { + l1Norm += -rx[i] + } + l2NormSquared += rx[i] * rx[i] + i++ + } + code.Bits()[b] = bits + } + if l1Norm == 0 { + return code + } + code.setSquaredNorm(l2NormSquared) + code.setStep(l2NormSquared / l1Norm) + return code +} + +// Restore -> NewCompressedQuantizerDistancer -> NewDistancerFromID -> reassignNeighbor in when deleting +// distancer for PQ,SQ etc. use the compressed vector, in this case we can't use it because we have different encoding for the query and the data. +func (rq *BinaryRotationalQuantizer) Restore(b []uint64) []float32 { + // When restoring a float32 from the binary encoding we use the mapping: + // 0: -||x||/sqrt(D) + // 1: ||x||/sqrt(D) + code := RQOneBitCode(b) + dim := code.Dimension() + avgNorm := code.Norm() / float32(math.Sqrt(float64(dim))) + x := make([]float32, dim) + bits := code.Bits() + for i := range dim { + block := i / 64 + bit := uint(i) % 64 + if (bits[block] & (1 << bit)) != 0 { + x[i] = avgNorm + } else { + x[i] = -avgNorm + } + } + return x +} + +// The binary encoding of q. We quantize q using k bits using the format: +// +// q_i = step * (s_i,k-1 * 2^(k-1) + s_i,k-2 * 2^(k-2) + ... + s_i,0) +// +// where s_i,j is the j-th bit of the k-bit integer quantization of q_i. We +// interpret s_i,j as a sign indicator so that {0,1} corresponds to {-1, +1}. +// For k = 3 this means that we can generate numbers in the set {-7, -5, -3, -1, +// 1, 3, 5, 7}. We use the step variable to scale this interval so that it +// includes the largest absolute value. 
+type RQMultiBitCode struct { + Dimension int + SquaredNorm float32 + Step float32 + bits0 []uint64 + bits1 []uint64 + bits2 []uint64 + bits3 []uint64 + bits4 []uint64 +} + +func (c RQMultiBitCode) String() string { + var sb strings.Builder + sb.WriteString(fmt.Sprintf("bits0[0]: %064b, ", c.bits0[0])) + return fmt.Sprintf("RQMultiBitCode{Step: %.4f, SquaredNorm: %.4f, bits: %s", + c.Step, c.SquaredNorm, sb.String()) +} + +func maxAbs(rx []float32) float32 { + var max float32 + for _, v := range rx { + if v < 0 { + v = -v + } + if v > max { + max = v + } + } + return max +} + +// TODO: Handle corner cases as we do for 8-bit RQ. +func (rq *BinaryRotationalQuantizer) encodeQuery(x []float32) RQMultiBitCode { + rx := rq.rotation.Rotate(x) + abs := maxAbs(rx) + if abs == 0 { + // The input vector is the zero vector. + return RQMultiBitCode{} + } + step := abs / 31 + // Encode each rotated entry to an unsigned integer and extract the bits. + blocks := len(rx) >> 6 + bits0 := make([]uint64, blocks) + bits1 := make([]uint64, blocks) + bits2 := make([]uint64, blocks) + bits3 := make([]uint64, blocks) + bits4 := make([]uint64, blocks) + var squaredNorm float32 + i := 0 + for b := range blocks { + var b0, b1, b2, b3, b4 uint64 + for bit := uint64(1); bit != 0; bit <<= 1 { + squaredNorm += rx[i] * rx[i] + c := uint64(((rx[i] + abs) / (2 * step)) + rq.rounding[i]) + if c&1 != 0 { + b0 |= bit + } + if c&2 != 0 { + b1 |= bit + } + if c&4 != 0 { + b2 |= bit + } + if c&8 != 0 { + b3 |= bit + } + if c&16 != 0 { + b4 |= bit + } + i++ + } + bits0[b] = b0 + bits1[b] = b1 + bits2[b] = b2 + bits3[b] = b3 + bits4[b] = b4 + } + return RQMultiBitCode{ + Dimension: len(rx), + SquaredNorm: squaredNorm, + Step: step, + bits0: bits0, + bits1: bits1, + bits2: bits2, + bits3: bits3, + bits4: bits4, + } +} + +type BinaryRQDistancer struct { + query []float32 + distancer distancer.Provider + rq *BinaryRotationalQuantizer + cos float32 + l2 float32 + cq RQMultiBitCode +} + +func (d 
*BinaryRQDistancer) QueryCode() RQMultiBitCode { + return d.cq +} + +func (rq *BinaryRotationalQuantizer) NewDistancer(q []float32) *BinaryRQDistancer { + var cos float32 + if rq.distancer.Type() == "cosine-dot" { + cos = 1.0 + } + var l2 float32 + if rq.distancer.Type() == "l2-squared" { + l2 = 1.0 + } + return &BinaryRQDistancer{ + query: q, + distancer: rq.distancer, + rq: rq, + cos: cos, + l2: l2, + cq: rq.encodeQuery(q), + } +} + +func HammingDist(x, y []uint64) int { + var count int + for i := range x { + count += bits.OnesCount64(x[i] ^ y[i]) + } + return count +} + +// Exported in case we need to use it later. +func HammingDistSIMD(x, y []uint64) float32 { + return hammingBitwiseImpl(x, y) +} + +// Notes: +// SIMD only seems to outperform bits.OnesCount64 once the dimensionality is +// greater than ~512, at least on an M4 (ARM neon). However adding the +// if-statement to determine whether to use SIMD also comes at a cost. +// For binary quantization we always use SIMD, so maybe that is the way to go. 
// Distance estimates the metric distance between the encoded query d.cq and
// the 1-bit data code x. The dot product is reconstructed from per-plane
// Hamming distances: starting from 31*D (the value when all bit planes
// agree), each mismatching bit in plane j subtracts 2^(j+1). Below the
// threshold the scalar popcount path with integer arithmetic is used; above
// it the SIMD path accumulates in float32 (see the performance notes above).
// The l2/cos indicator fields select the metric-specific correction terms.
func (d *BinaryRQDistancer) Distance(x []uint64) (float32, error) {
	cx := RQOneBitCode(x)
	bits := cx.Bits()
	const hammingDistSIMDThreshold = 512
	if d.cq.Dimension < hammingDistSIMDThreshold {
		// Scalar path: exact integer accumulation, converted to float once.
		dot := 31 * d.cq.Dimension
		dot -= HammingDist(d.cq.bits0, bits) << 1
		dot -= HammingDist(d.cq.bits1, bits) << 2
		dot -= HammingDist(d.cq.bits2, bits) << 3
		dot -= HammingDist(d.cq.bits3, bits) << 4
		dot -= HammingDist(d.cq.bits4, bits) << 5
		dotEstimate := d.cq.Step * cx.Step() * float32(dot)
		return d.l2*(cx.SquaredNorm()+d.cq.SquaredNorm) + d.cos - (1.0+d.l2)*dotEstimate, nil
	}
	// SIMD path: same formula with float32 accumulation.
	dot := float32(31 * d.cq.Dimension)
	dot -= 2 * HammingDistSIMD(d.cq.bits0, bits)
	dot -= 4 * HammingDistSIMD(d.cq.bits1, bits)
	dot -= 8 * HammingDistSIMD(d.cq.bits2, bits)
	dot -= 16 * HammingDistSIMD(d.cq.bits3, bits)
	dot -= 32 * HammingDistSIMD(d.cq.bits4, bits)
	dotEstimate := d.cq.Step * cx.Step() * dot
	return d.l2*(cx.SquaredNorm()+d.cq.SquaredNorm) + d.cos - (1.0+d.l2)*dotEstimate, nil
}

// DistanceToFloat computes the distance to an uncompressed vector. When the
// original query floats are still available the exact metric is used;
// otherwise (query known only via its code) x is encoded and the compressed
// estimate is returned.
func (d *BinaryRQDistancer) DistanceToFloat(x []float32) (float32, error) {
	if len(d.query) > 0 {
		return d.distancer.SingleDist(d.query, x)
	}
	cx := d.rq.Encode(x)
	return d.Distance(cx)
}

// DistanceBetweenCompressedVectors is used in:
// 1. Distance in compression_distance_bag.go -> selectNeighborsHeuristic in compression_distance_bag.go
// 2. DistanceBetweenCompressedVectorsFromIDs -> distBetweenNodes -> connectNeighborAtLevel
// TODO: Speed this up by tabulating the computation involving Cosine and storing the norm instead of the squared norm on the vectors.
+func (brq *BinaryRotationalQuantizer) DistanceBetweenCompressedVectors(x, y []uint64) (float32, error) { + cx, cy := RQOneBitCode(x), RQOneBitCode(y) + fractionDiff := float64(HammingDist(cx.Bits(), cy.Bits())) / float64(cx.Dimension()) + cosEstimate := math.Cos(math.Pi * fractionDiff) + dotEstimate := float32(math.Sqrt(float64(cx.SquaredNorm())) * math.Sqrt(float64(cy.SquaredNorm())) * cosEstimate) + return brq.l2*(cx.SquaredNorm()+cy.SquaredNorm()) + brq.cos - (1.0+brq.l2)*dotEstimate, nil +} + +func (brq *BinaryRotationalQuantizer) CompressedBytes(compressed []uint64) []byte { + slice := make([]byte, len(compressed)*8) + for i := range compressed { + binary.LittleEndian.PutUint64(slice[i*8:], compressed[i]) + } + return slice +} + +func (brq *BinaryRotationalQuantizer) FromCompressedBytes(compressed []byte) []uint64 { + l := len(compressed) / 8 + if len(compressed)%8 != 0 { + l++ + } + slice := make([]uint64, l) + + for i := range slice { + slice[i] = binary.LittleEndian.Uint64(compressed[i*8:]) + } + return slice +} + +func (brq *BinaryRotationalQuantizer) FromCompressedBytesWithSubsliceBuffer(compressed []byte, buffer *[]uint64) []uint64 { + l := len(compressed) / 8 + if len(compressed)%8 != 0 { + l++ + } + + if len(*buffer) < l { + *buffer = make([]uint64, 1000*l) + } + + // take from end so we can address the start of the buffer + slice := (*buffer)[len(*buffer)-l:] + *buffer = (*buffer)[:len(*buffer)-l] + + for i := range slice { + slice[i] = binary.LittleEndian.Uint64(compressed[i*8:]) + } + return slice +} + +// Used when we delete nodes to reconnect the HNSW graph. +// Performs distance computations using DistanceBetweenCompressedVectors. 
// BinaryRQCompressedDistancer computes distances from a vector known only by
// its 1-bit code (e.g. during graph repair after deletes), delegating to
// DistanceBetweenCompressedVectors.
type BinaryRQCompressedDistancer struct {
	brq *BinaryRotationalQuantizer
	cq  RQOneBitCode
}

// Distance returns the estimated distance between the stored code and cx.
func (d *BinaryRQCompressedDistancer) Distance(cx []uint64) (float32, error) {
	return d.brq.DistanceBetweenCompressedVectors(d.cq, cx)
}

// DistanceToFloat encodes x and compares code-to-code (the original floats of
// the stored vector are not available here).
func (d *BinaryRQCompressedDistancer) DistanceToFloat(x []float32) (float32, error) {
	return d.brq.DistanceBetweenCompressedVectors(d.cq, d.brq.Encode(x))
}

// NewCompressedQuantizerDistancer wraps an existing 1-bit code in a distancer.
func (brq *BinaryRotationalQuantizer) NewCompressedQuantizerDistancer(c []uint64) quantizerDistancer[uint64] {
	return &BinaryRQCompressedDistancer{
		brq: brq,
		cq:  c,
	}
}

// NewQuantizerDistancer builds a distancer from an uncompressed query vector.
func (brq *BinaryRotationalQuantizer) NewQuantizerDistancer(vec []float32) quantizerDistancer[uint64] {
	return brq.NewDistancer(vec)
}

// ReturnQuantizerDistancer is a no-op: BRQ distancers carry no pooled state.
func (brq *BinaryRotationalQuantizer) ReturnQuantizerDistancer(distancer quantizerDistancer[uint64]) {
}

// BRQData is the persisted state needed to restore a quantizer from the
// commit log.
type BRQData struct {
	InputDim uint32
	Rotation FastRotation
	Rounding []float32
}

// PersistCompression writes the quantizer state to the commit log.
func (brq *BinaryRotationalQuantizer) PersistCompression(logger CommitLogger) {
	logger.AddBRQCompression(BRQData{
		InputDim: brq.inputDim,
		Rotation: *brq.rotation,
		Rounding: brq.rounding,
	})
}

// BinaryRQStats reports compression statistics for 1-bit RQ.
type BinaryRQStats struct {
	dataBits  uint32
	queryBits uint32
}

// NOTE(review): returns "rq" like the non-binary RQ variant — confirm the two
// are meant to be indistinguishable in stats output.
func (brq BinaryRQStats) CompressionType() string {
	return "rq"
}

func (brq BinaryRQStats) CompressionRatio(dimensionality int) float64 {
	// RQ compression: original size = inputDim * 4 bytes (float32)
	// compressed size = 8 bytes (metadata) + outputDim * 1 bit (compressed data)
	// where outputDim is typically the same as inputDim after rotation
	originalSize := dimensionality * 4
	compressedSize := 8 + (dimensionality / 8) // 8 bytes metadata + 1 bit per dimension
	return float64(originalSize) / float64(compressedSize)
}

// Stats reports 1 bit per data dimension and 5 bits per query dimension.
func (brq *BinaryRotationalQuantizer) Stats() CompressionStats {
	return BinaryRQStats{
		dataBits:  1,
		queryBits: uint32(5),
	}
}
diff --git 
a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/binary_rotational_quantization_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/binary_rotational_quantization_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..58dc5972f8ffdd4936be9174f04812c44ca0ab86
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/binary_rotational_quantization_test.go
@@ -0,0 +1,279 @@
//                           _       _
// __      _____  __ ___   ___  __ _| |_ ___
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
//  \ V  V /  __/ (_| |\ V /| | (_| | ||  __/
//   \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
//
//  Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
//
//  CONTACT: hello@weaviate.io
//

package compressionhelpers_test

import (
	"fmt"
	"math"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/weaviate/weaviate/adapters/repos/db/vector/compressionhelpers"
	"github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer"
)

// TestBRQDistanceEstimates checks that the query-to-code distance estimate
// stays within a dimension-dependent tolerance of the true metric for
// random correlated vector pairs across all supported metrics.
func TestBRQDistanceEstimates(t *testing.T) {
	metrics := []distancer.Provider{
		distancer.NewCosineDistanceProvider(),
		distancer.NewDotProductProvider(),
		distancer.NewL2SquaredProvider(),
	}
	rng := newRNG(123)
	n := 100

	// What kind of error do we expect from 1-bit RQ under each of the different metrics?
	// We know that the absolute error of the dot product of unit vectors decreases by 1/SQRT(D)
	// As we scale the vectors the absolute error should scale with the product of the scaling factors.
	for _, m := range metrics {
		for range n {
			// Create two unit vectors with a uniform random correlation
			// between -1 and 1 and scale them randomly.
			dim := 2 + rng.IntN(2000)
			q, x := correlatedVectors(dim, 1-2*rng.Float32())
			var sq, sx float32 = 1.0, 1.0
			if m.Type() != "cosine-dot" {
				var scaleFactor float32 = 1.0
				if rng.Float32() < 0.5 {
					scaleFactor = 1e3
				}
				sq = scaleFactor * rng.Float32()
				sx = scaleFactor * rng.Float32()
			}
			scale(q, sq)
			scale(x, sx)

			rq := compressionhelpers.NewBinaryRotationalQuantizer(dim, rng.Uint64(), m)
			distancer := rq.NewDistancer(q)
			cx := rq.Encode(x)
			distancerEstimate, _ := distancer.Distance(cx)
			target, _ := m.SingleDist(q, x)

			// Tolerance scales as 1/sqrt(dim) and with the scaling factors.
			baseEps := 2.3 / math.Sqrt(float64(dim))
			eps := float64(sq*sx) * baseEps
			if m.Type() == "l2-squared" {
				eps *= 2
			}
			err := absDiff(distancerEstimate, target)
			assert.Less(t, err, eps,
				"Metric: %s, Dimension: %d, Target: %.4f, Estimate: %.4f, Estimate/Target: %.4f, Error: %.4f, Eps: %.4f",
				m.Type(), dim, target, distancerEstimate, distancerEstimate/target, err, eps)
		}
	}
}

// TestBRQCompressedDistanceEstimates is the code-to-code analogue of the test
// above (both sides 1-bit encoded), with a correspondingly looser tolerance.
func TestBRQCompressedDistanceEstimates(t *testing.T) {
	metrics := []distancer.Provider{
		distancer.NewCosineDistanceProvider(),
		distancer.NewDotProductProvider(),
		distancer.NewL2SquaredProvider(),
	}
	rng := newRNG(123)
	n := 100

	// What kind of error do we expect from 1-bit RQ under each of the different metrics?
	// We know that the absolute error of the dot product of unit vectors decreases by 1/SQRT(D)
	// As we scale the vectors the absolute error should scale with the product of the scaling factors.
	for _, m := range metrics {
		for range n {
			// Create two unit vectors with a uniform random correlation
			// between -1 and 1 and scale them randomly.
			dim := 2 + rng.IntN(2000)
			q, x := correlatedVectors(dim, 1-2*rng.Float32())
			var sq, sx float32 = 1.0, 1.0
			if m.Type() != "cosine-dot" {
				var scaleFactor float32 = 1.0
				if rng.Float32() < 0.5 {
					scaleFactor = 1e3
				}
				sq = scaleFactor * rng.Float32()
				sx = scaleFactor * rng.Float32()
			}
			scale(q, sq)
			scale(x, sx)

			rq := compressionhelpers.NewBinaryRotationalQuantizer(dim, rng.Uint64(), m)
			cq := rq.Encode(q)
			cx := rq.Encode(x)
			estimate, _ := rq.DistanceBetweenCompressedVectors(cq, cx)
			target, _ := m.SingleDist(q, x)

			baseEps := 3.0 / math.Sqrt(float64(dim))
			eps := float64(sq*sx) * baseEps
			if m.Type() == "l2-squared" {
				eps *= 2
			}
			err := absDiff(estimate, target)
			assert.Less(t, err, eps,
				"Metric: %s, Dimension: %d, Target: %.4f, Estimate: %.4f, Estimate/Target: %.4f, Error: %.4f, Eps: %.4f",
				m.Type(), dim, target, estimate, estimate/target, err, eps)
		}
	}
}

// The absolute error when estimating the dot product between unit vectors
// should scale according to 1/SQRT(D). Therefore we should roughly be seeing
// that quadrupling the dimensionality halves the error.
//
// The average bias should be small compared to the average absolute error. The
// sign of the average bias should also appear random (the estimator should be
// unbiased in expectation).
func BenchmarkBRQError(b *testing.B) {
	dimensions := []int{64, 128, 256, 512, 1024, 1536, 2048}
	for _, dim := range dimensions {
		var absErr float64
		var bias float64
		b.Run(fmt.Sprintf("d%d", dim), func(b *testing.B) {
			rng := newRNG(43)
			for b.Loop() {
				alpha := 1 - 2*rng.Float32()
				q, x := correlatedVectors(dim, float32(alpha))
				quantizer := compressionhelpers.NewBinaryRotationalQuantizer(dim, rng.Uint64(), distancer.NewDotProductProvider())
				distancer := quantizer.NewDistancer(q)
				cx := quantizer.Encode(x)
				dotEstimate, _ := distancer.Distance(cx)
				dotEstimate = -dotEstimate // The distancer estimate is the negative dot product.
				absErr += math.Abs(float64(dotEstimate - alpha))
				bias += float64(dotEstimate - alpha)
			}
			b.ReportMetric(absErr/float64(b.N), "avg.err")
			b.ReportMetric(bias/float64(b.N), "avg.bias")
		})
	}
}

// BenchmarkBRQCompressedError measures error/bias of the code-to-code
// estimator across dimensions and target correlations.
func BenchmarkBRQCompressedError(b *testing.B) {
	dimensions := []int{64, 128, 256, 512, 1024, 1536, 2048}
	correlation := []float32{-0.9, -0.7, -0.5, -0.25, 0.0, 0.25, 0.5, 0.7, 0.9}
	for _, dim := range dimensions {
		for _, alpha := range correlation {
			var absErr float64
			var bias float64
			b.Run(fmt.Sprintf("d:%d-alpha:%.2f", dim, alpha), func(b *testing.B) {
				rng := newRNG(43)
				for b.Loop() {
					q, x := correlatedVectors(dim, float32(alpha))
					quantizer := compressionhelpers.NewBinaryRotationalQuantizer(dim, rng.Uint64(), distancer.NewDotProductProvider())
					cx := quantizer.Encode(x)
					cq := quantizer.Encode(q)
					dotEstimate, _ := quantizer.DistanceBetweenCompressedVectors(cq, cx)
					dotEstimate = -dotEstimate // The distancer estimate is the negative dot product.
					absErr += math.Abs(float64(dotEstimate - alpha))
					bias += float64(dotEstimate - alpha)
				}
				b.ReportMetric(absErr/float64(b.N), "avg.err")
				b.ReportMetric(bias/float64(b.N), "avg.bias")
			})
		}
	}
}

// BenchmarkBRQEncode measures 1-bit encoding throughput.
func BenchmarkBRQEncode(b *testing.B) {
	dimensions := []int{256, 1024, 1536}
	rng := newRNG(42)
	for _, dim := range dimensions {
		quantizer := compressionhelpers.NewBinaryRotationalQuantizer(dim, rng.Uint64(), distancer.NewDotProductProvider())
		x := make([]float32, dim)
		x[0] = 1
		b.Run(fmt.Sprintf("d%d", dim), func(b *testing.B) {
			for b.Loop() {
				quantizer.Encode(x)
			}
			b.ReportMetric(float64(b.Elapsed().Microseconds())/float64(b.N), "us/op")
			b.ReportMetric(float64(b.N)/float64(b.Elapsed().Seconds()), "ops/sec")
		})
	}
}

// BenchmarkBRQNewDistancer measures query-encoding (distancer setup) cost.
func BenchmarkBRQNewDistancer(b *testing.B) {
	dimensions := []int{128, 256, 1024, 1536}
	rng := newRNG(42)
	for _, dim := range dimensions {
		quantizer := compressionhelpers.NewBinaryRotationalQuantizer(dim, rng.Uint64(), distancer.NewDotProductProvider())
		q := make([]float32, dim)
		q[0] = 1
		b.Run(fmt.Sprintf("d%d", dim), func(b *testing.B) {
			for b.Loop() {
				quantizer.NewDistancer(q)
			}
			b.ReportMetric(float64(b.Elapsed().Microseconds())/float64(b.N), "us/op")
			b.ReportMetric(float64(b.N)/float64(b.Elapsed().Seconds()), "ops/sec")
		})
	}
}

// BenchmarkBRQDistance measures query-to-code distance throughput per metric.
func BenchmarkBRQDistance(b *testing.B) {
	rng := newRNG(42)
	dimensions := []int{128, 256, 512, 768, 1024, 1536, 2048}
	metrics := []distancer.Provider{
		distancer.NewCosineDistanceProvider(),
		distancer.NewDotProductProvider(),
		distancer.NewL2SquaredProvider(),
	}
	for _, dim := range dimensions {
		for _, m := range metrics {
			quantizer := compressionhelpers.NewBinaryRotationalQuantizer(dim, rng.Uint64(), m)
			q, x := correlatedVectors(dim, 0.5)
			cx := quantizer.Encode(x)
			distancer := quantizer.NewDistancer(q)
			b.Run(fmt.Sprintf("d%d-%s", dim, m.Type()), func(b *testing.B) {
				for b.Loop() {
					distancer.Distance(cx)
				}
				b.ReportMetric((float64(b.N)/1e6)/float64(b.Elapsed().Seconds()), "m.ops/sec")
			})
		}
	}
}

// BenchmarkBRQCompressedDistance measures code-to-code distance throughput.
func BenchmarkBRQCompressedDistance(b *testing.B) {
	rng := newRNG(42)
	dimensions := []int{128, 256, 512, 768, 1024, 1536, 2048}
	metrics := []distancer.Provider{
		distancer.NewCosineDistanceProvider(),
		distancer.NewDotProductProvider(),
		distancer.NewL2SquaredProvider(),
	}
	for _, dim := range dimensions {
		for _, m := range metrics {
			quantizer := compressionhelpers.NewBinaryRotationalQuantizer(dim, rng.Uint64(), m)
			q, x := correlatedVectors(dim, 0.5)
			cq := quantizer.Encode(q)
			cx := quantizer.Encode(x)
			b.Run(fmt.Sprintf("d%d-%s", dim, m.Type()), func(b *testing.B) {
				for b.Loop() {
					quantizer.DistanceBetweenCompressedVectors(cq, cx)
				}
				b.ReportMetric((float64(b.N)/1e6)/float64(b.Elapsed().Seconds()), "m.ops/sec")
			})
		}
	}
}

// BenchmarkSignDot compares the pure-Go popcount Hamming distance against the
// SIMD implementation across code widths.
func BenchmarkSignDot(b *testing.B) {
	words := []int{4, 8, 16, 24, 32}
	for _, w := range words {
		bits := 64 * w
		x, y := make([]uint64, w), make([]uint64, w)
		b.Run(fmt.Sprintf("Go-d%d", bits), func(b *testing.B) {
			for b.Loop() {
				compressionhelpers.HammingDist(x, y)
			}
			b.ReportMetric((float64(b.N)/1e6)/float64(b.Elapsed().Seconds()), "m.ops/sec")
		})
		b.Run(fmt.Sprintf("SIMD-d%d", bits), func(b *testing.B) {
			for b.Loop() {
				compressionhelpers.HammingDistSIMD(x, y)
			}
			b.ReportMetric((float64(b.N)/1e6)/float64(b.Elapsed().Seconds()), "m.ops/sec")
		})
	}
}
diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/compression.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/compression.go
new file mode 100644
index 0000000000000000000000000000000000000000..c8fc14f093b09d28f0438684c6f71b44325db016
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/compression.go
@@ -0,0 +1,866 @@
//                           _       _
// __      _____  __ ___   ___  __ _| |_ ___
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
//  \ V  V /  __/ (_| |\ V /| | (_| | ||  __/
//   \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
//
//  Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+// +// CONTACT: hello@weaviate.io +// + +package compressionhelpers + +import ( + "context" + "encoding/binary" + "fmt" + "runtime" + "time" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv" + "github.com/weaviate/weaviate/adapters/repos/db/vector/cache" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer" + "github.com/weaviate/weaviate/entities/storobj" + "github.com/weaviate/weaviate/entities/vectorindex/hnsw" + "github.com/weaviate/weaviate/usecases/memwatch" +) + +type CompressorDistancer interface { + DistanceToNode(id uint64) (float32, error) + DistanceToFloat(vec []float32) (float32, error) +} + +type ReturnDistancerFn func() + +type CommitLogger interface { + AddPQCompression(PQData) error + AddSQCompression(SQData) error + AddRQCompression(RQData) error + AddBRQCompression(BRQData) error +} + +type CompressionStats interface { + CompressionType() string + CompressionRatio(dimensions int) float64 +} + +type VectorCompressor interface { + Drop() error + GrowCache(size uint64) + SetCacheMaxSize(size int64) + GetCacheMaxSize() int64 + Delete(ctx context.Context, id uint64) + Preload(id uint64, vector []float32) + PreloadMulti(docID uint64, ids []uint64, vecs [][]float32) + PreloadPassage(id uint64, docID uint64, relativeID uint64, vec []float32) + GetKeys(id uint64) (uint64, uint64) + SetKeys(id uint64, docID uint64, relativeID uint64) + Prefetch(id uint64) + CountVectors() int64 + PrefillCache() + PrefillMultiCache(docIDVectors map[uint64][]uint64) + + DistanceBetweenCompressedVectorsFromIDs(ctx context.Context, x, y uint64) (float32, error) + NewDistancer(vector []float32) (CompressorDistancer, ReturnDistancerFn) + NewDistancerFromID(id uint64) (CompressorDistancer, error) + NewBag() CompressionDistanceBag + + PersistCompression(CommitLogger) + Stats() CompressionStats +} + +type quantizedVectorsCompressor[T 
byte | uint64] struct { + cache cache.Cache[T] + compressedStore *lsmkv.Store + quantizer quantizer[T] + storeId func([]byte, uint64) + loadId func([]byte) uint64 + logger logrus.FieldLogger +} + +func (compressor *quantizedVectorsCompressor[T]) Drop() error { + compressor.cache.Drop() + return nil +} + +func (compressor *quantizedVectorsCompressor[T]) GrowCache(size uint64) { + compressor.cache.Grow(size) +} + +func (compressor *quantizedVectorsCompressor[T]) SetCacheMaxSize(size int64) { + compressor.cache.UpdateMaxSize(size) +} + +func (compressor *quantizedVectorsCompressor[T]) CountVectors() int64 { + return compressor.cache.CountVectors() +} + +func (compressor *quantizedVectorsCompressor[T]) GetCacheMaxSize() int64 { + return compressor.cache.CopyMaxSize() +} + +func (compressor *quantizedVectorsCompressor[T]) Delete(ctx context.Context, id uint64) { + compressor.cache.Delete(ctx, id) + idBytes := make([]byte, 8) + compressor.storeId(idBytes, id) + if err := compressor.compressedStore.Bucket(helpers.VectorsCompressedBucketLSM).Delete(idBytes); err != nil { + compressor.logger.WithFields(logrus.Fields{ + "action": "compressor_delete", + "id": id, + }).WithError(err). 
+ Warnf("cannot delete vector from compressed cache") + } +} + +func (compressor *quantizedVectorsCompressor[T]) Preload(id uint64, vector []float32) { + compressedVector := compressor.quantizer.Encode(vector) + idBytes := make([]byte, 8) + compressor.storeId(idBytes, id) + compressor.compressedStore.Bucket(helpers.VectorsCompressedBucketLSM).Put(idBytes, compressor.quantizer.CompressedBytes(compressedVector)) + compressor.cache.Grow(id) + compressor.cache.Preload(id, compressedVector) +} + +func (compressor *quantizedVectorsCompressor[T]) PreloadMulti(docID uint64, ids []uint64, vecs [][]float32) { + compressedVectors := make([][]T, len(vecs)) + for i, vector := range vecs { + compressedVectors[i] = compressor.quantizer.Encode(vector) + } + maxID := ids[0] + for i, id := range ids { + idBytes := make([]byte, 8) + compressor.storeId(idBytes, id) + compressor.compressedStore.Bucket(helpers.VectorsCompressedBucketLSM).Put(idBytes, compressor.quantizer.CompressedBytes(compressedVectors[i])) + if id > maxID { + maxID = id + } + } + compressor.cache.Grow(maxID) + compressor.cache.PreloadMulti(docID, ids, compressedVectors) +} + +func (compressor *quantizedVectorsCompressor[T]) PreloadPassage(id, docID, relativeID uint64, vec []float32) { + compressedVector := compressor.quantizer.Encode(vec) + idBytes := make([]byte, 8) + compressor.storeId(idBytes, id) + compressor.compressedStore.Bucket(helpers.VectorsCompressedBucketLSM).Put(idBytes, compressor.quantizer.CompressedBytes(compressedVector)) + compressor.cache.Grow(id) + compressor.cache.PreloadPassage(id, docID, relativeID, compressedVector) +} + +func (compressor *quantizedVectorsCompressor[T]) GetKeys(id uint64) (uint64, uint64) { + return compressor.cache.GetKeys(id) +} + +func (compressor *quantizedVectorsCompressor[T]) SetKeys(id, docID, relativeID uint64) { + compressor.cache.SetKeys(id, docID, relativeID) +} + +func (compressor *quantizedVectorsCompressor[T]) Prefetch(id uint64) { + compressor.cache.Prefetch(id) 
+} + +func (compressor *quantizedVectorsCompressor[T]) Stats() CompressionStats { + return compressor.quantizer.Stats() +} + +func (compressor *quantizedVectorsCompressor[T]) DistanceBetweenCompressedVectors(x, y []T) (float32, error) { + return compressor.quantizer.DistanceBetweenCompressedVectors(x, y) +} + +func (compressor *quantizedVectorsCompressor[T]) compressedVectorFromID(ctx context.Context, id uint64) ([]T, error) { + compressedVector, err := compressor.cache.Get(ctx, id) + if err != nil { + return nil, err + } + if len(compressedVector) == 0 { + return nil, fmt.Errorf("got a nil or zero-length vector at docID %d", id) + } + return compressedVector, nil +} + +func (compressor *quantizedVectorsCompressor[T]) DistanceBetweenCompressedVectorsFromIDs(ctx context.Context, id1, id2 uint64) (float32, error) { + compressedVector1, err := compressor.compressedVectorFromID(ctx, id1) + if err != nil { + return 0, err + } + + compressedVector2, err := compressor.compressedVectorFromID(ctx, id2) + if err != nil { + return 0, err + } + + dist, err := compressor.DistanceBetweenCompressedVectors(compressedVector1, compressedVector2) + return dist, err +} + +func (compressor *quantizedVectorsCompressor[T]) getCompressedVectorForID(ctx context.Context, id uint64) ([]T, error) { + idBytes := make([]byte, 8) + compressor.storeId(idBytes, id) + compressedVector, err := compressor.compressedStore.Bucket(helpers.VectorsCompressedBucketLSM).Get(idBytes) + if err != nil { + return nil, errors.Wrap(err, "Getting vector for id") + } + if len(compressedVector) == 0 { + return nil, storobj.NewErrNotFoundf(id, "getCompressedVectorForID") + } + + return compressor.quantizer.FromCompressedBytes(compressedVector), nil +} + +func (compressor *quantizedVectorsCompressor[T]) NewDistancer(vector []float32) (CompressorDistancer, ReturnDistancerFn) { + d := &quantizedCompressorDistancer[T]{ + compressor: compressor, + distancer: compressor.quantizer.NewQuantizerDistancer(vector), + } + return 
d, func() { + compressor.returnDistancer(d) + } +} + +func (compressor *quantizedVectorsCompressor[T]) NewDistancerFromID(id uint64) (CompressorDistancer, error) { + compressedVector, err := compressor.compressedVectorFromID(context.Background(), id) + if err != nil { + return nil, err + } + if compressedVector == nil { + return nil, storobj.ErrNotFound{ + DocID: id, + } + } + d := &quantizedCompressorDistancer[T]{ + compressor: compressor, + distancer: compressor.quantizer.NewCompressedQuantizerDistancer(compressedVector), + } + return d, nil +} + +func (compressor *quantizedVectorsCompressor[T]) returnDistancer(distancer CompressorDistancer) { + dst := distancer.(*quantizedCompressorDistancer[T]).distancer + if dst == nil { + return + } + compressor.quantizer.ReturnQuantizerDistancer(dst) +} + +func (compressor *quantizedVectorsCompressor[T]) NewBag() CompressionDistanceBag { + return &quantizedDistanceBag[T]{ + compressor: compressor, + elements: make(map[uint64][]T), + } +} + +func (compressor *quantizedVectorsCompressor[T]) initCompressedStore() error { + err := compressor.compressedStore.CreateOrLoadBucket(context.Background(), helpers.VectorsCompressedBucketLSM) + if err != nil { + return errors.Wrapf(err, "Create or load bucket (compressed vectors store)") + } + return nil +} + +func (compressor *quantizedVectorsCompressor[T]) PrefillCache() { + before := time.Now() + + // The idea here is to first read everything from disk in one go, then grow + // the cache just once before inserting all vectors. A previous iteration + // would grow the cache as part of the cursor loop and this ended up making + // up 75% of the CPU time needed. This new implementation with two loops is + // much more efficient and only ever-so-slightly more memory-consuming (about + // one additional struct per vector while loading. 
Should be negligible) + + parallel := 2 * runtime.GOMAXPROCS(0) + maxID := uint64(0) + vecs := make([]VecAndID[T], 0, 10_000) + + it := NewParallelIterator( + compressor.compressedStore.Bucket(helpers.VectorsCompressedBucketLSM), + parallel, compressor.loadId, compressor.quantizer.FromCompressedBytesWithSubsliceBuffer, + compressor.logger) + channel := it.IterateAll() + if channel == nil { + return // nothing to do + } + + for v := range channel { + vecs = append(vecs, v...) + } + + for i := range vecs { + if vecs[i].Id > maxID { + maxID = vecs[i].Id + } + } + + compressor.cache.Grow(maxID) + + for _, vec := range vecs { + compressor.cache.Preload(vec.Id, vec.Vec) + } + + took := time.Since(before) + compressor.logger.WithFields(logrus.Fields{ + "action": "hnsw_compressed_vector_cache_prefill", + "count": len(vecs), + "maxID": maxID, + "took": took, + }).Info("prefilled compressed vector cache") +} + +func (compressor *quantizedVectorsCompressor[T]) PrefillMultiCache(docIDVectors map[uint64][]uint64) { + before := time.Now() + + parallel := 2 * runtime.GOMAXPROCS(0) + maxID := uint64(0) + vecs := make([]VecAndID[T], 0, 10_000) + + it := NewParallelIterator( + compressor.compressedStore.Bucket(helpers.VectorsCompressedBucketLSM), + parallel, compressor.loadId, compressor.quantizer.FromCompressedBytesWithSubsliceBuffer, + compressor.logger) + channel := it.IterateAll() + if channel == nil { + return // nothing to do + } + + for v := range channel { + vecs = append(vecs, v...) 
+ } + + for i := range vecs { + if vecs[i].Id > maxID { + maxID = vecs[i].Id + } + } + + compressor.cache.Grow(maxID) + + nodeIDMappings := make(map[uint64]cache.CacheKeys, len(vecs)) + for docID := range docIDVectors { + for relativeID, id := range docIDVectors[docID] { + nodeIDMappings[id] = cache.CacheKeys{ + DocID: docID, + RelativeID: uint64(relativeID), + } + } + } + for _, vec := range vecs { + docID := nodeIDMappings[vec.Id].DocID + relativeID := nodeIDMappings[vec.Id].RelativeID + compressor.cache.PreloadPassage(vec.Id, docID, relativeID, vec.Vec) + } + + took := time.Since(before) + compressor.logger.WithFields(logrus.Fields{ + "action": "hnsw_compressed_vector_cache_prefill", + "count": len(vecs), + "maxID": maxID, + "took": took, + }).Info("prefilled compressed vector cache for multivector") +} + +func (compressor *quantizedVectorsCompressor[T]) PersistCompression(logger CommitLogger) { + compressor.quantizer.PersistCompression(logger) +} + +func NewHNSWPQCompressor( + cfg hnsw.PQConfig, + distance distancer.Provider, + dimensions int, + vectorCacheMaxObjects int, + logger logrus.FieldLogger, + data [][]float32, + store *lsmkv.Store, + allocChecker memwatch.AllocChecker, +) (VectorCompressor, error) { + quantizer, err := NewProductQuantizer(cfg, distance, dimensions, logger) + if err != nil { + return nil, err + } + pqVectorsCompressor := &quantizedVectorsCompressor[byte]{ + quantizer: quantizer, + compressedStore: store, + storeId: binary.LittleEndian.PutUint64, + loadId: binary.LittleEndian.Uint64, + logger: logger, + } + pqVectorsCompressor.initCompressedStore() + pqVectorsCompressor.cache = cache.NewShardedByteLockCache( + pqVectorsCompressor.getCompressedVectorForID, vectorCacheMaxObjects, 1, logger, + 0, allocChecker) + pqVectorsCompressor.cache.Grow(uint64(len(data))) + err = quantizer.Fit(data) + if err != nil { + return nil, err + } + return pqVectorsCompressor, nil +} + +func RestoreHNSWPQCompressor( + cfg hnsw.PQConfig, + distance 
distancer.Provider, + dimensions int, + vectorCacheMaxObjects int, + logger logrus.FieldLogger, + encoders []PQEncoder, + store *lsmkv.Store, + allocChecker memwatch.AllocChecker, +) (VectorCompressor, error) { + quantizer, err := NewProductQuantizerWithEncoders(cfg, distance, dimensions, encoders, logger) + if err != nil { + return nil, err + } + pqVectorsCompressor := &quantizedVectorsCompressor[byte]{ + quantizer: quantizer, + compressedStore: store, + storeId: binary.LittleEndian.PutUint64, + loadId: binary.LittleEndian.Uint64, + logger: logger, + } + pqVectorsCompressor.initCompressedStore() + pqVectorsCompressor.cache = cache.NewShardedByteLockCache( + pqVectorsCompressor.getCompressedVectorForID, vectorCacheMaxObjects, 1, logger, 0, + allocChecker) + return pqVectorsCompressor, nil +} + +func NewHNSWPQMultiCompressor( + cfg hnsw.PQConfig, + distance distancer.Provider, + dimensions int, + vectorCacheMaxObjects int, + logger logrus.FieldLogger, + data [][]float32, + store *lsmkv.Store, + allocChecker memwatch.AllocChecker, +) (VectorCompressor, error) { + quantizer, err := NewProductQuantizer(cfg, distance, dimensions, logger) + if err != nil { + return nil, err + } + pqVectorsCompressor := &quantizedVectorsCompressor[byte]{ + quantizer: quantizer, + compressedStore: store, + storeId: binary.LittleEndian.PutUint64, + loadId: binary.LittleEndian.Uint64, + logger: logger, + } + pqVectorsCompressor.initCompressedStore() + pqVectorsCompressor.cache = cache.NewShardedMultiByteLockCache( + pqVectorsCompressor.getCompressedVectorForID, vectorCacheMaxObjects, logger, + 0, allocChecker) + pqVectorsCompressor.cache.Grow(uint64(len(data))) + err = quantizer.Fit(data) + if err != nil { + return nil, err + } + return pqVectorsCompressor, nil +} + +func RestoreHNSWPQMultiCompressor( + cfg hnsw.PQConfig, + distance distancer.Provider, + dimensions int, + vectorCacheMaxObjects int, + logger logrus.FieldLogger, + encoders []PQEncoder, + store *lsmkv.Store, + allocChecker 
memwatch.AllocChecker, +) (VectorCompressor, error) { + quantizer, err := NewProductQuantizerWithEncoders(cfg, distance, dimensions, encoders, logger) + if err != nil { + return nil, err + } + pqVectorsCompressor := &quantizedVectorsCompressor[byte]{ + quantizer: quantizer, + compressedStore: store, + storeId: binary.LittleEndian.PutUint64, + loadId: binary.LittleEndian.Uint64, + logger: logger, + } + pqVectorsCompressor.initCompressedStore() + pqVectorsCompressor.cache = cache.NewShardedMultiByteLockCache( + pqVectorsCompressor.getCompressedVectorForID, vectorCacheMaxObjects, logger, 0, + allocChecker) + return pqVectorsCompressor, nil +} + +func NewBQCompressor( + distance distancer.Provider, + vectorCacheMaxObjects int, + logger logrus.FieldLogger, + store *lsmkv.Store, + allocChecker memwatch.AllocChecker, +) (VectorCompressor, error) { + quantizer := NewBinaryQuantizer(distance) + bqVectorsCompressor := &quantizedVectorsCompressor[uint64]{ + quantizer: &quantizer, + compressedStore: store, + storeId: binary.BigEndian.PutUint64, + loadId: binary.BigEndian.Uint64, + logger: logger, + } + bqVectorsCompressor.initCompressedStore() + bqVectorsCompressor.cache = cache.NewShardedUInt64LockCache( + bqVectorsCompressor.getCompressedVectorForID, vectorCacheMaxObjects, 1, logger, 0, + allocChecker) + return bqVectorsCompressor, nil +} + +func NewBQMultiCompressor( + distance distancer.Provider, + vectorCacheMaxObjects int, + logger logrus.FieldLogger, + store *lsmkv.Store, + allocChecker memwatch.AllocChecker, +) (VectorCompressor, error) { + quantizer := NewBinaryQuantizer(distance) + bqVectorsCompressor := &quantizedVectorsCompressor[uint64]{ + quantizer: &quantizer, + compressedStore: store, + storeId: binary.BigEndian.PutUint64, + loadId: binary.BigEndian.Uint64, + logger: logger, + } + bqVectorsCompressor.initCompressedStore() + bqVectorsCompressor.cache = cache.NewShardedMultiUInt64LockCache( + bqVectorsCompressor.getCompressedVectorForID, vectorCacheMaxObjects, 
logger, 0, + allocChecker) + return bqVectorsCompressor, nil +} + +func NewHNSWSQCompressor( + distance distancer.Provider, + vectorCacheMaxObjects int, + logger logrus.FieldLogger, + data [][]float32, + store *lsmkv.Store, + allocChecker memwatch.AllocChecker, +) (VectorCompressor, error) { + quantizer := NewScalarQuantizer(data, distance) + sqVectorsCompressor := &quantizedVectorsCompressor[byte]{ + quantizer: quantizer, + compressedStore: store, + storeId: binary.BigEndian.PutUint64, + loadId: binary.BigEndian.Uint64, + logger: logger, + } + sqVectorsCompressor.initCompressedStore() + sqVectorsCompressor.cache = cache.NewShardedByteLockCache( + sqVectorsCompressor.getCompressedVectorForID, vectorCacheMaxObjects, 1, logger, + 0, allocChecker) + sqVectorsCompressor.cache.Grow(uint64(len(data))) + return sqVectorsCompressor, nil +} + +func RestoreHNSWSQCompressor( + distance distancer.Provider, + vectorCacheMaxObjects int, + logger logrus.FieldLogger, + a, b float32, + dimensions uint16, + store *lsmkv.Store, + allocChecker memwatch.AllocChecker, +) (VectorCompressor, error) { + quantizer, err := RestoreScalarQuantizer(a, b, dimensions, distance) + if err != nil { + return nil, err + } + sqVectorsCompressor := &quantizedVectorsCompressor[byte]{ + quantizer: quantizer, + compressedStore: store, + storeId: binary.BigEndian.PutUint64, + loadId: binary.BigEndian.Uint64, + logger: logger, + } + sqVectorsCompressor.initCompressedStore() + sqVectorsCompressor.cache = cache.NewShardedByteLockCache( + sqVectorsCompressor.getCompressedVectorForID, vectorCacheMaxObjects, 1, logger, + 0, allocChecker) + return sqVectorsCompressor, nil +} + +func NewHNSWSQMultiCompressor( + distance distancer.Provider, + vectorCacheMaxObjects int, + logger logrus.FieldLogger, + data [][]float32, + store *lsmkv.Store, + allocChecker memwatch.AllocChecker, +) (VectorCompressor, error) { + quantizer := NewScalarQuantizer(data, distance) + sqVectorsCompressor := &quantizedVectorsCompressor[byte]{ + 
quantizer: quantizer, + compressedStore: store, + storeId: binary.BigEndian.PutUint64, + loadId: binary.BigEndian.Uint64, + logger: logger, + } + sqVectorsCompressor.initCompressedStore() + sqVectorsCompressor.cache = cache.NewShardedMultiByteLockCache( + sqVectorsCompressor.getCompressedVectorForID, vectorCacheMaxObjects, logger, + 0, allocChecker) + sqVectorsCompressor.cache.Grow(uint64(len(data))) + return sqVectorsCompressor, nil +} + +func RestoreHNSWSQMultiCompressor( + distance distancer.Provider, + vectorCacheMaxObjects int, + logger logrus.FieldLogger, + a, b float32, + dimensions uint16, + store *lsmkv.Store, + allocChecker memwatch.AllocChecker, +) (VectorCompressor, error) { + quantizer, err := RestoreScalarQuantizer(a, b, dimensions, distance) + if err != nil { + return nil, err + } + sqVectorsCompressor := &quantizedVectorsCompressor[byte]{ + quantizer: quantizer, + compressedStore: store, + storeId: binary.BigEndian.PutUint64, + loadId: binary.BigEndian.Uint64, + logger: logger, + } + sqVectorsCompressor.initCompressedStore() + sqVectorsCompressor.cache = cache.NewShardedMultiByteLockCache( + sqVectorsCompressor.getCompressedVectorForID, vectorCacheMaxObjects, logger, + 0, allocChecker) + return sqVectorsCompressor, nil +} + +func NewRQCompressor( + distance distancer.Provider, + vectorCacheMaxObjects int, + logger logrus.FieldLogger, + store *lsmkv.Store, + allocChecker memwatch.AllocChecker, + bits int, + dim int, +) (VectorCompressor, error) { + var rqVectorsCompressor VectorCompressor + switch bits { + case 1: + quantizer := NewBinaryRotationalQuantizer(dim, DefaultFastRotationSeed, distance) + rqVectorsCompressor = &quantizedVectorsCompressor[uint64]{ + quantizer: quantizer, + compressedStore: store, + storeId: binary.BigEndian.PutUint64, + loadId: binary.BigEndian.Uint64, + logger: logger, + } + rqVectorsCompressor.(*quantizedVectorsCompressor[uint64]).initCompressedStore() + rqVectorsCompressor.(*quantizedVectorsCompressor[uint64]).cache = 
cache.NewShardedUInt64LockCache( + rqVectorsCompressor.(*quantizedVectorsCompressor[uint64]).getCompressedVectorForID, vectorCacheMaxObjects, 1, logger, + 0, allocChecker) + case 8: + quantizer := NewRotationalQuantizer(dim, DefaultFastRotationSeed, bits, distance) + rqVectorsCompressor = &quantizedVectorsCompressor[byte]{ + quantizer: quantizer, + compressedStore: store, + storeId: binary.BigEndian.PutUint64, + loadId: binary.BigEndian.Uint64, + logger: logger, + } + rqVectorsCompressor.(*quantizedVectorsCompressor[byte]).initCompressedStore() + rqVectorsCompressor.(*quantizedVectorsCompressor[byte]).cache = cache.NewShardedByteLockCache( + rqVectorsCompressor.(*quantizedVectorsCompressor[byte]).getCompressedVectorForID, vectorCacheMaxObjects, 1, logger, + 0, allocChecker) + default: + return nil, errors.New("invalid bits value, only 1 and 8 bits are supported") + } + + return rqVectorsCompressor, nil +} + +func RestoreRQCompressor( + distance distancer.Provider, + vectorCacheMaxObjects int, + logger logrus.FieldLogger, + dimensions int, + bits int, + outputDim int, + rounds int, + swaps [][]Swap, + signs [][]float32, + rounding []float32, + store *lsmkv.Store, + allocChecker memwatch.AllocChecker, +) (VectorCompressor, error) { + var rqVectorsCompressor VectorCompressor + switch bits { + case 1: + quantizer, err := RestoreBinaryRotationalQuantizer(dimensions, outputDim, rounds, swaps, signs, rounding, distance) + if err != nil { + return nil, err + } + rqVectorsCompressor = &quantizedVectorsCompressor[uint64]{ + quantizer: quantizer, + compressedStore: store, + storeId: binary.BigEndian.PutUint64, + loadId: binary.BigEndian.Uint64, + logger: logger, + } + rqVectorsCompressor.(*quantizedVectorsCompressor[uint64]).initCompressedStore() + rqVectorsCompressor.(*quantizedVectorsCompressor[uint64]).cache = cache.NewShardedUInt64LockCache( + rqVectorsCompressor.(*quantizedVectorsCompressor[uint64]).getCompressedVectorForID, vectorCacheMaxObjects, 1, logger, + 0, 
allocChecker) + case 8: + quantizer, err := RestoreRotationalQuantizer(dimensions, bits, outputDim, rounds, swaps, signs, distance) + if err != nil { + return nil, err + } + rqVectorsCompressor = &quantizedVectorsCompressor[byte]{ + quantizer: quantizer, + compressedStore: store, + storeId: binary.BigEndian.PutUint64, + loadId: binary.BigEndian.Uint64, + logger: logger, + } + rqVectorsCompressor.(*quantizedVectorsCompressor[byte]).initCompressedStore() + rqVectorsCompressor.(*quantizedVectorsCompressor[byte]).cache = cache.NewShardedByteLockCache( + rqVectorsCompressor.(*quantizedVectorsCompressor[byte]).getCompressedVectorForID, vectorCacheMaxObjects, 1, logger, + 0, allocChecker) + default: + return nil, errors.New("invalid bits value, only 1 and 8 bits are supported") + } + + return rqVectorsCompressor, nil +} + +func NewRQMultiCompressor( + distance distancer.Provider, + vectorCacheMaxObjects int, + logger logrus.FieldLogger, + store *lsmkv.Store, + allocChecker memwatch.AllocChecker, + bits int, + dim int, +) (VectorCompressor, error) { + var rqVectorsCompressor VectorCompressor + switch bits { + case 1: + quantizer := NewBinaryRotationalQuantizer(dim, DefaultFastRotationSeed, distance) + rqVectorsCompressor = &quantizedVectorsCompressor[uint64]{ + quantizer: quantizer, + compressedStore: store, + storeId: binary.BigEndian.PutUint64, + loadId: binary.BigEndian.Uint64, + logger: logger, + } + rqVectorsCompressor.(*quantizedVectorsCompressor[uint64]).initCompressedStore() + rqVectorsCompressor.(*quantizedVectorsCompressor[uint64]).cache = cache.NewShardedMultiUInt64LockCache( + rqVectorsCompressor.(*quantizedVectorsCompressor[uint64]).getCompressedVectorForID, vectorCacheMaxObjects, logger, + 0, allocChecker) + case 8: + quantizer := NewRotationalQuantizer(dim, DefaultFastRotationSeed, bits, distance) + rqVectorsCompressor = &quantizedVectorsCompressor[byte]{ + quantizer: quantizer, + compressedStore: store, + storeId: binary.BigEndian.PutUint64, + loadId: 
binary.BigEndian.Uint64, + logger: logger, + } + rqVectorsCompressor.(*quantizedVectorsCompressor[byte]).initCompressedStore() + rqVectorsCompressor.(*quantizedVectorsCompressor[byte]).cache = cache.NewShardedMultiByteLockCache( + rqVectorsCompressor.(*quantizedVectorsCompressor[byte]).getCompressedVectorForID, vectorCacheMaxObjects, logger, + 0, allocChecker) + default: + return nil, errors.New("invalid bits value, only 1 and 8 bits are supported") + } + + return rqVectorsCompressor, nil +} + +func RestoreRQMultiCompressor( + distance distancer.Provider, + vectorCacheMaxObjects int, + logger logrus.FieldLogger, + dimensions int, + bits int, + outputDim int, + rounds int, + swaps [][]Swap, + signs [][]float32, + rounding []float32, + store *lsmkv.Store, + allocChecker memwatch.AllocChecker, +) (VectorCompressor, error) { + var rqVectorsCompressor VectorCompressor + switch bits { + case 1: + quantizer, err := RestoreBinaryRotationalQuantizer(dimensions, outputDim, rounds, swaps, signs, rounding, distance) + if err != nil { + return nil, err + } + rqVectorsCompressor = &quantizedVectorsCompressor[uint64]{ + quantizer: quantizer, + compressedStore: store, + storeId: binary.BigEndian.PutUint64, + loadId: binary.BigEndian.Uint64, + logger: logger, + } + rqVectorsCompressor.(*quantizedVectorsCompressor[uint64]).initCompressedStore() + rqVectorsCompressor.(*quantizedVectorsCompressor[uint64]).cache = cache.NewShardedMultiUInt64LockCache( + rqVectorsCompressor.(*quantizedVectorsCompressor[uint64]).getCompressedVectorForID, vectorCacheMaxObjects, logger, + 0, allocChecker) + case 8: + quantizer, err := RestoreRotationalQuantizer(dimensions, bits, outputDim, rounds, swaps, signs, distance) + if err != nil { + return nil, err + } + rqVectorsCompressor = &quantizedVectorsCompressor[byte]{ + quantizer: quantizer, + compressedStore: store, + storeId: binary.BigEndian.PutUint64, + loadId: binary.BigEndian.Uint64, + logger: logger, + } + 
rqVectorsCompressor.(*quantizedVectorsCompressor[byte]).initCompressedStore() + rqVectorsCompressor.(*quantizedVectorsCompressor[byte]).cache = cache.NewShardedMultiByteLockCache( + rqVectorsCompressor.(*quantizedVectorsCompressor[byte]).getCompressedVectorForID, vectorCacheMaxObjects, logger, + 0, allocChecker) + default: + return nil, errors.New("invalid bits value, only 1 and 8 bits are supported") + } + + return rqVectorsCompressor, nil +} + +type quantizedCompressorDistancer[T byte | uint64] struct { + compressor *quantizedVectorsCompressor[T] + distancer quantizerDistancer[T] +} + +func (distancer *quantizedCompressorDistancer[T]) DistanceToNode(id uint64) (float32, error) { + compressedVector, err := distancer.compressor.cache.Get(context.Background(), id) + if err != nil { + return 0, err + } + if len(compressedVector) == 0 { + return 0, fmt.Errorf( + "got a nil or zero-length vector at docID %d", id) + } + return distancer.distancer.Distance(compressedVector) +} + +func (distancer *quantizedCompressorDistancer[T]) DistanceToFloat(vector []float32) (float32, error) { + return distancer.distancer.DistanceToFloat(vector) +} + +type UncompressedStats struct{} + +func (u UncompressedStats) CompressionType() string { + return "none" +} + +func (u UncompressedStats) CompressionRatio(_ int) float64 { + // Uncompressed vectors have no compression + return 1.0 +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/compression_distance_bag.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/compression_distance_bag.go new file mode 100644 index 0000000000000000000000000000000000000000..5d3bf4ed32882fdd9a72abec9bf4153005e5bebd --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/compression_distance_bag.go @@ -0,0 +1,45 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| 
| || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package compressionhelpers + +import ( + "context" + "fmt" +) + +type CompressionDistanceBag interface { + Load(ctx context.Context, id uint64) error + Distance(x, y uint64) (float32, error) +} + +type quantizedDistanceBag[T byte | uint64] struct { + elements map[uint64][]T + compressor *quantizedVectorsCompressor[T] +} + +func (bag *quantizedDistanceBag[T]) Load(ctx context.Context, id uint64) error { + var err error + bag.elements[id], err = bag.compressor.cache.Get(ctx, id) + return err +} + +func (bag *quantizedDistanceBag[T]) Distance(x, y uint64) (float32, error) { + v1, found := bag.elements[x] + if !found { + return 0, fmt.Errorf("missing id in bag: %d", x) + } + v2, found := bag.elements[y] + if !found { + return 0, fmt.Errorf("missing id in bag: %d", y) + } + return bag.compressor.DistanceBetweenCompressedVectors(v1, v2) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/compression_distance_bag_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/compression_distance_bag_test.go new file mode 100644 index 0000000000000000000000000000000000000000..5bbb993f3d06c69cad41993e4da2fbc8ce8ea708 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/compression_distance_bag_test.go @@ -0,0 +1,53 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +//go:build !race + +package compressionhelpers_test + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/weaviate/weaviate/adapters/repos/db/vector/compressionhelpers" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer" + testinghelpers "github.com/weaviate/weaviate/adapters/repos/db/vector/testinghelpers" +) + +func Test_NoRaceQuantizedDistanceBag(t *testing.T) { + compressor, err := compressionhelpers.NewBQCompressor(distancer.NewCosineDistanceProvider(), 1e12, nil, testinghelpers.NewDummyStore(t), nil) + assert.Nil(t, err) + compressor.Preload(1, []float32{-0.5, 0.5}) + compressor.Preload(2, []float32{0.25, 0.7}) + compressor.Preload(3, []float32{0.5, 0.5}) + + t.Run("returns error when id has not been loaded", func(t *testing.T) { + bag := compressor.NewBag() + _, err = bag.Distance(1, 2) + assert.NotNil(t, err) + }) + + t.Run("returns error when id has not been loaded", func(t *testing.T) { + bag := compressor.NewBag() + bag.Load(context.Background(), 1) + bag.Load(context.Background(), 2) + bag.Load(context.Background(), 3) + + d, err := bag.Distance(1, 2) + assert.Nil(t, err) + assert.Equal(t, float32(1), d) + + d, err = bag.Distance(2, 3) + assert.Nil(t, err) + assert.Equal(t, float32(0), d) + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/compression_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/compression_test.go new file mode 100644 index 0000000000000000000000000000000000000000..5d3d887ebe3e8b5ebaadb261556956a2d4f2c52f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/compression_test.go @@ -0,0 +1,153 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 
- 2025 Weaviate B.V. All rights reserved.
//
// CONTACT: hello@weaviate.io
//

//go:build !race

package compressionhelpers_test

import (
	"context"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/weaviate/weaviate/adapters/repos/db/vector/compressionhelpers"
	"github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer"
	"github.com/weaviate/weaviate/adapters/repos/db/vector/testinghelpers"
	"github.com/weaviate/weaviate/entities/vectorindex/hnsw"
	"github.com/weaviate/weaviate/usecases/memwatch"
)

// Test_NoRaceQuantizedVectorCompressor exercises the BQ and PQ compressors:
// preload/delete round-trips, distances by id, distancers, and a
// dimension-mismatch error path.
func Test_NoRaceQuantizedVectorCompressor(t *testing.T) {
	t.Run("loading and deleting data works", func(t *testing.T) {
		compressor, err := compressionhelpers.NewBQCompressor(distancer.NewCosineDistanceProvider(), 1e12, nil, testinghelpers.NewDummyStore(t), nil)
		assert.Nil(t, err)
		compressor.Preload(1, []float32{-0.5, 0.5})
		// Only id 1 is loaded here, so asking for (1, 2) must fail.
		// (NOTE(review): `vec` actually holds a distance, not a vector.)
		vec, err := compressor.DistanceBetweenCompressedVectorsFromIDs(context.Background(), 1, 2)
		assert.Equal(t, float32(0), vec)
		assert.NotNil(t, err)

		compressor.Preload(2, []float32{0.25, 0.7})
		compressor.Preload(3, []float32{0.5, 0.5})
		compressor.Delete(context.Background(), 1)

		// id 1 was deleted again, so the lookup must fail once more.
		_, err = compressor.DistanceBetweenCompressedVectorsFromIDs(context.Background(), 1, 2)
		assert.NotNil(t, err)
	})

	t.Run("distance are right when using BQ", func(t *testing.T) {
		compressor, err := compressionhelpers.NewBQCompressor(distancer.NewCosineDistanceProvider(), 1e12, nil, testinghelpers.NewDummyStore(t), nil)
		assert.Nil(t, err)
		compressor.Preload(1, []float32{-0.5, 0.5})
		compressor.Preload(2, []float32{0.25, 0.7})
		compressor.Preload(3, []float32{0.5, 0.5})

		d, err := compressor.DistanceBetweenCompressedVectorsFromIDs(context.Background(), 1, 2)
		assert.Nil(t, err)
		assert.Equal(t, float32(1), d)

		d, err = compressor.DistanceBetweenCompressedVectorsFromIDs(context.Background(), 1, 3)
		assert.Nil(t, err)
		assert.Equal(t, float32(1), d)

		d, err = compressor.DistanceBetweenCompressedVectorsFromIDs(context.Background(), 2, 3)
		assert.Nil(t, err)
		assert.Equal(t, float32(0), d)
	})

	t.Run("distance are right when using BQDistancer", func(t *testing.T) {
		compressor, err := compressionhelpers.NewBQCompressor(distancer.NewCosineDistanceProvider(), 1e12, nil, testinghelpers.NewDummyStore(t), nil)
		assert.Nil(t, err)
		compressor.Preload(1, []float32{-0.5, 0.5})
		compressor.Preload(2, []float32{0.25, 0.7})
		compressor.Preload(3, []float32{0.5, 0.5})
		// Distancer anchored at an uncompressed query vector; the return
		// function hands the distancer back for reuse.
		distancer, returnFn := compressor.NewDistancer([]float32{0.1, -0.2})
		defer returnFn()

		d, err := distancer.DistanceToNode(1)
		assert.Nil(t, err)
		assert.Equal(t, float32(2), d)

		d, err = distancer.DistanceToNode(2)
		assert.Nil(t, err)
		assert.Equal(t, float32(1), d)

		d, err = distancer.DistanceToNode(3)
		assert.Nil(t, err)
		assert.Equal(t, float32(1), d)

		d, err = distancer.DistanceToFloat([]float32{0.8, -0.2})
		assert.Nil(t, err)
		assert.Equal(t, float32(0.88), d)
	})

	t.Run("distance are right when using BQDistancer to compressed node", func(t *testing.T) {
		compressor, err := compressionhelpers.NewBQCompressor(distancer.NewCosineDistanceProvider(), 1e12, nil, testinghelpers.NewDummyStore(t), nil)
		assert.Nil(t, err)
		compressor.Preload(1, []float32{-0.5, 0.5})
		compressor.Preload(2, []float32{0.25, 0.7})
		compressor.Preload(3, []float32{0.5, 0.5})
		// Distancer anchored at a stored (already compressed) node.
		distancer, err := compressor.NewDistancerFromID(1)

		assert.Nil(t, err)

		d, err := distancer.DistanceToNode(1)
		assert.Nil(t, err)
		assert.Equal(t, float32(0), d)

		d, err = distancer.DistanceToNode(2)
		assert.Nil(t, err)
		assert.Equal(t, float32(1), d)

		d, err = distancer.DistanceToNode(3)
		assert.Nil(t, err)
		assert.Equal(t, float32(1), d)

		d, err = distancer.DistanceToFloat([]float32{0.8, -0.2})
		assert.Nil(t, err)
		assert.Equal(t, float32(2), d)
	})

	t.Run("don't panic when vector dimensions are mismatched", func(t *testing.T) {
		var (
			// Minimal PQ config: one segment, one centroid.
			config = hnsw.PQConfig{
				Enabled:  true,
				Segments: 1,
				Encoder: hnsw.PQEncoder{
					Type:         hnsw.PQEncoderTypeKMeans,
					Distribution: hnsw.PQEncoderDistributionLogNormal,
				},
				Centroids: 1,
			}
			dist         = distancer.NewCosineDistanceProvider()
			dims         = 3
			cacheMaxObjs = 4
			trainingData = [][]float32{
				{0.0, 0.1, 0.2},
			}
		)

		var (
			storedVec     = []float32{0.0, 0.1, 0.2}
			mismatchedVec = []float32{0.0, 0.1} // one dimension short on purpose
		)

		compressor, err := compressionhelpers.NewHNSWPQCompressor(
			config, dist, dims, cacheMaxObjs, nil, trainingData,
			testinghelpers.NewDummyStore(t),
			memwatch.NewDummyMonitor(),
		)
		require.Nil(t, err)
		d, _ := compressor.NewDistancer(storedVec)
		// Expect a clean error, not a panic, on mismatched dimensions.
		_, err = d.DistanceToFloat(mismatchedVec)
		assert.EqualError(t, err, "2 vs 3: vector lengths don't match")
	})
}
diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/distance.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/distance.go
new file mode 100644
index 0000000000000000000000000000000000000000..d2858d91cce03972699762160a7df388927b6869
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/distance.go
@@ -0,0 +1,43 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+// +// CONTACT: hello@weaviate.io +// + +package compressionhelpers + +import "math/bits" + +var l2SquaredByteImpl func(a, b []byte) uint32 = func(a, b []byte) uint32 { + var sum uint32 + + for i := range a { + diff := uint32(a[i]) - uint32(b[i]) + sum += diff * diff + } + + return sum +} + +var dotByteImpl func(a, b []uint8) uint32 = func(a, b []byte) uint32 { + var sum uint32 + + for i := range a { + sum += uint32(a[i]) * uint32(b[i]) + } + + return sum +} + +var hammingBitwiseImpl func(a, b []uint64) float32 = func(a, b []uint64) float32 { + total := float32(0) + for segment := range a { + total += float32(bits.OnesCount64(a[segment] ^ b[segment])) + } + return total +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/distance_amd64.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/distance_amd64.go new file mode 100644 index 0000000000000000000000000000000000000000..1f117d6df4a414862c007690d9de245bad86988b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/distance_amd64.go @@ -0,0 +1,25 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package compressionhelpers + +import ( + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer/asm" + "golang.org/x/sys/cpu" +) + +func init() { + if cpu.X86.HasAVX2 { + l2SquaredByteImpl = asm.L2ByteAVX256 + dotByteImpl = asm.DotByteAVX256 + hammingBitwiseImpl = asm.HammingBitwiseAVX256 + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/distance_arm64.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/distance_arm64.go new file mode 100644 index 0000000000000000000000000000000000000000..b4486f12720840f767aadeed80eb4ee294a35da8 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/distance_arm64.go @@ -0,0 +1,25 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package compressionhelpers + +import ( + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer/asm" + "golang.org/x/sys/cpu" +) + +func init() { + if cpu.ARM64.HasASIMD { + l2SquaredByteImpl = asm.L2ByteARM64 + dotByteImpl = asm.DotByteARM64 + hammingBitwiseImpl = asm.HammingBitwise + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/export_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/export_test.go new file mode 100644 index 0000000000000000000000000000000000000000..b34e7adcac258ccec4582d4913fed5a8c3ee6f18 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/export_test.go @@ -0,0 +1,18 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package compressionhelpers + +// Exported for testing. +var ( + FastWalshHadamardTransform64 = fastWalshHadamardTransform64 + FastWalshHadamardTransform256 = fastWalshHadamardTransform256 +) diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/fast_rotation.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/fast_rotation.go new file mode 100644 index 0000000000000000000000000000000000000000..d3fe82465a4ad5791d129eb5bfdbb75dd2e6a874 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/fast_rotation.go @@ -0,0 +1,258 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package compressionhelpers + +import ( + "math/rand/v2" + "slices" +) + +type FastRotation struct { + OutputDim uint32 // The dimension of the output returned by Rotate(). + Rounds uint32 // The number of rounds of random signs, swaps, and blocked transforms that the Rotate() function is going to apply. + Swaps [][]Swap // Random swaps to apply each round prior to transforming. + Signs [][]float32 // Random signs to apply each round prior to transforming. We store these as float32 values for performance reasons, to avoid casts. +} + +const ( + DefaultFastRotationSeed = uint64(0x535ab5105169b1df) +) + +func randomSigns(dim int, rng *rand.Rand) []float32 { + signs := make([]float32, dim) + for i := range signs { + if rng.Float64() < 0.5 { + signs[i] = -1 + } else { + signs[i] = 1 + } + } + return signs +} + +type Swap struct { + I, J uint16 +} + +// Returns a slice of n/2 random swaps such that every element in a slice of length n gets swapped exactly once. +// We order the swaps to make the access pattern more sequential. 
+func randomSwaps(n int, rng *rand.Rand) []Swap { + swaps := make([]Swap, n/2) + p := rng.Perm(n) + for s := range swaps { + a := uint16(p[2*s]) + b := uint16(p[2*s+1]) + if a < b { + swaps[s] = Swap{I: a, J: b} + } else { + swaps[s] = Swap{I: b, J: a} + } + } + slices.SortFunc(swaps, func(a, b Swap) int { + if a.I < b.I { + return -1 + } + if a.I > b.I { + return 1 + } + return 0 + }) + return swaps +} + +func NewFastRotation(inputDim int, rounds int, seed uint64) *FastRotation { + outputDim := 64 + for outputDim < inputDim { + outputDim += 64 + } + rng := rand.New(rand.NewPCG(seed, 0x385ab5285169b1ac)) + swaps := make([][]Swap, rounds) + signs := make([][]float32, rounds) + for i := range rounds { + swaps[i] = randomSwaps(outputDim, rng) + signs[i] = randomSigns(outputDim, rng) + } + return &FastRotation{ + OutputDim: uint32(outputDim), + Rounds: uint32(rounds), + Swaps: swaps, + Signs: signs, + } +} + +func RestoreFastRotation(outputDim int, rounds int, swaps [][]Swap, signs [][]float32) *FastRotation { + return &FastRotation{ + OutputDim: uint32(outputDim), + Rounds: uint32(rounds), + Swaps: swaps, + Signs: signs, + } +} + +func (r *FastRotation) Rotate(x []float32) []float32 { + rx := make([]float32, r.OutputDim) + copy(rx, x) + for i := range r.Rounds { + // Apply random swaps and signs. + for _, s := range r.Swaps[i] { + rx[s.I], rx[s.J] = r.Signs[i][s.I]*rx[s.J], r.Signs[i][s.J]*rx[s.I] + } + // Transform in blocks (of length 256 if possible, otherwise length 64). 
+ pos := 0 + for pos < len(rx) { + if len(rx)-pos >= 256 { + fastWalshHadamardTransform256(rx[pos:(pos + 256)]) + pos += 256 + continue + } + fastWalshHadamardTransform64(rx[pos:(pos + 64)]) + pos += 64 + } + } + return rx +} + +func fastWalshHadamardTransform16(x []float32, normalize float32) { + x0 := normalize * x[0] + x1 := normalize * x[1] + x2 := normalize * x[2] + x3 := normalize * x[3] + x4 := normalize * x[4] + x5 := normalize * x[5] + x6 := normalize * x[6] + x7 := normalize * x[7] + x8 := normalize * x[8] + x9 := normalize * x[9] + x10 := normalize * x[10] + x11 := normalize * x[11] + x12 := normalize * x[12] + x13 := normalize * x[13] + x14 := normalize * x[14] + x15 := normalize * x[15] + + x0, x1 = x0+x1, x0-x1 + x2, x3 = x2+x3, x2-x3 + + x0, x2 = x0+x2, x0-x2 + x1, x3 = x1+x3, x1-x3 + + x4, x5 = x4+x5, x4-x5 + x6, x7 = x6+x7, x6-x7 + + x4, x6 = x4+x6, x4-x6 + x5, x7 = x5+x7, x5-x7 + + x0, x4 = x0+x4, x0-x4 + x1, x5 = x1+x5, x1-x5 + x2, x6 = x2+x6, x2-x6 + x3, x7 = x3+x7, x3-x7 + + x8, x9 = x8+x9, x8-x9 + x10, x11 = x10+x11, x10-x11 + + x8, x10 = x8+x10, x8-x10 + x9, x11 = x9+x11, x9-x11 + + x12, x13 = x12+x13, x12-x13 + x14, x15 = x14+x15, x14-x15 + + x12, x14 = x12+x14, x12-x14 + x13, x15 = x13+x15, x13-x15 + + x8, x12 = x8+x12, x8-x12 + x9, x13 = x9+x13, x9-x13 + x10, x14 = x10+x14, x10-x14 + x11, x15 = x11+x15, x11-x15 + + x0, x8 = x0+x8, x0-x8 + x1, x9 = x1+x9, x1-x9 + x2, x10 = x2+x10, x2-x10 + x3, x11 = x3+x11, x3-x11 + x4, x12 = x4+x12, x4-x12 + x5, x13 = x5+x13, x5-x13 + x6, x14 = x6+x14, x6-x14 + x7, x15 = x7+x15, x7-x15 + + x[0] = x0 + x[1] = x1 + x[2] = x2 + x[3] = x3 + x[4] = x4 + x[5] = x5 + x[6] = x6 + x[7] = x7 + x[8] = x8 + x[9] = x9 + x[10] = x10 + x[11] = x11 + x[12] = x12 + x[13] = x13 + x[14] = x14 + x[15] = x15 +} + +// This explicit instantiation is about 10% faster. 
+func fastWalshHadamardTransform64(x []float32) { + const normalize = 0.125 + fastWalshHadamardTransform16(x[:16], normalize) + fastWalshHadamardTransform16(x[16:32], normalize) + for i := range 16 { + x[i], x[16+i] = x[i]+x[16+i], x[i]-x[16+i] + } + + fastWalshHadamardTransform16(x[32:48], normalize) + fastWalshHadamardTransform16(x[48:], normalize) + for i := 32; i < 48; i++ { + x[i], x[16+i] = x[i]+x[16+i], x[i]-x[16+i] + } + + for i := range 32 { + x[i], x[32+i] = x[i]+x[32+i], x[i]-x[32+i] + } +} + +func block64FWHT256(x []float32) { + const normalize = 0.0625 + fastWalshHadamardTransform16(x[0:16], normalize) + fastWalshHadamardTransform16(x[16:32], normalize) + for i := range 16 { + x[i], x[16+i] = x[i]+x[16+i], x[i]-x[16+i] + } + + fastWalshHadamardTransform16(x[32:48], normalize) + fastWalshHadamardTransform16(x[48:64], normalize) + for i := 32; i < 48; i++ { + x[i], x[16+i] = x[i]+x[16+i], x[i]-x[16+i] + } + + for i := range 32 { + x[i], x[32+i] = x[i]+x[32+i], x[i]-x[32+i] + } +} + +func fastWalshHadamardTransform256(x []float32) { + block64FWHT256(x[0:64]) + block64FWHT256(x[64:128]) + for i := range 64 { + x[i], x[64+i] = x[i]+x[64+i], x[i]-x[64+i] + } + + block64FWHT256(x[128:192]) + block64FWHT256(x[192:256]) + for i := 128; i < 192; i++ { + x[i], x[64+i] = x[i]+x[64+i], x[i]-x[64+i] + } + + for i := range 128 { + x[i], x[128+i] = x[i]+x[128+i], x[i]-x[128+i] + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/fast_rotation_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/fast_rotation_test.go new file mode 100644 index 0000000000000000000000000000000000000000..9c51b10bd46fe57ea2010d04447437bfebfbe209 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/fast_rotation_test.go @@ -0,0 +1,440 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | 
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
//
// CONTACT: hello@weaviate.io
//

package compressionhelpers_test

import (
	"fmt"
	"math"
	"math/rand/v2"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/weaviate/weaviate/adapters/repos/db/vector/compressionhelpers"
	"golang.org/x/exp/slices"
)

// norm returns the Euclidean norm of x.
func norm(x []float32) float64 {
	return math.Sqrt(dot(x, x))
}

// dist returns the Euclidean distance between u and v (assumes equal length).
func dist(u []float32, v []float32) float64 {
	x := make([]float32, len(v))
	for i := range x {
		x[i] = u[i] - v[i]
	}
	return norm(x)
}

// dot returns the dot product of u and v, accumulated in float64 for accuracy.
func dot(u []float32, v []float32) float64 {
	var sum float64
	for i := range u {
		sum += float64(u[i]) * float64(v[i])
	}
	return sum
}

// randomNormalVector returns a d-dimensional vector of i.i.d. standard normal
// samples drawn from rng.
func randomNormalVector(d int, rng *rand.Rand) []float32 {
	z := make([]float32, d)
	for i := range d {
		z[i] = float32(rng.NormFloat64())
	}
	return z
}

// randomUnitVector returns a uniformly random point on the d-sphere by
// normalizing a Gaussian vector to unit length.
func randomUnitVector(d int, rng *rand.Rand) []float32 {
	x := randomNormalVector(d, rng)
	normalize := float32(1.0 / norm(x))
	for i := range x {
		x[i] *= normalize
	}
	return x
}

// newRNG returns a deterministic PCG-based generator for reproducible tests.
func newRNG(seed uint64) *rand.Rand {
	return rand.New(rand.NewPCG(seed, 0x385ab5285169b1ac))
}

// An orthogonal transform must preserve vector norms up to floating-point
// error.
func TestFastRotationPreservesNorm(t *testing.T) {
	rng := newRNG(65234)
	n := 100
	for range n {
		d := 2 + rng.IntN(2000)
		r := 1 + rng.IntN(5)
		rotation := compressionhelpers.NewFastRotation(d, r, rng.Uint64())
		z := randomNormalVector(d, rng)
		rz := rotation.Rotate(z)
		assert.Less(t, math.Abs(norm(rz)-norm(z)), 5e-6)
	}
}

// An orthogonal transform must also preserve pairwise Euclidean distances.
func TestFastRotationPreservesDistance(t *testing.T) {
	rng := newRNG(4242)
	n := 100
	for range n {
		d := 2 + rng.IntN(2000)
		rounds := 1 + rng.IntN(5)
		rotation := compressionhelpers.NewFastRotation(d, rounds, rng.Uint64())
		z1 := randomNormalVector(d, rng)
		z2 := randomNormalVector(d, rng)
		rz1 := rotation.Rotate(z1)
		rz2 := rotation.Rotate(z2)
		assert.Less(t, math.Abs(dist(rz1, rz2)-dist(z1, z2)), 6e-6)
	}
}

// The output is padded to the next multiple of 64 dimensions (minimum 64).
func TestFastRotationOutputLength(t *testing.T) {
	rng := newRNG(42)
	n := 100
	for range n {
		d := rng.IntN(1000)
		r := 1 + rng.IntN(5)
		rotation := compressionhelpers.NewFastRotation(d, r, rng.Uint64())
		x := make([]float32, d)
		rx := rotation.Rotate(x)
		if d < 64 {
			assert.Equal(t, len(rx), 64)
		} else {
			assert.GreaterOrEqual(t, len(rx), 64)
			assert.Less(t, len(rx), d+64)
		}
	}
}

// Rotate the standard basis and verify that the rotated vectors are orthogonal.
func TestFastRotationPreservesOrthogonality(t *testing.T) {
	rng := newRNG(424242)
	dimensions := []int{3, 8, 26, 32, 33, 61, 127, 128, 129, 255, 257}
	for _, d := range dimensions {
		unitVectors := make([][]float32, d)
		for i := range d {
			v := make([]float32, d)
			v[i] = 1.0
			unitVectors[i] = v
		}

		r := compressionhelpers.NewFastRotation(d, 3, rng.Uint64())
		rotatedVectors := make([][]float32, d)
		for i := range d {
			rotatedVectors[i] = r.Rotate(unitVectors[i])
		}

		// The Gram matrix of the rotated basis must be close to the identity.
		for i := range d {
			u := rotatedVectors[i]
			for j := range d {
				v := rotatedVectors[j]
				if i == j {
					assert.Less(t, math.Abs(dot(u, v)-1.0), 1e-6)
				} else {
					assert.Less(t, math.Abs(dot(u, v)), 1e-6)
				}
			}
		}
	}
}

// Smoothing doesn't work great unless we use a high number of rounds.
// This in turn probably comes with more noticeable floating point errors.
func TestFastRotationSmoothensVector(t *testing.T) {
	rng := newRNG(424242)
	dim := 2 + rng.IntN(1000)
	rounds := 5
	r := compressionhelpers.NewFastRotation(dim, rounds, rng.Uint64())

	// A rotated basis vector should have its mass spread out: every entry is
	// expected to stay below ~5.8/sqrt(outputDim).
	bound := 5.8 / math.Sqrt(float64(r.OutputDim))
	for i := range dim {
		x := make([]float32, dim)
		x[i] = 1.0
		rx := r.Rotate(x)
		for j := range rx {
			assert.Less(t, math.Abs(float64(rx[j])), bound,
				"Failure in index %d of %d-dim vector e%d rotated into %d-dims using %d rounds", j, dim, i, len(rx), rounds)
		}
	}
}

// The uniform distribution on the d-dimensional sphere can be produced by
// normalizing a d-dimensional vector of i.i.d standard normal distributed
// random variables to unit length.
//
// The ith entry of a randomly rotated unit vector should therefore follow the
// distribution Zi / SQRT(Z1^2 + ... + Zd^2).
//
// When d is relatively large, e.g. d >= 32, the denominator should be tightly
// concentrated around sqrt(d). We can therefore test that (sqrt(d)*Rx)_i
// behaves like a standard normal distributed variable.
func TestFastRotatedEntriesAreNormalizedGaussian(t *testing.T) {
	type CumulativeProbabilityCount struct {
		Value       float64
		Probability float64
		Count       int
	}
	// Reference points of the standard normal CDF.
	cdf := []CumulativeProbabilityCount{
		{-3.0, 1.0 - 0.9987, 0},
		{-2.0, 1.0 - 0.9772, 0},
		{-1.0, 1.0 - 0.8413, 0},
		{-0.5, 1.0 - 0.6915, 0},
		{-0.25, 1.0 - 0.5987, 0},
		{0.0, 0.5, 0},
		{0.25, 0.5987, 0},
		{0.5, 0.6915, 0},
		{1.0, 0.8413, 0},
		{2.0, 0.9772, 0},
		{3.0, 0.9987, 0},
	}

	dimensions := make([]int, 0)
	for i := range 15 {
		dimensions = append(dimensions, 64+i*64)
	}

	for _, d := range dimensions {
		rounds := 5
		var seed uint64 = 424242
		r := compressionhelpers.NewFastRotation(d, rounds, seed)
		for i := range d {
			v := make([]float32, d)
			v[i] = 1.0
			rv := r.Rotate(v)
			for j := range d {
				// Scale by sqrt(d) so entries should look standard normal.
				z := math.Sqrt(float64(d)) * float64(rv[j])
				for k := range cdf {
					if z < cdf[k].Value {
						cdf[k].Count++
					}
				}
			}
		}
		acceptableDeviation := 0.012 // Acceptable absolute deviation from standard normal CDF.
		n := d * d                   // Number of measurements.
		for i := range cdf {
			p := float64(cdf[i].Count) / float64(n)
			assert.Less(t, math.Abs(p-cdf[i].Probability), acceptableDeviation)
			// Reset for next run.
			cdf[i].Count = 0
		}
	}
}

// Rotations with different seeds should place a fixed vector independently on
// the sphere; check the empirical frequency of a sign-pattern event against
// its expected probability of 1/8.
func TestFastRotatedVectorsAreUniformOnSphere(t *testing.T) {
	d := 128
	rounds := 3
	v := make([]float32, d)
	v[0] = 1.0
	target := 100
	n := 8 * target
	var count int
	for i := range n {
		r := compressionhelpers.NewFastRotation(d, rounds, uint64(i))
		rv := r.Rotate(v)
		if rv[17] < 0 && rv[32] > 0 && rv[41] < 0 {
			count++
		}
	}
	dev := 15
	assert.Greater(t, count, target-dev)
	assert.Less(t, count, target+dev)
}

// For testing that the unrolled recursion gives the same result.
// fastWalshHadamardTransform is a simple recursive reference implementation
// used to validate the unrolled production kernels bit-for-bit.
func fastWalshHadamardTransform(x []float32, normalize float32) {
	if len(x) == 2 {
		x[0], x[1] = normalize*(x[0]+x[1]), normalize*(x[0]-x[1])
		return
	}
	m := len(x) / 2
	fastWalshHadamardTransform(x[:m], normalize)
	fastWalshHadamardTransform(x[m:], normalize)
	for i := range m {
		x[i], x[m+i] = x[i]+x[m+i], x[i]-x[m+i]
	}
}

// The unrolled 64-point kernel must match the recursive reference exactly on
// ±1 inputs (all intermediates are dyadic, so results are bit-identical).
func TestFastWalshHadamardTransform64(t *testing.T) {
	rng := newRNG(7212334)
	n := 1000
	dim := 64
	for range n {
		x := make([]float32, dim)
		for i := range x {
			x[i] = 1
			if rng.Float64() < 0.5 {
				x[i] = -1
			}
		}
		target := make([]float32, dim)
		copy(target, x)
		fastWalshHadamardTransform(target, 0.125)
		compressionhelpers.FastWalshHadamardTransform64(x)
		assert.True(t, slices.Equal(x, target))
	}
}

// Same check for the 256-point kernel (normalization 1/16 = 1/sqrt(256)).
func TestFastWalshHadamardTransform256(t *testing.T) {
	rng := newRNG(7212334)
	n := 1000
	dim := 256
	for range n {
		x := make([]float32, dim)
		for i := range x {
			x[i] = 1
			if rng.Float64() < 0.5 {
				x[i] = -1
			}
		}
		target := make([]float32, 256)
		copy(target, x)
		fastWalshHadamardTransform(target, 0.0625)
		compressionhelpers.FastWalshHadamardTransform256(x)
		assert.True(t, slices.Equal(x, target))
	}
}

// Measures raw rotation speed across typical embedding dimensionalities.
func BenchmarkFastRotation(b *testing.B) {
	rng := newRNG(42)
	dimensions := []int{128, 256, 512, 768, 1024, 1536, 2048}
	rounds := []int{3, 5}
	for _, dim := range dimensions {
		x := make([]float32, dim)
		x[0] = 1.0
		for _, r := range rounds {
			rotation := compressionhelpers.NewFastRotation(dim, r, rng.Uint64())
			b.Run(fmt.Sprintf("Rotate-d%d-r%d", dim, r), func(b *testing.B) {
				for b.Loop() {
					rotation.Rotate(x)
				}
			})
		}
	}
}

// Measures how much floating-point error rotation adds to dot products.
func BenchmarkFastRotationError(b *testing.B) {
	rng := newRNG(42)
	dimensions := []int{64, 128, 256, 512, 768, 1024, 1536, 2048}
	rounds := []int{3, 5}
	for _, dim := range dimensions {
		x := randomUnitVector(dim, rng)
		y := randomUnitVector(dim, rng)
		target := dot(x, y)
		for _, r := range rounds {
			b.Run(fmt.Sprintf("Rotate-d%d-r%d", dim, r), func(b *testing.B) {
				var errorSum float64
				var maxError float64
				for b.Loop() {
					b.StopTimer()
					rotation := compressionhelpers.NewFastRotation(dim, r, rng.Uint64())
					b.StartTimer()
					rx := rotation.Rotate(x)
					ry := rotation.Rotate(y)
					b.StopTimer()
					err := math.Abs(dot(rx, ry) - target)
					errorSum += err
					if err > maxError {
						maxError = err
					}
				}
				// The absolute errors are approximately of size 1e-7.
				errorScale := 1e7
				b.ReportMetric(errorScale*errorSum/float64(b.N), "error(avg)")
				b.ReportMetric(errorScale*maxError, "error(max)")
			})
		}
	}
}

// The uniform distribution on the d-dimensional sphere can be produced by
// normalizing a d-dimensional vector of i.i.d standard normal distributed
// random variables to unit length.
//
// The ith entry of a randomly rotated unit vector should therefore follow the
// distribution Zi / SQRT(Z1^2 + ... + Zd^2).
//
// When d is relatively large, e.g. d >= 32, the denominator should be tightly
// concentrated around sqrt(d). We can therefore test that (sqrt(d)*Rx)_i
// behaves like a standard normal distributed variable.
func BenchmarkFastRotationOutputDistribution(b *testing.B) {
	rounds := []int{1, 2, 3, 4, 5}
	inputDimensions := []int{1536}
	rng := newRNG(1234)

	for _, inputDim := range inputDimensions {
		for _, r := range rounds {
			b.Run(fmt.Sprintf("Rotation-d%d-r%d", inputDim, r), func(b *testing.B) {
				type CumulativeProbabilityCount struct {
					Value       float64
					Probability float64
					Count       int
				}
				// Reference points of the standard normal CDF.
				cdf := []CumulativeProbabilityCount{
					{-3.0, 1.0 - 0.9987, 0},
					{-2.0, 1.0 - 0.9772, 0},
					{-1.0, 1.0 - 0.8413, 0},
					{-0.5, 1.0 - 0.6915, 0},
					{-0.25, 1.0 - 0.5987, 0},
					{0.0, 0.5, 0},
					{0.25, 0.5987, 0},
					{0.5, 0.6915, 0},
					{1.0, 0.8413, 0},
					{2.0, 0.9772, 0},
					{3.0, 0.9987, 0},
				}

				var intervalSum float64
				var intervalMax float64
				var outDim int
				for b.Loop() {
					rotation := compressionhelpers.NewFastRotation(inputDim, r, rng.Uint64())
					outDim = int(rotation.OutputDim)
					// We rotate each of the d unit vectors and collect measurements.
					for i := range outDim {
						v := make([]float32, outDim)
						v[i] = 1.0
						rv := rotation.Rotate(v)
						for j := range outDim {
							z := math.Sqrt(float64(outDim)) * float64(rv[j])
							for k := range cdf {
								if z < cdf[k].Value {
									cdf[k].Count++
								}
							}
						}
						interval := float64(slices.Max(rv) - slices.Min(rv))
						intervalSum += interval
						if interval > intervalMax {
							intervalMax = interval
						}
					}

				}

				// Report the max deviation from the normal CDF.
				var maxDeviation float64
				for i := range cdf {
					numEntries := outDim * outDim * b.N
					p := float64(cdf[i].Count) / float64(numEntries)
					dev := math.Abs(p - cdf[i].Probability)
					if dev > maxDeviation {
						maxDeviation = dev
					}
				}
				b.ReportMetric(maxDeviation, "cdf_dev(max)")

				// Report the average and max interval length.
				numIntervals := outDim * b.N
				avgInterval := intervalSum / float64(numIntervals)
				b.ReportMetric(avgInterval, "interval(avg)")
				b.ReportMetric(intervalMax, "interval(max)")
			})
		}
	}
}

// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
//
// CONTACT: hello@weaviate.io
//

package compressionhelpers

import (
	"encoding/binary"
	"fmt"
	"math"

	"github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer"
	"github.com/weaviate/weaviate/adapters/repos/db/vector/kmeans"
)

// KMeansEncoder quantizes one vector segment to the nearest of k centroids
// learned via k-means (a product-quantization building block).
type KMeansEncoder struct {
	k        int                // Number of centers/centroids.
	d        int                // Dimensions of the data.
	s        int                // Segment where it operates.
	centers  [][]float32        // k-means centroids used for encoding data.
	distance distancer.Provider // Distance measure used to encode to nearest center.
}

// NewKMeansEncoder creates an untrained encoder; call Fit before Encode.
func NewKMeansEncoder(k int, dimensions int, segment int) *KMeansEncoder {
	encoder := &KMeansEncoder{
		k:        k,
		d:        dimensions,
		s:        segment,
		distance: distancer.NewL2SquaredProvider(),
	}
	return encoder
}

// NewKMeansEncoderWithCenters restores an encoder from persisted centroids.
func NewKMeansEncoderWithCenters(k int, dimensions int, segment int, centers [][]float32) *KMeansEncoder {
	encoder := NewKMeansEncoder(k, dimensions, segment)
	encoder.centers = centers
	return encoder
}

// Assumes that data contains only non-nil vectors.
+func (m *KMeansEncoder) Fit(data [][]float32) error { + km := kmeans.New(m.k, m.d, m.s) + km.DeltaThreshold = 0.01 + km.IterationThreshold = 10 + // Experiments on ANN datasets reveal that random initialization is ~20% + // faster than k-means++ initialization when used for PQ and gives the same + // or slightly better recall. + km.Initialization = kmeans.RandomInitialization + // Experiments show that GraphPruning is always faster for short segments, + // typically giving a 2x speedup for d = 8. On some datasets such as SIFT + // and GIST it is faster also up to 32 dimensions, and it will never be much + // slower than brute force assignment since the additional overhead is + // ~k^2 distance computations and k << n. We therefore always enable it. + km.Assignment = kmeans.GraphPruning + err := km.Fit(data) + m.centers = km.Centers + return err +} + +func (m *KMeansEncoder) Encode(point []float32) byte { + var minDist float32 = math.MaxFloat32 + idx := 0 + segment := point[m.s*m.d : (m.s+1)*m.d] + for i := range m.centers { + if dist, _ := m.distance.SingleDist(segment, m.centers[i]); dist < minDist { + minDist = dist + idx = i + } + } + return byte(idx) +} + +func (m *KMeansEncoder) Centroid(i byte) []float32 { + return m.centers[i] +} + +func (m *KMeansEncoder) Add(x []float32) { + // Only here to satisfy the PQEncoder interface. +} + +func (m *KMeansEncoder) ExposeDataForRestore() []byte { + ds := len(m.centers[0]) + len := 4 * m.k * ds + buffer := make([]byte, len) + for i := 0; i < len/4; i++ { + binary.LittleEndian.PutUint32(buffer[i*4:(i+1)*4], math.Float32bits(m.centers[i/ds][i%ds])) + } + return buffer +} + +// String prints some minimal information about the encoder. This can be +// used for viability checks to see if the encoder was initialized +// correctly – for example after a restart. 
+func (m *KMeansEncoder) String() string { + maxElem := 5 + var firstCenters []float32 + i := 0 + for _, center := range m.centers { + for _, centerVal := range center { + if i == maxElem { + break + } + + firstCenters = append(firstCenters, centerVal) + i++ + } + if i == maxElem { + break + } + } + return fmt.Sprintf("KMeans Encoder: K=%d, dim=%d, segment=%d first_center_truncated=%v", + m.k, m.d, m.s, firstCenters) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/kmeans_encoder_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/kmeans_encoder_test.go new file mode 100644 index 0000000000000000000000000000000000000000..3d2029d9ac821b7cc8718b5bee32d89c87e984c3 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/kmeans_encoder_test.go @@ -0,0 +1,66 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
//
// CONTACT: hello@weaviate.io
//

//go:build !race

package compressionhelpers_test

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/weaviate/weaviate/adapters/repos/db/vector/compressionhelpers"
	"github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer"
	testinghelpers "github.com/weaviate/weaviate/adapters/repos/db/vector/testinghelpers"
)

// Every encoded point must map to its nearest centroid under squared L2.
func TestKMeansEncoderEncodesToNearestCentroid(t *testing.T) {
	l2 := distancer.NewL2SquaredProvider()
	vectors := [][]float32{
		{0, 5},
		{0.1, 4.9},
		{0.01, 5.1},
		{10.1, 7},
		{5.1, 2},
		{5.0, 2.1},
	}
	encoder := compressionhelpers.NewKMeansEncoder(
		3,
		2,
		0,
	)
	encoder.Fit(vectors)
	centers := make([]byte, 6)
	for i := range centers {
		centers[i] = encoder.Encode(vectors[i])
	}
	// The distance to the assigned centroid must be minimal over all codes.
	for v := range vectors {
		min, _ := l2.SingleDist(vectors[v], encoder.Centroid(centers[v]))
		for c := range centers {
			dist, _ := l2.SingleDist(vectors[v], encoder.Centroid(centers[c]))
			assert.True(t, dist >= min)
		}
	}
}

// Fitting 256 centroids on 10k random vectors must terminate well within the
// 50-second budget (guards against k-means failing to converge).
func TestKMeansEncoderTerminatesOnRandomData(t *testing.T) {
	vectorsSize := 10000
	vectors, _ := testinghelpers.RandomVecs(vectorsSize, 0, 128)
	before := time.Now()
	encoder := compressionhelpers.NewKMeansEncoder(
		256,
		1,
		10,
	)
	encoder.Fit(vectors)
	assert.True(t, time.Since(before).Seconds() < 50)
}
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
//
// CONTACT: hello@weaviate.io
//

// Code generated by mockery v2.53.2. DO NOT EDIT.
// NOTE(review): regenerate with mockery rather than hand-editing this file.

package compressionhelpers

import mock "github.com/stretchr/testify/mock"

// MockCompressionStats is an autogenerated mock type for the CompressionStats type
type MockCompressionStats struct {
	mock.Mock
}

type MockCompressionStats_Expecter struct {
	mock *mock.Mock
}

func (_m *MockCompressionStats) EXPECT() *MockCompressionStats_Expecter {
	return &MockCompressionStats_Expecter{mock: &_m.Mock}
}

// CompressionRatio provides a mock function with given fields: dimensions
func (_m *MockCompressionStats) CompressionRatio(dimensions int) float64 {
	ret := _m.Called(dimensions)

	if len(ret) == 0 {
		panic("no return value specified for CompressionRatio")
	}

	var r0 float64
	if rf, ok := ret.Get(0).(func(int) float64); ok {
		r0 = rf(dimensions)
	} else {
		r0 = ret.Get(0).(float64)
	}

	return r0
}

// MockCompressionStats_CompressionRatio_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CompressionRatio'
type MockCompressionStats_CompressionRatio_Call struct {
	*mock.Call
}

// CompressionRatio is a helper method to define mock.On call
//   - dimensions int
func (_e *MockCompressionStats_Expecter) CompressionRatio(dimensions interface{}) *MockCompressionStats_CompressionRatio_Call {
	return &MockCompressionStats_CompressionRatio_Call{Call: _e.mock.On("CompressionRatio", dimensions)}
}

func (_c *MockCompressionStats_CompressionRatio_Call) Run(run func(dimensions int)) *MockCompressionStats_CompressionRatio_Call {
	_c.Call.Run(func(args mock.Arguments) {
		run(args[0].(int))
	})
	return _c
}

func (_c *MockCompressionStats_CompressionRatio_Call) Return(_a0 float64) *MockCompressionStats_CompressionRatio_Call {
	_c.Call.Return(_a0)
	return _c
}

func (_c *MockCompressionStats_CompressionRatio_Call) RunAndReturn(run func(int) float64) *MockCompressionStats_CompressionRatio_Call {
	_c.Call.Return(run)
	return _c
}

// CompressionType provides a mock function with no fields
func (_m *MockCompressionStats) CompressionType() string {
	ret := _m.Called()

	if len(ret) == 0 {
		panic("no return value specified for CompressionType")
	}

	var r0 string
	if rf, ok := ret.Get(0).(func() string); ok {
		r0 = rf()
	} else {
		r0 = ret.Get(0).(string)
	}

	return r0
}

// MockCompressionStats_CompressionType_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CompressionType'
type MockCompressionStats_CompressionType_Call struct {
	*mock.Call
}

// CompressionType is a helper method to define mock.On call
func (_e *MockCompressionStats_Expecter) CompressionType() *MockCompressionStats_CompressionType_Call {
	return &MockCompressionStats_CompressionType_Call{Call: _e.mock.On("CompressionType")}
}

func (_c *MockCompressionStats_CompressionType_Call) Run(run func()) *MockCompressionStats_CompressionType_Call {
	_c.Call.Run(func(args mock.Arguments) {
		run()
	})
	return _c
}

func (_c *MockCompressionStats_CompressionType_Call) Return(_a0 string) *MockCompressionStats_CompressionType_Call {
	_c.Call.Return(_a0)
	return _c
}

func (_c *MockCompressionStats_CompressionType_Call) RunAndReturn(run func() string) *MockCompressionStats_CompressionType_Call {
	_c.Call.Return(run)
	return _c
}

// NewMockCompressionStats creates a new instance of MockCompressionStats. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewMockCompressionStats(t interface {
	mock.TestingT
	Cleanup(func())
}) *MockCompressionStats {
	mock := &MockCompressionStats{}
	mock.Mock.Test(t)

	t.Cleanup(func() { mock.AssertExpectations(t) })

	return mock
}

// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
//
// CONTACT: hello@weaviate.io
//

package compressionhelpers

import (
	"bytes"
	"sync"
	"sync/atomic"
	"time"

	"github.com/sirupsen/logrus"
	"github.com/weaviate/weaviate/adapters/repos/db/lsmkv"
	enterrors "github.com/weaviate/weaviate/entities/errors"
	"golang.org/x/text/language"
	"golang.org/x/text/message"
)

type parallelIterator[T byte | uint64] struct {
	bucket              *lsmkv.Bucket
	parallel            int
	logger              logrus.FieldLogger
	loadId              func([]byte) uint64
	fromCompressedBytes func(compressed []byte, buf *[]T) []T

	// a simple counter that each routine can write to atomically. It is used to
	// track progress and display it to the user.
	loaded atomic.Int64
	// rather than tracking every tick which adds a lot of synchronization overhead,
	// only track every trackInterval-th tick.
	// flush granularity (in vectors) for the shared progress counter, see
	// trackIndividual
	trackInterval int64
	// how often to report progress to the user (via logs)
	reportProgressInterval time.Duration
}

// NewParallelIterator builds an iterator over all compressed vectors in
// bucket. loadId turns an LSM key into a vector id, fromCompressedBytes turns
// a stored value into a []T vector (optionally sub-slicing the provided
// buffer), and parallel controls the number of concurrent cursor routines.
func NewParallelIterator[T byte | uint64](bucket *lsmkv.Bucket, parallel int, loadId func([]byte) uint64, fromCompressedBytes func(compressed []byte, buf *[]T) []T,
	logger logrus.FieldLogger,
) *parallelIterator[T] {
	return &parallelIterator[T]{
		bucket:                 bucket,
		parallel:               parallel,
		logger:                 logger,
		loadId:                 loadId,
		fromCompressedBytes:    fromCompressedBytes,
		trackInterval:          1000,
		reportProgressInterval: 5 * time.Second,
	}
}

// IterateAll streams batches of decoded vectors over the returned channel.
// The bucket is split at quantile-based seed keys and read by parallel cursor
// goroutines; the channel is closed once all of them have finished.
func (cpi *parallelIterator[T]) IterateAll() chan []VecAndID[T] {
	if cpi.parallel <= 1 {
		// caller explicitly wants no parallelism, fallback to regular cursor
		return cpi.iterateAllNoConcurrency()
	}

	stopTracking := cpi.startTracking()

	// We need one fewer seed than our desired parallel factor, that is because
	// we will add one routine that starts with cursor.First() and reads to the
	// first checkpoint, therefore we will have len(checkpoints) + 1 routines in
	// total.
	seedCount := cpi.parallel - 1
	seeds := cpi.bucket.QuantileKeys(seedCount)
	if len(seeds) == 0 {
		// no seeds likely means an empty index. If we exit early, we also need to
		// stop the progress tracking.
		stopTracking()
		// NOTE(review): ranging over a nil channel blocks forever — confirm all
		// callers nil-check the result before ranging over it.
		return nil
	}

	wg := sync.WaitGroup{}
	out := make(chan []VecAndID[T])

	// There are three scenarios:
	// 1. Read from beginning to first checkpoint
	// 2. Read from checkpoint n to checkpoint n+1
	// 3. Read from last checkpoint to end

	extract := func(k, v []byte, buf *[]T) VecAndID[T] {
		id := cpi.loadId(k)
		vec := cpi.fromCompressedBytes(v, buf)
		return VecAndID[T]{Id: id, Vec: vec}
	}

	// S1: Read from beginning to first checkpoint:
	wg.Add(1)
	enterrors.GoWrapper(func() {
		c := cpi.bucket.Cursor()
		localResults := make([]VecAndID[T], 0, 10_000)
		defer c.Close()
		defer wg.Done()

		// The first call of cpi.fromCompressedBytes will allocate a buffer into localBuf
		// which can then be used for the rest of the calls. Once the buffer runs
		// out, the next call will allocate a new buffer.
		var localBuf []T

		for k, v := c.First(); k != nil && bytes.Compare(k, seeds[0]) < 0; k, v = c.Next() {
			if len(k) == 0 {
				cpi.logger.WithFields(logrus.Fields{
					"action": "hnsw_compressed_vector_cache_prefill",
					"len":    len(v),
					"lenk":   len(k),
				}).Warn("skipping compressed vector with unexpected length")
				continue
			}

			localResults = append(localResults, extract(k, v, &localBuf))
			cpi.trackIndividual(len(localResults))
		}
		cpi.cleanUpTempAllocs(localResults, &localBuf)

		out <- localResults
	}, cpi.logger)

	// S2: Read from checkpoint n to checkpoint n+1, stop at last checkpoint:
	for i := 0; i < len(seeds)-1; i++ {
		wg.Add(1)
		start := seeds[i]
		end := seeds[i+1]

		enterrors.GoWrapper(func() {
			defer wg.Done()
			localResults := make([]VecAndID[T], 0, 10_000)
			c := cpi.bucket.Cursor()
			defer c.Close()

			// The first call of cpi.fromCompressedBytes will allocate a buffer into localBuf
			// which can then be used for the rest of the calls. Once the buffer runs
			// out, the next call will allocate a new buffer.
			var localBuf []T
			for k, v := c.Seek(start); k != nil && bytes.Compare(k, end) < 0; k, v = c.Next() {
				if len(k) == 0 {
					cpi.logger.WithFields(logrus.Fields{
						"action": "hnsw_compressed_vector_cache_prefill",
						"len":    len(v),
						"lenk":   len(k),
					}).Warn("skipping compressed vector with unexpected length")
					continue
				}
				localResults = append(localResults, extract(k, v, &localBuf))
				cpi.trackIndividual(len(localResults))
			}
			cpi.cleanUpTempAllocs(localResults, &localBuf)

			out <- localResults
		}, cpi.logger)
	}

	// S3: Read from last checkpoint to end:
	wg.Add(1)
	enterrors.GoWrapper(func() {
		c := cpi.bucket.Cursor()
		defer c.Close()
		defer wg.Done()
		localResults := make([]VecAndID[T], 0, 10_000)

		// The first call of cpi.fromCompressedBytes will allocate a buffer into localBuf
		// which can then be used for the rest of the calls. Once the buffer runs
		// out, the next call will allocate a new buffer.
		var localBuf []T
		for k, v := c.Seek(seeds[len(seeds)-1]); k != nil; k, v = c.Next() {
			if len(k) == 0 {
				cpi.logger.WithFields(logrus.Fields{
					"action": "hnsw_compressed_vector_cache_prefill",
					"len":    len(v),
					"lenk":   len(k),
				}).Warn("skipping compressed vector with unexpected length")
				continue
			}

			localResults = append(localResults, extract(k, v, &localBuf))
			cpi.trackIndividual(len(localResults))
		}
		cpi.cleanUpTempAllocs(localResults, &localBuf)

		out <- localResults
	}, cpi.logger)

	// closer routine: the channel is closed (and tracking stopped) only after
	// every reader routine has delivered its batch
	enterrors.GoWrapper(func() {
		wg.Wait()
		close(out)
		stopTracking()
	}, cpi.logger)

	return out
}

// iterateAllNoConcurrency is the parallel==1 fallback: a single cursor pass
// that delivers one batch over the returned channel.
func (cpi *parallelIterator[T]) iterateAllNoConcurrency() chan []VecAndID[T] {
	out := make(chan []VecAndID[T])
	stopTracking := cpi.startTracking()
	enterrors.GoWrapper(func() {
		defer close(out)
		c := cpi.bucket.Cursor()
		defer c.Close()
		defer stopTracking()

		// The first call of cpi.fromCompressedBytes will allocate a buffer into localBuf
		// which can then be used for the rest of the calls.
Once the buffer runs + // out, the next call will allocate a new buffer. + var localBuf []T + localResults := make([]VecAndID[T], 0, 10_000) + for k, v := c.First(); k != nil; k, v = c.Next() { + if len(k) == 0 { + cpi.logger.WithFields(logrus.Fields{ + "action": "hnsw_compressed_vector_cache_prefill", + "len": len(v), + "lenk": len(k), + }).Warn("skipping compressed vector with unexpected length") + continue + } + id := cpi.loadId(k) + vec := cpi.fromCompressedBytes(v, &localBuf) + localResults = append(localResults, VecAndID[T]{Id: id, Vec: vec}) + cpi.trackIndividual(len(localResults)) + } + + out <- localResults + }, cpi.logger) + + return out +} + +func (cpi *parallelIterator[T]) startTracking() func() { + cpi.loaded.Store(0) + + t := time.NewTicker(cpi.reportProgressInterval) + cancel := make(chan struct{}) + + enterrors.GoWrapper(func() { + start := time.Now() + lastReported := start + last := int64(0) + + p := message.NewPrinter(language.English) + + for { + select { + case now := <-t.C: + loaded := cpi.loaded.Load() + elapsed := now.Sub(start) + elapsedSinceLast := now.Sub(lastReported) + rate := float64(loaded-last) / elapsedSinceLast.Seconds() + totalRate := float64(loaded) / elapsed.Seconds() + + cpi.logger.WithFields(logrus.Fields{ + "action": "hnsw_compressed_vector_cache_prefill_progress", + "loaded": loaded, + "rate_per_second": int(rate), + "total_rate_per_second": int(totalRate), + "elapsed_total": elapsed, + }).Infof("loaded %s vectors in %s, current rate is %s vectors/s, total rate is %s vectors/s", + p.Sprintf("%d", loaded), elapsed.Round(10*time.Millisecond), + p.Sprintf("%.0f", rate), p.Sprintf("%.0f", totalRate)) + + last = loaded + case <-cancel: + t.Stop() + close(cancel) + return + } + } + }, cpi.logger) + + return func() { + cancel <- struct{}{} + } +} + +func (cpi *parallelIterator[T]) trackIndividual(loaded int) { + // Possibly a premature optimization, the idea is to reduce the necessary + // synchronization when we load from hundreds 
	// of goroutines in parallel.
	// Rather than tracking every single tick, we track in chunks of
	// cpi.trackInterval.
	if int64(loaded)%cpi.trackInterval == 0 {
		cpi.loaded.Add(cpi.trackInterval)
	}
}

// VecAndID pairs a vector id with its (compressed) vector representation.
type VecAndID[T uint64 | byte] struct {
	Id  uint64
	Vec []T
}

// cleanUpTempAllocs replaces the partially-used last buffer allocation with an
// exactly-fitting copy so the unused head of the buffer can be collected.
func (cpi *parallelIterator[T]) cleanUpTempAllocs(localResults []VecAndID[T], localBuf *[]T) {
	// the buffer is consumed from its end (len shrinks towards 0), so the
	// consumed portion is cap minus the remaining len
	usedSpaceInBuffer := cap(*localBuf) - len(*localBuf)
	if len(localResults) == 0 || usedSpaceInBuffer == cap(*localBuf) {
		// nothing loaded, or the buffer is fully used — no waste to trim
		return
	}

	// We allocate localBuf in chunks of 1000 vectors to avoid allocations for every single vector we load, which is a
	// big performance improvement.
	// However, we allocate that per go-routine and in the worst case we'd waste 1000*lengthOneVec*num_cores*2 bytes per
	// index. For MT with many small tenants this can add up to quite a bit of memory
	// This function creates a slice that exactly fits all elements in the last iteration, copies all data over from
	// localBuf and reassigns everything to the new buffer

	// localBuf is written to from the back => there is unused space at the front
	fittingLocalBuf := make([]T, usedSpaceInBuffer)
	// NOTE(review): assumes every entry in localResults has the same vector
	// length as localResults[0].Vec — confirm this invariant for all callers.
	lengthOneVec := len(localResults[0].Vec)
	entriesToRecopy := usedSpaceInBuffer / lengthOneVec

	// copy used data over to new buf
	unusedLength := len(*localBuf)
	*localBuf = (*localBuf)[:cap(*localBuf)]
	copy(fittingLocalBuf, (*localBuf)[unusedLength:])

	// order is important.
To get the correct mapping we need to iterated: + // - localResults from the back + // - fittingLocalBuf from the front + for i := 0; i < entriesToRecopy; i++ { + localResults[len(localResults)-i-1].Vec = fittingLocalBuf[:lengthOneVec] + fittingLocalBuf = fittingLocalBuf[lengthOneVec:] + } + + // explicitly tell GC that the old buffer can go away + *localBuf = nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/parallel_iterator_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/parallel_iterator_test.go new file mode 100644 index 0000000000000000000000000000000000000000..1144282830e3bf240a7c362341202d60a74babed --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/parallel_iterator_test.go @@ -0,0 +1,183 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package compressionhelpers + +import ( + "context" + "encoding/binary" + "fmt" + "testing" + + logrustest "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv" + "github.com/weaviate/weaviate/entities/cyclemanager" +) + +func TestCompressedParallelIterator(t *testing.T) { + tests := []iteratorTestCase{ + { + name: "single vector, many parallel routines", + totalVecs: 1, + parallel: 16, + }, + { + name: "two vectors, many parallel routines", + totalVecs: 2, + parallel: 16, + }, + { + name: "three vectors, many parallel routines", + totalVecs: 3, + parallel: 16, + }, + { + name: "many vectors, many parallel routines", + totalVecs: 1000, + parallel: 16, + }, + { + name: "many vectors, single routine", + totalVecs: 1000, + parallel: 1, + }, + { + name: "many vectors, more than allocation size per routine, two routines", + totalVecs: 2020, + parallel: 2, + }, + { + name: "one fewer vectors than routines", + totalVecs: 5, + parallel: 6, + }, + { + name: "matching vectors and routines", + totalVecs: 6, + parallel: 6, + }, + { + name: "one more vector than routines", + totalVecs: 7, + parallel: 6, + }, + } + + quantization := []string{"pq", "bq", "sq"} + testsWithQuantization := make([]iteratorTestCase, len(tests)*len(quantization)) + for i, test := range tests { + for j, q := range quantization { + test.quantization = q + testsWithQuantization[i*len(quantization)+j] = test + } + } + + for _, test := range testsWithQuantization { + t.Run(fmt.Sprintf("%s: %s", test.quantization, test.name), func(t *testing.T) { + bucket := buildCompressedBucketForTest(t, test.totalVecs) + defer bucket.Shutdown(context.Background()) + + logger, _ := logrustest.NewNullLogger() + loadId := binary.BigEndian.Uint64 + switch test.quantization { + case "pq": + assertValue := func(t *testing.T, vec VecAndID[byte]) { + valAsUint64 := 
binary.LittleEndian.Uint64(vec.Vec) + assert.Equal(t, vec.Id, valAsUint64) + } + q := &ProductQuantizer{} + fromCompressed := q.FromCompressedBytesWithSubsliceBuffer + cpi := NewParallelIterator(bucket, test.parallel, loadId, fromCompressed, logger) + testIterator(t, cpi, test, assertValue) + case "bq": + assertValue := func(t *testing.T, vec VecAndID[uint64]) { + assert.Equal(t, vec.Id, vec.Vec[0]) + } + q := NewBinaryQuantizer(nil) + fromCompressed := q.FromCompressedBytesWithSubsliceBuffer + cpi := NewParallelIterator(bucket, test.parallel, loadId, fromCompressed, logger) + testIterator(t, cpi, test, assertValue) + + case "sq": + assertValue := func(t *testing.T, vec VecAndID[byte]) { + valAsUint64 := binary.LittleEndian.Uint64(vec.Vec) + assert.Equal(t, vec.Id, valAsUint64) + } + q := &ScalarQuantizer{} + fromCompressed := q.FromCompressedBytesWithSubsliceBuffer + cpi := NewParallelIterator(bucket, test.parallel, loadId, fromCompressed, logger) + testIterator(t, cpi, test, assertValue) + + default: + t.Fatalf("unknown quantization: %s", test.quantization) + } + }) + } +} + +type iteratorTestCase struct { + name string + totalVecs int + parallel int + quantization string +} + +func testIterator[T uint64 | byte](t *testing.T, cpi *parallelIterator[T], test iteratorTestCase, + assertValue func(t *testing.T, vec VecAndID[T]), +) { + require.NotNil(t, cpi) + + ch := cpi.IterateAll() + idsFound := make(map[uint64]struct{}) + for vecs := range ch { + for _, vec := range vecs { + if _, ok := idsFound[vec.Id]; ok { + t.Errorf("id %d found more than once", vec.Id) + } + idsFound[vec.Id] = struct{}{} + + assertValue(t, vec) + } + } + + // assert all ids are found + // we already know that the ids are unique, so we can just check the + // length + require.Len(t, idsFound, test.totalVecs) +} + +func buildCompressedBucketForTest(t *testing.T, totalVecs int) *lsmkv.Bucket { + ctx := context.Background() + logger, _ := logrustest.NewNullLogger() + bucket, err := 
lsmkv.NewBucketCreator().NewBucket(ctx, t.TempDir(), "", logger, nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), + lsmkv.WithPread(true), lsmkv.WithSegmentsChecksumValidationEnabled(false)) + require.Nil(t, err) + + for i := 0; i < totalVecs; i++ { + key := make([]byte, 8) + val := make([]byte, 8) + + binary.BigEndian.PutUint64(key, uint64(i)) + // make the actual vector the same as the key that makes it easy to do some + // basic checks + binary.LittleEndian.PutUint64(val, uint64(i)) + + err := bucket.Put(key, val) + require.Nil(t, err) + } + + require.Nil(t, bucket.FlushAndSwitch()) + + return bucket +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/product_quantization.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/product_quantization.go new file mode 100644 index 0000000000000000000000000000000000000000..298607d4087d3e417447128a3bc2836e670690c7 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/product_quantization.go @@ -0,0 +1,455 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
//
// CONTACT: hello@weaviate.io
//

package compressionhelpers

import (
	"errors"
	"fmt"
	"math"
	"sync"

	"github.com/sirupsen/logrus"

	"github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer"
	ent "github.com/weaviate/weaviate/entities/vectorindex/hnsw"
)

type Encoder byte

const (
	UseTileEncoder   Encoder = 0
	UseKMeansEncoder Encoder = 1
)

// DistanceLookUpTable caches per-segment distances between a query center and
// the centroids of each segment, filled lazily during LookUp.
type DistanceLookUpTable struct {
	calculated []bool
	distances  []float32
	center     [][]float32
	segments   int
	centroids  int
	flatCenter []float32
}

func NewDistanceLookUpTable(segments int, centroids int, center []float32) *DistanceLookUpTable {
	distances := make([]float32, segments*centroids)
	calculated := make([]bool, segments*centroids)
	parsedCenter := make([][]float32, segments)
	// split the flat center vector into one sub-slice per segment
	ds := len(center) / segments
	for c := 0; c < segments; c++ {
		parsedCenter[c] = center[c*ds : (c+1)*ds]
	}

	dlt := &DistanceLookUpTable{
		distances:  distances,
		calculated: calculated,
		center:     parsedCenter,
		segments:   segments,
		centroids:  centroids,
		flatCenter: center,
	}
	return dlt
}

// Reset re-initializes a (possibly pooled) table for a new center, reusing the
// existing backing arrays when the dimensions match.
func (lut *DistanceLookUpTable) Reset(segments int, centroids int, center []float32) {
	elems := segments * centroids
	lut.segments = segments
	lut.centroids = centroids
	if len(lut.distances) != elems ||
		len(lut.calculated) != elems ||
		len(lut.center) != segments {
		lut.distances = make([]float32, segments*centroids)
		lut.calculated = make([]bool, segments*centroids)
		lut.center = make([][]float32, segments)
	} else {
		// same shape: only the "calculated" flags need clearing
		for i := range lut.calculated {
			lut.calculated[i] = false
		}
	}

	ds := len(center) / segments
	for c := 0; c < segments; c++ {
		lut.center[c] = center[c*ds : (c+1)*ds]
	}
	lut.flatCenter = center
}

// LookUp sums the per-segment distances for the encoded vector, computing and
// caching any centroid distance not seen before.
func (lut *DistanceLookUpTable) LookUp(
	encoded []byte,
	pq *ProductQuantizer,
) float32 {
	var sum float32

	for i := range pq.kms {
		c := ExtractCode8(encoded, i)
		if lut.distCalculated(i, c) {
			sum += lut.codeDist(i, c)
		} else {
			centroid := pq.kms[i].Centroid(c)
			dist := pq.distance.Step(lut.center[i], centroid)
			lut.setCodeDist(i, c, dist)
			lut.setDistCalculated(i, c)
			sum += dist
		}
	}
	return pq.distance.Wrap(sum)
}

// meant for better readability, rely on the fact that the compiler will inline this
func (lut *DistanceLookUpTable) posForSegmentAndCode(segment int, code byte) int {
	return segment*lut.centroids + int(code)
}

// meant for better readability, rely on the fact that the compiler will inline this
func (lut *DistanceLookUpTable) distCalculated(segment int, code byte) bool {
	return lut.calculated[lut.posForSegmentAndCode(segment, code)]
}

// meant for better readability, rely on the fact that the compiler will inline this
func (lut *DistanceLookUpTable) setDistCalculated(segment int, code byte) {
	lut.calculated[lut.posForSegmentAndCode(segment, code)] = true
}

// meant for better readability, rely on the fact that the compiler will inline this
func (lut *DistanceLookUpTable) codeDist(segment int, code byte) float32 {
	return lut.distances[lut.posForSegmentAndCode(segment, code)]
}

// meant for better readability, rely on the fact that the compiler will inline this
func (lut *DistanceLookUpTable) setCodeDist(segment int, code byte, dist float32) {
	lut.distances[lut.posForSegmentAndCode(segment, code)] = dist
}

// DLUTPool recycles DistanceLookUpTable instances between queries.
type DLUTPool struct {
	pool sync.Pool
}

func NewDLUTPool() *DLUTPool {
	return &DLUTPool{
		pool: sync.Pool{
			New: func() any {
				return &DistanceLookUpTable{}
			},
		},
	}
}

func (p *DLUTPool) Get(segments, centroids int, centers []float32) *DistanceLookUpTable {
	dlt := p.pool.Get().(*DistanceLookUpTable)
	dlt.Reset(segments, centroids, centers)
	return dlt
}

func (p *DLUTPool) Return(dlt *DistanceLookUpTable) {
	p.pool.Put(dlt)
}

type ProductQuantizer struct {
	ks                  int // centroids
	m                   int // segments
	ds                  int // dimensions per segment
	distance            distancer.Provider
	dimensions          int
	kms                 []PQEncoder
	encoderType         Encoder
	encoderDistribution EncoderDistribution
	dlutPool            *DLUTPool
	trainingLimit       int
	globalDistances     []float32
	logger              logrus.FieldLogger
}

// PQData is the serializable description of a trained product quantizer.
type PQData struct {
	Ks                  uint16
	M                   uint16
	Dimensions          uint16
	EncoderType         Encoder
	EncoderDistribution byte
	Encoders            []PQEncoder
	UseBitsEncoding     bool
	TrainingLimit       int
}

type PQStats struct {
	Ks int `json:"centroids"`
	M  int `json:"segments"`
}

func (p PQStats) CompressionType() string {
	return "pq"
}

func (p PQStats) CompressionRatio(dimensions int) float64 {
	// PQ compression: original size = dimensions * 4 bytes (float32)
	// compressed size = segments * 1 byte (one code per segment)
	originalSize := dimensions * 4
	compressedSize := p.M // segments
	return float64(originalSize) / float64(compressedSize)
}

type PQEncoder interface {
	Encode(x []float32) byte
	Centroid(b byte) []float32
	Add(x []float32)
	Fit(data [][]float32) error
	ExposeDataForRestore() []byte
}

// NewProductQuantizer validates cfg and builds an untrained quantizer; call
// Fit before encoding.
func NewProductQuantizer(cfg ent.PQConfig, distance distancer.Provider, dimensions int, logger logrus.FieldLogger) (*ProductQuantizer, error) {
	if cfg.Segments <= 0 {
		return nil, errors.New("segments cannot be 0 nor negative")
	}
	// NOTE(review): no lower bound is enforced on cfg.Centroids; a value of 0
	// would make the tile-encoder path compute math.Log2(0) — confirm config
	// validation upstream guarantees a positive value.
	if cfg.Centroids > 256 {
		return nil, fmt.Errorf("centroids should not be higher than 256. Attempting to use %d", cfg.Centroids)
	}
	if dimensions%cfg.Segments != 0 {
		return nil, errors.New("segments should be an integer divisor of dimensions")
	}
	encoderType, err := parseEncoder(cfg.Encoder.Type)
	if err != nil {
		return nil, errors.New("invalid encoder type")
	}

	encoderDistribution, err := parseEncoderDistribution(cfg.Encoder.Distribution)
	if err != nil {
		return nil, errors.New("invalid encoder distribution")
	}
	pq := &ProductQuantizer{
		ks:                  cfg.Centroids,
		m:                   cfg.Segments,
		ds:                  int(dimensions / cfg.Segments),
		distance:            distance,
		trainingLimit:       cfg.TrainingLimit,
		dimensions:          dimensions,
		encoderType:         encoderType,
		encoderDistribution: encoderDistribution,
		dlutPool:            NewDLUTPool(),
		logger:              logger,
	}

	return pq, nil
}

// NewProductQuantizerWithEncoders restores a quantizer from already-trained
// encoders (e.g. loaded from a commit log); Segments is derived from them.
func NewProductQuantizerWithEncoders(cfg ent.PQConfig, distance distancer.Provider, dimensions int, encoders []PQEncoder, logger logrus.FieldLogger) (*ProductQuantizer, error) {
	cfg.Segments = len(encoders)
	pq, err := NewProductQuantizer(cfg, distance, dimensions, logger)
	if err != nil {
		return nil, err
	}

	pq.kms = encoders
	pq.buildGlobalDistances()
	return pq, nil
}

func (pq *ProductQuantizer) buildGlobalDistances() {
	// This hosts the partial distances between the centroids. This way we do not need
	// to recalculate all the time when calculating full distances between compressed vecs
	pq.globalDistances = make([]float32, pq.m*pq.ks*pq.ks)
	for segment := 0; segment < pq.m; segment++ {
		for i := 0; i < pq.ks; i++ {
			cX := pq.kms[segment].Centroid(byte(i))
			for j := 0; j <= i; j++ {
				cY := pq.kms[segment].Centroid(byte(j))
				pq.globalDistances[segment*pq.ks*pq.ks+i*pq.ks+j] = pq.distance.Step(cX, cY)
				// Just copy from already calculated cell since step should be symmetric.
				pq.globalDistances[segment*pq.ks*pq.ks+j*pq.ks+i] = pq.globalDistances[segment*pq.ks*pq.ks+i*pq.ks+j]
			}
		}
	}
}

// Only made public for testing purposes... Not sure we need it outside
func ExtractCode8(encoded []byte, index int) byte {
	return encoded[index]
}

func parseEncoder(encoder string) (Encoder, error) {
	switch encoder {
	case ent.PQEncoderTypeTile:
		return UseTileEncoder, nil
	case ent.PQEncoderTypeKMeans:
		return UseKMeansEncoder, nil
	default:
		return 0, fmt.Errorf("invalid encoder type: %s", encoder)
	}
}

func parseEncoderDistribution(distribution string) (EncoderDistribution, error) {
	switch distribution {
	case ent.PQEncoderDistributionLogNormal:
		return LogNormalEncoderDistribution, nil
	case ent.PQEncoderDistributionNormal:
		return NormalEncoderDistribution, nil
	default:
		return 0, fmt.Errorf("invalid encoder distribution: %s", distribution)
	}
}

// Only made public for testing purposes... Not sure we need it outside
func PutCode8(code byte, buffer []byte, index int) {
	buffer[index] = code
}

func (pq *ProductQuantizer) PersistCompression(logger CommitLogger) {
	logger.AddPQCompression(PQData{
		Dimensions:          uint16(pq.dimensions),
		EncoderType:         pq.encoderType,
		Ks:                  uint16(pq.ks),
		M:                   uint16(pq.m),
		EncoderDistribution: byte(pq.encoderDistribution),
		Encoders:            pq.kms,
		TrainingLimit:       pq.trainingLimit,
	})
}

// DistanceBetweenCompressedVectors sums the precomputed centroid-to-centroid
// distances (see buildGlobalDistances) for two encoded vectors.
func (pq *ProductQuantizer) DistanceBetweenCompressedVectors(x, y []byte) (float32, error) {
	if len(x) != pq.m || len(y) != pq.m {
		return 0, fmt.Errorf("ProductQuantizer.DistanceBetweenCompressedVectors: inconsistent compressed vectors lengths")
	}

	dist := float32(0)

	for i := 0; i < pq.m; i++ {
		cX := ExtractCode8(x, i)
		cY := ExtractCode8(y, i)
		dist += pq.globalDistances[i*pq.ks*pq.ks+int(cX)*pq.ks+int(cY)]
	}

	return pq.distance.Wrap(dist), nil
}

// PQDistancer computes distances against a fixed query, which is either a
// float vector (with a pooled LUT) or an already-compressed vector (lut nil).
type PQDistancer struct {
	x          []float32
	pq         *ProductQuantizer
	lut        *DistanceLookUpTable
	compressed []byte
}

func (pq *ProductQuantizer) NewDistancer(a []float32) *PQDistancer {
	lut := pq.CenterAt(a)
	return &PQDistancer{
		x:          a,
		pq:         pq,
		lut:        lut,
		compressed: nil,
	}
}

func (pq *ProductQuantizer) NewCompressedQuantizerDistancer(a []byte) quantizerDistancer[byte] {
	return &PQDistancer{
		x:          nil,
		pq:         pq,
		lut:        nil,
		compressed: a,
	}
}

// ReturnDistancer hands the distancer's lookup table back to the pool.
// NOTE(review): distancers built by NewCompressedQuantizerDistancer carry a
// nil lut; putting that nil into the pool makes a later Get/Reset panic —
// confirm callers only return LUT-backed distancers here.
func (pq *ProductQuantizer) ReturnDistancer(d *PQDistancer) {
	pq.dlutPool.Return(d.lut)
}

func (d *PQDistancer) Distance(x []byte) (float32, error) {
	if d.lut == nil {
		return d.pq.DistanceBetweenCompressedVectors(d.compressed, x)
	}
	if len(x) != d.pq.m {
		return 0, fmt.Errorf("inconsistent compressed vector length")
	}
	return d.pq.Distance(x, d.lut), nil
}

func (d *PQDistancer) DistanceToFloat(x []float32) (float32, error) {
	if d.lut != nil {
		return d.pq.distance.SingleDist(x, d.lut.flatCenter)
	}
	xComp := d.pq.Encode(x)
	return d.pq.DistanceBetweenCompressedVectors(d.compressed, xComp)
}

// Fit trains one encoder per segment (concurrently) on at most trainingLimit
// vectors and then precomputes the global centroid distance table.
func (pq *ProductQuantizer) Fit(data [][]float32) error {
	if pq.trainingLimit > 0 && len(data) > pq.trainingLimit {
		data = data[:pq.trainingLimit]
	}
	switch pq.encoderType {
	case UseTileEncoder:
		pq.kms = make([]PQEncoder, pq.m)
		err := ConcurrentlyWithError(pq.logger, uint64(pq.m), func(i uint64) error {
			pq.kms[i] = NewTileEncoder(int(math.Log2(float64(pq.ks))), int(i), pq.encoderDistribution)
			for j := 0; j < len(data); j++ {
				pq.kms[i].Add(data[j])
			}
			return pq.kms[i].Fit(data)
		})
		if err != nil {
			return err
		}
	case UseKMeansEncoder:
		mutex := sync.Mutex{}
		var errorResult error = nil
		pq.kms = make([]PQEncoder, pq.m)
		Concurrently(pq.logger, uint64(pq.m), func(i uint64) {
			// bail out early once any segment has failed
			mutex.Lock()
			if errorResult != nil {
				mutex.Unlock()
				return
			}
			mutex.Unlock()
			pq.kms[i] = NewKMeansEncoder(
				pq.ks,
				pq.ds,
				int(i),
			)
			err := pq.kms[i].Fit(data)
			mutex.Lock()
			if errorResult == nil && err != nil {
				errorResult = err
			}
			mutex.Unlock()
		})
		if errorResult != nil {
			return errorResult
		}
	}
	pq.buildGlobalDistances()
	return nil
}

// Encode maps a float vector to one code byte per segment.
func (pq *ProductQuantizer) Encode(vec []float32) []byte {
	codes := make([]byte,
pq.m) + for i := 0; i < pq.m; i++ { + PutCode8(pq.kms[i].Encode(vec), codes, i) + } + return codes +} + +func (pq *ProductQuantizer) Decode(code []byte) []float32 { + vec := make([]float32, 0, pq.m) + for i := 0; i < pq.m; i++ { + vec = append(vec, pq.kms[i].Centroid(ExtractCode8(code, i))...) + } + return vec +} + +func (pq *ProductQuantizer) CenterAt(vec []float32) *DistanceLookUpTable { + return pq.dlutPool.Get(int(pq.m), int(pq.ks), vec) +} + +func (pq *ProductQuantizer) Distance(encoded []byte, lut *DistanceLookUpTable) float32 { + return lut.LookUp(encoded, pq) +} + +func (pq *ProductQuantizer) Stats() CompressionStats { + return PQStats{ + Ks: pq.ks, + M: pq.m, + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/product_quantization_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/product_quantization_test.go new file mode 100644 index 0000000000000000000000000000000000000000..885db79c1af8dce458ec4bbcc4336b517797a7d3 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/product_quantization_test.go @@ -0,0 +1,251 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +//go:build !race + +package compressionhelpers_test + +import ( + "fmt" + "sort" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/repos/db/vector/compressionhelpers" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer" + "github.com/weaviate/weaviate/adapters/repos/db/vector/testinghelpers" + ent "github.com/weaviate/weaviate/entities/vectorindex/hnsw" +) + +type IndexAndDistance struct { + index uint64 + distance float32 +} + +func distance(dp distancer.Provider) func(x, y []float32) float32 { + return func(x, y []float32) float32 { + dist, _ := dp.SingleDist(x, y) + return dist + } +} + +func Test_NoRacePQSettings(t *testing.T) { + distanceProvider := distancer.NewL2SquaredProvider() + + cfg := ent.PQConfig{ + Enabled: true, + Encoder: ent.PQEncoder{ + Type: ent.PQEncoderTypeKMeans, + Distribution: ent.PQEncoderDistributionLogNormal, + }, + Centroids: 512, + Segments: 128, + } + + _, err := compressionhelpers.NewProductQuantizer( + cfg, + distanceProvider, + 128, + logger, + ) + assert.NotNil(t, err) +} + +func Test_NoRacePQKMeans(t *testing.T) { + dimensions := 128 + vectors_size := 1000 + queries_size := 100 + k := 100 + centroids := 255 + vectors, queries := testinghelpers.RandomVecs(vectors_size, queries_size, int(dimensions)) + distanceProvider := distancer.NewDotProductProvider() + + cfg := ent.PQConfig{ + Enabled: true, + Encoder: ent.PQEncoder{ + Type: ent.PQEncoderTypeKMeans, + Distribution: ent.PQEncoderDistributionLogNormal, + }, + Centroids: centroids, + Segments: dimensions, + } + pq, _ := compressionhelpers.NewProductQuantizer( + cfg, + distanceProvider, + dimensions, + logger, + ) + pq.Fit(vectors) + encoded := make([][]byte, vectors_size) + for i := 0; i < vectors_size; i++ { + encoded[i] = pq.Encode(vectors[i]) + } + + var relevant uint64 + queries_size = 100 + for _, query := range queries { + truth, 
_ := testinghelpers.BruteForce(logger, vectors, query, k, distance(distanceProvider)) + distances := make([]IndexAndDistance, len(vectors)) + + distancer := pq.NewDistancer(query) + for v := range vectors { + d, _ := distancer.Distance(encoded[v]) + distances[v] = IndexAndDistance{index: uint64(v), distance: d} + } + sort.Slice(distances, func(a, b int) bool { + return distances[a].distance < distances[b].distance + }) + + results := make([]uint64, 0, k) + for i := 0; i < k; i++ { + results = append(results, distances[i].index) + } + relevant += testinghelpers.MatchesInLists(truth, results) + } + recall := float32(relevant) / float32(k*queries_size) + fmt.Println(recall) + assert.True(t, recall > 0.99) + + pqStats := pq.Stats().(compressionhelpers.PQStats) + assert.Equal(t, pqStats.M, dimensions) + assert.Equal(t, pqStats.Ks, centroids) +} + +func Test_NoRacePQDecodeBytes(t *testing.T) { + t.Run("extracts correctly on one code per byte", func(t *testing.T) { + amount := 100 + values := make([]byte, 0, amount) + for i := byte(0); i < byte(amount); i++ { + values = append(values, i) + } + for i := 0; i < amount; i++ { + code := compressionhelpers.ExtractCode8(values, i) + assert.Equal(t, code, uint8(i)) + } + }) +} + +func Test_NoRacePQInvalidConfig(t *testing.T) { + t.Run("validate pq options", func(t *testing.T) { + amount := 100 + centroids := 256 + cfg := ent.PQConfig{ + Enabled: true, + Encoder: ent.PQEncoder{ + Type: "lmeans", + Distribution: ent.PQEncoderDistributionLogNormal, + }, + Centroids: centroids, + TrainingLimit: 75, + Segments: amount, + } + _, err := compressionhelpers.NewProductQuantizer( + cfg, + nil, + amount, + logger, + ) + assert.ErrorContains(t, err, "invalid encoder type") + cfg = ent.PQConfig{ + Enabled: true, + Encoder: ent.PQEncoder{ + Type: ent.DefaultPQEncoderType, + Distribution: "log", + }, + Centroids: centroids, + TrainingLimit: 75, + Segments: amount, + } + _, err = compressionhelpers.NewProductQuantizer( + cfg, + nil, + amount, + 
logger, + ) + assert.ErrorContains(t, err, "invalid encoder distribution") + cfg = ent.PQConfig{ + Enabled: true, + Encoder: ent.PQEncoder{ + Type: ent.DefaultPQEncoderType, + Distribution: ent.DefaultPQEncoderDistribution, + }, + Centroids: centroids, + TrainingLimit: 75, + Segments: 0, + } + _, err = compressionhelpers.NewProductQuantizer( + cfg, + nil, + amount, + logger, + ) + assert.ErrorContains(t, err, "segments cannot be 0 nor negative") + cfg = ent.PQConfig{ + Enabled: true, + Encoder: ent.PQEncoder{ + Type: ent.DefaultPQEncoderType, + Distribution: ent.DefaultPQEncoderDistribution, + }, + Centroids: centroids, + TrainingLimit: 75, + Segments: 3, + } + _, err = compressionhelpers.NewProductQuantizer( + cfg, + nil, + 4, + logger, + ) + assert.ErrorContains(t, err, "segments should be an integer divisor of dimensions") + }) +} + +func Test_NoRacePQEncodeBytes(t *testing.T) { + t.Run("encodes correctly on one code per byte", func(t *testing.T) { + amount := 100 + values := make([]byte, amount) + for i := 0; i < amount; i++ { + compressionhelpers.PutCode8(uint8(i), values, i) + } + for i := 0; i < amount; i++ { + code := compressionhelpers.ExtractCode8(values, i) + assert.Equal(t, code, uint8(i)) + } + }) +} + +func Test_PQDistanceError(t *testing.T) { + distanceProvider := distancer.NewL2SquaredProvider() + + cfg := ent.PQConfig{ + Enabled: true, + Encoder: ent.PQEncoder{ + Type: ent.PQEncoderTypeKMeans, + Distribution: ent.PQEncoderDistributionLogNormal, + }, + Centroids: 256, + Segments: 128, + } + + q, err := compressionhelpers.NewProductQuantizer( + cfg, + distanceProvider, + 128, + logger, + ) + require.NoError(t, err) + + _, err = q.DistanceBetweenCompressedVectors(nil, nil) + require.Error(t, err) + msg := "ProductQuantizer.DistanceBetweenCompressedVectors: inconsistent compressed vectors lengths" + assert.EqualError(t, err, msg) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/quantizer.go 
b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/quantizer.go new file mode 100644 index 0000000000000000000000000000000000000000..1ef287284ac0ef86b2d79a644b8dfdc4c6ae69aa --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/quantizer.go @@ -0,0 +1,160 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package compressionhelpers + +import ( + "encoding/binary" +) + +type quantizerDistancer[T byte | uint64] interface { + Distance(x []T) (float32, error) + DistanceToFloat(x []float32) (float32, error) +} + +type quantizer[T byte | uint64] interface { + DistanceBetweenCompressedVectors(x, y []T) (float32, error) + Encode(vec []float32) []T + NewQuantizerDistancer(a []float32) quantizerDistancer[T] + NewCompressedQuantizerDistancer(a []T) quantizerDistancer[T] + ReturnQuantizerDistancer(distancer quantizerDistancer[T]) + CompressedBytes(compressed []T) []byte + FromCompressedBytes(compressed []byte) []T + PersistCompression(logger CommitLogger) + Stats() CompressionStats + + // FromCompressedBytesWithSubsliceBuffer is like FromCompressedBytes, but + // instead of allocating a new slice you can pass in a buffer to use. It will + // slice something off of that buffer. If the buffer is too small, it will + // allocate a new buffer. 
+ FromCompressedBytesWithSubsliceBuffer(compressed []byte, buffer *[]T) []T +} + +func (bq *BinaryQuantizer) PersistCompression(logger CommitLogger) { +} + +func (pq *ProductQuantizer) NewQuantizerDistancer(vec []float32) quantizerDistancer[byte] { + return pq.NewDistancer(vec) +} + +func (pq *ProductQuantizer) ReturnQuantizerDistancer(distancer quantizerDistancer[byte]) { + concreteDistancer := distancer.(*PQDistancer) + if concreteDistancer == nil { + return + } + pq.ReturnDistancer(concreteDistancer) +} + +func (bq *BinaryQuantizer) CompressedBytes(compressed []uint64) []byte { + slice := make([]byte, len(compressed)*8) + for i := range compressed { + binary.LittleEndian.PutUint64(slice[i*8:], compressed[i]) + } + return slice +} + +func (bq *BinaryQuantizer) FromCompressedBytes(compressed []byte) []uint64 { + l := len(compressed) / 8 + if len(compressed)%8 != 0 { + l++ + } + slice := make([]uint64, l) + + for i := range slice { + slice[i] = binary.LittleEndian.Uint64(compressed[i*8:]) + } + return slice +} + +// FromCompressedBytesWithSubsliceBuffer is like FromCompressedBytes, but +// instead of allocating a new slice you can pass in a buffer to use. It will +// slice something off of that buffer. If the buffer is too small, it will +// allocate a new buffer. 
+func (bq *BinaryQuantizer) FromCompressedBytesWithSubsliceBuffer(compressed []byte, buffer *[]uint64) []uint64 { + l := len(compressed) / 8 + if len(compressed)%8 != 0 { + l++ + } + + if len(*buffer) < l { + *buffer = make([]uint64, 1000*l) + } + + // take from end so we can address the start of the buffer + slice := (*buffer)[len(*buffer)-l:] + *buffer = (*buffer)[:len(*buffer)-l] + + for i := range slice { + slice[i] = binary.LittleEndian.Uint64(compressed[i*8:]) + } + return slice +} + +func (pq *ProductQuantizer) CompressedBytes(compressed []byte) []byte { + return compressed +} + +func (pq *ProductQuantizer) FromCompressedBytes(compressed []byte) []byte { + return compressed +} + +func (pq *ProductQuantizer) FromCompressedBytesWithSubsliceBuffer(compressed []byte, buffer *[]byte) []byte { + if len(*buffer) < len(compressed) { + *buffer = make([]byte, len(compressed)*1000) + } + + // take from end so we can address the start of the buffer + out := (*buffer)[len(*buffer)-len(compressed):] + copy(out, compressed) + *buffer = (*buffer)[:len(*buffer)-len(compressed)] + + return out +} + +type BQDistancer struct { + x []float32 + bq *BinaryQuantizer + compressed []uint64 +} + +func (bq *BinaryQuantizer) NewDistancer(a []float32) *BQDistancer { + return &BQDistancer{ + x: a, + bq: bq, + compressed: bq.Encode(a), + } +} + +func (bq *BinaryQuantizer) NewCompressedQuantizerDistancer(a []uint64) quantizerDistancer[uint64] { + return &BQDistancer{ + x: nil, + bq: bq, + compressed: a, + } +} + +func (d *BQDistancer) Distance(x []uint64) (float32, error) { + return d.bq.DistanceBetweenCompressedVectors(d.compressed, x) +} + +func (d *BQDistancer) DistanceToFloat(x []float32) (float32, error) { + if len(d.x) > 0 { + return d.bq.distancer.SingleDist(d.x, x) + } + xComp := d.bq.Encode(x) + return d.bq.DistanceBetweenCompressedVectors(d.compressed, xComp) +} + +func (bq *BinaryQuantizer) NewQuantizerDistancer(vec []float32) quantizerDistancer[uint64] { + return 
bq.NewDistancer(vec) +} + +func (bq *BinaryQuantizer) ReturnQuantizerDistancer(distancer quantizerDistancer[uint64]) {} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/qunatizer_benchmark_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/qunatizer_benchmark_test.go new file mode 100644 index 0000000000000000000000000000000000000000..71d2fadca65cbfc3d6f727e59d596f073c002207 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/qunatizer_benchmark_test.go @@ -0,0 +1,99 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package compressionhelpers + +import ( + "math/rand" + "testing" + + "github.com/stretchr/testify/assert" +) + +func BenchmarkBQFromCompressedBytes(b *testing.B) { + bq := NewBinaryQuantizer(nil) + iterations := 1000 + encoded := make([][]uint64, iterations) + for i := range encoded { + encoded[i] = make([]uint64, 6) + for j := range encoded[i] { + encoded[i][j] = uint64(j) + } + } + + compressed := make([][]byte, iterations) + for i := range compressed { + compressed[i] = make([]byte, 48) + compressed[i] = bq.CompressedBytes(encoded[i]) + } + + b.ResetTimer() + b.ReportAllocs() + for i := 0; i < b.N; i++ { + for j := range compressed { + _ = bq.FromCompressedBytes(compressed[j]) + } + } +} + +func BenchmarkBQFromCompressedBytesWithSubsliceBuffer(b *testing.B) { + bq := NewBinaryQuantizer(nil) + iterations := 1000 + encoded := make([][]uint64, iterations) + for i := range encoded { + encoded[i] = make([]uint64, 6) + for j := range encoded[i] { + encoded[i][j] = uint64(j) + } + } + + compressed := make([][]byte, iterations) + for i := range compressed { + compressed[i] = make([]byte, 
48) + compressed[i] = bq.CompressedBytes(encoded[i]) + } + + b.ResetTimer() + b.ReportAllocs() + for i := 0; i < b.N; i++ { + var buffer []uint64 + for j := range compressed { + _ = bq.FromCompressedBytesWithSubsliceBuffer(compressed[j], &buffer) + } + } +} + +// This verifies that both the "normal" and the "with subslice buffer" version of +// FromCompressedBytes produce identical results +func TestBQFromCompressedBytes_SanityCheck(t *testing.T) { + bq := NewBinaryQuantizer(nil) + iterations := 10000 + encoded := make([][]uint64, iterations) + for i := range encoded { + encoded[i] = make([]uint64, 6) + for j := range encoded[i] { + encoded[i][j] = rand.Uint64() + } + } + + compressed := make([][]byte, iterations) + for i := range compressed { + compressed[i] = make([]byte, 48) + compressed[i] = bq.CompressedBytes(encoded[i]) + } + + var buffer []uint64 + for j := range compressed { + regular := bq.FromCompressedBytes(compressed[j]) + withBuffer := bq.FromCompressedBytesWithSubsliceBuffer(compressed[j], &buffer) + assert.Equal(t, regular, withBuffer) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/rotational_quantization.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/rotational_quantization.go new file mode 100644 index 0000000000000000000000000000000000000000..8bb67c962c09ba5d9b70ea67665b61a80a9a9ae7 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/rotational_quantization.go @@ -0,0 +1,355 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
//
// CONTACT: hello@weaviate.io
//

package compressionhelpers

import (
	"encoding/binary"
	"fmt"
	"math"

	"github.com/pkg/errors"
	"golang.org/x/exp/slices"

	"github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer"
)

// RotationalQuantizer compresses vectors by applying a fast random rotation
// (FastRotation) and then uniformly quantizing each rotated entry into
// `bits` bits (see encode).
type RotationalQuantizer struct {
	inputDim  uint32
	rotation  *FastRotation
	distancer distancer.Provider
	bits      uint32 // The number of bits per entry used by Encode() to encode data vectors.

	// Precomputed for faster distance computations.
	err error   // Precomputed error returned by DistanceBetweenCompressedVectors.
	cos float32 // Indicator for the cosine-dot distancer.
	l2  float32 // Indicator for the l2-squared distancer.
}

// distancerIndicatorsAndError returns the (cos, l2) indicator pair for the
// given provider, or a non-nil error when the provider's type is not one of
// "cosine-dot", "l2-squared", "dot". The indicators let the distance code
// select the metric arithmetically instead of branching.
func distancerIndicatorsAndError(distancer distancer.Provider) (float32, float32, error) {
	supportedDistances := []string{"cosine-dot", "l2-squared", "dot"}
	if !slices.Contains(supportedDistances, distancer.Type()) {
		return 0, 0, errors.Errorf("Distance not supported yet %s", distancer)
	}

	var cos, l2 float32
	if distancer.Type() == "cosine-dot" {
		cos = 1.0
	}
	if distancer.Type() == "l2-squared" {
		l2 = 1.0
	}
	return cos, l2, nil
}

// NewRotationalQuantizer builds a quantizer for inputDim-dimensional input
// vectors. Note that an unsupported distancer does not fail construction:
// the error is stored and surfaced later by distance computations.
func NewRotationalQuantizer(inputDim int, seed uint64, bits int, distancer distancer.Provider) *RotationalQuantizer {
	// Using three rounds offers a nice trade-off between performance and
	// quality. If we use only two rounds we see that the encoding becomes
	// biased in some of the unit tests.
	rotationRounds := 3
	rotation := NewFastRotation(inputDim, rotationRounds, seed)
	cos, l2, err := distancerIndicatorsAndError(distancer)
	rq := &RotationalQuantizer{
		inputDim:  uint32(inputDim),
		rotation:  rotation,
		bits:      uint32(bits),
		distancer: distancer,
		// Precomputed values for faster distance computation.
		err: err,
		cos: cos,
		l2:  l2,
	}
	return rq
}

// RestoreRotationalQuantizer rebuilds a quantizer from persisted rotation
// state (see PersistCompression). The error result is currently always nil;
// an unsupported distancer is handled like in NewRotationalQuantizer.
func RestoreRotationalQuantizer(inputDim int, bits int, outputDim int, rounds int, swaps [][]Swap, signs [][]float32, distancer distancer.Provider) (*RotationalQuantizer, error) {
	cos, l2, err := distancerIndicatorsAndError(distancer)
	rq := &RotationalQuantizer{
		inputDim:  uint32(inputDim),
		rotation:  RestoreFastRotation(outputDim, rounds, swaps, signs),
		bits:      uint32(bits),
		distancer: distancer,
		err:       err,
		cos:       cos,
		l2:        l2,
	}
	return rq, nil
}

// OutputDimension is the dimensionality after rotation, i.e. the number of
// quantized entries stored per code.
func (rq *RotationalQuantizer) OutputDimension() int {
	return int(rq.rotation.OutputDim)
}

func putFloat32(b []byte, pos int, x float32) {
	binary.BigEndian.PutUint32(b[pos:], math.Float32bits(x))
}

func getFloat32(b []byte, pos int) float32 {
	return math.Float32frombits(binary.BigEndian.Uint32(b[pos:]))
}

// RQCode is the byte encoding of a quantized vector: a 16-byte header of four
// big-endian float32 values (lower, step, code sum, squared norm), followed
// by one code byte per rotated dimension.
type RQCode []byte

// Lower is the smallest rotated entry; codes are offsets from this value.
func (c RQCode) Lower() float32 {
	return getFloat32(c, 0)
}

func (c RQCode) setLower(x float32) {
	putFloat32(c, 0, x)
}

// Step is the quantization step size between adjacent code points.
func (c RQCode) Step() float32 {
	return getFloat32(c, 4)
}

func (c RQCode) setStep(x float32) {
	putFloat32(c, 4, x)
}

// CodeSum is step * (sum of all code bytes), precomputed by encode for the
// distance estimators.
func (c RQCode) CodeSum() float32 {
	return getFloat32(c, 8)
}

func (c RQCode) setCodeSum(x float32) {
	putFloat32(c, 8, x)
}

// Norm2 is the squared norm of the original (unrotated) vector.
func (c RQCode) Norm2() float32 {
	return getFloat32(c, 12)
}

func (c RQCode) setNorm2(x float32) {
	putFloat32(c, 12, x)
}

// Byte returns the i-th code byte (0-based, past the 16-byte header).
func (c RQCode) Byte(i int) byte {
	return c[16+i]
}

// Bytes returns the code bytes without the 16-byte header.
func (c RQCode) Bytes() []byte {
	return c[16:]
}

func (c RQCode) setByte(i int, b byte) {
	c[16+i] = b
}

// Dimension is the number of code bytes (output dimensionality).
func (c RQCode) Dimension() int {
	return len(c) - 16
}

// NewRQCode allocates a zeroed code for d dimensions (16 header bytes + d).
func NewRQCode(d int) RQCode {
	return make([]byte, d+16)
}

// The code representing the zero vector.
// We also return this in case of abnormal input, such as a nil vector.
func ZeroRQCode(d int) RQCode {
	return NewRQCode(d)
}

// String renders the header fields and the first ten code bytes for debugging.
func (c RQCode) String() string {
	return fmt.Sprintf("RQCode{Lower: %.4f, Step: %.4f, CodeSum: %.4f, Norm2: %.4f, Bytes[:10]: %v",
		c.Lower(), c.Step(), c.CodeSum(), c.Norm2(), c.Bytes()[:10])
}

// Encode compresses x into an RQCode using the configured number of bits.
func (rq *RotationalQuantizer) Encode(x []float32) []byte {
	return rq.encode(x, rq.bits)
}

// dotProduct computes <x, y>. The dot product provider returns the negated
// dot product, hence the sign flip.
func dotProduct(x, y []float32) float32 {
	distancer := distancer.NewDotProductProvider()
	negativeDot, _ := distancer.SingleDist(x, y)
	return -negativeDot
}

// encode rotates x and uniformly quantizes each rotated entry onto
// [0, 2^bits - 1], rounding to the nearest code point so that the minimum and
// maximum rotated entries map exactly to code 0 and maxCode.
func (rq *RotationalQuantizer) encode(x []float32, bits uint32) []byte {
	outDim := rq.OutputDimension()
	if len(x) == 0 {
		return ZeroRQCode(outDim)
	}
	// Only the first outDim entries are used; any excess is ignored.
	if len(x) > outDim {
		x = x[:outDim]
	}

	rx := rq.rotation.Rotate(x)
	// Non-constant shift on an untyped 1 assumes the uint8 target type, so
	// for bits == 8 the shift wraps (1<<8 == 0) and maxCode is 255 as intended.
	var maxCode uint8 = (1 << bits) - 1
	lower := slices.Min(rx)
	step := (slices.Max(rx) - lower) / float32(maxCode)

	if step <= 0 {
		// The input was likely the zero vector or indistinguishable from it.
		return ZeroRQCode(outDim)
	}

	code := NewRQCode(outDim)
	var codeSum float32
	for i, v := range rx {
		// +0.5 rounds to the nearest code point.
		c := byte((v-lower)/step + 0.5)
		codeSum += float32(c)
		code.setByte(i, c)
	}
	code.setLower(lower)
	code.setStep(step)
	code.setCodeSum(step * codeSum)
	// Norm2 stores the squared norm of the ORIGINAL (unrotated) vector.
	code.setNorm2(dotProduct(x, x))
	return code
}

// Rotate applies the quantizer's random rotation to x.
func (rq *RotationalQuantizer) Rotate(x []float32) []float32 {
	return rq.rotation.Rotate(x)
}

// Restore decompresses b into the ROTATED space: it approximates
// Rotate(original), not the original vector itself (see TestRQEncodeRestore).
func (rq *RotationalQuantizer) Restore(b []byte) []float32 {
	c := RQCode(b)
	x := make([]float32, c.Dimension())
	for i := range c.Dimension() {
		x[i] = c.Lower() + c.Step()*float32(c.Byte(i))
	}
	return x
}

// RQDistancer estimates distances between one fixed query and RQ-compressed
// vectors.
type RQDistancer struct {
	distancer distancer.Provider
	rq        *RotationalQuantizer
	query     []float32

	// Fields of the RQCode struct. Extracted here for performance reasons.
	lower   float32
	step    float32
	codeSum float32
	norm2   float32
	bytes   []byte
	a       float32 // precomputed value from RQCode.

	err error
	cos float32
	l2  float32
}

// newDistancer caches the query code's header fields and the derived value
// a = dim*lower + codeSum so Distance avoids recomputing them per call.
func (rq *RotationalQuantizer) newDistancer(q []float32, cq RQCode) *RQDistancer {
	return &RQDistancer{
		distancer: rq.distancer,
		rq:        rq,
		query:     q,
		err:       rq.err,
		cos:       rq.cos,
		l2:        rq.l2,
		// RQCode fields.
		lower:   cq.Lower(),
		step:    cq.Step(),
		codeSum: cq.CodeSum(),
		norm2:   cq.Norm2(),
		bytes:   cq.Bytes(),
		a:       float32(cq.Dimension())*cq.Lower() + cq.CodeSum(),
	}
}

// NewDistancer encodes the query and returns a distancer for it.
func (rq *RotationalQuantizer) NewDistancer(q []float32) *RQDistancer {
	var cq RQCode = rq.Encode(q)
	return rq.newDistancer(q, cq)
}

// Optimized distance computation that precomputes as much as possible and
// avoids conditional statements by using indicator variables.
func (d *RQDistancer) Distance(x []byte) (float32, error) {
	cx := RQCode(x)
	dotEstimate := cx.Lower()*d.a + cx.CodeSum()*d.lower + cx.Step()*d.step*float32(dotByteImpl(cx.Bytes(), d.bytes))
	return d.l2*(cx.Norm2()+d.norm2) + d.cos - (1.0+d.l2)*dotEstimate, d.err
}

// DistanceToFloat computes the exact distance when the original query is
// available, and otherwise falls back to encoding x and estimating.
func (d *RQDistancer) DistanceToFloat(x []float32) (float32, error) {
	if len(d.query) > 0 {
		return d.distancer.SingleDist(d.query, x)
	}
	cx := d.rq.Encode(x)
	return d.Distance(cx)
}

// We duplicate the distance computation from the RQDistancer here for performance reasons.
// DistanceBetweenCompressedVectors estimates the configured distance between
// two RQ codes. The dot product is estimated from the code headers plus the
// byte-domain dot product; the l2/cos indicators select the metric without
// branching (same formula as RQDistancer.Distance, duplicated for speed).
func (rq RotationalQuantizer) DistanceBetweenCompressedVectors(x, y []byte) (float32, error) {
	cx, cy := RQCode(x), RQCode(y)
	a := float32(rq.rotation.OutputDim) * cx.Lower() * cy.Lower()
	b := cx.Lower() * cy.CodeSum()
	c := cy.Lower() * cx.CodeSum()
	d := cx.Step() * cy.Step() * float32(dotByteImpl(cx.Bytes(), cy.Bytes()))
	dotEstimate := a + b + c + d
	return rq.l2*(cx.Norm2()+cy.Norm2()) + rq.cos - (1.0+rq.l2)*dotEstimate, rq.err
}

// NewCompressedQuantizerDistancer builds a distancer from an already
// compressed query; the query is first restored into the rotated space.
func (rq *RotationalQuantizer) NewCompressedQuantizerDistancer(c []byte) quantizerDistancer[byte] {
	restored := rq.Restore(c)
	return rq.newDistancer(restored, c)
}

// RQStats reports the RQ configuration for compression statistics.
type RQStats struct {
	Bits uint32 `json:"bits"`
}

func (rq RQStats) CompressionType() string {
	return "rq"
}

func (rq RQStats) CompressionRatio(dimensionality int) float64 {
	// RQ compression: original size = inputDim * 4 bytes (float32)
	// compressed size = 16 bytes (metadata) + outputDim * 1 byte (compressed data)
	// where outputDim is typically the same as inputDim after rotation
	originalSize := dimensionality * 4
	compressedSize := 16 + dimensionality // 16 bytes metadata + 1 byte per dimension
	return float64(originalSize) / float64(compressedSize)
}

func (rq *RotationalQuantizer) Stats() CompressionStats {
	return RQStats{
		Bits: rq.bits,
	}
}

// CompressedBytes is the identity: RQ codes are already raw bytes.
func (rq *RotationalQuantizer) CompressedBytes(compressed []byte) []byte {
	return compressed
}

// FromCompressedBytes is the identity: RQ codes are already raw bytes.
func (rq *RotationalQuantizer) FromCompressedBytes(compressed []byte) []byte {
	return compressed
}

// FromCompressedBytesWithSubsliceBuffer copies the code into a subslice taken
// from the end of *buffer instead of allocating per call; a new, larger
// buffer is allocated when the remaining capacity is too small.
func (rq *RotationalQuantizer) FromCompressedBytesWithSubsliceBuffer(compressed []byte, buffer *[]byte) []byte {
	if len(*buffer) < len(compressed) {
		*buffer = make([]byte, len(compressed)*1000)
	}

	// take from end so we can address the start of the buffer
	out := (*buffer)[len(*buffer)-len(compressed):]
	copy(out, compressed)
	*buffer = (*buffer)[:len(*buffer)-len(compressed)]

	return out
}

func (rq *RotationalQuantizer) NewQuantizerDistancer(vec []float32) quantizerDistancer[byte] {
	return rq.NewDistancer(vec)
}

// ReturnQuantizerDistancer is a no-op: RQ distancers are not pooled.
func (rq *RotationalQuantizer) ReturnQuantizerDistancer(distancer quantizerDistancer[byte]) {}

// RQData is the persisted state needed to restore a RotationalQuantizer.
type RQData struct {
	InputDim uint32
	Bits     uint32
	Rotation FastRotation
}

// PersistCompression writes the quantizer state to the commit log.
func (rq *RotationalQuantizer) PersistCompression(logger CommitLogger) {
	logger.AddRQCompression(RQData{
		InputDim: rq.inputDim,
		Bits:     rq.bits,
		Rotation: *rq.rotation,
	})
}
diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/rotational_quantization_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/rotational_quantization_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..6623aa5df23bdd34d3062979a30d226e8cc3adc2
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/rotational_quantization_test.go
@@ -0,0 +1,376 @@
//                           _       _
// __      _____  __ ___   ___  __ _| |_ ___
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
//  \ V  V /  __/ (_| |\ V /| | (_| | ||  __/
//   \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
//
//  Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
//
//  CONTACT: hello@weaviate.io
//

package compressionhelpers_test

import (
	"fmt"
	"math"
	"math/rand/v2"
	"slices"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/weaviate/weaviate/adapters/repos/db/vector/compressionhelpers"
	"github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer"
)

// defaultRotationalQuantizer is the 8-bit cosine-distance configuration used
// throughout these tests.
func defaultRotationalQuantizer(dim int, seed uint64) *compressionhelpers.RotationalQuantizer {
	return compressionhelpers.NewRotationalQuantizer(dim, seed, 8, distancer.NewCosineDistanceProvider())
}

// Create two d-dimensional unit vectors with a cosine similarity of alpha.
func correlatedVectors(d int, alpha float32) ([]float32, []float32) {
	x := make([]float32, d)
	x[0] = 1.0
	y := make([]float32, d)
	y[0] = alpha
	y[1] = float32(math.Sqrt(float64(1 - alpha*alpha)))
	return x, y
}

// RQDistancer.Distance and RotationalQuantizer.DistanceBetweenCompressedVectors
// are implemented separately for performance reasons. Verify that they return
// the same distance estimates up to floating point errors.
func TestRQDistanceEstimatesAreIdentical(t *testing.T) {
	rng := newRNG(64521467)

	metrics := []distancer.Provider{
		distancer.NewCosineDistanceProvider(),
		distancer.NewDotProductProvider(),
		distancer.NewL2SquaredProvider(),
	}
	n := 100
	for range n {
		d := 2 + rng.IntN(2000)
		for _, m := range metrics {
			bits := 8
			rq := compressionhelpers.NewRotationalQuantizer(d, rng.Uint64(), bits, m)
			q, x := randomUnitVector(d, rng), randomUnitVector(d, rng)
			cq, cx := rq.Encode(q), rq.Encode(x)
			distancer := rq.NewDistancer(q)
			distancerEstimate, _ := distancer.Distance(cx)
			compressedEstimate, _ := rq.DistanceBetweenCompressedVectors(cq, cx)
			eps := 2e-6 // Unfortunately the deviation can be quite big. Perhaps the intermediate calculations can be done using float64?
			assert.Less(t, math.Abs(float64(distancerEstimate-compressedEstimate)), eps)
		}
	}
}

// absDiff is the absolute difference of two float32 values, as a float64.
func absDiff(a float32, b float32) float64 {
	return math.Abs(float64(a - b))
}

// TestRQDistanceEstimate checks both estimators against the exact distance on
// a fixed 2D pair with known cosine similarity 1/sqrt(2).
func TestRQDistanceEstimate(t *testing.T) {
	a := float32(1.0 / math.Sqrt2)
	q := []float32{1.0, 0.0}
	x := []float32{a, a}

	dim := 2
	bits := 8
	var seed uint64 = 42

	metrics := []distancer.Provider{
		distancer.NewCosineDistanceProvider(),
		distancer.NewDotProductProvider(),
		distancer.NewL2SquaredProvider(),
	}

	for _, m := range metrics {
		rq := compressionhelpers.NewRotationalQuantizer(dim, seed, bits, m)
		cq, cx := rq.Encode(q), rq.Encode(x)
		distancer := rq.NewDistancer(q)
		distancerEstimate, _ := distancer.Distance(cx)
		compressedEstimate, _ := rq.DistanceBetweenCompressedVectors(cq, cx)

		target, _ := m.SingleDist(q, x)
		eps := 1e-3
		assert.Less(t, absDiff(distancerEstimate, target), eps)
		assert.Less(t, absDiff(compressedEstimate, target), eps)
	}
}

// randomUniformVector returns a d-dimensional vector with entries uniform in [-1, 1).
func randomUniformVector(d int, rng *rand.Rand) []float32 {
	x := make([]float32, d)
	for i := range x {
		x[i] = 2*rng.Float32() - 1.0
	}
	return x
}

// TestRQEncodeRestore bounds the per-entry quantization error of
// Restore(Encode(x)) against the rotated input.
func TestRQEncodeRestore(t *testing.T) {
	n := 10
	rng := newRNG(7542)
	for range n {
		d := 2 + rng.IntN(1000)
		rq := defaultRotationalQuantizer(d, rng.Uint64())

		s := 1000 * rng.Float32()
		x := randomUniformVector(d, rng)

		// Each entry of the scaled uniform vector ranges from [-s, s]
		// So the euclidean norm is going to be something like sd/3
		// Once we rotate the absolute magnitude of the entries should not exceed
		// something like (sd/3)*(6/sqrt(D)) < 2*s*sqrt(d) where D >= d is the output dimension.
		// So suppose the entries range between [-2*s*sqrt(d), 2*s*sqrt(d)] and we quantize this interval using 256 evenly distributed values.
		// Then the absolute quantization error in any one entry should not exceed 2*s*sqrt(d)/256
		errorBoundUpper := float64(s) * math.Sqrt(float64(d)) / 128
		eps := 0.1 * errorBoundUpper // The actual error is much smaller.

		scale(x, s)
		cx := rq.Encode(x)
		target := rq.Rotate(x)
		restored := rq.Restore(cx)

		for i := range target {
			assert.Less(t, math.Abs(float64(target[i]-restored[i])), eps)
		}
	}
}

// TestRQDistancer checks estimate accuracy on unit vectors of known
// correlation across all supported metrics.
func TestRQDistancer(t *testing.T) {
	metrics := []distancer.Provider{
		distancer.NewCosineDistanceProvider(),
		distancer.NewDotProductProvider(),
		distancer.NewL2SquaredProvider(),
	}
	rng := newRNG(6789)
	n := 250
	for range n {
		d := 2 + rng.IntN(2000)
		alpha := -1.0 + 2*rng.Float32()
		// Note that we are testing on vectors where all mass is concentrated in
		// the first two entries. It might be more realistic to test on vectors
		// that have already been rotated randomly.
		q, x := correlatedVectors(d, alpha)
		for _, m := range metrics {
			bits := 8
			rq := compressionhelpers.NewRotationalQuantizer(d, rng.Uint64(), bits, m)
			distancer := rq.NewDistancer(q)
			expected, _ := m.SingleDist(q, x)
			cx := rq.Encode(x)
			estimated, _ := distancer.Distance(cx)
			assert.Less(t, math.Abs(float64(estimated-expected)), 0.0051)
		}
	}
}

// Verify that the estimator behaves according to the concentration bounds
// specified in the paper. In the paper they use an asymmetric encoding of
// (float32, B-bits) while we use (queryBits, dataBits), so we cannot expect to
// satisfy their bounds exactly in all cases. This is especially the case when
// using few bits, something this quantization scheme is not optimized for.
func TestRQEstimationConcentrationBounds(t *testing.T) {
	rng := newRNG(12345)
	n := 100
	for range n {
		d := 2 + rng.IntN(2000)
		alpha := -1.0 + 2*rng.Float32()
		bits := 8

		// With probability > 0.999 the absolute error should be less than eps.
		// For d = 256 the error for b bits is 2^(-b) * 0.36, so 0.18, 0.09,
		// 0.045.. for b = 1, 2, 3,...
		eps := math.Pow(2.0, -float64(bits)) * 5.75 / math.Sqrt(float64(d))

		// With the optimizations we are adding, such as reducing the number of
		// rotation rounds and removing randomized rounding from the encoding we
		// are seeing a loss. We keep track of this loss as a factor that we
		// have to increase eps by in order to pass this test. For the initial
		// implementation this factor was 1.0. Note that a factor of 2
		// corresponds to a loss of 1 bit compared to extended RabitQ. A factor
		// of 4 corresponds to 2 bits and so on...
		additionalErrorFactor := 1.5
		eps *= additionalErrorFactor

		q, x := correlatedVectors(d, alpha)
		rq := compressionhelpers.NewRotationalQuantizer(d, rng.Uint64(), bits, distancer.NewDotProductProvider())
		cx := rq.Encode(x)
		dist := rq.NewDistancer(q)
		estimate, _ := dist.Distance(cx)
		cosineSimilarityEstimate := -estimate // Holds for unit vectors.
		assert.Less(t, math.Abs(float64(cosineSimilarityEstimate-alpha)), eps)
	}
}

// scale multiplies every entry of x by s, in place.
func scale(x []float32, s float32) {
	for i := range x {
		x[i] *= s
	}
}

// Verify that the error scales as expected with the norm of the vectors.
// i.e. we can handle vectors of different norms.
func TestRQDistancerRandomVectorsWithScaling(t *testing.T) {
	// We do not test for cosine similarity here since it assumes normalized vectors.
	metrics := []distancer.Provider{
		distancer.NewDotProductProvider(),
		distancer.NewL2SquaredProvider(),
	}
	rng := newRNG(77433)
	n := 100
	for range n {
		d := 2 + rng.IntN(1000)
		alpha := -1.0 + 2*rng.Float32()
		q, x := correlatedVectors(d, alpha)
		s1 := 1000 * rng.Float32()
		s2 := 1000 * rng.Float32()
		scale(q, s1)
		scale(x, s2)
		for _, m := range metrics {
			bits := 8
			rq := compressionhelpers.NewRotationalQuantizer(d, rng.Uint64(), bits, m)
			distancer := rq.NewDistancer(q)
			cx := rq.Encode(x)
			target, _ := m.SingleDist(q, x)
			estimate, _ := distancer.Distance(cx)

			// Suppose we are seeing absolute errors of estimation of size eps when working with unit vectors.
			// Then the error when scaling should scale roughly with the product of the scaling factors for the inner product.
			// For the Euclidean distance things are slightly more complex.
			assert.Less(t, math.Abs(float64(estimate-target)), float64(s1*s2*0.004), "Failure at a dimensionality of %d, metric %s", d, m.Type())
		}
	}
}

// TestRQCodePointDistribution checks that, apart from the always-included
// extremes (codes 0 and 255), encoded bytes are roughly uniformly used.
func TestRQCodePointDistribution(t *testing.T) {
	rng := newRNG(999)
	n := 100
	bits := 8
	codePoints := 1 << bits
	for range n {
		inDim := 2 + rng.IntN(1024)
		rq := compressionhelpers.NewRotationalQuantizer(inDim, rng.Uint64(), bits, distancer.NewDotProductProvider())

		// Encode m random unit vectors and mark the bytes that were used.
		m := 100
		byteCount := make([]int, codePoints)
		for range m {
			x := randomUnitVector(inDim, rng)
			var c compressionhelpers.RQCode = rq.Encode(x)
			codeBytes := c.Bytes()
			for _, b := range codeBytes {
				byteCount[b]++
			}
		}

		uniformExpectation := float64(m) * float64(rq.OutputDimension()) / float64(codePoints)
		for i := range byteCount {
			// The code was designed to guarantee that the min and max are
			// always included, so they will have an abnormally high count,
			// especially in low dimensions.
			if i == 0 || i == (len(byteCount)-1) {
				continue
			}
			errorMsg := fmt.Sprintf("Byte %d was seen %d times (%.3f times its expectation). Input dimension: %d, Output dimension: %d",
				i, byteCount[i], float64(byteCount[i])/uniformExpectation, inDim, rq.OutputDimension())
			assert.Greater(t, byteCount[i], 0, errorMsg)
			assert.Less(t, float64(byteCount[i]), 3.0*uniformExpectation, errorMsg)
		}
	}
}

// TestRQHandlesAbnormalVectorsGracefully verifies that nil, empty, all-zero,
// too-short and too-long inputs all map to well-defined codes.
func TestRQHandlesAbnormalVectorsGracefully(t *testing.T) {
	inDim := 97
	bits := 8
	rq := compressionhelpers.NewRotationalQuantizer(inDim, 42, bits, distancer.NewDotProductProvider())
	outDim := rq.OutputDimension()
	zeroCode := compressionhelpers.ZeroRQCode(outDim)

	var nilVector []float32
	assert.True(t, slices.Equal(rq.Encode(nilVector), zeroCode))

	lengthZeroVector := make([]float32, 0)
	assert.True(t, slices.Equal(rq.Encode(lengthZeroVector), zeroCode))

	longVectorOfZeroes := make([]float32, 572)
	shortVectorOfZeroes := make([]float32, 15)
	assert.True(t, slices.Equal(rq.Encode(longVectorOfZeroes), zeroCode))
	assert.True(t, slices.Equal(rq.Encode(shortVectorOfZeroes), zeroCode))

	// Only the first at most outDim entries are used for the encoding, the rest is ignored.
	x := make([]float32, 243)
	for i := range x {
		x[i] = float32(i)
	}
	assert.True(t, slices.Equal(rq.Encode(x[:outDim]), rq.Encode(x)))
}

// BenchmarkRQEncode measures encoding throughput at typical dimensionalities.
func BenchmarkRQEncode(b *testing.B) {
	dimensions := []int{256, 1024, 1536}
	rng := newRNG(42)
	for _, dim := range dimensions {
		quantizer := defaultRotationalQuantizer(dim, rng.Uint64())
		x := make([]float32, dim)
		x[0] = 1
		b.Run(fmt.Sprintf("FastRQEncode-d%d", dim), func(b *testing.B) {
			for b.Loop() {
				quantizer.Encode(x)
			}
			b.ReportMetric(float64(b.Elapsed().Microseconds())/float64(b.N), "us/op")
			b.ReportMetric(float64(b.N)/float64(b.Elapsed().Seconds()), "ops/sec")
		})
	}
}

// BenchmarkRQDistancer measures estimated-distance throughput per metric and
// dimensionality.
func BenchmarkRQDistancer(b *testing.B) {
	dimensions := []int{64, 128, 256, 512, 1024, 1536, 2048}
	rng := newRNG(42)
	metrics := []distancer.Provider{
		distancer.NewCosineDistanceProvider(),
		distancer.NewDotProductProvider(),
		distancer.NewL2SquaredProvider(),
	}
	for _, dim := range dimensions {
		for _, m := range metrics {
			// Rotational quantization.
			bits := 8
			rq := compressionhelpers.NewRotationalQuantizer(dim, rng.Uint64(), bits, m)
			q, x := correlatedVectors(dim, 0.5)
			cx := rq.Encode(x)
			distancer := rq.NewDistancer(q)
			b.Run(fmt.Sprintf("RQDistancer-d%d-%s", dim, m.Type()), func(b *testing.B) {
				for b.Loop() {
					distancer.Distance(cx)
				}
				b.ReportMetric((float64(b.N)/1e6)/float64(b.Elapsed().Seconds()), "m.ops/sec")
			})
		}
	}
}

// For comparison.
// BenchmarkSQDistancer mirrors BenchmarkRQDistancer for the scalar quantizer
// so the two schemes can be compared directly.
func BenchmarkSQDistancer(b *testing.B) {
	rng := newRNG(42)
	dimensions := []int{64, 128, 256, 512, 1024, 1536, 2048}
	metrics := []distancer.Provider{
		distancer.NewCosineDistanceProvider(),
		distancer.NewDotProductProvider(),
		distancer.NewL2SquaredProvider(),
	}
	for _, dim := range dimensions {
		for _, m := range metrics {
			// A single training vector fixes the quantization range.
			train := [][]float32{
				randomUnitVector(dim, rng),
			}
			quantizer := compressionhelpers.NewScalarQuantizer(train, m)
			q, x := correlatedVectors(dim, 0.5)
			xCode := quantizer.Encode(x)
			distancer := quantizer.NewDistancer(q)
			b.Run(fmt.Sprintf("SQDistancer-d%d-%s", dim, m.Type()), func(b *testing.B) {
				for b.Loop() {
					distancer.Distance(xCode)
				}
				b.ReportMetric((float64(b.N)/1e6)/float64(b.Elapsed().Seconds()), "m.ops/sec")
			})
		}
	}
}
diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/scalar_quantization.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/scalar_quantization.go
new file mode 100644
index 0000000000000000000000000000000000000000..614d3091d72c28daf2637798bba2c5a9a7716d16
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/scalar_quantization.go
@@ -0,0 +1,233 @@
//                           _       _
// __      _____  __ ___   ___  __ _| |_ ___
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
//  \ V  V /  __/ (_| |\ V /| | (_| | ||  __/
//   \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
//
//  Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+// +// CONTACT: hello@weaviate.io +// + +package compressionhelpers + +import ( + "encoding/binary" + "math" + + "github.com/pkg/errors" + + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer" +) + +const ( + codes = 255.0 + codes2 = codes * codes +) + +type ScalarQuantizer struct { + a float32 + b float32 + a2 float32 + ab float32 + ib2 float32 + distancer distancer.Provider + dimensions int +} + +type SQData struct { + A float32 + B float32 + Dimensions uint16 +} + +func (sq *ScalarQuantizer) DistanceBetweenCompressedVectors(x, y []byte) (float32, error) { + if len(x) != len(y) { + return 0, errors.Errorf("vector lengths don't match: %d vs %d", + len(x), len(y)) + } + switch sq.distancer.Type() { + case "l2-squared": + return sq.a2 * float32(l2SquaredByteImpl(x[:len(x)-8], y[:len(y)-8])), nil + case "dot": + return -(sq.a2*float32(dotByteImpl(x[:len(x)-8], y[:len(y)-8])) + sq.ab*float32(sq.norm(x)+sq.norm(y)) + sq.ib2), nil + case "cosine-dot": + return 1 - (sq.a2*float32(dotByteImpl(x[:len(x)-8], y[:len(y)-8])) + sq.ab*float32(sq.norm(x)+sq.norm(y)) + sq.ib2), nil + } + return 0, errors.Errorf("Distance not supported yet %s", sq.distancer) +} + +func (pq *ScalarQuantizer) FromCompressedBytesWithSubsliceBuffer(compressed []byte, buffer *[]byte) []byte { + if len(*buffer) < len(compressed) { + *buffer = make([]byte, len(compressed)*1000) + } + + // take from end so we can address the start of the buffer + out := (*buffer)[len(*buffer)-len(compressed):] + copy(out, compressed) + *buffer = (*buffer)[:len(*buffer)-len(compressed)] + + return out +} + +func NewScalarQuantizer(data [][]float32, distance distancer.Provider) *ScalarQuantizer { + if len(data) == 0 { + return nil + } + + sq := &ScalarQuantizer{ + distancer: distance, + dimensions: len(data[0]), + } + sq.b = data[0][0] + for i := 0; i < len(data); i++ { + vec := data[i] + for _, x := range vec { + if x < sq.b { + sq.a += sq.b - x + sq.b = x + } else if x-sq.b > sq.a { + sq.a = x - sq.b + 
} + } + } + sq.a2 = sq.a * sq.a / codes2 + sq.ab = sq.a * sq.b / codes + sq.ib2 = sq.b * sq.b * float32(sq.dimensions) + return sq +} + +func RestoreScalarQuantizer(a, b float32, dimensions uint16, distance distancer.Provider) (*ScalarQuantizer, error) { + if a == 0 { + return nil, errors.New("invalid range value while restoring SQ settings") + } + + sq := &ScalarQuantizer{ + distancer: distance, + a: a, + b: b, + a2: a * a / codes2, + ab: a * b / codes, + ib2: b * b * float32(dimensions), + dimensions: int(dimensions), + } + return sq, nil +} + +func codeFor(x, a, b, codes float32) byte { + if x < b { + return 0 + } else if x-b > a { + return byte(codes) + } else { + return byte(math.Floor(float64((x - b) * codes / a))) + } +} + +func (sq *ScalarQuantizer) Encode(vec []float32) []byte { + var sum uint32 = 0 + var sum2 uint32 = 0 + code := make([]byte, len(vec)+8) + for i := 0; i < len(vec); i++ { + code[i] = codeFor(vec[i], sq.a, sq.b, codes) + sum += uint32(code[i]) + sum2 += uint32(code[i]) * uint32(code[i]) + } + binary.BigEndian.PutUint32(code[len(vec):], sum) + binary.BigEndian.PutUint32(code[len(vec)+4:], sum2) + return code +} + +type SQDistancer struct { + x []float32 + sq *ScalarQuantizer + compressed []byte +} + +func (sq *ScalarQuantizer) NewDistancer(a []float32) *SQDistancer { + sum := float32(0) + sum2 := float32(0) + for _, x := range a { + sum += x + sum2 += (x * x) + } + return &SQDistancer{ + x: a, + sq: sq, + compressed: sq.Encode(a), + } +} + +func (d *SQDistancer) Distance(x []byte) (float32, error) { + return d.sq.DistanceBetweenCompressedVectors(d.compressed, x) +} + +func (d *SQDistancer) DistanceToFloat(x []float32) (float32, error) { + if len(d.x) > 0 { + return d.sq.distancer.SingleDist(d.x, x) + } + xComp := d.sq.Encode(x) + return d.sq.DistanceBetweenCompressedVectors(d.compressed, xComp) +} + +func (sq *ScalarQuantizer) NewQuantizerDistancer(a []float32) quantizerDistancer[byte] { + return sq.NewDistancer(a) +} + +func (sq 
*ScalarQuantizer) NewCompressedQuantizerDistancer(a []byte) quantizerDistancer[byte] { + return &SQDistancer{ + x: nil, + sq: sq, + compressed: a, + } +} + +func (sq *ScalarQuantizer) ReturnQuantizerDistancer(distancer quantizerDistancer[byte]) {} + +func (sq *ScalarQuantizer) CompressedBytes(compressed []byte) []byte { + return compressed +} + +func (sq *ScalarQuantizer) FromCompressedBytes(compressed []byte) []byte { + return compressed +} + +func (sq *ScalarQuantizer) PersistCompression(logger CommitLogger) { + logger.AddSQCompression(SQData{ + A: sq.a, + B: sq.b, + Dimensions: uint16(sq.dimensions), + }) +} + +func (sq *ScalarQuantizer) norm(code []byte) uint32 { + return binary.BigEndian.Uint32(code[len(code)-8:]) +} + +type SQStats struct { + A float32 `json:"a"` + B float32 `json:"b"` +} + +func (s SQStats) CompressionType() string { + return "sq" +} + +func (s SQStats) CompressionRatio(_ int) float64 { + // SQ compression: original size = dimensions * 4 bytes (float32) + // compressed size = dimensions * 1 byte + 8 bytes (for sum and sum2) + // For practical vector dimensions (typically 1536+), the ratio approaches 4 + // and the +8 bytes overhead becomes negligible + // For 1536 dimensions: (1536 * 4) / (1536 + 8) ≈ 3.98 + // For 768 dimensions: (768 * 4) / (768 + 8) ≈ 3.96 + // For 384 dimensions: (384 * 4) / (384 + 8) ≈ 3.92 + // The ratio is essentially constant at ~4x compression + return 4.0 +} + +func (sq *ScalarQuantizer) Stats() CompressionStats { + return SQStats{ + A: sq.a, + B: sq.b, + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/scalar_quantization_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/scalar_quantization_test.go new file mode 100644 index 0000000000000000000000000000000000000000..7bc7f2b5d67c53254051ec255a78d41695f9ff74 --- /dev/null +++ 
b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/scalar_quantization_test.go @@ -0,0 +1,204 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +//go:build !race + +package compressionhelpers_test + +import ( + "fmt" + "math" + "sync" + "testing" + "time" + + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/weaviate/weaviate/adapters/repos/db/priorityqueue" + "github.com/weaviate/weaviate/adapters/repos/db/vector/compressionhelpers" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer" + testinghelpers "github.com/weaviate/weaviate/adapters/repos/db/vector/testinghelpers" +) + +func Test_NoRaceSQEncode(t *testing.T) { + sq := compressionhelpers.NewScalarQuantizer([][]float32{ + {1, 0, 0, 0}, + {1, 1, 1, 5}, + }, distancer.NewCosineDistanceProvider()) + vec := []float32{0.5, 1, 0, 2} + code := sq.Encode(vec) + assert.NotNil(t, code) + assert.Equal(t, byte(25), code[0]) + assert.Equal(t, byte(51), code[1]) + assert.Equal(t, byte(0), code[2]) + assert.Equal(t, byte(102), code[3]) +} + +func Test_NoRaceSQDistance(t *testing.T) { + distancers := []distancer.Provider{distancer.NewL2SquaredProvider(), distancer.NewCosineDistanceProvider(), distancer.NewDotProductProvider()} + for _, distancer := range distancers { + sq := compressionhelpers.NewScalarQuantizer([][]float32{ + {1, 0, 0, 0}, + {1, 1, 1, 5}, + }, distancer) + vec1 := []float32{0.217, 0.435, 0, 0.348} + vec2 := []float32{0.241, 0.202, 0.257, 0.300} + + dist, err := sq.DistanceBetweenCompressedVectors(sq.Encode(vec1), sq.Encode(vec2)) + expectedDist, _ := distancer.SingleDist(vec1, vec2) + assert.Nil(t, err) + if err == nil { + assert.True(t, math.Abs(float64(expectedDist-dist)) < 0.0112) + 
fmt.Println(expectedDist-dist, expectedDist, dist) + } + } +} + +func distancerWrapper(dp distancer.Provider) func(x, y []float32) float32 { + return func(x, y []float32) float32 { + dist, _ := dp.SingleDist(x, y) + return dist + } +} + +func Test_NoRaceRandomSQDistanceFloatToByte(t *testing.T) { + distancers := []distancer.Provider{distancer.NewL2SquaredProvider(), distancer.NewCosineDistanceProvider(), distancer.NewDotProductProvider()} + vSize := 100 + qSize := 10 + dims := 150 + k := 10 + data, queries := testinghelpers.RandomVecs(vSize, qSize, dims) + testinghelpers.Normalize(data) + testinghelpers.Normalize(queries) + for _, distancer := range distancers { + sq := compressionhelpers.NewScalarQuantizer(data, distancer) + neighbors := make([][]uint64, qSize) + for j, y := range queries { + neighbors[j], _ = testinghelpers.BruteForce(logrus.New(), data, y, k, distancerWrapper(distancer)) + } + xCompressed := make([][]byte, vSize) + for i, x := range data { + xCompressed[i] = sq.Encode(x) + } + var relevant uint64 + mutex := sync.Mutex{} + ellapsed := time.Duration(0) + compressionhelpers.Concurrently(logger, uint64(len(queries)), func(i uint64) { + heap := priorityqueue.NewMax[any](k) + cd := sq.NewDistancer(queries[i]) + for j := range xCompressed { + before := time.Now() + d, _ := cd.Distance(xCompressed[j]) + ell := time.Since(before) + mutex.Lock() + ellapsed += ell + mutex.Unlock() + if heap.Len() < k || heap.Top().Dist > d { + if heap.Len() == k { + heap.Pop() + } + heap.Insert(uint64(j), d) + } + } + results := make([]uint64, 0, k) + for heap.Len() > 0 { + results = append(results, heap.Pop().ID) + } + hits := matchesInLists(neighbors[i][:k], results) + mutex.Lock() + relevant += hits + mutex.Unlock() + }) + + recall := float32(relevant) / float32(k*len(queries)) + latency := float32(ellapsed.Microseconds()) / float32(len(queries)) + fmt.Println(distancer.Type(), recall, latency) + assert.GreaterOrEqual(t, recall, float32(0.95), distancer.Type()) + + 
sqStats := sq.Stats().(compressionhelpers.SQStats) + assert.GreaterOrEqual(t, sqStats.A, float32(-1)) + assert.GreaterOrEqual(t, sqStats.A, sqStats.B) + assert.LessOrEqual(t, sqStats.B, float32(1)) + } +} + +func Test_NoRaceRandomSQDistanceByteToByte(t *testing.T) { + distancers := []distancer.Provider{distancer.NewL2SquaredProvider(), distancer.NewCosineDistanceProvider(), distancer.NewDotProductProvider()} + vSize := 100 + qSize := 10 + dims := 150 + k := 10 + data, queries := testinghelpers.RandomVecsFixedSeed(vSize, qSize, dims) + testinghelpers.Normalize(data) + testinghelpers.Normalize(queries) + for _, distancer := range distancers { + sq := compressionhelpers.NewScalarQuantizer(data, distancer) + neighbors := make([][]uint64, qSize) + for j, y := range queries { + neighbors[j], _ = testinghelpers.BruteForce(logrus.New(), data, y, k, distancerWrapper(distancer)) + } + xCompressed := make([][]byte, vSize) + for i, x := range data { + xCompressed[i] = sq.Encode(x) + } + var relevant uint64 + mutex := sync.Mutex{} + ellapsed := time.Duration(0) + compressionhelpers.Concurrently(logger, uint64(len(queries)), func(i uint64) { + heap := priorityqueue.NewMax[any](k) + cd := sq.NewCompressedQuantizerDistancer(sq.Encode(queries[i])) + for j := range xCompressed { + before := time.Now() + d, _ := cd.Distance(xCompressed[j]) + ell := time.Since(before) + mutex.Lock() + ellapsed += ell + mutex.Unlock() + if heap.Len() < k || heap.Top().Dist > d { + if heap.Len() == k { + heap.Pop() + } + heap.Insert(uint64(j), d) + } + } + results := make([]uint64, 0, k) + for heap.Len() > 0 { + results = append(results, heap.Pop().ID) + } + hits := matchesInLists(neighbors[i][:k], results) + mutex.Lock() + relevant += hits + mutex.Unlock() + }) + + recall := float32(relevant) / float32(k*len(queries)) + latency := float32(ellapsed.Microseconds()) / float32(len(queries)) + fmt.Println(distancer.Type(), recall, latency) + assert.GreaterOrEqual(t, recall, float32(0.95), distancer.Type()) 
+ } +} + +func matchesInLists(control []uint64, results []uint64) uint64 { + desired := map[uint64]struct{}{} + for _, relevant := range control { + desired[relevant] = struct{}{} + } + + var matches uint64 + for _, candidate := range results { + _, ok := desired[candidate] + if ok { + matches++ + } + } + + return matches +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/tile_encoder.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/tile_encoder.go new file mode 100644 index 0000000000000000000000000000000000000000..adc56356886bb0bbde34bfb07f9128c84090b7cc --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/tile_encoder.go @@ -0,0 +1,204 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package compressionhelpers + +import ( + "encoding/binary" + "math" + "sync/atomic" + + "gonum.org/v1/gonum/stat/distuv" +) + +type distribution interface { + Transform(x float64) float64 + CDF(x float64) float64 + Quantile(x float64) float64 +} + +type logNormalDistribution struct { + dist *distuv.LogNormal +} + +func newLogNormalDistribution(mean float64, std float64) distribution { + return &logNormalDistribution{ + dist: &distuv.LogNormal{ + Mu: mean, + Sigma: std, + }, + } +} + +func (d *logNormalDistribution) Transform(x float64) float64 { + if x > 0 { + return math.Log(x) + } + return 0 +} + +func (d *logNormalDistribution) CDF(x float64) float64 { + return d.dist.CDF(x) +} + +func (d *logNormalDistribution) Quantile(x float64) float64 { + return d.dist.Quantile(x) +} + +type normalDistribution struct { + dist *distuv.Normal +} + +func newNormalDistribution(mean float64, std float64) distribution { + return &normalDistribution{ + dist: &distuv.Normal{ + Mu: mean, + Sigma: std, + }, + } +} + +func (d *normalDistribution) Transform(x float64) float64 { + return x +} + +func (d *normalDistribution) CDF(x float64) float64 { + return d.dist.CDF(x) +} + +func (d *normalDistribution) Quantile(x float64) float64 { + return d.dist.Quantile(x) +} + +type Centroid struct { + Center []float32 + Calculated atomic.Bool +} + +type EncoderDistribution byte + +const ( + NormalEncoderDistribution EncoderDistribution = 0 + LogNormalEncoderDistribution EncoderDistribution = 1 +) + +type TileEncoder struct { + bins float64 + mean float64 + stdDev float64 + size float64 + s1 float64 + s2 float64 + segment int + centroids []Centroid + encoderDistribution EncoderDistribution + distribution distribution +} + +func NewTileEncoder(bits int, segment int, encoderDistribution EncoderDistribution) *TileEncoder { + centroids := math.Pow(2, float64(bits)) + te := &TileEncoder{ + bins: centroids, + mean: 0, + stdDev: 0, + size: 0, + s1: 0, + s2: 0, + 
segment: segment, + centroids: make([]Centroid, int(centroids)), + encoderDistribution: encoderDistribution, + } + te.setEncoderDistribution() + return te +} + +func RestoreTileEncoder(bins float64, mean float64, stdDev float64, size float64, s1 float64, s2 float64, segment uint16, encoderDistribution byte) *TileEncoder { + te := &TileEncoder{ + bins: bins, + mean: mean, + stdDev: stdDev, + size: size, + s1: s1, + s2: s2, + segment: int(segment), + encoderDistribution: EncoderDistribution(encoderDistribution), + } + te.setEncoderDistribution() + return te +} + +func (te *TileEncoder) ExposeDataForRestore() []byte { + buffer := make([]byte, 51) + binary.LittleEndian.PutUint64(buffer[0:8], math.Float64bits(te.bins)) + binary.LittleEndian.PutUint64(buffer[8:16], math.Float64bits(te.mean)) + binary.LittleEndian.PutUint64(buffer[16:24], math.Float64bits(te.stdDev)) + binary.LittleEndian.PutUint64(buffer[24:32], math.Float64bits(te.size)) + binary.LittleEndian.PutUint64(buffer[32:40], math.Float64bits(te.s1)) + binary.LittleEndian.PutUint64(buffer[40:48], math.Float64bits(te.s2)) + binary.LittleEndian.PutUint16(buffer[48:50], uint16(te.segment)) + buffer[50] = byte(te.encoderDistribution) + return buffer +} + +func (te *TileEncoder) Fit(data [][]float32) error { + te.setEncoderDistribution() + return nil +} + +func (te *TileEncoder) setEncoderDistribution() { + switch te.encoderDistribution { + case LogNormalEncoderDistribution: + te.distribution = newLogNormalDistribution(te.mean, te.stdDev) + case NormalEncoderDistribution: + te.distribution = newNormalDistribution(te.mean, te.stdDev) + } +} + +func (te *TileEncoder) Add(x []float32) { + // calculate mean and stddev iteratively + x64 := te.distribution.Transform(float64(x[te.segment])) + te.s1 += x64 + te.s2 += x64 * x64 + te.size++ + te.mean = te.s1 / te.size + sum := te.s2 + te.size*te.mean*te.mean + prod := 2 * te.mean * te.s1 + te.stdDev = math.Sqrt((sum - prod) / te.size) +} + +func (te *TileEncoder) Encode(x 
[]float32) byte { + cdf := te.distribution.CDF(float64(x[te.segment])) + intPart, _ := math.Modf(cdf * float64(te.bins)) + return byte(intPart) +} + +func (te *TileEncoder) centroid(b byte) []float32 { + res := make([]float32, 0, 1) + if b == 0 { + res = append(res, float32(te.distribution.Quantile(1/te.bins))) + } else if b == byte(te.bins) { + res = append(res, float32(te.distribution.Quantile((te.bins-1)/te.bins))) + } else { + b64 := float64(b) + mean := (b64/te.bins + (b64+1)/te.bins) / 2 + res = append(res, float32(te.distribution.Quantile(mean))) + } + return res +} + +func (te *TileEncoder) Centroid(b byte) []float32 { + if te.centroids[b].Calculated.Load() { + return te.centroids[b].Center + } + te.centroids[b].Center = te.centroid(b) + te.centroids[b].Calculated.Store(true) + return te.centroids[b].Center +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/tile_encoder_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/tile_encoder_test.go new file mode 100644 index 0000000000000000000000000000000000000000..eebcfd443917fe23e51c63d8c1eef6928e6597ef --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/tile_encoder_test.go @@ -0,0 +1,67 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +//go:build !race + +package compressionhelpers_test + +import ( + "math" + "math/rand" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/weaviate/weaviate/adapters/repos/db/vector/compressionhelpers" +) + +func Test_NoRaceTileEncoderEncode(t *testing.T) { + encoder := compressionhelpers.NewTileEncoder(4, 0, compressionhelpers.LogNormalEncoderDistribution) + for i := 0; i < 1000000; i++ { + encoder.Add([]float32{float32(rand.NormFloat64() + 100)}) + } + encoder.Fit([][]float32{}) + assert.Equal(t, encoder.Encode([]float32{0.1}), byte(0)) + assert.Equal(t, encoder.Encode([]float32{100}), byte(8)) + assert.Equal(t, encoder.Encode([]float32{1000}), byte(16)) +} + +func Test_NoRaceTileEncoderCentroids(t *testing.T) { + encoder := compressionhelpers.NewTileEncoder(4, 0, compressionhelpers.LogNormalEncoderDistribution) + for i := 0; i < 1000000; i++ { + encoder.Add([]float32{float32(rand.NormFloat64() + 100)}) + } + encoder.Fit([][]float32{}) + assert.Equal(t, math.Round(float64(encoder.Centroid(0)[0])), 98.0) + assert.Equal(t, math.Round(float64(encoder.Centroid(2)[0])), 99.0) + assert.Equal(t, math.Round(float64(encoder.Centroid(14)[0])), 101.0) +} + +func Test_NoRaceNormalTileEncoderEncode(t *testing.T) { + encoder := compressionhelpers.NewTileEncoder(4, 0, compressionhelpers.NormalEncoderDistribution) + for i := 0; i < 1000000; i++ { + encoder.Add([]float32{float32(rand.NormFloat64())}) + } + encoder.Fit([][]float32{}) + assert.Equal(t, encoder.Encode([]float32{0.1}), byte(8)) + assert.Equal(t, encoder.Encode([]float32{100}), byte(16)) + assert.Equal(t, encoder.Encode([]float32{1000}), byte(16)) +} + +func Test_NoRaceNormalTileEncoderCentroids(t *testing.T) { + encoder := compressionhelpers.NewTileEncoder(4, 0, compressionhelpers.NormalEncoderDistribution) + for i := 0; i < 1000000; i++ { + encoder.Add([]float32{float32(rand.NormFloat64())}) + } + encoder.Fit([][]float32{}) + assert.Equal(t, 
math.Round(float64(encoder.Centroid(0)[0])), -2.0) + assert.Equal(t, math.Round(float64(encoder.Centroid(8)[0])), 0.0) + assert.Equal(t, math.Round(float64(encoder.Centroid(15)[0])), 2.0) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/training_limit_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/training_limit_test.go new file mode 100644 index 0000000000000000000000000000000000000000..892221627ed21c21925a5770f1c60284536e8fa1 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/training_limit_test.go @@ -0,0 +1,83 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +//go:build !race + +package compressionhelpers + +import ( + "fmt" + "math/rand" + "testing" + "time" + + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer" + "github.com/weaviate/weaviate/entities/vectorindex/hnsw" +) + +func getRandomSeed() *rand.Rand { + return rand.New(rand.NewSource(time.Now().UnixNano())) +} + +func genVector(r *rand.Rand, dimensions int) []float32 { + vector := make([]float32, 0, dimensions) + for i := 0; i < dimensions; i++ { + vector = append(vector, r.Float32()*2-1) + } + return vector +} + +func RandomVecs(size int, queriesSize int, dimensions int) ([][]float32, [][]float32) { + fmt.Printf("generating %d vectors...\n", size+queriesSize) + r := getRandomSeed() + vectors := make([][]float32, 0, size) + queries := make([][]float32, 0, queriesSize) + for i := 0; i < size; i++ { + vectors = append(vectors, genVector(r, dimensions)) + } + for i := 0; i < queriesSize; i++ { + queries = append(queries, 
genVector(r, dimensions)) + } + return vectors, queries +} + +func Test_NoRacePQInvalidConfig(t *testing.T) { + logger, _ := test.NewNullLogger() + t.Run("validate training limit applied", func(t *testing.T) { + amount := 64 + centroids := 256 + vectors_size := 400 + vectors, _ := RandomVecs(vectors_size, vectors_size, amount) + distanceProvider := distancer.NewL2SquaredProvider() + + cfg := hnsw.PQConfig{ + Enabled: true, + Encoder: hnsw.PQEncoder{ + Type: hnsw.PQEncoderTypeKMeans, + Distribution: hnsw.PQEncoderDistributionLogNormal, + }, + Centroids: centroids, + TrainingLimit: 260, + Segments: amount, + } + pq, err := NewProductQuantizer( + cfg, + distanceProvider, + amount, + logger, + ) + assert.NoError(t, err) + pq.Fit(vectors) + assert.Equal(t, pq.trainingLimit, 260) + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/utils.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/utils.go new file mode 100644 index 0000000000000000000000000000000000000000..79f9b7ce7927607a3deb19ad8f6b7c7a1081711d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/compressionhelpers/utils.go @@ -0,0 +1,63 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package compressionhelpers + +import ( + "math" + "runtime" + "sync" + + "github.com/sirupsen/logrus" + enterrors "github.com/weaviate/weaviate/entities/errors" +) + +type Action func(taskIndex uint64) + +func Concurrently(log logrus.FieldLogger, n uint64, action Action) { + n64 := float64(n) + workerCount := runtime.GOMAXPROCS(0) + wg := &sync.WaitGroup{} + split := uint64(math.Ceil(n64 / float64(workerCount))) + for worker := uint64(0); worker < uint64(workerCount); worker++ { + workerID := worker + + wg.Add(1) + enterrors.GoWrapper(func() { + defer wg.Done() + for i := workerID * split; i < uint64(math.Min(float64((workerID+1)*split), n64)); i++ { + action(i) + } + }, log) + } + wg.Wait() +} + +func ConcurrentlyWithError(log logrus.FieldLogger, n uint64, action func(taskIndex uint64) error) error { + n64 := float64(n) + workerCount := runtime.GOMAXPROCS(0) + eg := enterrors.NewErrorGroupWrapper(log) + eg.SetLimit(workerCount) + split := uint64(math.Ceil(n64 / float64(workerCount))) + for worker := uint64(0); worker < uint64(workerCount); worker++ { + workerID := worker + eg.Go(func() error { + for i := workerID * split; i < uint64(math.Min(float64((workerID+1)*split), n64)); i++ { + err := action(i) + if err != nil { + return err + } + } + return nil + }) + } + return eg.Wait() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/dynamic/config.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/dynamic/config.go new file mode 100644 index 0000000000000000000000000000000000000000..d2c672d63f6dafe22c4a0de56d079414470a81e4 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/dynamic/config.go @@ -0,0 +1,117 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package dynamic + +import ( + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/weaviate/weaviate/adapters/repos/db/vector/common" + "github.com/weaviate/weaviate/adapters/repos/db/vector/flat" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer" + "github.com/weaviate/weaviate/entities/cyclemanager" + "github.com/weaviate/weaviate/entities/errorcompounder" + schemaconfig "github.com/weaviate/weaviate/entities/schema/config" + ent "github.com/weaviate/weaviate/entities/vectorindex/dynamic" + "github.com/weaviate/weaviate/usecases/memwatch" + "github.com/weaviate/weaviate/usecases/monitoring" + bolt "go.etcd.io/bbolt" +) + +type Config struct { + ID string + TargetVector string + Logger logrus.FieldLogger + RootPath string + ShardName string + ClassName string + PrometheusMetrics *monitoring.PrometheusMetrics + VectorForIDThunk common.VectorForID[float32] + TempVectorForIDThunk common.TempVectorForID[float32] + DistanceProvider distancer.Provider + MakeCommitLoggerThunk hnsw.MakeCommitLogger + TombstoneCallbacks cyclemanager.CycleCallbackGroup + SharedDB *bolt.DB + HNSWDisableSnapshots bool + HNSWSnapshotOnStartup bool + HNSWWaitForCachePrefill bool + MinMMapSize int64 + MaxWalReuseSize int64 + LazyLoadSegments bool + AllocChecker memwatch.AllocChecker + WriteSegmentInfoIntoFileName bool + WriteMetadataFilesEnabled bool +} + +func (c Config) Validate() error { + ec := errorcompounder.New() + + if c.ID == "" { + ec.Addf("id cannot be empty") + } + + if c.DistanceProvider == nil { + ec.Addf("distancerProvider cannot be nil") + } + + return ec.ToError() +} + +func ValidateUserConfigUpdate(initial, updated schemaconfig.VectorIndexConfig) error { + initialParsed, ok := initial.(ent.UserConfig) + if !ok { + return errors.Errorf("initial is not UserConfig, but %T", initial) + } + + updatedParsed, ok := updated.(ent.UserConfig) + if 
!ok { + return errors.Errorf("updated is not UserConfig, but %T", updated) + } + + immutableFields := []immutableParameter{ + { + name: "distance", + accessor: func(c ent.UserConfig) interface{} { return c.Distance }, + }, + } + + for _, u := range immutableFields { + if err := validateImmutableField(u, initialParsed, updatedParsed); err != nil { + return err + } + } + if err := flat.ValidateUserConfigUpdate(initialParsed.FlatUC, updatedParsed.FlatUC); err != nil { + return err + } + if err := hnsw.ValidateUserConfigUpdate(initialParsed.HnswUC, updatedParsed.HnswUC); err != nil { + return err + } + return nil +} + +type immutableParameter struct { + accessor func(c ent.UserConfig) interface{} + name string +} + +func validateImmutableField(u immutableParameter, + previous, next ent.UserConfig, +) error { + oldField := u.accessor(previous) + newField := u.accessor(next) + if oldField != newField { + return errors.Errorf("%s is immutable: attempted change from \"%v\" to \"%v\"", + u.name, oldField, newField) + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/dynamic/index.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/dynamic/index.go new file mode 100644 index 0000000000000000000000000000000000000000..319c0fb183948437f9c7eeb19e76cce87f9e8548 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/dynamic/index.go @@ -0,0 +1,624 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package dynamic + +import ( + "context" + "encoding/binary" + simpleErrors "errors" + "fmt" + "io" + "math" + "os" + "path/filepath" + "sync" + "sync/atomic" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "go.etcd.io/bbolt" + + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv" + "github.com/weaviate/weaviate/adapters/repos/db/vector/common" + "github.com/weaviate/weaviate/adapters/repos/db/vector/compressionhelpers" + "github.com/weaviate/weaviate/adapters/repos/db/vector/flat" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer" + entcfg "github.com/weaviate/weaviate/entities/config" + "github.com/weaviate/weaviate/entities/cyclemanager" + enterrors "github.com/weaviate/weaviate/entities/errors" + schemaconfig "github.com/weaviate/weaviate/entities/schema/config" + ent "github.com/weaviate/weaviate/entities/vectorindex/dynamic" + hnswent "github.com/weaviate/weaviate/entities/vectorindex/hnsw" + "github.com/weaviate/weaviate/usecases/monitoring" +) + +const composerUpgradedKey = "upgraded" + +var dynamicBucket = []byte("dynamic") + +type Index interface { + // UnderlyingIndex returns the underlying index type (flat or hnsw) + UnderlyingIndex() common.IndexType +} + +type VectorIndex interface { + Add(ctx context.Context, id uint64, vector []float32) error + AddBatch(ctx context.Context, id []uint64, vector [][]float32) error + Delete(id ...uint64) error + SearchByVector(ctx context.Context, vector []float32, k int, allow helpers.AllowList) ([]uint64, []float32, error) + SearchByVectorDistance(ctx context.Context, vector []float32, dist float32, + maxLimit int64, allow helpers.AllowList) ([]uint64, []float32, error) + UpdateUserConfig(updated schemaconfig.VectorIndexConfig, callback func()) error + Drop(ctx context.Context) error + Shutdown(ctx context.Context) 
error + Flush() error + SwitchCommitLogs(ctx context.Context) error + ListFiles(ctx context.Context, basePath string) ([]string, error) + PostStartup() + Compressed() bool + Multivector() bool + ValidateBeforeInsert(vector []float32) error + ContainsDoc(docID uint64) bool + QueryVectorDistancer(queryVector []float32) common.QueryVectorDistancer + // Iterate over all indexed document ids in the index. + // Consistency or order is not guaranteed, as the index may be concurrently modified. + // If the callback returns false, the iteration will stop. + Iterate(fn func(docID uint64) bool) + Type() common.IndexType +} + +type upgradableIndexer interface { + Upgraded() bool + Upgrade(callback func()) error + ShouldUpgrade() (bool, int) + AlreadyIndexed() uint64 +} + +type dynamic struct { + sync.RWMutex + id string + targetVector string + store *lsmkv.Store + logger logrus.FieldLogger + rootPath string + shardName string + className string + prometheusMetrics *monitoring.PrometheusMetrics + vectorForIDThunk common.VectorForID[float32] + tempVectorForIDThunk common.TempVectorForID[float32] + distanceProvider distancer.Provider + makeCommitLoggerThunk hnsw.MakeCommitLogger + threshold uint64 + index VectorIndex + upgraded atomic.Bool + upgradeOnce sync.Once + tombstoneCallbacks cyclemanager.CycleCallbackGroup + hnswUC hnswent.UserConfig + db *bbolt.DB + ctx context.Context + cancel context.CancelFunc + hnswDisableSnapshots bool + hnswSnapshotOnStartup bool + hnswWaitForCachePrefill bool + LazyLoadSegments bool + flatBQ bool + WriteSegmentInfoIntoFileName bool + WriteMetadataFilesEnabled bool +} + +func New(cfg Config, uc ent.UserConfig, store *lsmkv.Store) (*dynamic, error) { + if !entcfg.Enabled(os.Getenv("ASYNC_INDEXING")) { + return nil, errors.New("the dynamic index can only be created under async indexing environment") + } + if err := cfg.Validate(); err != nil { + return nil, errors.Wrap(err, "invalid config") + } + + logger := cfg.Logger + if logger == nil { + l := 
logrus.New() + l.Out = io.Discard + logger = l + } + + flatConfig := flat.Config{ + ID: cfg.ID, + RootPath: cfg.RootPath, + TargetVector: cfg.TargetVector, + Logger: cfg.Logger, + DistanceProvider: cfg.DistanceProvider, + MinMMapSize: cfg.MinMMapSize, + MaxWalReuseSize: cfg.MaxWalReuseSize, + LazyLoadSegments: cfg.LazyLoadSegments, + AllocChecker: cfg.AllocChecker, + WriteSegmentInfoIntoFileName: cfg.WriteSegmentInfoIntoFileName, + WriteMetadataFilesEnabled: cfg.WriteMetadataFilesEnabled, + } + + ctx, cancel := context.WithCancel(context.Background()) + + index := &dynamic{ + id: cfg.ID, + targetVector: cfg.TargetVector, + logger: logger, + rootPath: cfg.RootPath, + shardName: cfg.ShardName, + className: cfg.ClassName, + prometheusMetrics: cfg.PrometheusMetrics, + vectorForIDThunk: cfg.VectorForIDThunk, + tempVectorForIDThunk: cfg.TempVectorForIDThunk, + distanceProvider: cfg.DistanceProvider, + makeCommitLoggerThunk: cfg.MakeCommitLoggerThunk, + store: store, + threshold: uc.Threshold, + tombstoneCallbacks: cfg.TombstoneCallbacks, + hnswUC: uc.HnswUC, + db: cfg.SharedDB, + ctx: ctx, + cancel: cancel, + hnswDisableSnapshots: cfg.HNSWDisableSnapshots, + hnswSnapshotOnStartup: cfg.HNSWSnapshotOnStartup, + hnswWaitForCachePrefill: cfg.HNSWWaitForCachePrefill, + LazyLoadSegments: cfg.LazyLoadSegments, + flatBQ: uc.FlatUC.BQ.Enabled, + WriteSegmentInfoIntoFileName: cfg.WriteSegmentInfoIntoFileName, + WriteMetadataFilesEnabled: cfg.WriteMetadataFilesEnabled, + } + + err := cfg.SharedDB.Update(func(tx *bbolt.Tx) error { + _, err := tx.CreateBucketIfNotExists(dynamicBucket) + return err + }) + if err != nil { + return nil, errors.Wrap(err, "create dynamic bolt bucket") + } + + upgraded := false + + err = cfg.SharedDB.View(func(tx *bbolt.Tx) error { + b := tx.Bucket(dynamicBucket) + + v := b.Get(index.dbKey()) + if v == nil { + return nil + } + + upgraded = v[0] != 0 + return nil + }) + if err != nil { + return nil, errors.Wrap(err, "get dynamic state") + } + + if upgraded 
{ + index.upgraded.Store(true) + hnsw, err := hnsw.New( + hnsw.Config{ + Logger: index.logger, + RootPath: index.rootPath, + ID: index.id, + ShardName: index.shardName, + ClassName: index.className, + PrometheusMetrics: index.prometheusMetrics, + VectorForIDThunk: index.vectorForIDThunk, + TempVectorForIDThunk: index.tempVectorForIDThunk, + DistanceProvider: index.distanceProvider, + MakeCommitLoggerThunk: index.makeCommitLoggerThunk, + DisableSnapshots: index.hnswDisableSnapshots, + SnapshotOnStartup: index.hnswSnapshotOnStartup, + LazyLoadSegments: index.LazyLoadSegments, + WaitForCachePrefill: index.hnswWaitForCachePrefill, + WriteSegmentInfoIntoFileName: cfg.WriteSegmentInfoIntoFileName, + WriteMetadataFilesEnabled: cfg.WriteMetadataFilesEnabled, + }, + index.hnswUC, + index.tombstoneCallbacks, + index.store, + ) + if err != nil { + return nil, err + } + index.index = hnsw + } else { + flat, err := flat.New(flatConfig, uc.FlatUC, store) + if err != nil { + return nil, err + } + index.index = flat + } + + return index, nil +} + +func (dynamic *dynamic) Type() common.IndexType { + return common.IndexTypeDynamic +} + +func (dynamic *dynamic) dbKey() []byte { + var key []byte + if dynamic.targetVector == "fef" { + key = make([]byte, 0, len(composerUpgradedKey)+len(dynamic.targetVector)+1) + key = append(key, composerUpgradedKey...) + key = append(key, '_') + key = append(key, dynamic.targetVector...) 
+ } else { + key = []byte(composerUpgradedKey) + } + + return key +} + +func (dynamic *dynamic) getBucketName() string { + if dynamic.targetVector != "" { + return fmt.Sprintf("%s_%s", helpers.VectorsBucketLSM, dynamic.targetVector) + } + + return helpers.VectorsBucketLSM +} + +func (dynamic *dynamic) getCompressedBucketName() string { + if dynamic.targetVector != "" { + return fmt.Sprintf("%s_%s", helpers.VectorsCompressedBucketLSM, dynamic.targetVector) + } + return helpers.VectorsCompressedBucketLSM +} + +func (dynamic *dynamic) Compressed() bool { + dynamic.RLock() + defer dynamic.RUnlock() + return dynamic.index.Compressed() +} + +func (dynamic *dynamic) Multivector() bool { + dynamic.RLock() + defer dynamic.RUnlock() + return dynamic.index.Multivector() +} + +func (dynamic *dynamic) AddBatch(ctx context.Context, ids []uint64, vectors [][]float32) error { + dynamic.RLock() + defer dynamic.RUnlock() + return dynamic.index.AddBatch(ctx, ids, vectors) +} + +func (dynamic *dynamic) Add(ctx context.Context, id uint64, vector []float32) error { + dynamic.RLock() + defer dynamic.RUnlock() + return dynamic.index.Add(ctx, id, vector) +} + +func (dynamic *dynamic) Delete(ids ...uint64) error { + dynamic.RLock() + defer dynamic.RUnlock() + return dynamic.index.Delete(ids...) 
+} + +func (dynamic *dynamic) SearchByVector(ctx context.Context, vector []float32, k int, allow helpers.AllowList) ([]uint64, []float32, error) { + dynamic.RLock() + defer dynamic.RUnlock() + return dynamic.index.SearchByVector(ctx, vector, k, allow) +} + +func (dynamic *dynamic) SearchByVectorDistance(ctx context.Context, vector []float32, targetDistance float32, maxLimit int64, allow helpers.AllowList) ([]uint64, []float32, error) { + dynamic.RLock() + defer dynamic.RUnlock() + return dynamic.index.SearchByVectorDistance(ctx, vector, targetDistance, maxLimit, allow) +} + +func (dynamic *dynamic) UpdateUserConfig(updated schemaconfig.VectorIndexConfig, callback func()) error { + parsed, ok := updated.(ent.UserConfig) + if !ok { + callback() + return errors.Errorf("config is not UserConfig, but %T", updated) + } + if dynamic.upgraded.Load() { + dynamic.RLock() + defer dynamic.RUnlock() + dynamic.index.UpdateUserConfig(parsed.HnswUC, callback) + } else { + dynamic.hnswUC = parsed.HnswUC + dynamic.RLock() + defer dynamic.RUnlock() + dynamic.index.UpdateUserConfig(parsed.FlatUC, callback) + } + return nil +} + +func (dynamic *dynamic) Drop(ctx context.Context) error { + if dynamic.ctx.Err() != nil { + // already dropped + return nil + } + + // cancel the context before locking to stop any ongoing operations + // and prevent new ones from starting + dynamic.cancel() + + dynamic.Lock() + defer dynamic.Unlock() + if err := dynamic.db.Close(); err != nil { + return err + } + os.Remove(filepath.Join(dynamic.rootPath, "index.db")) + return dynamic.index.Drop(ctx) +} + +func (dynamic *dynamic) Flush() error { + dynamic.RLock() + defer dynamic.RUnlock() + return dynamic.index.Flush() +} + +func (dynamic *dynamic) Shutdown(ctx context.Context) error { + if dynamic.ctx.Err() != nil { + // already closed + return nil + } + + // cancel the context before locking to stop any ongoing operations + // and prevent new ones from starting + dynamic.cancel() + + dynamic.Lock() + defer 
dynamic.Unlock() + + if err := dynamic.db.Close(); err != nil { + return err + } + return dynamic.index.Shutdown(ctx) +} + +func (dynamic *dynamic) SwitchCommitLogs(ctx context.Context) error { + dynamic.RLock() + defer dynamic.RUnlock() + return dynamic.index.SwitchCommitLogs(ctx) +} + +func (dynamic *dynamic) ListFiles(ctx context.Context, basePath string) ([]string, error) { + dynamic.RLock() + defer dynamic.RUnlock() + return dynamic.index.ListFiles(ctx, basePath) +} + +func (dynamic *dynamic) ValidateBeforeInsert(vector []float32) error { + dynamic.RLock() + defer dynamic.RUnlock() + return dynamic.index.ValidateBeforeInsert(vector) +} + +func (dynamic *dynamic) PostStartup() { + dynamic.Lock() + defer dynamic.Unlock() + dynamic.index.PostStartup() +} + +func (dynamic *dynamic) ContainsDoc(docID uint64) bool { + dynamic.RLock() + defer dynamic.RUnlock() + return dynamic.index.ContainsDoc(docID) +} + +func (dynamic *dynamic) AlreadyIndexed() uint64 { + dynamic.RLock() + defer dynamic.RUnlock() + return (dynamic.index).(upgradableIndexer).AlreadyIndexed() +} + +func (dynamic *dynamic) QueryVectorDistancer(queryVector []float32) common.QueryVectorDistancer { + dynamic.RLock() + defer dynamic.RUnlock() + return dynamic.index.QueryVectorDistancer(queryVector) +} + +func (dynamic *dynamic) ShouldUpgrade() (bool, int) { + if !dynamic.upgraded.Load() { + return true, int(dynamic.threshold) + } + dynamic.RLock() + defer dynamic.RUnlock() + return (dynamic.index).(upgradableIndexer).ShouldUpgrade() +} + +func (dynamic *dynamic) Upgraded() bool { + dynamic.RLock() + defer dynamic.RUnlock() + return dynamic.upgraded.Load() && dynamic.index.(upgradableIndexer).Upgraded() +} + +func float32SliceFromByteSlice(vector []byte, slice []float32) []float32 { + for i := range slice { + slice[i] = math.Float32frombits(binary.LittleEndian.Uint32(vector[i*4:])) + } + return slice +} + +func (dynamic *dynamic) Upgrade(callback func()) error { + if dynamic.ctx.Err() != nil { + // 
already closed + return dynamic.ctx.Err() + } + + if dynamic.upgraded.Load() { + return dynamic.index.(upgradableIndexer).Upgrade(callback) + } + + dynamic.upgradeOnce.Do(func() { + enterrors.GoWrapper(func() { + defer callback() + + err := dynamic.doUpgrade() + if err != nil { + dynamic.logger.WithError(err).Error("failed to upgrade index") + return + } + }, dynamic.logger) + }) + + return nil +} + +func (dynamic *dynamic) doUpgrade() error { + // Start with a read lock to prevent reading from the index + // while it's being dropped or closed. + // This allows search operations to continue while the index is being + // upgraded. + dynamic.RLock() + + index, err := hnsw.New( + hnsw.Config{ + Logger: dynamic.logger, + RootPath: dynamic.rootPath, + ID: dynamic.id, + ShardName: dynamic.shardName, + ClassName: dynamic.className, + PrometheusMetrics: dynamic.prometheusMetrics, + VectorForIDThunk: dynamic.vectorForIDThunk, + TempVectorForIDThunk: dynamic.tempVectorForIDThunk, + DistanceProvider: dynamic.distanceProvider, + MakeCommitLoggerThunk: dynamic.makeCommitLoggerThunk, + DisableSnapshots: dynamic.hnswDisableSnapshots, + SnapshotOnStartup: dynamic.hnswSnapshotOnStartup, + WaitForCachePrefill: dynamic.hnswWaitForCachePrefill, + WriteSegmentInfoIntoFileName: dynamic.WriteSegmentInfoIntoFileName, + WriteMetadataFilesEnabled: dynamic.WriteMetadataFilesEnabled, + }, + dynamic.hnswUC, + dynamic.tombstoneCallbacks, + dynamic.store, + ) + if err != nil { + dynamic.RUnlock() + return err + } + + bucket := dynamic.store.Bucket(dynamic.getBucketName()) + + cursor := bucket.Cursor() + + for k, v := cursor.First(); k != nil; k, v = cursor.Next() { + if dynamic.ctx.Err() != nil { + cursor.Close() + // context was cancelled, stop processing + dynamic.RUnlock() + return dynamic.ctx.Err() + } + + id := binary.BigEndian.Uint64(k) + vc := make([]float32, len(v)/4) + float32SliceFromByteSlice(v, vc) + + err := index.Add(dynamic.ctx, id, vc) + if err != nil { + 
dynamic.logger.WithField("id", id).WithError(err).Error("failed to add vector") + continue + } + } + + cursor.Close() + + // end of read-only zone + dynamic.RUnlock() + + // Lock the index for writing but check if it was already + // closed in the meantime + dynamic.Lock() + defer dynamic.Unlock() + + if err := dynamic.ctx.Err(); err != nil { + // already closed + return errors.Wrap(err, "index was closed while upgrading") + } + + err = dynamic.db.Update(func(tx *bbolt.Tx) error { + b := tx.Bucket(dynamicBucket) + return b.Put(dynamic.dbKey(), []byte{1}) + }) + if err != nil { + return errors.Wrap(err, "update dynamic") + } + + dynamic.index.Drop(dynamic.ctx) + dynamic.index = index + dynamic.upgraded.Store(true) + + var errs []error + bDir := dynamic.store.Bucket(dynamic.getBucketName()).GetDir() + err = dynamic.store.ShutdownBucket(dynamic.ctx, dynamic.getBucketName()) + if err != nil { + errs = append(errs, err) + } + err = os.RemoveAll(bDir) + if err != nil { + errs = append(errs, err) + } + if dynamic.flatBQ && !dynamic.hnswUC.BQ.Enabled { + bDir = dynamic.store.Bucket(dynamic.getCompressedBucketName()).GetDir() + err = dynamic.store.ShutdownBucket(dynamic.ctx, dynamic.getCompressedBucketName()) + if err != nil { + errs = append(errs, err) + } + err = os.RemoveAll(bDir) + if err != nil { + errs = append(errs, err) + } + } + if len(errs) > 0 { + dynamic.logger.Warn(simpleErrors.Join(errs...)) + } + + return nil +} + +func (dynamic *dynamic) Iterate(fn func(id uint64) bool) { + dynamic.index.Iterate(fn) +} + +type hnswStats interface { + Stats() (*hnsw.HnswStats, error) +} + +func (dynamic *dynamic) Stats() (*hnsw.HnswStats, error) { + dynamic.RLock() + defer dynamic.RUnlock() + + h, ok := dynamic.index.(hnswStats) + if !ok { + return nil, errors.New("index is not hnsw") + } + return h.Stats() +} + +func (dynamic *dynamic) CompressionStats() compressionhelpers.CompressionStats { + dynamic.RLock() + defer dynamic.RUnlock() + + // Delegate to the underlying index 
(flat or hnsw) + if vectorIndex, ok := dynamic.index.(compressionhelpers.CompressionStats); ok { + return vectorIndex + } + + // Fallback: return uncompressed stats if the underlying index doesn't support CompressionStats + return compressionhelpers.UncompressedStats{} +} + +// UnderlyingIndex returns the underlying index type (flat or hnsw) +// for dynamic indexes. +func (dynamic *dynamic) UnderlyingIndex() common.IndexType { + dynamic.RLock() + defer dynamic.RUnlock() + return dynamic.index.Type() +} + +// to make sure the dynamic index satisfies the Index interface +var _ = Index(&dynamic{}) diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/dynamic/index_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/dynamic/index_test.go new file mode 100644 index 0000000000000000000000000000000000000000..47dcd7714c9e9fb57ae4c4502780d2ed08859208 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/dynamic/index_test.go @@ -0,0 +1,512 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package dynamic + +import ( + "context" + "fmt" + "os" + "path/filepath" + "strconv" + "sync" + "testing" + "time" + + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.etcd.io/bbolt" + + "github.com/weaviate/weaviate/adapters/repos/db/vector/common" + "github.com/weaviate/weaviate/adapters/repos/db/vector/compressionhelpers" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer" + "github.com/weaviate/weaviate/adapters/repos/db/vector/testinghelpers" + "github.com/weaviate/weaviate/entities/cyclemanager" + "github.com/weaviate/weaviate/entities/storobj" + ent "github.com/weaviate/weaviate/entities/vectorindex/dynamic" + flatent "github.com/weaviate/weaviate/entities/vectorindex/flat" + hnswent "github.com/weaviate/weaviate/entities/vectorindex/hnsw" +) + +var logger, _ = test.NewNullLogger() + +func TestDynamic(t *testing.T) { + ctx := context.Background() + currentIndexing := os.Getenv("ASYNC_INDEXING") + os.Setenv("ASYNC_INDEXING", "true") + defer os.Setenv("ASYNC_INDEXING", currentIndexing) + dimensions := 20 + vectors_size := 1_000 + queries_size := 10 + k := 10 + + db, err := bbolt.Open(filepath.Join(t.TempDir(), "index.db"), 0o666, nil) + require.NoError(t, err) + t.Cleanup(func() { + db.Close() + }) + + vectors, queries := testinghelpers.RandomVecs(vectors_size, queries_size, dimensions) + rootPath := t.TempDir() + distancer := distancer.NewL2SquaredProvider() + truths := make([][]uint64, queries_size) + compressionhelpers.Concurrently(logger, uint64(len(queries)), func(i uint64) { + truths[i], _ = testinghelpers.BruteForce(logger, vectors, queries[i], k, testinghelpers.DistanceWrapper(distancer)) + }) + noopCallback := cyclemanager.NewCallbackGroupNoop() + fuc := flatent.UserConfig{} + fuc.SetDefaults() + hnswuc := hnswent.UserConfig{ + MaxConnections: 30, + 
EFConstruction: 64, + EF: 32, + VectorCacheMaxObjects: 1_000_000, + } + dynamic, err := New(Config{ + RootPath: rootPath, + ID: "nil-vector-test", + MakeCommitLoggerThunk: hnsw.MakeNoopCommitLogger, + DistanceProvider: distancer, + VectorForIDThunk: func(ctx context.Context, id uint64) ([]float32, error) { + vec := vectors[int(id)] + if vec == nil { + return nil, storobj.NewErrNotFoundf(id, "nil vec") + } + return vec, nil + }, + TempVectorForIDThunk: TempVectorForIDThunk(vectors), + TombstoneCallbacks: noopCallback, + SharedDB: db, + }, ent.UserConfig{ + Threshold: uint64(vectors_size), + Distance: distancer.Type(), + HnswUC: hnswuc, + FlatUC: fuc, + }, testinghelpers.NewDummyStore(t)) + assert.Nil(t, err) + + compressionhelpers.Concurrently(logger, uint64(vectors_size), func(i uint64) { + err := dynamic.Add(ctx, i, vectors[i]) + require.NoError(t, err) + }) + shouldUpgrade, at := dynamic.ShouldUpgrade() + assert.True(t, shouldUpgrade) + assert.Equal(t, vectors_size, at) + assert.False(t, dynamic.Upgraded()) + recall1, latency1 := testinghelpers.RecallAndLatency(ctx, queries, k, dynamic, truths) + fmt.Println(recall1, latency1) + assert.True(t, recall1 > 0.99) + wg := sync.WaitGroup{} + wg.Add(1) + err = dynamic.Upgrade(func() { + wg.Done() + }) + require.NoError(t, err) + wg.Wait() + shouldUpgrade, _ = dynamic.ShouldUpgrade() + assert.False(t, shouldUpgrade) + recall2, latency2 := testinghelpers.RecallAndLatency(ctx, queries, k, dynamic, truths) + fmt.Println(recall2, latency2) + assert.True(t, recall2 > 0.9) + assert.True(t, latency1 > latency2) +} + +func TestDynamicReturnsErrorIfNoAsync(t *testing.T) { + currentIndexing := os.Getenv("ASYNC_INDEXING") + os.Unsetenv("ASYNC_INDEXING") + defer os.Setenv("ASYNC_INDEXING", currentIndexing) + rootPath := t.TempDir() + noopCallback := cyclemanager.NewCallbackGroupNoop() + fuc := flatent.UserConfig{} + fuc.SetDefaults() + hnswuc := hnswent.NewDefaultUserConfig() + db, err := bbolt.Open(filepath.Join(t.TempDir(), 
"index.db"), 0o666, nil) + require.NoError(t, err) + t.Cleanup(func() { + db.Close() + }) + + distancer := distancer.NewL2SquaredProvider() + _, err = New(Config{ + RootPath: rootPath, + ID: "nil-vector-test", + MakeCommitLoggerThunk: hnsw.MakeNoopCommitLogger, + DistanceProvider: distancer, + VectorForIDThunk: func(ctx context.Context, id uint64) ([]float32, error) { + return nil, nil + }, + TempVectorForIDThunk: TempVectorForIDThunk(nil), + TombstoneCallbacks: noopCallback, + SharedDB: db, + }, ent.UserConfig{ + Threshold: uint64(100), + Distance: distancer.Type(), + HnswUC: hnswuc, + FlatUC: fuc, + }, testinghelpers.NewDummyStore(t)) + assert.NotNil(t, err) +} + +func TempVectorForIDThunk(vectors [][]float32) func(context.Context, uint64, *common.VectorSlice) ([]float32, error) { + return func(ctx context.Context, id uint64, container *common.VectorSlice) ([]float32, error) { + copy(container.Slice, vectors[int(id)]) + return vectors[int(id)], nil + } +} + +func TestDynamicWithTargetVectors(t *testing.T) { + ctx := context.Background() + currentIndexing := os.Getenv("ASYNC_INDEXING") + os.Setenv("ASYNC_INDEXING", "true") + defer os.Setenv("ASYNC_INDEXING", currentIndexing) + dimensions := 20 + vectors_size := 1_000 + queries_size := 10 + k := 10 + + db, err := bbolt.Open(filepath.Join(t.TempDir(), "index.db"), 0o666, nil) + require.NoError(t, err) + t.Cleanup(func() { + db.Close() + }) + + vectors, queries := testinghelpers.RandomVecs(vectors_size, queries_size, dimensions) + rootPath := t.TempDir() + distancer := distancer.NewL2SquaredProvider() + truths := make([][]uint64, queries_size) + compressionhelpers.Concurrently(logger, uint64(len(queries)), func(i uint64) { + truths[i], _ = testinghelpers.BruteForce(logger, vectors, queries[i], k, testinghelpers.DistanceWrapper(distancer)) + }) + noopCallback := cyclemanager.NewCallbackGroupNoop() + fuc := flatent.UserConfig{} + fuc.SetDefaults() + hnswuc := hnswent.UserConfig{ + MaxConnections: 30, + EFConstruction: 
64, + EF: 32, + VectorCacheMaxObjects: 1_000_000, + } + + var indexes []*dynamic + + for i := 0; i < 5; i++ { + dynamic, err := New(Config{ + TargetVector: "target_" + strconv.Itoa(i), + RootPath: rootPath, + ID: "nil-vector-test_" + strconv.Itoa(i), + MakeCommitLoggerThunk: hnsw.MakeNoopCommitLogger, + DistanceProvider: distancer, + VectorForIDThunk: func(ctx context.Context, id uint64) ([]float32, error) { + vec := vectors[int(id)] + if vec == nil { + return nil, storobj.NewErrNotFoundf(id, "nil vec") + } + return vec, nil + }, + TempVectorForIDThunk: TempVectorForIDThunk(vectors), + TombstoneCallbacks: noopCallback, + SharedDB: db, + }, ent.UserConfig{ + Threshold: uint64(vectors_size), + Distance: distancer.Type(), + HnswUC: hnswuc, + FlatUC: fuc, + }, testinghelpers.NewDummyStore(t)) + require.NoError(t, err) + + indexes = append(indexes, dynamic) + } + + for _, v := range indexes { + v := v + compressionhelpers.Concurrently(logger, uint64(vectors_size), func(i uint64) { + v.Add(ctx, i, vectors[i]) + }) + shouldUpgrade, at := v.ShouldUpgrade() + assert.True(t, shouldUpgrade) + assert.Equal(t, vectors_size, at) + assert.False(t, v.Upgraded()) + recall1, latency1 := testinghelpers.RecallAndLatency(ctx, queries, k, v, truths) + fmt.Println(recall1, latency1) + assert.True(t, recall1 > 0.99) + wg := sync.WaitGroup{} + wg.Add(1) + v.Upgrade(func() { + wg.Done() + }) + wg.Wait() + shouldUpgrade, _ = v.ShouldUpgrade() + assert.False(t, shouldUpgrade) + recall2, latency2 := testinghelpers.RecallAndLatency(ctx, queries, k, v, truths) + fmt.Println(recall2, latency2) + assert.True(t, recall2 > 0.9) + assert.True(t, latency1 > latency2) + } +} + +func TestDynamicUpgradeCancelation(t *testing.T) { + ctx := context.Background() + t.Setenv("ASYNC_INDEXING", "true") + dimensions := 20 + vectors_size := 1_000 + queries_size := 10 + k := 10 + + db, err := bbolt.Open(filepath.Join(t.TempDir(), "index.db"), 0o666, nil) + require.NoError(t, err) + t.Cleanup(func() { + db.Close() 
+ }) + + vectors, queries := testinghelpers.RandomVecs(vectors_size, queries_size, dimensions) + rootPath := t.TempDir() + distancer := distancer.NewL2SquaredProvider() + truths := make([][]uint64, queries_size) + compressionhelpers.Concurrently(logger, uint64(len(queries)), func(i uint64) { + truths[i], _ = testinghelpers.BruteForce(logger, vectors, queries[i], k, testinghelpers.DistanceWrapper(distancer)) + }) + noopCallback := cyclemanager.NewCallbackGroupNoop() + fuc := flatent.UserConfig{} + fuc.SetDefaults() + hnswuc := hnswent.UserConfig{ + MaxConnections: 30, + EFConstruction: 64, + EF: 32, + VectorCacheMaxObjects: 1_000_000, + } + + dynamic, err := New(Config{ + RootPath: rootPath, + ID: "foo", + MakeCommitLoggerThunk: hnsw.MakeNoopCommitLogger, + DistanceProvider: distancer, + VectorForIDThunk: func(ctx context.Context, id uint64) ([]float32, error) { + vec := vectors[int(id)] + if vec == nil { + return nil, storobj.NewErrNotFoundf(id, "nil vec") + } + return vec, nil + }, + TempVectorForIDThunk: TempVectorForIDThunk(vectors), + TombstoneCallbacks: noopCallback, + SharedDB: db, + }, ent.UserConfig{ + Threshold: uint64(vectors_size), + Distance: distancer.Type(), + HnswUC: hnswuc, + FlatUC: fuc, + }, testinghelpers.NewDummyStore(t)) + require.NoError(t, err) + + compressionhelpers.Concurrently(logger, uint64(vectors_size), func(i uint64) { + dynamic.Add(ctx, i, vectors[i]) + }) + + shouldUpgrade, at := dynamic.ShouldUpgrade() + require.True(t, shouldUpgrade) + require.Equal(t, vectors_size, at) + require.False(t, dynamic.Upgraded()) + + called := make(chan struct{}) + dynamic.Upgrade(func() { + close(called) + }) + + // close the index to cancel the upgrade + err = dynamic.Shutdown(context.Background()) + require.NoError(t, err) + + require.False(t, dynamic.upgraded.Load()) + + select { + case <-called: + case <-time.After(5 * time.Second): + t.Fatal("upgrade callback was not called") + } +} + +func TestDynamicWithDifferentCompressionSchema(t *testing.T) { 
+ ctx := context.Background() + t.Setenv("ASYNC_INDEXING", "true") + dimensions := 20 + vectors_size := 1_000 + threshold := 600 + queries_size := 10 + k := 10 + + tempDir := t.TempDir() + + db, err := bbolt.Open(filepath.Join(tempDir, "index.db"), 0o666, nil) + require.NoError(t, err) + t.Cleanup(func() { + db.Close() + }) + + vectors, queries := testinghelpers.RandomVecs(vectors_size, queries_size, dimensions) + rootPath := tempDir + distancer := distancer.NewL2SquaredProvider() + truths := make([][]uint64, queries_size) + compressionhelpers.Concurrently(logger, uint64(len(queries)), func(i uint64) { + truths[i], _ = testinghelpers.BruteForce(logger, vectors, queries[i], k, testinghelpers.DistanceWrapper(distancer)) + }) + noopCallback := cyclemanager.NewCallbackGroupNoop() + fuc := flatent.UserConfig{} + fuc.SetDefaults() + fuc.BQ = flatent.CompressionUserConfig{ + Enabled: true, + Cache: true, + } + hnswuc := hnswent.UserConfig{ + MaxConnections: 30, + EFConstruction: 64, + EF: 32, + VectorCacheMaxObjects: 1_000_000, + PQ: hnswent.PQConfig{ + Enabled: true, + BitCompression: false, + Segments: 5, + Centroids: 255, + TrainingLimit: threshold - 1, + Encoder: hnswent.PQEncoder{ + Type: hnswent.PQEncoderTypeKMeans, + Distribution: hnswent.PQEncoderDistributionLogNormal, + }, + }, + } + + config := Config{ + TargetVector: "", + RootPath: rootPath, + ID: "vector-test_0", + MakeCommitLoggerThunk: func() (hnsw.CommitLogger, error) { + return hnsw.NewCommitLogger(tempDir, "vector-test_0", logger, noopCallback) + }, + DistanceProvider: distancer, + VectorForIDThunk: func(ctx context.Context, id uint64) ([]float32, error) { + vec := vectors[int(id)] + if vec == nil { + return nil, storobj.NewErrNotFoundf(id, "nil vec") + } + return vec, nil + }, + TempVectorForIDThunk: TempVectorForIDThunk(vectors), + TombstoneCallbacks: noopCallback, + SharedDB: db, + HNSWWaitForCachePrefill: true, + } + uc := ent.UserConfig{ + Threshold: uint64(threshold), + Distance: distancer.Type(), 
+ HnswUC: hnswuc, + FlatUC: fuc, + } + + dummyStore := testinghelpers.NewDummyStore(t) + dynamic, err := New(config, uc, dummyStore) + require.NoError(t, err) + + compressionhelpers.Concurrently(logger, uint64(threshold), func(i uint64) { + err := dynamic.Add(ctx, i, vectors[i]) + require.NoError(t, err) + }) + shouldUpgrade, at := dynamic.ShouldUpgrade() + assert.True(t, shouldUpgrade) + assert.Equal(t, threshold, at) + assert.False(t, dynamic.Upgraded()) + var wg sync.WaitGroup + wg.Add(1) + + // flat -> hnsw + err = dynamic.Upgrade(func() { + wg.Done() + }) + require.NoError(t, err) + wg.Wait() + wg.Add(1) + + // PQ + err = dynamic.Upgrade(func() { + wg.Done() + }) + require.NoError(t, err) + wg.Wait() + compressionhelpers.Concurrently(logger, uint64(vectors_size-threshold), func(i uint64) { + err := dynamic.Add(ctx, uint64(threshold)+i, vectors[threshold+int(i)]) + require.NoError(t, err) + }) + + recall, latency := testinghelpers.RecallAndLatency(ctx, queries, k, dynamic, truths) + fmt.Println(recall, latency) + + err = dynamic.Flush() + require.NoError(t, err) + err = dynamic.Shutdown(t.Context()) + require.NoError(t, err) + dummyStore.FlushMemtables(t.Context()) + + // open the db again + db, err = bbolt.Open(filepath.Join(tempDir, "index.db"), 0o666, nil) + require.NoError(t, err) + config.SharedDB = db + + dynamic, err = New(config, uc, dummyStore) + require.NoError(t, err) + dynamic.PostStartup() + recall2, _ := testinghelpers.RecallAndLatency(ctx, queries, k, dynamic, truths) + assert.Equal(t, recall, recall2) +} + +func TestDynamicIndexUnderlyingIndexDetection(t *testing.T) { + tests := []struct { + name string + underlyingType common.IndexType + expectedString string + expectedType common.IndexType + }{ + { + name: "dynamic index with flat underlying", + underlyingType: common.IndexTypeFlat, + expectedString: "flat", + expectedType: common.IndexTypeFlat, + }, + { + name: "dynamic index with hnsw underlying", + underlyingType: common.IndexTypeHNSW, + 
expectedString: "hnsw", + expectedType: common.IndexTypeHNSW, + }, + { + name: "dynamic index with dynamic underlying", + underlyingType: common.IndexTypeDynamic, + expectedString: "dynamic", + expectedType: common.IndexTypeDynamic, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create a mock that implements the UnderlyingIndex method + mockDynamicIndex := NewMockIndex(t) + mockDynamicIndex.EXPECT().UnderlyingIndex().Return(tt.underlyingType) + + // Test the method directly + underlyingType := mockDynamicIndex.UnderlyingIndex() + + // Assert the returned type + assert.Equal(t, tt.expectedType, underlyingType, "Should return correct underlying index type") + + // Assert the string conversion + assert.Equal(t, tt.expectedString, underlyingType.String(), "Should convert to correct string") + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/dynamic/mock_index.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/dynamic/mock_index.go new file mode 100644 index 0000000000000000000000000000000000000000..2e53d827224d380cf06eb108adad75d02937fb34 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/dynamic/mock_index.go @@ -0,0 +1,91 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by mockery v2.53.2. DO NOT EDIT. 
+ +package dynamic + +import ( + mock "github.com/stretchr/testify/mock" + common "github.com/weaviate/weaviate/adapters/repos/db/vector/common" +) + +// MockIndex is an autogenerated mock type for the Index type +type MockIndex struct { + mock.Mock +} + +type MockIndex_Expecter struct { + mock *mock.Mock +} + +func (_m *MockIndex) EXPECT() *MockIndex_Expecter { + return &MockIndex_Expecter{mock: &_m.Mock} +} + +// UnderlyingIndex provides a mock function with no fields +func (_m *MockIndex) UnderlyingIndex() common.IndexType { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for UnderlyingIndex") + } + + var r0 common.IndexType + if rf, ok := ret.Get(0).(func() common.IndexType); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(common.IndexType) + } + + return r0 +} + +// MockIndex_UnderlyingIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UnderlyingIndex' +type MockIndex_UnderlyingIndex_Call struct { + *mock.Call +} + +// UnderlyingIndex is a helper method to define mock.On call +func (_e *MockIndex_Expecter) UnderlyingIndex() *MockIndex_UnderlyingIndex_Call { + return &MockIndex_UnderlyingIndex_Call{Call: _e.mock.On("UnderlyingIndex")} +} + +func (_c *MockIndex_UnderlyingIndex_Call) Run(run func()) *MockIndex_UnderlyingIndex_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockIndex_UnderlyingIndex_Call) Return(_a0 common.IndexType) *MockIndex_UnderlyingIndex_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockIndex_UnderlyingIndex_Call) RunAndReturn(run func() common.IndexType) *MockIndex_UnderlyingIndex_Call { + _c.Call.Return(run) + return _c +} + +// NewMockIndex creates a new instance of MockIndex. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewMockIndex(t interface { + mock.TestingT + Cleanup(func()) +}) *MockIndex { + mock := &MockIndex{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/dynamic/mock_vector_index.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/dynamic/mock_vector_index.go new file mode 100644 index 0000000000000000000000000000000000000000..d86f45406d20a101cd79a2d6dddd9cf63257af97 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/dynamic/mock_vector_index.go @@ -0,0 +1,975 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by mockery v2.53.2. DO NOT EDIT. + +package dynamic + +import ( + common "github.com/weaviate/weaviate/adapters/repos/db/vector/common" + config "github.com/weaviate/weaviate/entities/schema/config" + + context "context" + + helpers "github.com/weaviate/weaviate/adapters/repos/db/helpers" + + mock "github.com/stretchr/testify/mock" +) + +// MockVectorIndex is an autogenerated mock type for the VectorIndex type +type MockVectorIndex struct { + mock.Mock +} + +type MockVectorIndex_Expecter struct { + mock *mock.Mock +} + +func (_m *MockVectorIndex) EXPECT() *MockVectorIndex_Expecter { + return &MockVectorIndex_Expecter{mock: &_m.Mock} +} + +// Add provides a mock function with given fields: ctx, id, vector +func (_m *MockVectorIndex) Add(ctx context.Context, id uint64, vector []float32) error { + ret := _m.Called(ctx, id, vector) + + if len(ret) == 0 { + panic("no return value specified for Add") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, []float32) error); ok { + r0 = rf(ctx, id, vector) + } else { + r0 = 
ret.Error(0) + } + + return r0 +} + +// MockVectorIndex_Add_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Add' +type MockVectorIndex_Add_Call struct { + *mock.Call +} + +// Add is a helper method to define mock.On call +// - ctx context.Context +// - id uint64 +// - vector []float32 +func (_e *MockVectorIndex_Expecter) Add(ctx interface{}, id interface{}, vector interface{}) *MockVectorIndex_Add_Call { + return &MockVectorIndex_Add_Call{Call: _e.mock.On("Add", ctx, id, vector)} +} + +func (_c *MockVectorIndex_Add_Call) Run(run func(ctx context.Context, id uint64, vector []float32)) *MockVectorIndex_Add_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64), args[2].([]float32)) + }) + return _c +} + +func (_c *MockVectorIndex_Add_Call) Return(_a0 error) *MockVectorIndex_Add_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockVectorIndex_Add_Call) RunAndReturn(run func(context.Context, uint64, []float32) error) *MockVectorIndex_Add_Call { + _c.Call.Return(run) + return _c +} + +// AddBatch provides a mock function with given fields: ctx, id, vector +func (_m *MockVectorIndex) AddBatch(ctx context.Context, id []uint64, vector [][]float32) error { + ret := _m.Called(ctx, id, vector) + + if len(ret) == 0 { + panic("no return value specified for AddBatch") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, []uint64, [][]float32) error); ok { + r0 = rf(ctx, id, vector) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockVectorIndex_AddBatch_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AddBatch' +type MockVectorIndex_AddBatch_Call struct { + *mock.Call +} + +// AddBatch is a helper method to define mock.On call +// - ctx context.Context +// - id []uint64 +// - vector [][]float32 +func (_e *MockVectorIndex_Expecter) AddBatch(ctx interface{}, id interface{}, vector interface{}) 
*MockVectorIndex_AddBatch_Call { + return &MockVectorIndex_AddBatch_Call{Call: _e.mock.On("AddBatch", ctx, id, vector)} +} + +func (_c *MockVectorIndex_AddBatch_Call) Run(run func(ctx context.Context, id []uint64, vector [][]float32)) *MockVectorIndex_AddBatch_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].([]uint64), args[2].([][]float32)) + }) + return _c +} + +func (_c *MockVectorIndex_AddBatch_Call) Return(_a0 error) *MockVectorIndex_AddBatch_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockVectorIndex_AddBatch_Call) RunAndReturn(run func(context.Context, []uint64, [][]float32) error) *MockVectorIndex_AddBatch_Call { + _c.Call.Return(run) + return _c +} + +// Compressed provides a mock function with no fields +func (_m *MockVectorIndex) Compressed() bool { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Compressed") + } + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// MockVectorIndex_Compressed_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Compressed' +type MockVectorIndex_Compressed_Call struct { + *mock.Call +} + +// Compressed is a helper method to define mock.On call +func (_e *MockVectorIndex_Expecter) Compressed() *MockVectorIndex_Compressed_Call { + return &MockVectorIndex_Compressed_Call{Call: _e.mock.On("Compressed")} +} + +func (_c *MockVectorIndex_Compressed_Call) Run(run func()) *MockVectorIndex_Compressed_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockVectorIndex_Compressed_Call) Return(_a0 bool) *MockVectorIndex_Compressed_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockVectorIndex_Compressed_Call) RunAndReturn(run func() bool) *MockVectorIndex_Compressed_Call { + _c.Call.Return(run) + return _c +} + +// ContainsDoc provides a mock function with given fields: docID 
+func (_m *MockVectorIndex) ContainsDoc(docID uint64) bool { + ret := _m.Called(docID) + + if len(ret) == 0 { + panic("no return value specified for ContainsDoc") + } + + var r0 bool + if rf, ok := ret.Get(0).(func(uint64) bool); ok { + r0 = rf(docID) + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// MockVectorIndex_ContainsDoc_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ContainsDoc' +type MockVectorIndex_ContainsDoc_Call struct { + *mock.Call +} + +// ContainsDoc is a helper method to define mock.On call +// - docID uint64 +func (_e *MockVectorIndex_Expecter) ContainsDoc(docID interface{}) *MockVectorIndex_ContainsDoc_Call { + return &MockVectorIndex_ContainsDoc_Call{Call: _e.mock.On("ContainsDoc", docID)} +} + +func (_c *MockVectorIndex_ContainsDoc_Call) Run(run func(docID uint64)) *MockVectorIndex_ContainsDoc_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(uint64)) + }) + return _c +} + +func (_c *MockVectorIndex_ContainsDoc_Call) Return(_a0 bool) *MockVectorIndex_ContainsDoc_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockVectorIndex_ContainsDoc_Call) RunAndReturn(run func(uint64) bool) *MockVectorIndex_ContainsDoc_Call { + _c.Call.Return(run) + return _c +} + +// Delete provides a mock function with given fields: id +func (_m *MockVectorIndex) Delete(id ...uint64) error { + _va := make([]interface{}, len(id)) + for _i := range id { + _va[_i] = id[_i] + } + var _ca []interface{} + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for Delete") + } + + var r0 error + if rf, ok := ret.Get(0).(func(...uint64) error); ok { + r0 = rf(id...) 
+ } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockVectorIndex_Delete_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Delete' +type MockVectorIndex_Delete_Call struct { + *mock.Call +} + +// Delete is a helper method to define mock.On call +// - id ...uint64 +func (_e *MockVectorIndex_Expecter) Delete(id ...interface{}) *MockVectorIndex_Delete_Call { + return &MockVectorIndex_Delete_Call{Call: _e.mock.On("Delete", + append([]interface{}{}, id...)...)} +} + +func (_c *MockVectorIndex_Delete_Call) Run(run func(id ...uint64)) *MockVectorIndex_Delete_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]uint64, len(args)-0) + for i, a := range args[0:] { + if a != nil { + variadicArgs[i] = a.(uint64) + } + } + run(variadicArgs...) + }) + return _c +} + +func (_c *MockVectorIndex_Delete_Call) Return(_a0 error) *MockVectorIndex_Delete_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockVectorIndex_Delete_Call) RunAndReturn(run func(...uint64) error) *MockVectorIndex_Delete_Call { + _c.Call.Return(run) + return _c +} + +// Drop provides a mock function with given fields: ctx +func (_m *MockVectorIndex) Drop(ctx context.Context) error { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for Drop") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockVectorIndex_Drop_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Drop' +type MockVectorIndex_Drop_Call struct { + *mock.Call +} + +// Drop is a helper method to define mock.On call +// - ctx context.Context +func (_e *MockVectorIndex_Expecter) Drop(ctx interface{}) *MockVectorIndex_Drop_Call { + return &MockVectorIndex_Drop_Call{Call: _e.mock.On("Drop", ctx)} +} + +func (_c *MockVectorIndex_Drop_Call) Run(run func(ctx context.Context)) *MockVectorIndex_Drop_Call { + 
_c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *MockVectorIndex_Drop_Call) Return(_a0 error) *MockVectorIndex_Drop_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockVectorIndex_Drop_Call) RunAndReturn(run func(context.Context) error) *MockVectorIndex_Drop_Call { + _c.Call.Return(run) + return _c +} + +// Flush provides a mock function with no fields +func (_m *MockVectorIndex) Flush() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Flush") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockVectorIndex_Flush_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Flush' +type MockVectorIndex_Flush_Call struct { + *mock.Call +} + +// Flush is a helper method to define mock.On call +func (_e *MockVectorIndex_Expecter) Flush() *MockVectorIndex_Flush_Call { + return &MockVectorIndex_Flush_Call{Call: _e.mock.On("Flush")} +} + +func (_c *MockVectorIndex_Flush_Call) Run(run func()) *MockVectorIndex_Flush_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockVectorIndex_Flush_Call) Return(_a0 error) *MockVectorIndex_Flush_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockVectorIndex_Flush_Call) RunAndReturn(run func() error) *MockVectorIndex_Flush_Call { + _c.Call.Return(run) + return _c +} + +// Iterate provides a mock function with given fields: fn +func (_m *MockVectorIndex) Iterate(fn func(uint64) bool) { + _m.Called(fn) +} + +// MockVectorIndex_Iterate_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Iterate' +type MockVectorIndex_Iterate_Call struct { + *mock.Call +} + +// Iterate is a helper method to define mock.On call +// - fn func(uint64) bool +func (_e *MockVectorIndex_Expecter) Iterate(fn interface{}) *MockVectorIndex_Iterate_Call { + 
return &MockVectorIndex_Iterate_Call{Call: _e.mock.On("Iterate", fn)} +} + +func (_c *MockVectorIndex_Iterate_Call) Run(run func(fn func(uint64) bool)) *MockVectorIndex_Iterate_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(func(uint64) bool)) + }) + return _c +} + +func (_c *MockVectorIndex_Iterate_Call) Return() *MockVectorIndex_Iterate_Call { + _c.Call.Return() + return _c +} + +func (_c *MockVectorIndex_Iterate_Call) RunAndReturn(run func(func(uint64) bool)) *MockVectorIndex_Iterate_Call { + _c.Run(run) + return _c +} + +// ListFiles provides a mock function with given fields: ctx, basePath +func (_m *MockVectorIndex) ListFiles(ctx context.Context, basePath string) ([]string, error) { + ret := _m.Called(ctx, basePath) + + if len(ret) == 0 { + panic("no return value specified for ListFiles") + } + + var r0 []string + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) ([]string, error)); ok { + return rf(ctx, basePath) + } + if rf, ok := ret.Get(0).(func(context.Context, string) []string); ok { + r0 = rf(ctx, basePath) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]string) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, basePath) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockVectorIndex_ListFiles_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListFiles' +type MockVectorIndex_ListFiles_Call struct { + *mock.Call +} + +// ListFiles is a helper method to define mock.On call +// - ctx context.Context +// - basePath string +func (_e *MockVectorIndex_Expecter) ListFiles(ctx interface{}, basePath interface{}) *MockVectorIndex_ListFiles_Call { + return &MockVectorIndex_ListFiles_Call{Call: _e.mock.On("ListFiles", ctx, basePath)} +} + +func (_c *MockVectorIndex_ListFiles_Call) Run(run func(ctx context.Context, basePath string)) *MockVectorIndex_ListFiles_Call { + _c.Call.Run(func(args mock.Arguments) { + 
run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *MockVectorIndex_ListFiles_Call) Return(_a0 []string, _a1 error) *MockVectorIndex_ListFiles_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockVectorIndex_ListFiles_Call) RunAndReturn(run func(context.Context, string) ([]string, error)) *MockVectorIndex_ListFiles_Call { + _c.Call.Return(run) + return _c +} + +// Multivector provides a mock function with no fields +func (_m *MockVectorIndex) Multivector() bool { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Multivector") + } + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// MockVectorIndex_Multivector_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Multivector' +type MockVectorIndex_Multivector_Call struct { + *mock.Call +} + +// Multivector is a helper method to define mock.On call +func (_e *MockVectorIndex_Expecter) Multivector() *MockVectorIndex_Multivector_Call { + return &MockVectorIndex_Multivector_Call{Call: _e.mock.On("Multivector")} +} + +func (_c *MockVectorIndex_Multivector_Call) Run(run func()) *MockVectorIndex_Multivector_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockVectorIndex_Multivector_Call) Return(_a0 bool) *MockVectorIndex_Multivector_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockVectorIndex_Multivector_Call) RunAndReturn(run func() bool) *MockVectorIndex_Multivector_Call { + _c.Call.Return(run) + return _c +} + +// PostStartup provides a mock function with no fields +func (_m *MockVectorIndex) PostStartup() { + _m.Called() +} + +// MockVectorIndex_PostStartup_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PostStartup' +type MockVectorIndex_PostStartup_Call struct { + *mock.Call +} + +// PostStartup is a helper method to define 
mock.On call +func (_e *MockVectorIndex_Expecter) PostStartup() *MockVectorIndex_PostStartup_Call { + return &MockVectorIndex_PostStartup_Call{Call: _e.mock.On("PostStartup")} +} + +func (_c *MockVectorIndex_PostStartup_Call) Run(run func()) *MockVectorIndex_PostStartup_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockVectorIndex_PostStartup_Call) Return() *MockVectorIndex_PostStartup_Call { + _c.Call.Return() + return _c +} + +func (_c *MockVectorIndex_PostStartup_Call) RunAndReturn(run func()) *MockVectorIndex_PostStartup_Call { + _c.Run(run) + return _c +} + +// QueryVectorDistancer provides a mock function with given fields: queryVector +func (_m *MockVectorIndex) QueryVectorDistancer(queryVector []float32) common.QueryVectorDistancer { + ret := _m.Called(queryVector) + + if len(ret) == 0 { + panic("no return value specified for QueryVectorDistancer") + } + + var r0 common.QueryVectorDistancer + if rf, ok := ret.Get(0).(func([]float32) common.QueryVectorDistancer); ok { + r0 = rf(queryVector) + } else { + r0 = ret.Get(0).(common.QueryVectorDistancer) + } + + return r0 +} + +// MockVectorIndex_QueryVectorDistancer_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'QueryVectorDistancer' +type MockVectorIndex_QueryVectorDistancer_Call struct { + *mock.Call +} + +// QueryVectorDistancer is a helper method to define mock.On call +// - queryVector []float32 +func (_e *MockVectorIndex_Expecter) QueryVectorDistancer(queryVector interface{}) *MockVectorIndex_QueryVectorDistancer_Call { + return &MockVectorIndex_QueryVectorDistancer_Call{Call: _e.mock.On("QueryVectorDistancer", queryVector)} +} + +func (_c *MockVectorIndex_QueryVectorDistancer_Call) Run(run func(queryVector []float32)) *MockVectorIndex_QueryVectorDistancer_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].([]float32)) + }) + return _c +} + +func (_c *MockVectorIndex_QueryVectorDistancer_Call) Return(_a0 
common.QueryVectorDistancer) *MockVectorIndex_QueryVectorDistancer_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockVectorIndex_QueryVectorDistancer_Call) RunAndReturn(run func([]float32) common.QueryVectorDistancer) *MockVectorIndex_QueryVectorDistancer_Call { + _c.Call.Return(run) + return _c +} + +// SearchByVector provides a mock function with given fields: ctx, vector, k, allow +func (_m *MockVectorIndex) SearchByVector(ctx context.Context, vector []float32, k int, allow helpers.AllowList) ([]uint64, []float32, error) { + ret := _m.Called(ctx, vector, k, allow) + + if len(ret) == 0 { + panic("no return value specified for SearchByVector") + } + + var r0 []uint64 + var r1 []float32 + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, []float32, int, helpers.AllowList) ([]uint64, []float32, error)); ok { + return rf(ctx, vector, k, allow) + } + if rf, ok := ret.Get(0).(func(context.Context, []float32, int, helpers.AllowList) []uint64); ok { + r0 = rf(ctx, vector, k, allow) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]uint64) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, []float32, int, helpers.AllowList) []float32); ok { + r1 = rf(ctx, vector, k, allow) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).([]float32) + } + } + + if rf, ok := ret.Get(2).(func(context.Context, []float32, int, helpers.AllowList) error); ok { + r2 = rf(ctx, vector, k, allow) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// MockVectorIndex_SearchByVector_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SearchByVector' +type MockVectorIndex_SearchByVector_Call struct { + *mock.Call +} + +// SearchByVector is a helper method to define mock.On call +// - ctx context.Context +// - vector []float32 +// - k int +// - allow helpers.AllowList +func (_e *MockVectorIndex_Expecter) SearchByVector(ctx interface{}, vector interface{}, k interface{}, allow interface{}) 
*MockVectorIndex_SearchByVector_Call { + return &MockVectorIndex_SearchByVector_Call{Call: _e.mock.On("SearchByVector", ctx, vector, k, allow)} +} + +func (_c *MockVectorIndex_SearchByVector_Call) Run(run func(ctx context.Context, vector []float32, k int, allow helpers.AllowList)) *MockVectorIndex_SearchByVector_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].([]float32), args[2].(int), args[3].(helpers.AllowList)) + }) + return _c +} + +func (_c *MockVectorIndex_SearchByVector_Call) Return(_a0 []uint64, _a1 []float32, _a2 error) *MockVectorIndex_SearchByVector_Call { + _c.Call.Return(_a0, _a1, _a2) + return _c +} + +func (_c *MockVectorIndex_SearchByVector_Call) RunAndReturn(run func(context.Context, []float32, int, helpers.AllowList) ([]uint64, []float32, error)) *MockVectorIndex_SearchByVector_Call { + _c.Call.Return(run) + return _c +} + +// SearchByVectorDistance provides a mock function with given fields: ctx, vector, dist, maxLimit, allow +func (_m *MockVectorIndex) SearchByVectorDistance(ctx context.Context, vector []float32, dist float32, maxLimit int64, allow helpers.AllowList) ([]uint64, []float32, error) { + ret := _m.Called(ctx, vector, dist, maxLimit, allow) + + if len(ret) == 0 { + panic("no return value specified for SearchByVectorDistance") + } + + var r0 []uint64 + var r1 []float32 + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, []float32, float32, int64, helpers.AllowList) ([]uint64, []float32, error)); ok { + return rf(ctx, vector, dist, maxLimit, allow) + } + if rf, ok := ret.Get(0).(func(context.Context, []float32, float32, int64, helpers.AllowList) []uint64); ok { + r0 = rf(ctx, vector, dist, maxLimit, allow) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]uint64) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, []float32, float32, int64, helpers.AllowList) []float32); ok { + r1 = rf(ctx, vector, dist, maxLimit, allow) + } else { + if ret.Get(1) != nil { + r1 = 
ret.Get(1).([]float32) + } + } + + if rf, ok := ret.Get(2).(func(context.Context, []float32, float32, int64, helpers.AllowList) error); ok { + r2 = rf(ctx, vector, dist, maxLimit, allow) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// MockVectorIndex_SearchByVectorDistance_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SearchByVectorDistance' +type MockVectorIndex_SearchByVectorDistance_Call struct { + *mock.Call +} + +// SearchByVectorDistance is a helper method to define mock.On call +// - ctx context.Context +// - vector []float32 +// - dist float32 +// - maxLimit int64 +// - allow helpers.AllowList +func (_e *MockVectorIndex_Expecter) SearchByVectorDistance(ctx interface{}, vector interface{}, dist interface{}, maxLimit interface{}, allow interface{}) *MockVectorIndex_SearchByVectorDistance_Call { + return &MockVectorIndex_SearchByVectorDistance_Call{Call: _e.mock.On("SearchByVectorDistance", ctx, vector, dist, maxLimit, allow)} +} + +func (_c *MockVectorIndex_SearchByVectorDistance_Call) Run(run func(ctx context.Context, vector []float32, dist float32, maxLimit int64, allow helpers.AllowList)) *MockVectorIndex_SearchByVectorDistance_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].([]float32), args[2].(float32), args[3].(int64), args[4].(helpers.AllowList)) + }) + return _c +} + +func (_c *MockVectorIndex_SearchByVectorDistance_Call) Return(_a0 []uint64, _a1 []float32, _a2 error) *MockVectorIndex_SearchByVectorDistance_Call { + _c.Call.Return(_a0, _a1, _a2) + return _c +} + +func (_c *MockVectorIndex_SearchByVectorDistance_Call) RunAndReturn(run func(context.Context, []float32, float32, int64, helpers.AllowList) ([]uint64, []float32, error)) *MockVectorIndex_SearchByVectorDistance_Call { + _c.Call.Return(run) + return _c +} + +// Shutdown provides a mock function with given fields: ctx +func (_m *MockVectorIndex) Shutdown(ctx context.Context) error { + ret 
:= _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for Shutdown") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockVectorIndex_Shutdown_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Shutdown' +type MockVectorIndex_Shutdown_Call struct { + *mock.Call +} + +// Shutdown is a helper method to define mock.On call +// - ctx context.Context +func (_e *MockVectorIndex_Expecter) Shutdown(ctx interface{}) *MockVectorIndex_Shutdown_Call { + return &MockVectorIndex_Shutdown_Call{Call: _e.mock.On("Shutdown", ctx)} +} + +func (_c *MockVectorIndex_Shutdown_Call) Run(run func(ctx context.Context)) *MockVectorIndex_Shutdown_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *MockVectorIndex_Shutdown_Call) Return(_a0 error) *MockVectorIndex_Shutdown_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockVectorIndex_Shutdown_Call) RunAndReturn(run func(context.Context) error) *MockVectorIndex_Shutdown_Call { + _c.Call.Return(run) + return _c +} + +// SwitchCommitLogs provides a mock function with given fields: ctx +func (_m *MockVectorIndex) SwitchCommitLogs(ctx context.Context) error { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for SwitchCommitLogs") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockVectorIndex_SwitchCommitLogs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SwitchCommitLogs' +type MockVectorIndex_SwitchCommitLogs_Call struct { + *mock.Call +} + +// SwitchCommitLogs is a helper method to define mock.On call +// - ctx context.Context +func (_e *MockVectorIndex_Expecter) SwitchCommitLogs(ctx interface{}) 
*MockVectorIndex_SwitchCommitLogs_Call { + return &MockVectorIndex_SwitchCommitLogs_Call{Call: _e.mock.On("SwitchCommitLogs", ctx)} +} + +func (_c *MockVectorIndex_SwitchCommitLogs_Call) Run(run func(ctx context.Context)) *MockVectorIndex_SwitchCommitLogs_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *MockVectorIndex_SwitchCommitLogs_Call) Return(_a0 error) *MockVectorIndex_SwitchCommitLogs_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockVectorIndex_SwitchCommitLogs_Call) RunAndReturn(run func(context.Context) error) *MockVectorIndex_SwitchCommitLogs_Call { + _c.Call.Return(run) + return _c +} + +// Type provides a mock function with no fields +func (_m *MockVectorIndex) Type() common.IndexType { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Type") + } + + var r0 common.IndexType + if rf, ok := ret.Get(0).(func() common.IndexType); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(common.IndexType) + } + + return r0 +} + +// MockVectorIndex_Type_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Type' +type MockVectorIndex_Type_Call struct { + *mock.Call +} + +// Type is a helper method to define mock.On call +func (_e *MockVectorIndex_Expecter) Type() *MockVectorIndex_Type_Call { + return &MockVectorIndex_Type_Call{Call: _e.mock.On("Type")} +} + +func (_c *MockVectorIndex_Type_Call) Run(run func()) *MockVectorIndex_Type_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockVectorIndex_Type_Call) Return(_a0 common.IndexType) *MockVectorIndex_Type_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockVectorIndex_Type_Call) RunAndReturn(run func() common.IndexType) *MockVectorIndex_Type_Call { + _c.Call.Return(run) + return _c +} + +// UpdateUserConfig provides a mock function with given fields: updated, callback +func (_m *MockVectorIndex) UpdateUserConfig(updated 
config.VectorIndexConfig, callback func()) error { + ret := _m.Called(updated, callback) + + if len(ret) == 0 { + panic("no return value specified for UpdateUserConfig") + } + + var r0 error + if rf, ok := ret.Get(0).(func(config.VectorIndexConfig, func()) error); ok { + r0 = rf(updated, callback) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockVectorIndex_UpdateUserConfig_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateUserConfig' +type MockVectorIndex_UpdateUserConfig_Call struct { + *mock.Call +} + +// UpdateUserConfig is a helper method to define mock.On call +// - updated config.VectorIndexConfig +// - callback func() +func (_e *MockVectorIndex_Expecter) UpdateUserConfig(updated interface{}, callback interface{}) *MockVectorIndex_UpdateUserConfig_Call { + return &MockVectorIndex_UpdateUserConfig_Call{Call: _e.mock.On("UpdateUserConfig", updated, callback)} +} + +func (_c *MockVectorIndex_UpdateUserConfig_Call) Run(run func(updated config.VectorIndexConfig, callback func())) *MockVectorIndex_UpdateUserConfig_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(config.VectorIndexConfig), args[1].(func())) + }) + return _c +} + +func (_c *MockVectorIndex_UpdateUserConfig_Call) Return(_a0 error) *MockVectorIndex_UpdateUserConfig_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockVectorIndex_UpdateUserConfig_Call) RunAndReturn(run func(config.VectorIndexConfig, func()) error) *MockVectorIndex_UpdateUserConfig_Call { + _c.Call.Return(run) + return _c +} + +// ValidateBeforeInsert provides a mock function with given fields: vector +func (_m *MockVectorIndex) ValidateBeforeInsert(vector []float32) error { + ret := _m.Called(vector) + + if len(ret) == 0 { + panic("no return value specified for ValidateBeforeInsert") + } + + var r0 error + if rf, ok := ret.Get(0).(func([]float32) error); ok { + r0 = rf(vector) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// 
MockVectorIndex_ValidateBeforeInsert_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ValidateBeforeInsert' +type MockVectorIndex_ValidateBeforeInsert_Call struct { + *mock.Call +} + +// ValidateBeforeInsert is a helper method to define mock.On call +// - vector []float32 +func (_e *MockVectorIndex_Expecter) ValidateBeforeInsert(vector interface{}) *MockVectorIndex_ValidateBeforeInsert_Call { + return &MockVectorIndex_ValidateBeforeInsert_Call{Call: _e.mock.On("ValidateBeforeInsert", vector)} +} + +func (_c *MockVectorIndex_ValidateBeforeInsert_Call) Run(run func(vector []float32)) *MockVectorIndex_ValidateBeforeInsert_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].([]float32)) + }) + return _c +} + +func (_c *MockVectorIndex_ValidateBeforeInsert_Call) Return(_a0 error) *MockVectorIndex_ValidateBeforeInsert_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockVectorIndex_ValidateBeforeInsert_Call) RunAndReturn(run func([]float32) error) *MockVectorIndex_ValidateBeforeInsert_Call { + _c.Call.Return(run) + return _c +} + +// NewMockVectorIndex creates a new instance of MockVectorIndex. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewMockVectorIndex(t interface { + mock.TestingT + Cleanup(func()) +}) *MockVectorIndex { + mock := &MockVectorIndex{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/dynamic/restore_integration_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/dynamic/restore_integration_test.go new file mode 100644 index 0000000000000000000000000000000000000000..e145dcd64887b84dee542c8d009e2943001afea9 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/dynamic/restore_integration_test.go @@ -0,0 +1,133 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package dynamic + +import ( + "context" + "os" + "path/filepath" + "sync" + "testing" + + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/repos/db/vector/compressionhelpers" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer" + "github.com/weaviate/weaviate/adapters/repos/db/vector/testinghelpers" + "github.com/weaviate/weaviate/entities/cyclemanager" + "github.com/weaviate/weaviate/entities/storobj" + ent "github.com/weaviate/weaviate/entities/vectorindex/dynamic" + flatent "github.com/weaviate/weaviate/entities/vectorindex/flat" + hnswent "github.com/weaviate/weaviate/entities/vectorindex/hnsw" + "go.etcd.io/bbolt" +) + +func TestBackup_Integration(t *testing.T) { + ctx := context.Background() + currentIndexing := os.Getenv("ASYNC_INDEXING") + os.Setenv("ASYNC_INDEXING", "true") + defer os.Setenv("ASYNC_INDEXING", 
currentIndexing) + dimensions := 20 + vectors_size := 1_000 + queries_size := 10 + k := 10 + + vectors, queries := testinghelpers.RandomVecs(vectors_size, queries_size, dimensions) + truths := make([][]uint64, queries_size) + distancer := distancer.NewL2SquaredProvider() + compressionhelpers.Concurrently(logger, uint64(len(queries)), func(i uint64) { + truths[i], _ = testinghelpers.BruteForce(logger, vectors, queries[i], k, testinghelpers.DistanceWrapper(distancer)) + }) + logger, _ := test.NewNullLogger() + + dirName := t.TempDir() + indexID := "restore-integration-test" + noopCallback := cyclemanager.NewCallbackGroupNoop() + fuc := flatent.UserConfig{} + fuc.SetDefaults() + hnswuc := hnswent.UserConfig{ + MaxConnections: 30, + EFConstruction: 64, + EF: 32, + VectorCacheMaxObjects: 1_000_000, + } + + tmpDir := t.TempDir() + dbPath := filepath.Join(tmpDir, "index.db") + db, err := bbolt.Open(dbPath, 0o666, nil) + require.NoError(t, err) + t.Cleanup(func() { + db.Close() + }) + + config := Config{ + RootPath: dirName, + ID: indexID, + Logger: logger, + DistanceProvider: distancer, + MakeCommitLoggerThunk: func() (hnsw.CommitLogger, error) { + return hnsw.NewCommitLogger(dirName, indexID, logger, noopCallback) + }, + VectorForIDThunk: func(ctx context.Context, id uint64) ([]float32, error) { + vec := vectors[int(id)] + if vec == nil { + return nil, storobj.NewErrNotFoundf(id, "nil vec") + } + return vec, nil + }, + TempVectorForIDThunk: TempVectorForIDThunk(vectors), + TombstoneCallbacks: noopCallback, + SharedDB: db, + } + + uc := ent.UserConfig{ + Threshold: uint64(vectors_size), + Distance: distancer.Type(), + HnswUC: hnswuc, + FlatUC: fuc, + } + + store := testinghelpers.NewDummyStore(t) + + idx, err := New(config, uc, store) + require.Nil(t, err) + idx.PostStartup() + + compressionhelpers.Concurrently(logger, uint64(vectors_size), func(i uint64) { + idx.Add(ctx, i, vectors[i]) + }) + + wg := sync.WaitGroup{} + wg.Add(1) + idx.Upgrade(func() { + wg.Done() + }) + 
wg.Wait() + recall1, _ := testinghelpers.RecallAndLatency(ctx, queries, k, idx, truths) + assert.True(t, recall1 > 0.9) + + assert.Nil(t, idx.Flush()) + assert.Nil(t, idx.Shutdown(context.Background())) + + // open the db again + db, err = bbolt.Open(dbPath, 0o666, nil) + require.NoError(t, err) + config.SharedDB = db + + idx, err = New(config, uc, store) + require.Nil(t, err) + idx.PostStartup() + recall2, _ := testinghelpers.RecallAndLatency(ctx, queries, k, idx, truths) + assert.Equal(t, recall1, recall2) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/flat/config.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/flat/config.go new file mode 100644 index 0000000000000000000000000000000000000000..cb7ce1c56deff6cced26f82f750328561709ecce --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/flat/config.go @@ -0,0 +1,51 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package flat + +import ( + "github.com/sirupsen/logrus" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer" + "github.com/weaviate/weaviate/entities/errorcompounder" + "github.com/weaviate/weaviate/usecases/memwatch" +) + +type Config struct { + ID string + RootPath string + TargetVector string + MinMMapSize int64 + MaxWalReuseSize int64 + Logger logrus.FieldLogger + DistanceProvider distancer.Provider + AllocChecker memwatch.AllocChecker + LazyLoadSegments bool + WriteSegmentInfoIntoFileName bool + WriteMetadataFilesEnabled bool +} + +func (c Config) Validate() error { + ec := errorcompounder.New() + + if c.ID == "" { + ec.Addf("id cannot be empty") + } + + if c.RootPath == "" { + ec.Addf("rootPath cannot be empty") + } + + if c.DistanceProvider == nil { + ec.Addf("distancerProvider cannot be nil") + } + + return ec.ToError() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/flat/config_update_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/flat/config_update_test.go new file mode 100644 index 0000000000000000000000000000000000000000..0c665762a63096e561097cfa93fb3f7bd67aef4e --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/flat/config_update_test.go @@ -0,0 +1,78 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package flat + +import ( + "testing" + + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/schema/config" + ent "github.com/weaviate/weaviate/entities/vectorindex/flat" +) + +func TestFlatUserConfigUpdates(t *testing.T) { + t.Run("various immutable and mutable fields", func(t *testing.T) { + type test struct { + name string + initial config.VectorIndexConfig + update config.VectorIndexConfig + expectedError error + } + + tests := []test{ + { + name: "attempting to change pq enabled", + initial: ent.UserConfig{PQ: ent.CompressionUserConfig{Enabled: false}}, + update: ent.UserConfig{PQ: ent.CompressionUserConfig{Enabled: true}}, + expectedError: errors.Errorf( + "pq is immutable: " + + "attempted change from \"false\" to \"true\""), + }, + { + name: "attempting to change bq enabled", + initial: ent.UserConfig{BQ: ent.CompressionUserConfig{Enabled: true}}, + update: ent.UserConfig{BQ: ent.CompressionUserConfig{Enabled: false}}, + expectedError: errors.Errorf( + "bq is immutable: " + + "attempted change from \"true\" to \"false\""), + }, + { + name: "attempting to change distance", + initial: ent.UserConfig{Distance: "cosine"}, + update: ent.UserConfig{Distance: "l2-squared"}, + expectedError: errors.Errorf( + "distance is immutable: " + + "attempted change from \"cosine\" to \"l2-squared\""), + }, + { + name: "changing rescoreLimit", + initial: ent.UserConfig{BQ: ent.CompressionUserConfig{RescoreLimit: 10}}, + update: ent.UserConfig{BQ: ent.CompressionUserConfig{RescoreLimit: 100}}, + expectedError: nil, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := ValidateUserConfigUpdate(test.initial, test.update) + if test.expectedError == nil { + assert.Nil(t, err) + } else { + require.NotNil(t, err, "update validation must error") + assert.Equal(t, test.expectedError.Error(), err.Error()) + } + }) + } 
+ }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/flat/index.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/flat/index.go new file mode 100644 index 0000000000000000000000000000000000000000..9aa773cbbe2e80bf884bc5ad116bf713641c49bd --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/flat/index.go @@ -0,0 +1,1054 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package flat + +import ( + "context" + "encoding/binary" + "fmt" + "io" + "math" + "os" + "path/filepath" + "runtime" + "sync" + "sync/atomic" + "time" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + bolt "go.etcd.io/bbolt" + + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv" + "github.com/weaviate/weaviate/adapters/repos/db/priorityqueue" + "github.com/weaviate/weaviate/adapters/repos/db/vector/cache" + "github.com/weaviate/weaviate/adapters/repos/db/vector/common" + "github.com/weaviate/weaviate/adapters/repos/db/vector/compressionhelpers" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer" + entcfg "github.com/weaviate/weaviate/entities/config" + enterrors "github.com/weaviate/weaviate/entities/errors" + entlsmkv "github.com/weaviate/weaviate/entities/lsmkv" + schemaConfig "github.com/weaviate/weaviate/entities/schema/config" + flatent "github.com/weaviate/weaviate/entities/vectorindex/flat" + "github.com/weaviate/weaviate/usecases/floatcomp" +) + +const ( + compressionBQ = "bq" + compressionPQ = "pq" + compressionSQ = "sq" + compressionNone = "none" + defaultCachePageSize = 32 +) + +type flat struct { + id string + targetVector string + rootPath string + dims int32 + metadata *bolt.DB + 
metadataLock *sync.RWMutex + store *lsmkv.Store + logger logrus.FieldLogger + distancerProvider distancer.Provider + trackDimensionsOnce sync.Once + rescore int64 + bq compressionhelpers.BinaryQuantizer + + pqResults *common.PqMaxPool + pool *pools + + compression string + bqCache cache.Cache[uint64] + count uint64 + concurrentCacheReads int +} + +type distanceCalc func(vecAsBytes []byte) (float32, error) + +func New(cfg Config, uc flatent.UserConfig, store *lsmkv.Store) (*flat, error) { + if err := cfg.Validate(); err != nil { + return nil, errors.Wrap(err, "invalid config") + } + + logger := cfg.Logger + if logger == nil { + l := logrus.New() + l.Out = io.Discard + logger = l + } + + index := &flat{ + id: cfg.ID, + targetVector: cfg.TargetVector, + rootPath: cfg.RootPath, + logger: logger, + distancerProvider: cfg.DistanceProvider, + metadataLock: &sync.RWMutex{}, + rescore: extractCompressionRescore(uc), + pqResults: common.NewPqMaxPool(100), + compression: extractCompression(uc), + pool: newPools(), + store: store, + concurrentCacheReads: runtime.GOMAXPROCS(0) * 2, + } + if err := index.initBuckets(context.Background(), cfg); err != nil { + return nil, fmt.Errorf("init flat index buckets: %w", err) + } + + if uc.BQ.Enabled && uc.BQ.Cache { + index.bqCache = cache.NewShardedUInt64LockCache( + index.getBQVector, uc.VectorCacheMaxObjects, defaultCachePageSize, cfg.Logger, 0, cfg.AllocChecker) + } + + if err := index.initMetadata(); err != nil { + return nil, err + } + + return index, nil +} + +func (flat *flat) getBQVector(ctx context.Context, id uint64) ([]uint64, error) { + key := flat.pool.byteSlicePool.Get(8) + defer flat.pool.byteSlicePool.Put(key) + binary.BigEndian.PutUint64(key.slice, id) + bytes, err := flat.store.Bucket(flat.getCompressedBucketName()).Get(key.slice) + if err != nil { + return nil, err + } + if len(bytes) == 0 { + return nil, nil + } + return uint64SliceFromByteSlice(bytes, make([]uint64, len(bytes)/8)), nil +} + +func 
extractCompression(uc flatent.UserConfig) string { + if uc.BQ.Enabled { + return compressionBQ + } + + if uc.PQ.Enabled { + return compressionPQ + } + + if uc.SQ.Enabled { + return compressionSQ + } + + return compressionNone +} + +func extractCompressionRescore(uc flatent.UserConfig) int64 { + compression := extractCompression(uc) + switch compression { + case compressionPQ: + return int64(uc.PQ.RescoreLimit) + case compressionBQ: + return int64(uc.BQ.RescoreLimit) + case compressionSQ: + return int64(uc.SQ.RescoreLimit) + default: + return 0 + } +} + +func (index *flat) storeCompressedVector(id uint64, vector []byte) { + index.storeGenericVector(id, vector, index.getCompressedBucketName()) +} + +func (index *flat) storeVector(id uint64, vector []byte) { + index.storeGenericVector(id, vector, index.getBucketName()) +} + +func (index *flat) storeGenericVector(id uint64, vector []byte, bucket string) { + idBytes := make([]byte, 8) + binary.BigEndian.PutUint64(idBytes, id) + index.store.Bucket(bucket).Put(idBytes, vector) +} + +func (index *flat) isBQ() bool { + return index.compression == compressionBQ +} + +func (index *flat) isBQCached() bool { + return index.bqCache != nil +} + +func (index *flat) Compressed() bool { + return index.compression != compressionNone +} + +func (index *flat) Multivector() bool { + return false +} + +func (index *flat) getBucketName() string { + if index.targetVector != "" { + return fmt.Sprintf("%s_%s", helpers.VectorsBucketLSM, index.targetVector) + } + return helpers.VectorsBucketLSM +} + +func (index *flat) getCompressedBucketName() string { + if index.targetVector != "" { + return fmt.Sprintf("%s_%s", helpers.VectorsCompressedBucketLSM, index.targetVector) + } + return helpers.VectorsCompressedBucketLSM +} + +func (index *flat) initBuckets(ctx context.Context, cfg Config) error { + // TODO: Forced compaction should not stay an all or nothing option. 
+ // This is only a temporary measure until dynamic compaction + // behavior is implemented. + // See: https://github.com/weaviate/weaviate/issues/5241 + forceCompaction := shouldForceCompaction() + if err := index.store.CreateOrLoadBucket(ctx, index.getBucketName(), + lsmkv.WithForceCompaction(forceCompaction), + lsmkv.WithUseBloomFilter(false), + lsmkv.WithMinMMapSize(cfg.MinMMapSize), + lsmkv.WithMinWalThreshold(cfg.MinMMapSize), + lsmkv.WithAllocChecker(cfg.AllocChecker), + lsmkv.WithLazySegmentLoading(cfg.LazyLoadSegments), + lsmkv.WithWriteSegmentInfoIntoFileName(cfg.WriteSegmentInfoIntoFileName), + lsmkv.WithWriteMetadata(cfg.WriteMetadataFilesEnabled), + + // Pread=false flag introduced around ~v1.25.9. Before that, the pread flag + // was simply missing. Now we want to explicitly set it to false for + // performance reasons. There are pread performance improvements in the + // pipeline, but as of now, mmap is much more performant – especially for + // parallel cache prefilling. + // + // In the future when the pure pread performance is on par with mmap, we + // should update this to pass the global setting. + lsmkv.WithPread(false), + ); err != nil { + return fmt.Errorf("create or load flat vectors bucket: %w", err) + } + if index.isBQ() { + if err := index.store.CreateOrLoadBucket(ctx, index.getCompressedBucketName(), + lsmkv.WithForceCompaction(forceCompaction), + lsmkv.WithUseBloomFilter(false), + lsmkv.WithMinMMapSize(cfg.MinMMapSize), + lsmkv.WithMinWalThreshold(cfg.MinMMapSize), + lsmkv.WithAllocChecker(cfg.AllocChecker), + lsmkv.WithLazySegmentLoading(cfg.LazyLoadSegments), + lsmkv.WithWriteSegmentInfoIntoFileName(cfg.WriteSegmentInfoIntoFileName), + lsmkv.WithWriteMetadata(cfg.WriteMetadataFilesEnabled), + + // Pread=false flag introduced around ~v1.25.9. Before that, the pread flag + // was simply missing. Now we want to explicitly set it to false for + // performance reasons. 
There are pread performance improvements in the + // pipeline, but as of now, mmap is much more performant – especially for + // parallel cache prefilling. + // + // In the future when the pure pread performance is on par with mmap, we + // should update this to pass the global setting. + lsmkv.WithPread(false), + ); err != nil { + return fmt.Errorf("create or load flat compressed vectors bucket: %w", err) + } + } + return nil +} + +// TODO: Remove this function when gh-5241 is completed. See flat::initBuckets for more details. +func shouldForceCompaction() bool { + return !entcfg.Enabled(os.Getenv("FLAT_INDEX_DISABLE_FORCED_COMPACTION")) +} + +func (index *flat) AddBatch(ctx context.Context, ids []uint64, vectors [][]float32) error { + if err := ctx.Err(); err != nil { + return err + } + if len(ids) != len(vectors) { + return errors.Errorf("ids and vectors sizes does not match") + } + if len(ids) == 0 { + return errors.Errorf("insertBatch called with empty lists") + } + for i := range ids { + if err := ctx.Err(); err != nil { + return err + } + if err := index.Add(ctx, ids[i], vectors[i]); err != nil { + return err + } + } + return nil +} + +func byteSliceFromUint64Slice(vector []uint64, slice []byte) []byte { + for i := range vector { + binary.LittleEndian.PutUint64(slice[i*8:], vector[i]) + } + return slice +} + +func byteSliceFromFloat32Slice(vector []float32, slice []byte) []byte { + for i := range vector { + binary.LittleEndian.PutUint32(slice[i*4:], math.Float32bits(vector[i])) + } + return slice +} + +func uint64SliceFromByteSlice(vector []byte, slice []uint64) []uint64 { + for i := range slice { + slice[i] = binary.LittleEndian.Uint64(vector[i*8:]) + } + return slice +} + +func float32SliceFromByteSlice(vector []byte, slice []float32) []float32 { + for i := range slice { + slice[i] = math.Float32frombits(binary.LittleEndian.Uint32(vector[i*4:])) + } + return slice +} + +func (index *flat) Add(ctx context.Context, id uint64, vector []float32) error { + if 
err := ctx.Err(); err != nil {
		return err
	}

	// The first successful insert fixes the dimensionality of the index; it
	// is stored atomically and persisted so later inserts can be validated.
	index.trackDimensionsOnce.Do(func() {
		size := int32(len(vector))
		atomic.StoreInt32(&index.dims, size)
		err := index.setDimensions(size)
		if err != nil {
			index.logger.WithError(err).Error("could not set dimensions")
		}

		if index.isBQ() {
			index.bq = compressionhelpers.NewBinaryQuantizer(nil)
		}
	})

	if err := index.ValidateBeforeInsert(vector); err != nil {
		return err
	}

	vector = index.normalized(vector)
	slice := make([]byte, len(vector)*4)
	index.storeVector(id, byteSliceFromFloat32Slice(vector, slice))

	if index.isBQ() {
		vectorBQ := index.bq.Encode(vector)
		if index.isBQCached() {
			index.bqCache.Grow(id)
			index.bqCache.Preload(id, vectorBQ)
		}
		slice = make([]byte, len(vectorBQ)*8)
		index.storeCompressedVector(id, byteSliceFromUint64Slice(vectorBQ, slice))
	}

	// atomic.AddUint64 is the idiomatic equivalent of the previous manual
	// Load+CompareAndSwap retry loop incrementing the count by one.
	atomic.AddUint64(&index.count, 1)

	return nil
}

// Delete removes the given ids from the uncompressed bucket, from the
// compressed bucket (when BQ is enabled), and from the BQ cache (when one is
// configured).
func (index *flat) Delete(ids ...uint64) error {
	for i := range ids {
		if index.isBQCached() {
			index.bqCache.Delete(context.Background(), ids[i])
		}
		idBytes := make([]byte, 8)
		binary.BigEndian.PutUint64(idBytes, ids[i])

		if err := index.store.Bucket(index.getBucketName()).Delete(idBytes); err != nil {
			return err
		}

		if index.isBQ() {
			if err := index.store.Bucket(index.getCompressedBucketName()).Delete(idBytes); err != nil {
				return err
			}
		}
	}
	return nil
}

// searchTimeRescore returns how many candidates to rescore, which is never
// fewer than k.
func (index *flat) searchTimeRescore(k int) int {
	// load atomically, so we can get away with concurrent updates of the
	// userconfig without having to set a lock each time we try to read - which
	// can be so common that it would cause considerable overhead
	if rescore := int(atomic.LoadInt64(&index.rescore)); rescore > k {
		return rescore
	}
	return k
}

// SearchByVector dispatches the k-NN search to the compression-specific
// implementation.
func (index *flat) SearchByVector(ctx context.Context, vector []float32, k int, allow helpers.AllowList)
([]uint64, []float32, error) {
	switch index.compression {
	case compressionBQ:
		return index.searchByVectorBQ(ctx, vector, k, allow)
	case compressionPQ:
		// PQ search is not implemented yet; fall through to the uncompressed path
		fallthrough
	default:
		return index.searchByVector(ctx, vector, k, allow)
	}
}

// searchByVector performs an exhaustive (brute-force) scan over the
// uncompressed vectors bucket and returns ids and distances of the k nearest
// neighbors, honoring the optional allow list.
func (index *flat) searchByVector(ctx context.Context, vector []float32, k int, allow helpers.AllowList) ([]uint64, []float32, error) {
	// TODO: pass context into inner methods, so it can be checked more granularly
	heap := index.pqResults.GetMax(k)
	defer index.pqResults.Put(heap)

	vector = index.normalized(vector)

	if err := index.findTopVectors(heap, allow, k,
		index.store.Bucket(index.getBucketName()).Cursor,
		index.createDistanceCalc(vector),
	); err != nil {
		return nil, nil, err
	}

	ids, dists := index.extractHeap(heap)
	return ids, dists, nil
}

// createDistanceCalc returns a distanceCalc that decodes a stored vector from
// its raw little-endian float32 bytes and computes its distance to the given
// query vector. The scratch float32 slice is pooled to avoid per-call
// allocations.
func (index *flat) createDistanceCalc(vector []float32) distanceCalc {
	return func(vecAsBytes []byte) (float32, error) {
		vecSlice := index.pool.float32SlicePool.Get(len(vecAsBytes) / 4)
		defer index.pool.float32SlicePool.Put(vecSlice)

		candidate := float32SliceFromByteSlice(vecAsBytes, vecSlice.slice)
		return index.distancerProvider.SingleDist(vector, candidate)
	}
}

// searchByVectorBQ first searches the binary-quantized representation (via
// the cache when available, otherwise via a bucket cursor), then rescores the
// best candidates against their uncompressed vectors.
func (index *flat) searchByVectorBQ(ctx context.Context, vector []float32, k int, allow helpers.AllowList) ([]uint64, []float32, error) {
	// TODO: pass context into inner methods, so it can be checked more granularly
	rescore := index.searchTimeRescore(k)
	heap := index.pqResults.GetMax(rescore)
	defer index.pqResults.Put(heap)

	vector = index.normalized(vector)
	vectorBQ := index.bq.Encode(vector)

	if index.isBQCached() {
		if err := index.findTopVectorsCached(heap, allow, rescore, vectorBQ); err != nil {
			return nil, nil, err
		}
	} else {
		if err := index.findTopVectors(heap, allow, rescore,
			index.store.Bucket(index.getCompressedBucketName()).Cursor,
			index.createDistanceCalcBQ(vectorBQ),
		); err != nil {
			return nil, nil,
err + } + } + + distanceCalc := index.createDistanceCalc(vector) + idsSlice := index.pool.uint64SlicePool.Get(heap.Len()) + defer index.pool.uint64SlicePool.Put(idsSlice) + + for i := range idsSlice.slice { + idsSlice.slice[i] = heap.Pop().ID + } + + // we expect to be mostly IO-bound, so more goroutines than CPUs is fine + distancesUncompressedVectors := make([]float32, len(idsSlice.slice)) + + eg := enterrors.NewErrorGroupWrapper(index.logger) + for workerID := 0; workerID < index.concurrentCacheReads; workerID++ { + workerID := workerID + eg.Go(func() error { + for idPos := workerID; idPos < len(idsSlice.slice); idPos += index.concurrentCacheReads { + id := idsSlice.slice[idPos] + candidateAsBytes, err := index.vectorById(id) + if err != nil { + return err + } + if len(candidateAsBytes) == 0 { + continue + } + distance, err := distanceCalc(candidateAsBytes) + if err != nil { + return err + } + + distancesUncompressedVectors[idPos] = distance + } + + return nil + }) + } + + if err := eg.Wait(); err != nil { + return nil, nil, err + } + + for i, id := range idsSlice.slice { + index.insertToHeap(heap, k, id, distancesUncompressedVectors[i]) + } + + ids, dists := index.extractHeap(heap) + return ids, dists, nil +} + +func (index *flat) createDistanceCalcBQ(vectorBQ []uint64) distanceCalc { + return func(vecAsBytes []byte) (float32, error) { + vecSliceBQ := index.pool.uint64SlicePool.Get(len(vecAsBytes) / 8) + defer index.pool.uint64SlicePool.Put(vecSliceBQ) + + candidate := uint64SliceFromByteSlice(vecAsBytes, vecSliceBQ.slice) + return index.bq.DistanceBetweenCompressedVectors(candidate, vectorBQ) + } +} + +func (index *flat) vectorById(id uint64) ([]byte, error) { + idSlice := index.pool.byteSlicePool.Get(8) + defer index.pool.byteSlicePool.Put(idSlice) + + binary.BigEndian.PutUint64(idSlice.slice, id) + return index.store.Bucket(index.getBucketName()).Get(idSlice.slice) +} + +// populates given heap with smallest distances and corresponding ids calculated by +// 
distanceCalc +func (index *flat) findTopVectors(heap *priorityqueue.Queue[any], + allow helpers.AllowList, limit int, cursorFn func() *lsmkv.CursorReplace, + distanceCalc distanceCalc, +) error { + var key []byte + var v []byte + var id uint64 + allowMax := uint64(0) + + cursor := cursorFn() + defer cursor.Close() + + if allow != nil { + // nothing allowed, skip search + if allow.IsEmpty() { + return nil + } + + allowMax = allow.Max() + + idSlice := index.pool.byteSlicePool.Get(8) + binary.BigEndian.PutUint64(idSlice.slice, allow.Min()) + key, v = cursor.Seek(idSlice.slice) + index.pool.byteSlicePool.Put(idSlice) + } else { + key, v = cursor.First() + } + + // since keys are sorted, once key/id get greater than max allowed one + // further search can be stopped + for ; key != nil && (allow == nil || id <= allowMax); key, v = cursor.Next() { + id = binary.BigEndian.Uint64(key) + if allow == nil || allow.Contains(id) { + distance, err := distanceCalc(v) + if err != nil { + return err + } + index.insertToHeap(heap, limit, id, distance) + } + } + return nil +} + +// populates given heap with smallest distances and corresponding ids calculated by +// distanceCalc +func (index *flat) findTopVectorsCached(heap *priorityqueue.Queue[any], + allow helpers.AllowList, limit int, vectorBQ []uint64, +) error { + var id uint64 + allowMax := uint64(0) + + if allow != nil { + // nothing allowed, skip search + if allow.IsEmpty() { + return nil + } + + allowMax = allow.Max() + + id = allow.Min() + } else { + id = 0 + } + all := index.bqCache.Len() + + out := make([][]uint64, index.bqCache.PageSize()) + errs := make([]error, index.bqCache.PageSize()) + + // since keys are sorted, once key/id get greater than max allowed one + // further search can be stopped + for id < uint64(all) && (allow == nil || id <= allowMax) { + + vecs, errs, start, end := index.bqCache.GetAllInCurrentLock(context.Background(), id, out, errs) + + for i, vec := range vecs { + if i < (int(end) - int(start)) { + 
currentId := start + uint64(i) + + if (currentId < uint64(all)) && (allow == nil || allow.Contains(currentId)) { + + err := errs[i] + if err != nil { + return err + } + if len(vec) == 0 { + continue + } + distance, err := index.bq.DistanceBetweenCompressedVectors(vec, vectorBQ) + if err != nil { + return err + } + index.insertToHeap(heap, limit, currentId, distance) + + } + } + } + + id = end + } + + return nil +} + +func (index *flat) insertToHeap(heap *priorityqueue.Queue[any], + limit int, id uint64, distance float32, +) { + if heap.Len() < limit { + heap.Insert(id, distance) + } else if heap.Top().Dist > distance { + heap.Pop() + heap.Insert(id, distance) + } +} + +func (index *flat) extractHeap(heap *priorityqueue.Queue[any], +) ([]uint64, []float32) { + len := heap.Len() + + ids := make([]uint64, len) + dists := make([]float32, len) + for i := len - 1; i >= 0; i-- { + item := heap.Pop() + ids[i] = item.ID + dists[i] = item.Dist + } + return ids, dists +} + +func (index *flat) normalized(vector []float32) []float32 { + if index.distancerProvider.Type() == "cosine-dot" { + // cosine-dot requires normalized vectors, as the dot product and cosine + // similarity are only identical if the vector is normalized + return distancer.Normalize(vector) + } + return vector +} + +func (index *flat) SearchByVectorDistance(ctx context.Context, vector []float32, + targetDistance float32, maxLimit int64, allow helpers.AllowList, +) ([]uint64, []float32, error) { + var ( + searchParams = newSearchByDistParams(maxLimit) + + resultIDs []uint64 + resultDist []float32 + ) + + recursiveSearch := func() (bool, error) { + totalLimit := searchParams.TotalLimit() + ids, dist, err := index.SearchByVector(ctx, vector, totalLimit, allow) + if err != nil { + return false, errors.Wrap(err, "vector search") + } + + // if there is less results than given limit search can be stopped + shouldContinue := !(len(ids) < totalLimit) + + // ensures the indexes aren't out of range + offsetCap := 
searchParams.OffsetCapacity(ids) + totalLimitCap := searchParams.TotalLimitCapacity(ids) + + if offsetCap == totalLimitCap { + return false, nil + } + + ids, dist = ids[offsetCap:totalLimitCap], dist[offsetCap:totalLimitCap] + for i := range ids { + if aboveThresh := dist[i] <= targetDistance; aboveThresh || + floatcomp.InDelta(float64(dist[i]), float64(targetDistance), 1e-6) { + resultIDs = append(resultIDs, ids[i]) + resultDist = append(resultDist, dist[i]) + } else { + // as soon as we encounter a certainty which + // is below threshold, we can stop searching + shouldContinue = false + break + } + } + + return shouldContinue, nil + } + + var shouldContinue bool + var err error + for shouldContinue, err = recursiveSearch(); shouldContinue && err == nil; { + searchParams.Iterate() + if searchParams.MaxLimitReached() { + index.logger. + WithField("action", "unlimited_vector_search"). + Warnf("maximum search limit of %d results has been reached", + searchParams.MaximumSearchLimit()) + break + } + } + if err != nil { + return nil, nil, err + } + + return resultIDs, resultDist, nil +} + +func (index *flat) UpdateUserConfig(updated schemaConfig.VectorIndexConfig, callback func()) error { + parsed, ok := updated.(flatent.UserConfig) + if !ok { + callback() + return errors.Errorf("config is not UserConfig, but %T", updated) + } + + // Store atomically as a lock here would be very expensive, this value is + // read on every single user-facing search, which can be highly concurrent + atomic.StoreInt64(&index.rescore, extractCompressionRescore(parsed)) + + callback() + return nil +} + +func (index *flat) Drop(ctx context.Context) error { + if err := index.removeMetadataFile(); err != nil { + return err + } + // Shard::drop will take care of handling store's buckets + return nil +} + +func (index *flat) Flush() error { + // nothing to do here + // Shard will take care of handling store's buckets + return nil +} + +func (index *flat) Shutdown(ctx context.Context) error { + // 
Shard::shutdown will take care of handling store's buckets
	return nil
}

// SwitchCommitLogs is a no-op: the flat index keeps no commit logs of its own.
func (index *flat) SwitchCommitLogs(context.Context) error {
	return nil
}

// ListFiles returns the index's own files — currently only the metadata
// file, when present — as paths relative to basePath. A missing metadata
// file is not an error; it is simply omitted from the result.
func (index *flat) ListFiles(ctx context.Context, basePath string) ([]string, error) {
	var files []string

	metadataFile := index.getMetadataFile()
	fullPath := filepath.Join(index.rootPath, metadataFile)

	// If the file doesn't exist, we simply don't add it to the list
	if _, err := os.Stat(fullPath); err == nil {
		relPath, err := filepath.Rel(basePath, fullPath)
		if err != nil {
			return nil, fmt.Errorf("failed to get relative path: %w", err)
		}
		files = append(files, relPath)
	}

	return files, nil
}

// GetKeys is part of the vector index interface but has no meaning for a
// flat index; it always returns an error.
func (index *flat) GetKeys(id uint64) (uint64, uint64, error) {
	return 0, 0, errors.Errorf("GetKeys is not supported for flat index")
}

// ValidateBeforeInsert checks that the vector matches the dimensionality
// established by the first insert. Before any vector exists (dims == 0),
// every length is accepted.
func (index *flat) ValidateBeforeInsert(vector []float32) error {
	dims := int(atomic.LoadInt32(&index.dims))

	// no vectors exist
	if dims == 0 {
		return nil
	}

	// check if vector length is the same as existing nodes
	if dims != len(vector) {
		return errors.Errorf("insert called with a vector of the wrong size: %d. Saved length: %d, path: %s",
			len(vector), dims, index.rootPath)
	}

	return nil
}

// PostStartup pre-fills the BQ cache from disk, when a cache is configured.
func (index *flat) PostStartup() {
	if !index.isBQCached() {
		return
	}

	// The idea here is to first read everything from disk in one go, then grow
	// the cache just once before inserting all vectors. A previous iteration
	// would grow the cache as part of the cursor loop and this ended up making
	// up 75% of the CPU time needed. This new implementation with two loops is
	// much more efficient and only ever-so-slightly more memory-consuming (about
	// one additional struct per vector while loading. Should be negligible)

	// The initial size of 10k is chosen fairly arbitrarily. The cost of growing
	// this slice dynamically should be quite cheap compared to other operations
	// involved here, e.g. disk reads.
+ vecs := make([]compressionhelpers.VecAndID[uint64], 0, 10_000) + maxID := uint64(0) + + before := time.Now() + bucket := index.store.Bucket(index.getCompressedBucketName()) + // we expect to be IO-bound, so more goroutines than CPUs is fine, we do + // however want some kind of relationship to the machine size, so + // 2*GOMAXPROCS seems like a good default. + it := compressionhelpers.NewParallelIterator[uint64](bucket, 2*runtime.GOMAXPROCS(0), + binary.BigEndian.Uint64, index.bq.FromCompressedBytesWithSubsliceBuffer, index.logger) + channel := it.IterateAll() + if channel == nil { + return // nothing to do + } + for v := range channel { + vecs = append(vecs, v...) + } + + count := 0 + for i := range vecs { + count++ + if vecs[i].Id > maxID { + maxID = vecs[i].Id + } + } + + // Grow cache just once + index.bqCache.LockAll() + defer index.bqCache.UnlockAll() + + index.bqCache.SetSizeAndGrowNoLock(maxID) + for _, vec := range vecs { + index.bqCache.PreloadNoLock(vec.Id, vec.Vec) + } + + took := time.Since(before) + index.logger.WithFields(logrus.Fields{ + "action": "preload_bq_cache", + "count": count, + "took": took, + "index_id": index.id, + }).Debugf("pre-loaded %d vectors in %s", count, took) +} + +func (index *flat) ContainsDoc(id uint64) bool { + var bucketName string + + // logic modeled after SearchByVector which indicates that the PQ bucket is + // the same as the uncompressed bucket "for now" + switch index.compression { + case compressionBQ: + bucketName = index.getCompressedBucketName() + case compressionPQ: + // use uncompressed for now + fallthrough + default: + bucketName = index.getBucketName() + } + + idBytes := make([]byte, 8) + binary.BigEndian.PutUint64(idBytes, id) + v, err := index.store.Bucket(bucketName).Get(idBytes) + if v == nil || errors.Is(err, entlsmkv.NotFound) { + return false + } + + return true +} + +func (index *flat) Iterate(fn func(docID uint64) bool) { + var bucketName string + + // logic modeled after SearchByVector which 
indicates that the PQ bucket is + // the same as the uncompressed bucket "for now" + switch index.compression { + case compressionBQ: + bucketName = index.getCompressedBucketName() + case compressionPQ: + // use uncompressed for now + fallthrough + default: + bucketName = index.getBucketName() + } + + bucket := index.store.Bucket(bucketName) + cursor := bucket.Cursor() + defer cursor.Close() + + for key, _ := cursor.First(); key != nil; key, _ = cursor.Next() { + id := binary.BigEndian.Uint64(key) + if !fn(id) { + break + } + } +} + +func newSearchByDistParams(maxLimit int64) *common.SearchByDistParams { + initialOffset := 0 + initialLimit := common.DefaultSearchByDistInitialLimit + + return common.NewSearchByDistParams(initialOffset, initialLimit, initialOffset+initialLimit, maxLimit) +} + +type immutableParameter struct { + accessor func(c flatent.UserConfig) interface{} + name string +} + +func validateImmutableField(u immutableParameter, + previous, next flatent.UserConfig, +) error { + oldField := u.accessor(previous) + newField := u.accessor(next) + if oldField != newField { + return errors.Errorf("%s is immutable: attempted change from \"%v\" to \"%v\"", + u.name, oldField, newField) + } + + return nil +} + +func ValidateUserConfigUpdate(initial, updated schemaConfig.VectorIndexConfig) error { + initialParsed, ok := initial.(flatent.UserConfig) + if !ok { + return errors.Errorf("initial is not UserConfig, but %T", initial) + } + + updatedParsed, ok := updated.(flatent.UserConfig) + if !ok { + return errors.Errorf("updated is not UserConfig, but %T", updated) + } + + immutableFields := []immutableParameter{ + { + name: "distance", + accessor: func(c flatent.UserConfig) interface{} { return c.Distance }, + }, + { + name: "pq.cache", + accessor: func(c flatent.UserConfig) interface{} { return c.PQ.Cache }, + }, + { + name: "pq", + accessor: func(c flatent.UserConfig) interface{} { return c.PQ.Enabled }, + }, + { + name: "bq", + accessor: func(c 
flatent.UserConfig) interface{} { return c.BQ.Enabled }, + }, + // as of v1.25.2, updating the BQ cache setting is now possible. + // Note that the change does not take effect until the tenant is + // reloaded, either from a complete restart or from + // activating/deactivating it. + } + + for _, u := range immutableFields { + if err := validateImmutableField(u, initialParsed, updatedParsed); err != nil { + return err + } + } + return nil +} + +func (index *flat) AlreadyIndexed() uint64 { + return atomic.LoadUint64(&index.count) +} + +func (index *flat) QueryVectorDistancer(queryVector []float32) common.QueryVectorDistancer { + var distFunc func(nodeID uint64) (float32, error) + queryVector = index.normalized(queryVector) + defaultDistFunc := func(nodeID uint64) (float32, error) { + vec, err := index.vectorById(nodeID) + if err != nil { + return 0, err + } + dist, err := index.distancerProvider.SingleDist(queryVector, float32SliceFromByteSlice(vec, make([]float32, len(vec)/4))) + if err != nil { + return 0, err + } + return dist, nil + } + switch index.compression { + case compressionBQ: + if index.bqCache == nil { + distFunc = defaultDistFunc + } else { + queryVecEncode := index.bq.Encode(queryVector) + distFunc = func(nodeID uint64) (float32, error) { + if int32(nodeID) > index.bqCache.Len() { + return -1, fmt.Errorf("node %v is larger than the cache size %v", nodeID, index.bqCache.Len()) + } + vec, err := index.bqCache.Get(context.Background(), nodeID) + if err != nil { + return 0, err + } + return index.bq.DistanceBetweenCompressedVectors(vec, queryVecEncode) + } + } + case compressionPQ: + // use uncompressed for now + fallthrough + default: + distFunc = func(nodeID uint64) (float32, error) { + vec, err := index.vectorById(nodeID) + if err != nil { + return 0, err + } + dist, err := index.distancerProvider.SingleDist(queryVector, float32SliceFromByteSlice(vec, make([]float32, len(vec)/4))) + if err != nil { + return 0, err + } + return dist, nil + } + } + 
return common.QueryVectorDistancer{DistanceFunc: distFunc} +} + +func (index *flat) Type() common.IndexType { + return common.IndexTypeFlat +} + +func (index *flat) CompressionStats() compressionhelpers.CompressionStats { + // Flat index doesn't have detailed compression stats, return uncompressed stats + return compressionhelpers.UncompressedStats{} +} + +func (h *flat) ShouldUpgrade() (bool, int) { + return false, 0 +} + +func (h *flat) Upgrade(callback func()) error { + return nil +} + +func (h *flat) Upgraded() bool { + return false +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/flat/index_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/flat/index_test.go new file mode 100644 index 0000000000000000000000000000000000000000..580b11e40e8250bf06c49029dcc9192f31d0b46f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/flat/index_test.go @@ -0,0 +1,404 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +//go:build !race + +package flat + +import ( + "context" + "encoding/binary" + "errors" + "fmt" + "os" + "runtime" + "strconv" + "sync" + "testing" + "time" + + "github.com/google/uuid" + "github.com/sirupsen/logrus" + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv" + + "github.com/weaviate/weaviate/adapters/repos/db/vector/compressionhelpers" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer" + "github.com/weaviate/weaviate/adapters/repos/db/vector/testinghelpers" + "github.com/weaviate/weaviate/entities/cyclemanager" + flatent "github.com/weaviate/weaviate/entities/vectorindex/flat" +) + +func distanceWrapper(provider distancer.Provider) func(x, y []float32) float32 { + return func(x, y []float32) float32 { + dist, _ := provider.SingleDist(x, y) + return dist + } +} + +func run(ctx context.Context, dirName string, logger *logrus.Logger, compression string, vectorCache bool, + vectors [][]float32, queries [][]float32, k int, truths [][]uint64, + extraVectorsForDelete [][]float32, allowIds []uint64, + distancer distancer.Provider, concurrentCacheReads int, +) (float32, float32, error) { + vectors_size := len(vectors) + queries_size := len(queries) + runId := uuid.New().String() + + store, err := lsmkv.New(dirName, dirName, logger, nil, + cyclemanager.NewCallbackGroupNoop(), + cyclemanager.NewCallbackGroupNoop(), + cyclemanager.NewCallbackGroupNoop()) + if err != nil { + return 0, 0, err + } + + defer store.Shutdown(context.Background()) + + pq := flatent.CompressionUserConfig{ + Enabled: false, + } + bq := flatent.CompressionUserConfig{ + Enabled: false, + } + switch compression { + case compressionPQ: + pq.Enabled = true + pq.RescoreLimit = 100 * k + pq.Cache = vectorCache + case compressionBQ: + bq.Enabled = true + 
bq.RescoreLimit = 100 * k + bq.Cache = vectorCache + } + index, err := New(Config{ + ID: runId, + RootPath: dirName, + DistanceProvider: distancer, + }, flatent.UserConfig{ + PQ: pq, + BQ: bq, + }, store) + if err != nil { + return 0, 0, err + } + defer index.Shutdown(context.Background()) + + if concurrentCacheReads != 0 { + index.concurrentCacheReads = concurrentCacheReads + } + + compressionhelpers.ConcurrentlyWithError(logger, uint64(vectors_size), func(id uint64) error { + return index.Add(ctx, id, vectors[id]) + }) + + for i := range extraVectorsForDelete { + index.Add(ctx, uint64(vectors_size+i), extraVectorsForDelete[i]) + } + + for i := range extraVectorsForDelete { + Id := make([]byte, 16) + binary.BigEndian.PutUint64(Id[8:], uint64(vectors_size+i)) + err := index.Delete(uint64(vectors_size + i)) + if err != nil { + return 0, 0, err + } + } + + buckets := store.GetBucketsByName() + for _, bucket := range buckets { + bucket.FlushMemtable() + } + + var relevant uint64 + var retrieved int + var querying time.Duration = 0 + mutex := new(sync.Mutex) + + var allowList helpers.AllowList = nil + if allowIds != nil { + allowList = helpers.NewAllowList(allowIds...) 
+ } + err = nil + compressionhelpers.Concurrently(logger, uint64(len(queries)), func(i uint64) { + before := time.Now() + results, _, _ := index.SearchByVector(ctx, queries[i], k, allowList) + + since := time.Since(before) + len := len(results) + matches := testinghelpers.MatchesInLists(truths[i], results) + + if hasDuplicates(results) { + err = errors.New("results have duplicates") + } + + mutex.Lock() + querying += since + retrieved += len + relevant += matches + mutex.Unlock() + }) + + return float32(relevant) / float32(retrieved), float32(querying.Microseconds()) / float32(queries_size), err +} + +func hasDuplicates(results []uint64) bool { + for i := 0; i < len(results)-1; i++ { + for j := i + 1; j < len(results); j++ { + if results[i] == results[j] { + return true + } + } + } + return false +} + +func Test_NoRaceFlatIndex(t *testing.T) { + ctx := context.Background() + dirName := t.TempDir() + + logger, _ := test.NewNullLogger() + + dimensions := 256 + vectors_size := 12000 + queries_size := 100 + k := 10 + vectors, queries := testinghelpers.RandomVecs(vectors_size, queries_size, dimensions) + testinghelpers.Normalize(vectors) + testinghelpers.Normalize(queries) + distancer := distancer.NewCosineDistanceProvider() + + truths := make([][]uint64, queries_size) + for i := range queries { + truths[i], _ = testinghelpers.BruteForce(logger, vectors, queries[i], k, distanceWrapper(distancer)) + } + + extraVectorsForDelete, _ := testinghelpers.RandomVecs(5_000, 0, dimensions) + for _, compression := range []string{compressionNone, compressionBQ} { + t.Run("compression: "+compression, func(t *testing.T) { + for _, cache := range []bool{false, true} { + t.Run("cache: "+strconv.FormatBool(cache), func(t *testing.T) { + if compression == compressionNone && cache == true { + return + } + targetRecall := float32(0.99) + if compression == compressionBQ { + targetRecall = 0.8 + } + t.Run("recall", func(t *testing.T) { + recall, latency, err := run(ctx, dirName, logger, 
compression, cache, vectors, queries, k, truths, nil, nil, distancer, 0) + require.Nil(t, err) + + fmt.Println(recall, latency) + assert.Greater(t, recall, targetRecall) + assert.Less(t, latency, float32(1_000_000)) + }) + + t.Run("recall with deletes", func(t *testing.T) { + recall, latency, err := run(ctx, dirName, logger, compression, cache, vectors, queries, k, truths, extraVectorsForDelete, nil, distancer, 0) + require.Nil(t, err) + + fmt.Println(recall, latency) + assert.Greater(t, recall, targetRecall) + assert.Less(t, latency, float32(1_000_000)) + }) + }) + } + }) + } + for _, compression := range []string{compressionNone, compressionBQ} { + t.Run("compression: "+compression, func(t *testing.T) { + for _, cache := range []bool{false, true} { + t.Run("cache: "+strconv.FormatBool(cache), func(t *testing.T) { + from := 0 + to := 3_000 + for i := range queries { + truths[i], _ = testinghelpers.BruteForce(logger, vectors[from:to], queries[i], k, distanceWrapper(distancer)) + } + + allowIds := make([]uint64, 0, to-from) + for i := uint64(from); i < uint64(to); i++ { + allowIds = append(allowIds, i) + } + targetRecall := float32(0.99) + if compression == compressionBQ { + targetRecall = 0.8 + } + + t.Run("recall on filtered", func(t *testing.T) { + recall, latency, err := run(ctx, dirName, logger, compression, cache, vectors, queries, k, truths, nil, allowIds, distancer, 0) + require.Nil(t, err) + + fmt.Println(recall, latency) + assert.Greater(t, recall, targetRecall) + assert.Less(t, latency, float32(1_000_000)) + }) + + t.Run("recall on filtered with deletes", func(t *testing.T) { + recall, latency, err := run(ctx, dirName, logger, compression, cache, vectors, queries, k, truths, extraVectorsForDelete, allowIds, distancer, 0) + require.Nil(t, err) + + fmt.Println(recall, latency) + assert.Greater(t, recall, targetRecall) + assert.Less(t, latency, float32(1_000_000)) + }) + }) + } + }) + } + + err := os.RemoveAll(dirName) + if err != nil { + fmt.Println(err) + 
} +} + +func TestFlat_QueryVectorDistancer(t *testing.T) { + logger, _ := test.NewNullLogger() + + cases := []struct { + pq bool + cache bool + bq bool + }{ + {pq: false, cache: false, bq: false}, + {pq: true, cache: false, bq: false}, + {pq: true, cache: true, bq: false}, + {pq: false, cache: false, bq: true}, + {pq: false, cache: true, bq: true}, + } + for _, tt := range cases { + t.Run("tt.name", func(t *testing.T) { + dirName := t.TempDir() + + pq := flatent.CompressionUserConfig{ + Enabled: tt.pq, Cache: tt.cache, + } + bq := flatent.CompressionUserConfig{ + Enabled: tt.bq, Cache: tt.cache, RescoreLimit: 10, + } + store, err := lsmkv.New(dirName, dirName, logger, nil, + cyclemanager.NewCallbackGroupNoop(), + cyclemanager.NewCallbackGroupNoop(), + cyclemanager.NewCallbackGroupNoop()) + require.Nil(t, err) + + distancr := distancer.NewCosineDistanceProvider() + + index, err := New(Config{ + ID: "id", + RootPath: t.TempDir(), + DistanceProvider: distancr, + }, flatent.UserConfig{ + PQ: pq, + BQ: bq, + }, store) + require.Nil(t, err) + + index.Add(context.TODO(), uint64(0), []float32{-2, 0}) + + dist := index.QueryVectorDistancer([]float32{0, 0}) + require.NotNil(t, dist) + distance, err := dist.DistanceToNode(0) + require.Nil(t, err) + require.Equal(t, distance, float32(1.)) + + // get distance for non-existing node above default cache size + _, err = dist.DistanceToNode(1001) + require.NotNil(t, err) + }) + } +} + +func TestConcurrentReads(t *testing.T) { + ctx := context.Background() + dirName := t.TempDir() + + logger, _ := test.NewNullLogger() + + dimensions := 256 + vectors_size := 12000 + queries_size := 100 + k := 10 + vectors, queries := testinghelpers.RandomVecs(vectors_size, queries_size, dimensions) + testinghelpers.Normalize(vectors) + testinghelpers.Normalize(queries) + distancer := distancer.NewCosineDistanceProvider() + + truths := make([][]uint64, queries_size) + for i := range queries { + truths[i], _ = testinghelpers.BruteForce(logger, vectors, 
queries[i], k, distanceWrapper(distancer)) + } + + cores := runtime.GOMAXPROCS(0) * 2 + + concurrentReads := []int{1, 2, 4, 8, 16, 32, 64, 128, 256, cores - 1, cores, cores + 1} + for i := range concurrentReads { + t.Run("concurrent reads: "+strconv.Itoa(concurrentReads[i]), func(t *testing.T) { + targetRecall := float32(0.8) + recall, latency, err := run(ctx, dirName, logger, compressionBQ, true, vectors, queries, k, truths, nil, nil, distancer, concurrentReads[i]) + require.Nil(t, err) + + fmt.Println(recall, latency) + assert.Greater(t, recall, targetRecall) + assert.Less(t, latency, float32(1_000_000)) + }) + } +} + +func TestFlat_Validation(t *testing.T) { + logger, _ := test.NewNullLogger() + ctx := t.Context() + + dirName := t.TempDir() + + store, err := lsmkv.New(dirName, dirName, logger, nil, + cyclemanager.NewCallbackGroupNoop(), + cyclemanager.NewCallbackGroupNoop(), + cyclemanager.NewCallbackGroupNoop()) + require.Nil(t, err) + + distancr := distancer.NewCosineDistanceProvider() + + index, err := New(Config{ + ID: "id", + RootPath: t.TempDir(), + DistanceProvider: distancr, + }, flatent.UserConfig{}, store) + require.Nil(t, err) + + // call ValidateBeforeInsert before inserting anything + err = index.ValidateBeforeInsert([]float32{-2, 0}) + require.Nil(t, err) + + // add a vector with 2 dims + err = index.Add(ctx, uint64(0), []float32{-2, 0}) + require.Nil(t, err) + + // validate before inserting a vector with 2 dim + err = index.ValidateBeforeInsert([]float32{-1, 0}) + require.NoError(t, err) + + // add again + err = index.Add(ctx, uint64(0), []float32{-1, 0}) + require.Nil(t, err) + + // validate before inserting a vector with 1 dim + err = index.ValidateBeforeInsert([]float32{-2}) + require.Error(t, err) + + // add a vector with 1 dim + err = index.Add(ctx, uint64(0), []float32{-2}) + require.Error(t, err) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/flat/iterate_test.go 
b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/flat/iterate_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..74f4a382f471763d4b3c506334d7105a2b56b184
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/flat/iterate_test.go
@@ -0,0 +1,113 @@
// _ _
// __ _____ __ ___ ___ __ _| |_ ___
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
// \ V V / __/ (_| |\ V /| | (_| | || __/
// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
//
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
//
// CONTACT: hello@weaviate.io
//

package flat

import (
	"context"
	"testing"

	"github.com/google/uuid"
	"github.com/sirupsen/logrus/hooks/test"
	"github.com/stretchr/testify/require"
	"github.com/weaviate/weaviate/adapters/repos/db/lsmkv"
	"github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer"
	"github.com/weaviate/weaviate/adapters/repos/db/vector/testinghelpers"
	"github.com/weaviate/weaviate/entities/cyclemanager"
	flatent "github.com/weaviate/weaviate/entities/vectorindex/flat"
)

// createTestIndex builds a fresh uncompressed flat index backed by an
// lsmkv store in a per-test temp directory.
func createTestIndex(t *testing.T) *flat {
	dirName := t.TempDir()
	logger, _ := test.NewNullLogger()
	distancer := distancer.NewCosineDistanceProvider()

	store, err := lsmkv.New(dirName, dirName, logger, nil,
		cyclemanager.NewCallbackGroupNoop(),
		cyclemanager.NewCallbackGroupNoop(),
		cyclemanager.NewCallbackGroupNoop())
	require.NoError(t, err)

	index, err := New(Config{
		ID:               uuid.New().String(),
		DistanceProvider: distancer,
		RootPath:         dirName,
	}, flatent.UserConfig{
		PQ: flatent.CompressionUserConfig{
			Enabled: false,
		},
		BQ: flatent.CompressionUserConfig{
			Enabled: false,
		},
	}, store)
	require.NoError(t, err)

	return index
}

// createTestVectors returns n normalized random 256-dim vectors.
func createTestVectors(n int) [][]float32 {
	dimensions := 256
	vectors, _ := testinghelpers.RandomVecs(n, 0, dimensions)
	testinghelpers.Normalize(vectors)

	return vectors
}

// TestFlatIndexIterate verifies flat.Iterate: no callback on an empty
// index, full coverage of all ids, and early abort when the callback
// returns false.
func TestFlatIndexIterate(t *testing.T) {
	ctx := context.Background()
	t.Run("should not run callback on empty index", func(t *testing.T) {
		index := createTestIndex(t)
		index.Iterate(func(id uint64) bool {
			t.Fatalf("callback should not be called on empty index")
			return true
		})
	})

	t.Run("should iterate over all nodes", func(t *testing.T) {
		testVectors := createTestVectors(10)
		index := createTestIndex(t)
		for i, vec := range testVectors {
			err := index.Add(ctx, uint64(i), vec)
			require.Nil(t, err)
		}

		visited := make([]bool, len(testVectors))
		index.Iterate(func(id uint64) bool {
			visited[id] = true
			return true
		})
		for i, v := range visited {
			require.True(t, v, "node %d was not visited", i)
		}
	})

	t.Run("should stop iteration when callback returns false", func(t *testing.T) {
		testVectors := createTestVectors(10)
		index := createTestIndex(t)
		for i, vec := range testVectors {
			err := index.Add(ctx, uint64(i), vec)
			require.Nil(t, err)
		}

		visited := make([]bool, len(testVectors))
		// NOTE(review): the expectation below relies on Iterate visiting
		// ids in ascending order (keys are big-endian encoded), so the
		// abort at id 5 leaves exactly ids 0..5 visited.
		index.Iterate(func(id uint64) bool {
			visited[id] = true
			return id < 5
		})
		for i, v := range visited {
			if i <= 5 {
				require.True(t, v, "node %d was not visited", i)
			} else {
				require.False(t, v, "node %d was visited", i)
			}
		}
	})
}
diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/flat/metadata.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/flat/metadata.go
new file mode 100644
index 0000000000000000000000000000000000000000..4decd48c6306bbbcbafcb0448bae0d020e938780
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/flat/metadata.go
@@ -0,0 +1,197 @@
// _ _
// __ _____ __ ___ ___ __ _| |_ ___
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
// \ V V / __/ (_| |\ V /| | (_| | || __/
// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
//
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
//
// CONTACT: hello@weaviate.io
//

package flat

import (
	"encoding/binary"
	"fmt"
	"os"
	"path/filepath"
	"sync/atomic"

	"github.com/pkg/errors"
	bolt "go.etcd.io/bbolt"
)

const (
	// metadataPrefix is the base name of the per-index bolt metadata file.
	metadataPrefix = "meta"
	// vectorMetadataBucket is the single bolt bucket used inside that file.
	vectorMetadataBucket = "vector"
)

// getMetadataFile returns the metadata file name; for a named target
// vector the sanitized target name is embedded so multiple target vectors
// of the same shard do not collide on disk.
func (index *flat) getMetadataFile() string {
	if index.targetVector != "" {
		// This may be redundant as target vector is already validated in the schema
		cleanTarget := filepath.Clean(index.targetVector)
		cleanTarget = filepath.Base(cleanTarget)
		return fmt.Sprintf("%s_%s.db", metadataPrefix, cleanTarget)
	}
	return fmt.Sprintf("%s.db", metadataPrefix)
}

// removeMetadataFile closes the metadata DB (if open) and deletes the
// backing file under rootPath.
// NOTE(review): os.Remove on a missing file returns an error here —
// confirm callers expect that rather than a best-effort remove.
func (index *flat) removeMetadataFile() error {
	path := filepath.Join(index.rootPath, index.getMetadataFile())
	index.closeMetadata()
	err := os.Remove(path)
	if err != nil {
		return errors.Wrapf(err, "remove metadata file %q", path)
	}
	return nil
}

// closeMetadata closes the bolt handle; it is safe to call when the
// handle is already closed (nil).
func (index *flat) closeMetadata() {
	index.metadataLock.Lock()
	defer index.metadataLock.Unlock()

	if index.metadata != nil {
		index.metadata.Close()
		index.metadata = nil
	}
}

// openMetadata lazily opens the metadata bolt file; a no-op when the
// handle is already open.
func (index *flat) openMetadata() error {
	index.metadataLock.Lock()
	defer index.metadataLock.Unlock()

	if index.metadata != nil {
		return nil // Already open
	}

	path := filepath.Join(index.rootPath, index.getMetadataFile())
	db, err := bolt.Open(path, 0o600, nil)
	if err != nil {
		return errors.Wrapf(err, "open %q", path)
	}

	index.metadata = db
	return nil
}

// initMetadata ensures the metadata file and its bucket exist, then loads
// (or back-fills) the stored dimensionality. The bolt handle is closed
// again afterwards — the file is only held open for the duration of an
// access, never across the index lifetime.
func (index *flat) initMetadata() error {
	err := index.openMetadata()
	if err != nil {
		return err
	}
	defer index.closeMetadata()

	err = index.metadata.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte(vectorMetadataBucket))
		if err != nil {
			return errors.Wrap(err, "create bucket")
		}
		if b == nil {
			return errors.New("failed to create or get bucket")
		}
		return nil
	})
	if err != nil {
		return errors.Wrap(err, "init metadata bucket")
	}

	index.initDimensions()

	return nil
}

// initDimensions loads the dimensionality from the metadata file, falling
// back to scanning the vector bucket (and persisting the result) for
// indexes created before the metadata file existed. The final value is
// published once via trackDimensionsOnce.
func (index *flat) initDimensions() {
	dims, err := index.fetchDimensions()
	if err != nil {
		index.logger.Warnf("flat index unable to fetch dimensions: %v", err)
	}

	if dims == 0 {
		dims = index.calculateDimensions()
		if dims > 0 {
			// Backwards compatibility: set the dimensions in the metadata file
			err = index.setDimensions(dims)
			if err != nil {
				index.logger.Warnf("flat index unable to set dimensions: %v", err)
			}
		}
	}
	if dims > 0 {
		index.trackDimensionsOnce.Do(func() {
			atomic.StoreInt32(&index.dims, dims)
		})
	}
}

// fetchDimensions reads the persisted dimensionality; returns 0 (no
// error) when the metadata handle, bucket, or key is absent.
func (index *flat) fetchDimensions() (int32, error) {
	if index.metadata == nil {
		return 0, nil
	}

	var dimensions int32 = 0
	err := index.metadata.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte(vectorMetadataBucket))
		if b == nil {
			return nil
		}
		v := b.Get([]byte("dimensions"))
		if v == nil {
			return nil
		}
		dimensions = int32(binary.LittleEndian.Uint32(v))
		return nil
	})
	if err != nil {
		return 0, errors.Wrap(err, "fetch dimensions")
	}

	return dimensions, nil
}

// calculateDimensions derives the dimensionality from the first
// non-empty stored vector (4 bytes per float32), scanning at most
// ~maxCursorSize empty entries before giving up with 0.
func (index *flat) calculateDimensions() int32 {
	bucket := index.store.Bucket(index.getBucketName())
	if bucket == nil {
		return 0
	}
	cursor := bucket.Cursor()
	defer cursor.Close()

	var key []byte
	var v []byte
	const maxCursorSize = 100000
	i := 0
	for key, v = cursor.First(); key != nil; key, v = cursor.Next() {
		if len(v) > 0 {
			return int32(len(v) / 4)
		}
		if i > maxCursorSize {
			break
		}
		i++
	}
	return 0
}

// setDimensions persists the dimensionality (little-endian uint32) to
// the metadata bucket, opening and closing the file around the write.
func (index *flat) setDimensions(dimensions int32) error {
	err := index.openMetadata()
	if err != nil {
		return err
	}
	defer index.closeMetadata()

	err = index.metadata.Update(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte(vectorMetadataBucket))
		if b == nil {
			return errors.New("failed to get bucket")
		}
		buf := make([]byte, 4)
		binary.LittleEndian.PutUint32(buf, uint32(dimensions))
		return b.Put([]byte("dimensions"), buf)
	})
	if err != nil {
		return errors.Wrap(err, "set dimensions")
	}

	return nil
}
diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/flat/metadata_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/flat/metadata_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..fed429e397a641ed8d2b1fe3fe0de6628f45775f
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/flat/metadata_test.go
@@ -0,0 +1,164 @@
// _ _
// __ _____ __ ___ ___ __ _| |_ ___
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
// \ V V / __/ (_| |\ V /| | (_| | || __/
// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
//
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
//
// CONTACT: hello@weaviate.io
//

package flat

import (
	"context"
	"testing"

	"github.com/stretchr/testify/require"
	"github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer"
	flatent "github.com/weaviate/weaviate/entities/vectorindex/flat"

	"github.com/weaviate/weaviate/adapters/repos/db/vector/testinghelpers"
)

// Test_FlatDimensions verifies dimension tracking of the flat index:
// initial zero, update on first insert, persistence to meta.db, and
// restore on re-creation (with and without the original root path — the
// shared lsmkv store still holds the vectors in the latter case).
func Test_FlatDimensions(t *testing.T) {
	ctx := context.TODO()
	store := testinghelpers.NewDummyStore(t)
	rootPath := t.TempDir()
	defer store.Shutdown(context.Background())
	indexID := "init-dimensions-zero"
	distancer := distancer.NewCosineDistanceProvider()

	config := flatent.UserConfig{}
	config.SetDefaults()

	index, err := New(Config{
		ID:               indexID,
		RootPath:         rootPath,
		DistanceProvider: distancer,
	}, config, store)

	t.Run("initial dimensions zero", func(t *testing.T) {
		require.Nil(t, err)
		require.Equal(t, int32(0), index.dims)
	})

	t.Run("metadata is closed after index creation", func(t *testing.T) {
		require.Nil(t, index.metadata, "metadata file should be closed")
	})

	t.Run("dimensions updated", func(t *testing.T) {
		err = index.Add(ctx, 1, []float32{1, 2, 3})
		require.Nil(t, err)
		require.Equal(t, int32(3), index.dims)
	})

	t.Run("metadata is closed after insert", func(t *testing.T) {
		require.Nil(t, index.metadata, "metadata file should be closed")
	})

	t.Run("error when adding vector with wrong dimensions", func(t *testing.T) {
		err = index.Add(ctx, 2, []float32{1, 2, 3, 4})
		require.NotNil(t, err)
		require.ErrorContains(t, err, "insert called with a vector of the wrong size")
	})

	t.Run("backup metadata file exists", func(t *testing.T) {
		files, err := index.ListFiles(context.Background(), rootPath)
		require.Nil(t, err)
		require.Len(t, files, 1)
		require.Equal(t, "meta.db", files[0])
	})

	t.Run("can restore dimensions", func(t *testing.T) {
		index.Shutdown(context.Background())
		index = nil

		index, err = New(Config{
			ID:               indexID,
			RootPath:         rootPath,
			DistanceProvider: distancer,
		}, config, store)

		require.Nil(t, err)
		require.Equal(t, index.dims, int32(3))

		err = index.Add(ctx, 2, []float32{1, 2, 3, 4})
		require.NotNil(t, err)
		require.ErrorContains(t, err, "insert called with a vector of the wrong size")
	})

	t.Run("can restore dimensions without root path", func(t *testing.T) {
		// no meta.db under emptyRoot: dimensions must be recalculated from
		// the vectors still present in the shared store
		emptyRoot := t.TempDir()
		index.Shutdown(context.Background())
		index = nil

		index, err = New(Config{
			ID:               indexID,
			RootPath:         emptyRoot,
			DistanceProvider: distancer,
		}, config, store)

		require.Nil(t, err)
		require.Equal(t, index.dims, int32(3))

		err = index.Add(ctx, 2, []float32{1, 2, 3, 4})
		require.NotNil(t, err)
		require.ErrorContains(t, err, "insert called with a vector of the wrong size")
	})
}

// Test_FlatDimensionsTargetVector repeats the dimension-tracking checks
// for a named target vector, whose metadata lives in meta_<target>.db,
// and verifies path sanitization of the target name.
func Test_FlatDimensionsTargetVector(t *testing.T) {
	ctx := context.TODO()
	store := testinghelpers.NewDummyStore(t)
	rootPath := t.TempDir()
	defer store.Shutdown(context.Background())
	indexID := "test"
	distancer := distancer.NewCosineDistanceProvider()

	config := flatent.UserConfig{}
	config.SetDefaults()

	index, err := New(Config{
		ID:               indexID,
		RootPath:         rootPath,
		TargetVector:     "target",
		DistanceProvider: distancer,
	}, config, store)

	t.Run("initial dimensions zero", func(t *testing.T) {
		require.Nil(t, err)
		require.Equal(t, int32(0), index.dims)
	})

	t.Run("dimensions updated", func(t *testing.T) {
		err = index.Add(ctx, 1, []float32{1, 2})
		require.Nil(t, err)
		require.Equal(t, int32(2), index.dims)
	})

	t.Run("can restore dimensions", func(t *testing.T) {
		index.Shutdown(context.Background())
		index = nil

		index, err = New(Config{
			ID:               indexID,
			RootPath:         rootPath,
			TargetVector:     "target",
			DistanceProvider: distancer,
		}, config, store)

		require.Nil(t, err)
		require.Equal(t, index.dims, int32(2))

		err = index.Add(ctx, 2, []float32{1, 2, 3, 4})
		require.NotNil(t, err)
		require.ErrorContains(t, err, "insert called with a vector of the wrong size")
	})

	t.Run("target vector file validation", func(t *testing.T) {
		// a path-traversal-looking target must be reduced to its base name
		index.targetVector = "./../foo"
		require.Equal(t, "meta_foo.db", index.getMetadataFile())
	})
}
diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/flat/pools.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/flat/pools.go
new file mode 100644
index 0000000000000000000000000000000000000000..96fb73f00fe27bb7278ccca66719ef5d23a8048d
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/flat/pools.go
@@ -0,0 +1,65 @@
// _ _
// __ _____ __ ___ ___ __ _| |_ ___
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
// \ V V / __/ (_| |\ V /| | (_| | || __/
// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
//
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+// +// CONTACT: hello@weaviate.io +// + +package flat + +import ( + "sync" +) + +const defaultSize = 200 + +type pools struct { + byteSlicePool *slicePool[byte] + uint64SlicePool *slicePool[uint64] + float32SlicePool *slicePool[float32] +} + +func newPools() *pools { + return &pools{ + byteSlicePool: newSlicePool[byte](), + uint64SlicePool: newSlicePool[uint64](), + float32SlicePool: newSlicePool[float32](), + } +} + +type slicePool[T any] struct { + pool *sync.Pool +} + +type SliceStruct[T any] struct { + slice []T +} + +func newSlicePool[T any]() *slicePool[T] { + return &slicePool[T]{ + pool: &sync.Pool{ + New: func() interface{} { + return &SliceStruct[T]{ + slice: make([]T, defaultSize), + } + }, + }, + } +} + +func (p *slicePool[T]) Get(capacity int) *SliceStruct[T] { + t := p.pool.Get().(*SliceStruct[T]) + if cap(t.slice) < capacity { + t.slice = make([]T, capacity) + } + t.slice = t.slice[:capacity] + return t +} + +func (p *slicePool[T]) Put(t *SliceStruct[T]) { + p.pool.Put(t) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/geo/coordinates_for_id.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/geo/coordinates_for_id.go new file mode 100644 index 0000000000000000000000000000000000000000..357033366ff6f9fac5e8e0a099bfb495c1a9f7a8 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/geo/coordinates_for_id.go @@ -0,0 +1,47 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package geo + +import ( + "context" + "fmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// CoordinatesForID must provide the geo coordinates for the specified index +// id +type CoordinatesForID func(ctx context.Context, id uint64) (*models.GeoCoordinates, error) + +// VectorForID transforms the geo coordinates into a "vector" of fixed length +// two, where element 0 represents the latitude and element 1 represents the +// longitude. This way it is usable by a generic vector index such as HNSW +func (cfid CoordinatesForID) VectorForID(ctx context.Context, id uint64) ([]float32, error) { + coordinates, err := cfid(ctx, id) + if err != nil { + return nil, err + } + + return geoCoordiantesToVector(coordinates) +} + +func geoCoordiantesToVector(in *models.GeoCoordinates) ([]float32, error) { + if in.Latitude == nil { + return nil, fmt.Errorf("latitude must be set") + } + + if in.Longitude == nil { + return nil, fmt.Errorf("longitude must be set") + } + + return []float32{*in.Latitude, *in.Longitude}, nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/geo/geo.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/geo/geo.go new file mode 100644 index 0000000000000000000000000000000000000000..01debfaf08003a22f27c283a2da7c45b0d8a76f1 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/geo/geo.go @@ -0,0 +1,153 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package geo + +import ( + "context" + "fmt" + "time" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer" + "github.com/weaviate/weaviate/entities/cyclemanager" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/models" + hnswent "github.com/weaviate/weaviate/entities/vectorindex/hnsw" +) + +// Index wraps another index to provide geo searches. This allows us to reuse +// the hnsw vector index, without making geo searches dependent on +// hnsw-specific features. +// +// In the future we could use this level of abstraction to provide a better +// suited geo-index if we deem it necessary +type Index struct { + config Config + vectorIndex vectorIndex +} + +// vectorIndex represents the underlying vector index, typically hnsw +type vectorIndex interface { + Add(ctx context.Context, id uint64, vector []float32) error + KnnSearchByVectorMaxDist(ctx context.Context, query []float32, dist float32, ef int, + allowList helpers.AllowList) ([]uint64, error) + Delete(id ...uint64) error + Drop(ctx context.Context) error + PostStartup() +} + +// Config is passed to the GeoIndex when its created +type Config struct { + ID string + CoordinatesForID CoordinatesForID + DisablePersistence bool + RootPath string + Logger logrus.FieldLogger + + SnapshotDisabled bool + SnapshotOnStartup bool + SnapshotCreateInterval time.Duration + SnapshotMinDeltaCommitlogsNumer int + SnapshotMinDeltaCommitlogsSizePercentage int +} + +func NewIndex(config Config, + commitLogMaintenanceCallbacks, tombstoneCleanupCallbacks cyclemanager.CycleCallbackGroup, +) (*Index, error) { + vi, err := hnsw.New(hnsw.Config{ + VectorForIDThunk: config.CoordinatesForID.VectorForID, + ID: config.ID, + RootPath: config.RootPath, + 
MakeCommitLoggerThunk: makeCommitLoggerFromConfig(config, commitLogMaintenanceCallbacks), + DistanceProvider: distancer.NewGeoProvider(), + DisableSnapshots: config.SnapshotDisabled, + SnapshotOnStartup: config.SnapshotOnStartup, + }, hnswent.UserConfig{ + MaxConnections: 64, + EFConstruction: 128, + CleanupIntervalSeconds: hnswent.DefaultCleanupIntervalSeconds, + }, tombstoneCleanupCallbacks, nil) + if err != nil { + return nil, errors.Wrap(err, "underlying hnsw index") + } + + i := &Index{ + config: config, + vectorIndex: vi, + } + + return i, nil +} + +func (i *Index) Drop(ctx context.Context) error { + if err := i.vectorIndex.Drop(ctx); err != nil { + return err + } + + i.vectorIndex = nil + return nil +} + +func (i *Index) PostStartup() { + i.vectorIndex.PostStartup() +} + +func makeCommitLoggerFromConfig(config Config, maintenanceCallbacks cyclemanager.CycleCallbackGroup, +) hnsw.MakeCommitLogger { + makeCL := hnsw.MakeNoopCommitLogger + if !config.DisablePersistence { + makeCL = func() (hnsw.CommitLogger, error) { + return hnsw.NewCommitLogger(config.RootPath, config.ID, config.Logger, maintenanceCallbacks, + hnsw.WithSnapshotDisabled(config.SnapshotDisabled), + hnsw.WithSnapshotCreateInterval(config.SnapshotCreateInterval), + hnsw.WithSnapshotMinDeltaCommitlogsNumer(config.SnapshotMinDeltaCommitlogsNumer), + hnsw.WithSnapshotMinDeltaCommitlogsSizePercentage(config.SnapshotMinDeltaCommitlogsSizePercentage), + ) + } + } + return makeCL +} + +// Add extends the index with the specified GeoCoordinates. It is thread-safe +// and can be called concurrently. +func (i *Index) Add(ctx context.Context, id uint64, coordinates *models.GeoCoordinates) error { + v, err := geoCoordiantesToVector(coordinates) + if err != nil { + return errors.Wrap(err, "invalid arguments") + } + + return i.vectorIndex.Add(ctx, id, v) +} + +// WithinGeoRange searches the index by the specified range. It is thread-safe +// and can be called concurrently. 
func (i *Index) WithinRange(ctx context.Context,
	geoRange filters.GeoRange,
) ([]uint64, error) {
	if geoRange.GeoCoordinates == nil {
		return nil, fmt.Errorf("invalid arguments: GeoCoordinates in range must be set")
	}

	query, err := geoCoordiantesToVector(geoRange.GeoCoordinates)
	if err != nil {
		return nil, errors.Wrap(err, "invalid arguments")
	}

	// NOTE(review): ef is hard-coded to 800 — presumably chosen for recall on
	// geo workloads; confirm before tuning. nil allowList means no filtering.
	return i.vectorIndex.KnnSearchByVectorMaxDist(ctx, query, geoRange.Distance, 800, nil)
}

// Delete removes the object with the given id from the underlying vector
// index.
func (i *Index) Delete(id uint64) error {
	return i.vectorIndex.Delete(id)
}
diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/geo/geo_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/geo/geo_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..8c4095f230beb1ebd95c43d0d589709d16c81ae8
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/geo/geo_test.go
@@ -0,0 +1,117 @@
//                           _       _
// __      _____  __ ___   ___  __ _| |_ ___
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
//  \ V  V /  __/ (_| |\ V /| | (_| | || __/
//   \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
//
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
//
// CONTACT: hello@weaviate.io
//

package geo

import (
	"context"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/weaviate/weaviate/entities/cyclemanager"
	"github.com/weaviate/weaviate/entities/filters"
	"github.com/weaviate/weaviate/entities/models"
)

// TestGeoJourney exercises the geo index end to end: importing coordinates,
// input validation errors, and range searches at different radii.
func TestGeoJourney(t *testing.T) {
	ctx := context.Background()
	elements := []models.GeoCoordinates{
		{ // coordinates of munich
			Latitude:  ptFloat32(48.13743),
			Longitude: ptFloat32(11.57549),
		},
		{ // coordinates of stuttgart
			Latitude:  ptFloat32(48.78232),
			Longitude: ptFloat32(9.17702),
		},
	}

	getCoordinates := func(ctx context.Context, id uint64) (*models.GeoCoordinates, error) {
		return &elements[id], nil
	}

	geoIndex, err := NewIndex(Config{
		ID:                 "unit-test",
		CoordinatesForID:   getCoordinates,
		DisablePersistence: true,
		RootPath:           "doesnt-matter-persistence-is-off",
	},
		cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop())
	require.Nil(t, err)

	t.Run("importing all", func(t *testing.T) {
		for id, coordinates := range elements {
			// NOTE(review): passing &coordinates relies on Add consuming the
			// value synchronously (pre-Go 1.22 the loop variable is reused).
			err := geoIndex.Add(ctx, uint64(id), &coordinates)
			require.Nil(t, err)
		}
	})

	t.Run("importing an invalid object", func(t *testing.T) {
		err := geoIndex.Add(ctx, 9000, &models.GeoCoordinates{})
		assert.Equal(t, "invalid arguments: latitude must be set", err.Error())
	})

	km := float32(1000)
	t.Run("searching missing longitude", func(t *testing.T) {
		_, err := geoIndex.WithinRange(context.Background(), filters.GeoRange{
			GeoCoordinates: &models.GeoCoordinates{
				Latitude: ptFloat32(48.13743),
			},
			Distance: 300 * km,
		})
		assert.Equal(t, "invalid arguments: longitude must be set", err.Error())
	})

	t.Run("searching missing latitude", func(t *testing.T) {
		_, err := geoIndex.WithinRange(context.Background(), filters.GeoRange{
			GeoCoordinates: &models.GeoCoordinates{
				Longitude: ptFloat32(11.57549),
			},
			Distance: 300 * km,
		})
		assert.Equal(t, "invalid arguments: latitude must be set", err.Error())
	})

	t.Run("searching within 500km of munich", func(t *testing.T) {
		// should return both cities, with munich first and stuttgart second
		results, err := geoIndex.WithinRange(context.Background(), filters.GeoRange{
			GeoCoordinates: &models.GeoCoordinates{
				Latitude:  ptFloat32(48.13743),
				Longitude: ptFloat32(11.57549),
			},
			Distance: 500 * km,
		})
		require.Nil(t, err)

		expectedResults := []uint64{0, 1}
		assert.Equal(t, expectedResults, results)
	})

	t.Run("searching within 10km of munich", func(t *testing.T) {
		// should return only munich; stuttgart is well outside a 10km radius
		results, err := geoIndex.WithinRange(context.Background(), filters.GeoRange{
			GeoCoordinates: &models.GeoCoordinates{
				Latitude:  ptFloat32(48.13743),
				Longitude: ptFloat32(11.57549),
			},
			Distance: 10 * km,
		})
		require.Nil(t, err)

		expectedResults := []uint64{0}
		assert.Equal(t, expectedResults, results)
	})
}

// ptFloat32 returns a pointer to the given float32 literal.
func ptFloat32(in float32) *float32 {
	return &in
}
diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/.gitignore b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..f72f62fc8a7ae206106660fd7c00c790782f2cb2
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/.gitignore
@@ -0,0 +1,5 @@
recall_vectors.json
recall_queries.json
recall_truths.json
datasets/ann-benchmarks/
datasets/big-ann-benchmarks/
\ No newline at end of file
diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/backup.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/backup.go
new file mode 100644
index 0000000000000000000000000000000000000000..4390367986800b049b708c68f66599f5e34e7ce5
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/backup.go
@@ -0,0
+1,127 @@
//                           _       _
// __      _____  __ ___   ___  __ _| |_ ___
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
//  \ V  V /  __/ (_| |\ V /| | (_| | || __/
//   \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
//
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
//
// CONTACT: hello@weaviate.io
//

package hnsw

import (
	"context"
	"fmt"
	"io/fs"
	"os"
	"path/filepath"

	"github.com/pkg/errors"
)

// SwitchCommitLogs makes sure that the previously writeable commitlog is
// switched to a new one, thus making the existing file read-only.
func (h *hnsw) SwitchCommitLogs(ctx context.Context) error {
	if err := h.commitLog.SwitchCommitLogs(true); err != nil {
		return fmt.Errorf("switch commitlogs: %w", err)
	}

	return nil
}

// ListFiles lists all files that are part of the HNSW index
// except the last commit-log which is writable. This operation is typically
// called immediately after calling SwitchCommitlogs which means that the
// latest (writeable) log file is typically empty.
// ListFiles errors if maintenance is not paused, as a stable state
// cannot be guaranteed with maintenance going on in the background.
+func (h *hnsw) ListFiles(ctx context.Context, basePath string) ([]string, error) { + var ( + logRoot = filepath.Join(h.commitLog.RootPath(), fmt.Sprintf("%s.hnsw.commitlog.d", h.commitLog.ID())) + found = make(map[string]struct{}) + files []string + ) + + err := filepath.WalkDir(logRoot, func(pth string, d fs.DirEntry, err error) error { + if d.IsDir() { + return nil + } + + st, statErr := os.Stat(pth) + if statErr != nil { + return statErr + } + + // only list non-empty files + if st.Size() > 0 { + rel, relErr := filepath.Rel(basePath, pth) + if relErr != nil { + return relErr + } + found[rel] = struct{}{} + } + + return nil + }) + if err != nil { + return nil, fmt.Errorf("failed to list files for hnsw commitlog: %w", err) + } + + curr, _, err := getCurrentCommitLogFileName(logRoot) + if err != nil { + return nil, fmt.Errorf("current commitlog file name: %w", err) + } + + // remove active log from list, as + // it is not part of the backup + path, err := filepath.Rel(basePath, filepath.Join(logRoot, curr)) + if err != nil { + return nil, fmt.Errorf("delete active log: %w", err) + } + delete(found, path) + + snapshotFiles, err := h.listSnapshotFiles(ctx, basePath) + if err != nil { + return nil, fmt.Errorf("list snapshot files: %w", err) + } + + files = make([]string, 0, len(found)+len(snapshotFiles)) + for file := range found { + files = append(files, file) + } + files = append(files, snapshotFiles...) 
+ + return files, nil +} + +func (h *hnsw) listSnapshotFiles(ctx context.Context, basePath string) ([]string, error) { + snapshotDir := snapshotDirectory(h.commitLog.RootPath(), h.commitLog.ID()) + entries, err := os.ReadDir(snapshotDir) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + // no snapshot directory, no files + return []string{}, nil + } + return nil, errors.Wrapf(err, "read snapshot directory %q", snapshotDir) + } + + files := make([]string, 0, len(entries)) + for _, entry := range entries { + info, err := entry.Info() + if err != nil { + return nil, errors.Wrap(err, "direntry info") + } + if info.Size() == 0 { + continue + } + + file, err := filepath.Rel(basePath, filepath.Join(snapshotDir, entry.Name())) + if err != nil { + return nil, errors.Wrap(err, "relative path") + } + files = append(files, file) + } + return files, nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/backup_integration_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/backup_integration_test.go new file mode 100644 index 0000000000000000000000000000000000000000..8f23d1b4edec3c6ae047f8980b603ea0494ecddb --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/backup_integration_test.go @@ -0,0 +1,171 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
//
// CONTACT: hello@weaviate.io
//

//go:build integrationTest
// +build integrationTest

package hnsw

import (
	"context"
	"fmt"
	"os"
	"path"
	"testing"
	"time"

	"github.com/sirupsen/logrus/hooks/test"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer"
	"github.com/weaviate/weaviate/entities/cyclemanager"
	enthnsw "github.com/weaviate/weaviate/entities/vectorindex/hnsw"
)

// TestBackup_Integration runs the full backup flow against a real index:
// insert, pause maintenance, switch commitlogs, create a snapshot, list the
// backup files, and verify on-disk contents before resuming maintenance.
func TestBackup_Integration(t *testing.T) {
	ctx := context.Background()
	logger, _ := test.NewNullLogger()

	dirName := t.TempDir()
	indexID := "backup-integration-test"

	// set up parent/child cycle managers for commitlog maintenance and
	// tombstone cleanup, so both can be paused via one combined controller
	parentCommitLoggerCallbacks := cyclemanager.NewCallbackGroup("parentCommitLogger", logger, 1)
	parentCommitLoggerCycle := cyclemanager.NewManager(
		cyclemanager.HnswCommitLoggerCycleTicker(),
		parentCommitLoggerCallbacks.CycleCallback, logger)
	parentCommitLoggerCycle.Start()
	defer parentCommitLoggerCycle.StopAndWait(ctx)
	commitLoggerCallbacks := cyclemanager.NewCallbackGroup("childCommitLogger", logger, 1)
	commitLoggerCallbacksCtrl := parentCommitLoggerCallbacks.Register("commitLogger", commitLoggerCallbacks.CycleCallback)

	parentTombstoneCleanupCallbacks := cyclemanager.NewCallbackGroup("parentTombstoneCleanup", logger, 1)
	parentTombstoneCleanupCycle := cyclemanager.NewManager(
		cyclemanager.NewFixedTicker(enthnsw.DefaultCleanupIntervalSeconds*time.Second),
		parentTombstoneCleanupCallbacks.CycleCallback, logger)
	parentTombstoneCleanupCycle.Start()
	defer parentTombstoneCleanupCycle.StopAndWait(ctx)
	tombstoneCleanupCallbacks := cyclemanager.NewCallbackGroup("childTombstoneCleanup", logger, 1)
	tombstoneCleanupCallbacksCtrl := parentTombstoneCleanupCallbacks.Register("tombstoneCleanup", tombstoneCleanupCallbacks.CycleCallback)

	combinedCtrl := cyclemanager.NewCombinedCallbackCtrl(2, logger, commitLoggerCallbacksCtrl, tombstoneCleanupCallbacksCtrl)

	idx, err := New(Config{
		RootPath:         dirName,
		ID:               indexID,
		Logger:           logger,
		DistanceProvider: distancer.NewCosineDistanceProvider(),
		VectorForIDThunk: testVectorForID,
		MakeCommitLoggerThunk: func() (CommitLogger, error) {
			return NewCommitLogger(dirName, indexID, logger, commitLoggerCallbacks)
		},
	}, enthnsw.NewDefaultUserConfig(), tombstoneCleanupCallbacks, nil)
	require.Nil(t, err)
	idx.PostStartup()

	t.Run("insert vector into index", func(t *testing.T) {
		for i := 0; i < 10; i++ {
			inc := float32(i)
			err := idx.Add(ctx, uint64(i), []float32{inc, inc + 1, inc + 2})
			require.Nil(t, err)
		}
	})

	// let the index age for a second so that
	// the commitlogger filenames, which are
	// based on current timestamp, can differ
	time.Sleep(time.Second)

	t.Run("pause maintenance", func(t *testing.T) {
		err = combinedCtrl.Deactivate(ctx)
		require.Nil(t, err)
	})

	t.Run("switch commit logs", func(t *testing.T) {
		err = idx.SwitchCommitLogs(ctx)
		require.Nil(t, err)
	})

	// after switch commit logs, to have source log(s)
	t.Run("create snapshot", func(t *testing.T) {
		created, _, err := idx.commitLog.CreateSnapshot()
		require.Nil(t, err)
		require.True(t, created)
	})

	t.Run("list files", func(t *testing.T) {
		files, err := idx.ListFiles(ctx, dirName)
		require.Nil(t, err)

		// by this point there should be two files in the commitlog directory.
		// one is the active log file, and the other is the previous active
		// log which was in use prior to `SwitchCommitLogs`. additionally,
		// maintenance has been paused, so we shouldn't see any .condensed
		// files either.
		//
		// because `ListFiles` is used within the context of backups,
		// it excludes any currently active log files, which are not part
		// of the backup. in this case, the only other file is the prev
		// commitlog, so we should only have 1 result here.
		//
		// additionally snapshot was created which consist of 2 files,
		// so total of 3 files are expected
		assert.Len(t, files, 3)

		// no duplicates may be reported
		filesUnique := make(map[string]struct{}, len(files))
		for i := range files {
			filesUnique[files[i]] = struct{}{}
		}
		require.Len(t, filesUnique, len(files))

		t.Run("verify commitlog dir contents", func(t *testing.T) {
			// checking to ensure that indeed there are only 2 files in the
			// commit log directory, and that one of them is the one result
			// from `ListFiles`, and that the other is not a .condensed file
			ls, err := os.ReadDir(path.Join(dirName, fmt.Sprintf("%s.hnsw.commitlog.d", indexID)))
			require.Nil(t, err)
			assert.Len(t, ls, 2)

			var prevLogFound bool
			for _, info := range ls {
				if path.Base(files[0]) == info.Name() {
					prevLogFound = true
				}

				assert.Empty(t, path.Ext(info.Name()))
			}
			assert.True(t, prevLogFound, "previous commitlog not found in commitlog root dir")
		})

		t.Run("verify snapshot dir contents", func(t *testing.T) {
			snapshotDir := snapshotDirectory(idx.commitLog.RootPath(), idx.commitLog.ID())
			relSnapshotDir := snapshotDirectory("", idx.commitLog.ID())

			ls, err := os.ReadDir(snapshotDir)
			require.Nil(t, err)

			// every snapshot file on disk must have been reported by ListFiles
			for i := range ls {
				snapshotFilePath := path.Join(relSnapshotDir, ls[i].Name())
				assert.Contains(t, filesUnique, snapshotFilePath)
			}
		})
	})

	t.Run("resume maintenance", func(t *testing.T) {
		err = combinedCtrl.Activate()
		require.Nil(t, err)
	})

	err = idx.Shutdown(ctx)
	require.Nil(t, err)

	err = combinedCtrl.Unregister(ctx)
	require.Nil(t, err)
}
diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/backup_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/backup_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..f6319ba326a4c7d027c09be73a896e8d490be1f2
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/backup_test.go
@@ -0,0
+1,102 @@
//                           _       _
// __      _____  __ ___   ___  __ _| |_ ___
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
//  \ V  V /  __/ (_| |\ V /| | (_| | || __/
//   \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
//
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
//
// CONTACT: hello@weaviate.io
//

package hnsw

import (
	"context"
	"fmt"
	"os"
	"path"
	"regexp"
	"testing"
	"time"

	"github.com/sirupsen/logrus"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer"
	"github.com/weaviate/weaviate/entities/cyclemanager"
	enthnsw "github.com/weaviate/weaviate/entities/vectorindex/hnsw"
)

// TestBackup_SwitchCommitLogs verifies that switching commitlogs on a fresh
// index succeeds and honors the passed (deadline-bound) context.
func TestBackup_SwitchCommitLogs(t *testing.T) {
	ctx := context.Background()

	dirName := t.TempDir()
	indexID := "backup-switch-commitlogs-test"

	idx, err := New(Config{
		RootPath:         dirName,
		ID:               indexID,
		Logger:           logrus.New(),
		DistanceProvider: distancer.NewCosineDistanceProvider(),
		VectorForIDThunk: testVectorForID,
		MakeCommitLoggerThunk: func() (CommitLogger, error) {
			return NewCommitLogger(dirName, indexID, logrus.New(), cyclemanager.NewCallbackGroupNoop())
		},
	}, enthnsw.NewDefaultUserConfig(), cyclemanager.NewCallbackGroupNoop(), nil)
	require.Nil(t, err)
	idx.PostStartup()

	ctx, cancel := context.WithTimeout(ctx, time.Second)
	defer cancel()

	err = idx.SwitchCommitLogs(ctx)
	assert.Nil(t, err)

	err = idx.Shutdown(ctx)
	require.Nil(t, err)
}

// TestBackup_ListFiles verifies that ListFiles excludes the currently active
// commitlog from its results on a fresh index.
func TestBackup_ListFiles(t *testing.T) {
	ctx := context.Background()

	dirName := t.TempDir()
	indexID := "backup-list-files-test"

	idx, err := New(Config{
		RootPath:         dirName,
		ID:               indexID,
		Logger:           logrus.New(),
		DistanceProvider: distancer.NewCosineDistanceProvider(),
		VectorForIDThunk: testVectorForID,
		MakeCommitLoggerThunk: func() (CommitLogger, error) {
			return NewCommitLogger(dirName, indexID, logrus.New(), cyclemanager.NewCallbackGroupNoop())
		},
	}, enthnsw.NewDefaultUserConfig(), cyclemanager.NewCallbackGroupNoop(), nil)
	require.Nil(t, err)
	idx.PostStartup()

	t.Run("assert expected index contents", func(t *testing.T) {
		files, err := idx.ListFiles(ctx, dirName)
		assert.Nil(t, err)

		// should return empty, because the only file which
		// exists in the commitlog root is the current active
		// log file.
		assert.Len(t, files, 0)

		// checking to ensure that the commitlog root does
		// contain a file. this is the one that was ignored
		// in the check above.
		ls, err := os.ReadDir(path.Join(dirName, fmt.Sprintf("%s.hnsw.commitlog.d", indexID)))
		require.Nil(t, err)
		require.Len(t, ls, 1)
		// filename should just be a 10 digit int
		// NOTE(review): this pattern is unanchored, so it only asserts that
		// SOME 10-digit run exists in the name — anchor with ^...$ to be strict
		matched, err := regexp.MatchString("[0-9]{10}", ls[0].Name())
		assert.Nil(t, err)
		assert.True(t, matched, "regex does not match")
	})

	err = idx.Shutdown(ctx)
	require.Nil(t, err)
}
diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/benchmark_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/benchmark_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..dc7802f2c542413fc215acda0e1bbfe232510e9b
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/benchmark_test.go
@@ -0,0 +1,351 @@
//                           _       _
// __      _____  __ ___   ___  __ _| |_ ___
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
//  \ V  V /  __/ (_| |\ V /| | (_| | || __/
//   \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
//
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+// +// CONTACT: hello@weaviate.io +// + +package hnsw + +import ( + "context" + "flag" + "io" + "net/http" + "net/url" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + "testing" + "time" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/require" + "gopkg.in/yaml.v3" + + "github.com/weaviate/weaviate/adapters/repos/db/vector/compressionhelpers" +) + +var download = flag.Bool("download", false, "download datasets if not found locally") + +var datasets = map[string]string{ + "random-xs": "datasets/big-ann-benchmarks/random10000/data_10000_20", + "random-xs-clustered": "datasets/big-ann-benchmarks/random-clustered10000/clu-random.fbin.crop_nb_10000", + "msturing-1M": "datasets/big-ann-benchmarks/MSTuringANNS/base1b.fbin.crop_nb_1000000", + "msturing-10M": "datasets/big-ann-benchmarks/MSTuringANNS/base1b.fbin.crop_nb_10000000", + "msspacev-1M": "datasets/big-ann-benchmarks/MSSPACEV1B/spacev1b_base.i8bin.crop_nb_1000000", + "msspacev-10M": "datasets/big-ann-benchmarks/MSSPACEV1B/spacev1b_base.i8bin.crop_nb_10000000", + "msturing-10M-clustered": "datasets/big-ann-benchmarks/MSTuring-10M-clustered/msturing-10M-clustered.fbin", +} + +var queries = map[string]string{ + "random-xs": "datasets/big-ann-benchmarks/random10000/queries_1000_20", + "random-xs-clustered": "datasets/big-ann-benchmarks/random-clustered10000/queries_1000_20.fbin", + "msturing-1M": "datasets/big-ann-benchmarks/MSTuringANNS/query100K.fbin", + "msturing-10M": "datasets/big-ann-benchmarks/MSTuringANNS/query100K.fbin", + "msspacev-1M": "datasets/big-ann-benchmarks/MSSPACEV1B/query.i8bin", + "msspacev-10M": "datasets/big-ann-benchmarks/MSSPACEV1B/query.i8bin", + "msturing-10M-clustered": "datasets/big-ann-benchmarks/MSTuring-10M-clustered/testQuery10K.fbin", +} + +func BenchmarkHnswNeurips23(b *testing.B) { + ctx := context.Background() + + runbooks := []string{ + "datasets/neurips23/simple_runbook.yaml", + 
"datasets/neurips23/clustered_runbook.yaml", + } + + type datasetPoints struct { + dataset string + points int + } + logger, _ := test.NewNullLogger() + + readDatasets := make(map[datasetPoints][][]float32) + + for _, runbookFile := range runbooks { + b.Run(runbookFile, func(b *testing.B) { + runbook := readRunbook(b, runbookFile) + + for _, step := range runbook.Steps { + b.Run(step.Dataset, func(b *testing.B) { + // Read the dataset if we haven't already + vectors, ok := readDatasets[datasetPoints{step.Dataset, step.MaxPts}] + if !ok { + file, ok := datasets[step.Dataset] + if !ok { + b.Skipf("Neurips23 dataset %s not found", step.Dataset) + } + + if _, err := os.Stat(file); err != nil { + if !*download { + b.Skipf(`Neurips23 dataset %s not found. +Run test with -download to automatically download the dataset. +Ex: go test -v -benchmem -bench ^BenchmarkHnswNeurips23$ -download`, step.Dataset) + } + downloadDataset(b, step.Dataset) + } + + readDatasets[datasetPoints{step.Dataset, step.MaxPts}] = readBigAnnDataset(b, file, step.MaxPts) + vectors = readDatasets[datasetPoints{step.Dataset, step.MaxPts}] + } + + var queryVectors [][]float32 + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + index := createEmptyHnswIndexForTests(b, idVectorSize(len(vectors[0]))) + + for _, op := range step.Operations { + switch op.Operation { + case "insert": + compressionhelpers.Concurrently(logger, uint64(op.End-op.Start), func(i uint64) { + err := index.Add(ctx, uint64(op.Start+int(i)), vectors[op.Start+int(i)]) + require.NoError(b, err) + }) + case "delete": + compressionhelpers.Concurrently(logger, uint64(op.End-op.Start), func(i uint64) { + err := index.Delete(uint64(op.Start + int(i))) + require.NoError(b, err) + }) + case "search": + if len(queryVectors) == 0 { + file, ok := queries[step.Dataset] + if !ok { + b.Errorf("query file: not found for %s dataset", step.Dataset) + } + + queryVectors = readBigAnnDataset(b, file, 0) + } + + compressionhelpers.Concurrently(logger, 
uint64(len(queryVectors)), func(i uint64) { + _, _, err := index.SearchByVector(ctx, queryVectors[i], 0, nil) + require.NoError(b, err) + }) + default: + b.Errorf("Unknown operation %s", op.Operation) + } + } + } + }) + } + }) + } +} + +func downloadDataset(t testing.TB, name string) { + t.Helper() + + ds, ok := datasets[name] + if !ok { + t.Fatalf("Dataset %s not found", name) + } + + qs, ok := queries[name] + if !ok { + t.Fatalf("Query file not found for %s dataset", name) + } + + for _, f := range []string{ds, qs} { + downloadDatasetFile(t, f) + } +} + +func downloadDatasetFile(t testing.TB, file string) { + t.Helper() + + if _, err := os.Stat(file); err == nil { + return + } + + err := os.MkdirAll(filepath.Dir(file), 0o755) + require.NoError(t, err) + + path := strings.TrimPrefix(file, "datasets/") + + u, err := url.JoinPath("https://storage.googleapis.com/ann-datasets/", path) + require.NoError(t, err) + + t.Logf("Downloading dataset from %s", u) + + client := http.Client{ + Timeout: 60 * time.Second, + } + + resp, err := client.Get(u) + require.NoError(t, err) + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + t.Fatalf("Could not download dataset. 
Status code: %d", resp.StatusCode) + } + + f, err := os.Create(file) + require.NoError(t, err) + defer f.Close() + + _, err = io.Copy(f, resp.Body) + require.NoError(t, err) + + t.Logf("Downloaded dataset %s", file) +} + +func readBigAnnDataset(t testing.TB, file string, maxObjects int) [][]float32 { + t.Helper() + + var vectors [][]float32 + + f, err := os.Open(file) + if err != nil { + panic(errors.Wrap(err, "Could not open SIFT file")) + } + defer f.Close() + + fi, err := f.Stat() + if err != nil { + panic(errors.Wrap(err, "Could not get SIFT file properties")) + } + fileSize := fi.Size() + + b := make([]byte, 4) + + // The data is a binary file containing either floating point vectors or int8 vectors + // It starts with 8 bytes of header data + // The first 4 bytes are the number of vectors in the file + // The second 4 bytes are the dimensionality of the vectors in the file + // If the file is in fbin format, the vector data needs to be converted from bytes to float. + // If the file is in i8bin format, the vector data needs to be converted from bytes to int8 then to float. 
+ + // The first 4 bytes are the number of vectors in the file + _, err = f.Read(b) + require.NoError(t, err) + n := int32FromBytes(b) + + // The second 4 bytes are the dimensionality of the vectors in the file + _, err = f.Read(b) + require.NoError(t, err) + d := int32FromBytes(b) + + var bytesPerVector int + switch { + case strings.Contains(file, "i8bin"): + bytesPerVector = 1 + case strings.Contains(file, "fbin"): + fallthrough + default: + bytesPerVector = 4 + } + + require.Equal(t, 8+n*d*bytesPerVector, int(fileSize)) + + vectorBytes := make([]byte, d*bytesPerVector) + if maxObjects > 0 && maxObjects < n { + n = maxObjects + } + + for i := 0; i < n; i++ { + _, err = f.Read(vectorBytes) + if errors.Is(err, io.EOF) { + break + } + require.NoError(t, err) + + vectorFloat := make([]float32, 0, d) + for j := 0; j < d; j++ { + start := j * bytesPerVector + var f float32 + if bytesPerVector == 1 { + f = float32(vectorBytes[start]) + } else { + f = float32FromBytes(vectorBytes[start : start+bytesPerVector]) + } + + vectorFloat = append(vectorFloat, f) + } + + vectors = append(vectors, vectorFloat) + } + + if maxObjects > 0 { + require.Equal(t, maxObjects, len(vectors)) + } + + return vectors +} + +type runbook struct { + Steps []runbookStep +} +type runbookStep struct { + Dataset string + MaxPts int + Operations []runbookOperation +} + +type runbookOperation struct { + Operation string + Start int + End int +} + +func readRunbook(t testing.TB, file string) *runbook { + f, err := os.Open(file) + require.NoError(t, err, "Could not open runbook file") + defer f.Close() + + d := yaml.NewDecoder(f) + + var runbook runbook + + var m map[string]map[string]any + err = d.Decode(&m) + require.NoError(t, err) + + var datasets []string + for datasetName := range m { + datasets = append(datasets, datasetName) + } + + sort.Strings(datasets) + + for _, datasetName := range datasets { + stepInfo := m[datasetName] + var step runbookStep + + step.Dataset = datasetName + step.MaxPts = 
stepInfo["max_pts"].(int) + i := 1 + for { + s := strconv.Itoa(i) + if _, ok := stepInfo[s]; !ok { + break + } + + opInfo := stepInfo[s].(map[any]any) + + var op runbookOperation + op.Operation = opInfo["operation"].(string) + if op.Operation == "insert" || op.Operation == "delete" { + op.Start = opInfo["start"].(int) + op.End = opInfo["end"].(int) + } + + step.Operations = append(step.Operations, op) + + i++ + } + + runbook.Steps = append(runbook.Steps, step) + } + + return &runbook +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/bufiowriter.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/bufiowriter.go new file mode 100644 index 0000000000000000000000000000000000000000..a079e46418f7dabafaef83d45fb12e493528d40c --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/bufiowriter.go @@ -0,0 +1,186 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package hnsw + +import ( + "io" + "os" + "unicode/utf8" +) + +const ( + defaultBufSize = 4096 +) + +// bufWriter implements buffering for an *os.File object. +// If an error occurs writing to a bufWriter, no more data will be +// accepted and all subsequent writes, and Flush, will return the error. +// After all data has been written, the client should call the +// Flush method to guarantee all data has been forwarded to +// the underlying *os.File. +type bufWriter struct { + err error + buf []byte + n int + wr *os.File +} + +// NewWriterSize returns a new Writer whose buffer has at least the specified +// size. If the argument *os.File is already a Writer with large enough +// size, it returns the underlying Writer. 
func NewWriterSize(w *os.File, size int) *bufWriter {
	// a non-positive size falls back to the default buffer size
	if size <= 0 {
		size = defaultBufSize
	}
	return &bufWriter{
		buf: make([]byte, size),
		wr:  w,
	}
}

// NewWriter returns a new Writer whose buffer has the default size.
func NewWriter(w *os.File) *bufWriter {
	return NewWriterSize(w, defaultBufSize)
}

// Size returns the size of the underlying buffer in bytes.
func (b *bufWriter) Size() int { return len(b.buf) }

// Reset discards any unflushed buffered data, clears any error, and
// resets b to write its output to w.
func (b *bufWriter) Reset(w *os.File) {
	b.err = nil
	b.n = 0
	b.wr = w
}

// Flush writes any buffered data to the underlying *os.File.
func (b *bufWriter) Flush() error {
	if b.err != nil {
		return b.err
	}
	if b.n == 0 {
		return nil
	}
	n, err := b.wr.Write(b.buf[0:b.n])
	if n < b.n && err == nil {
		err = io.ErrShortWrite
	}
	if err != nil {
		// on a short write, shift the unwritten tail to the front of the
		// buffer so the remaining data is preserved; the writer is then
		// poisoned via b.err
		if n > 0 && n < b.n {
			copy(b.buf[0:b.n-n], b.buf[n:b.n])
		}
		b.n -= n
		b.err = err
		return err
	}
	b.n = 0
	return nil
}

// Available returns how many bytes are unused in the buffer.
func (b *bufWriter) Available() int { return len(b.buf) - b.n }

// Buffered returns the number of bytes that have been written into the current buffer.
func (b *bufWriter) Buffered() int { return b.n }

// Write writes the contents of p into the buffer.
// It returns the number of bytes written.
// If nn < len(p), it also returns an error explaining
// why the write is short.
func (b *bufWriter) Write(p []byte) (nn int, err error) {
	for len(p) > b.Available() && b.err == nil {
		var n int
		if b.Buffered() == 0 {
			// Large write, empty buffer.
			// Write directly from p to avoid copy.
			n, b.err = b.wr.Write(p)
		} else {
			n = copy(b.buf[b.n:], p)
			b.n += n
			// error, if any, is captured in b.err and checked by the loop
			b.Flush()
		}
		nn += n
		p = p[n:]
	}
	if b.err != nil {
		return nn, b.err
	}
	n := copy(b.buf[b.n:], p)
	b.n += n
	nn += n
	return nn, nil
}

// WriteByte writes a single byte.
func (b *bufWriter) WriteByte(c byte) error {
	if b.err != nil {
		return b.err
	}
	// flush to make room; Flush sets b.err on failure
	if b.Available() <= 0 && b.Flush() != nil {
		return b.err
	}
	b.buf[b.n] = c
	b.n++
	return nil
}

// WriteRune writes a single Unicode code point, returning
// the number of bytes written and any error.
func (b *bufWriter) WriteRune(r rune) (size int, err error) {
	// single-byte (ASCII) runes take the fast path through WriteByte
	if r < utf8.RuneSelf {
		err = b.WriteByte(byte(r))
		if err != nil {
			return 0, err
		}
		return 1, nil
	}
	if b.err != nil {
		return 0, b.err
	}
	n := b.Available()
	if n < utf8.UTFMax {
		// not enough room to encode the rune in place; flush first
		if b.Flush(); b.err != nil {
			return 0, b.err
		}
		n = b.Available()
		if n < utf8.UTFMax {
			// Can only happen if buffer is silly small.
			return b.WriteString(string(r))
		}
	}
	size = utf8.EncodeRune(b.buf[b.n:], r)
	b.n += size
	return size, nil
}

// WriteString writes a string.
// It returns the number of bytes written.
// If the count is less than len(s), it also returns an error explaining
// why the write is short.
func (b *bufWriter) WriteString(s string) (int, error) {
	nn := 0
	for len(s) > b.Available() && b.err == nil {
		n := copy(b.buf[b.n:], s)
		b.n += n
		nn += n
		s = s[n:]
		// error, if any, is captured in b.err and checked by the loop
		b.Flush()
	}
	if b.err != nil {
		return nn, b.err
	}
	n := copy(b.buf[b.n:], s)
	b.n += n
	nn += n
	return nn, nil
}
diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/commit_log_combiner.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/commit_log_combiner.go
new file mode 100644
index 0000000000000000000000000000000000000000..09b26b548077cdc38c5cb62a87ae0eb8f237f502
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/commit_log_combiner.go
@@ -0,0 +1,243 @@
//                           _       _
// __      _____  __ ___   ___  __ _| |_ ___
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
//  \ V  V /  __/ (_| |\ V /| | (_| | || __/
//   \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
//
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+// +// CONTACT: hello@weaviate.io +// + +package hnsw + +import ( + "io" + "os" + "path/filepath" + "sort" + "strings" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +type CommitLogCombiner struct { + rootPath string + id string + threshold int64 + logger logrus.FieldLogger +} + +func NewCommitLogCombiner(rootPath, id string, threshold int64, + logger logrus.FieldLogger, +) *CommitLogCombiner { + return &CommitLogCombiner{ + rootPath: rootPath, + id: id, + threshold: threshold, + logger: logger, + } +} + +func (c *CommitLogCombiner) Do(partitions ...string) (bool, error) { + // ensure partitions are sorted + sort.Strings(partitions) + + executed := false + for { + // fileNames will already be in order + fileNames, err := getCommitFileNames(c.rootPath, c.id, 0) + if err != nil { + return executed, errors.Wrap(err, "obtain files names") + } + + anyOk := false + for _, partFileNames := range c.partitonFileNames(fileNames, partitions) { + ok, err := c.combineFirstMatch(partFileNames) + if err != nil { + return executed, err + } + anyOk = anyOk || ok + } + if !anyOk { + break + } + executed = true + } + return executed, nil +} + +func (c *CommitLogCombiner) partitonFileNames(fileNames, partitions []string) [][]string { + if len(fileNames) == 0 { + return [][]string{} + } + if len(partitions) == 0 { + return [][]string{fileNames} + } + + partitioned := make([][]string, 0, len(partitions)+1) + + i := 0 + partFileNames := []string{} + for _, partition := range partitions { + for ; i < len(fileNames); i++ { + logname := strings.TrimSuffix(filepath.Base(fileNames[i]), ".condensed") + if strings.Compare(logname, partition) > 0 { + break + } + partFileNames = append(partFileNames, fileNames[i]) + } + if len(partFileNames) > 0 { + partitioned = append(partitioned, partFileNames) + partFileNames = []string{} + } + } + for ; i < len(fileNames); i++ { + partFileNames = append(partFileNames, fileNames[i]) + } + if len(fileNames) > 0 { + partitioned = 
append(partitioned, partFileNames) + } + return partitioned +} + +func (c *CommitLogCombiner) combineFirstMatch(fileNames []string) (bool, error) { + for i, fileName := range fileNames { + if !strings.HasSuffix(fileName, ".condensed") { + // not an already condensed file, so no candidate for combining + continue + } + + if i == len(fileNames)-1 { + // this is the last file, so there is nothing to combine it with + return false, nil + } + + if !strings.HasSuffix(fileNames[i+1], ".condensed") { + // the next file is not a condensed file, so this file is not candidate + // for merging with the next + continue + } + + currentStat, err := os.Stat(fileName) + if err != nil { + return false, errors.Wrapf(err, "stat file %q", fileName) + } + + if currentStat.Size() > c.threshold { + // already too big, can't combine further + continue + } + + nextStat, err := os.Stat(fileNames[i+1]) + if err != nil { + return false, errors.Wrapf(err, "stat file %q", fileNames[i+1]) + } + + if currentStat.Size()+nextStat.Size() > c.threshold { + // combining those two would exceed threshold + continue + } + + if err := c.combine(fileName, fileNames[i+1]); err != nil { + return false, errors.Wrapf(err, "combine %q and %q", fileName, fileNames[i+1]) + } + + return true, nil + } + + return false, nil +} + +func (c *CommitLogCombiner) combine(left, right string) error { + // all names are based on the first file, so that once file1 + file2 are + // combined it is as if file2 had never existed and file 1 was just always + // big enough to hold the contents of both + + // clearly indicate that the file is "in progress", in case we crash while + // combining and the after restart there are multiple alternatives + tmpName := strings.TrimSuffix(right, ".condensed") + (".combined.tmp") + + // finalName will look like an uncondensed original commit log, so the + // condensor will pick it up without even knowing that it's a combined file + finalName := strings.TrimSuffix(right, ".condensed") + + if err 
:= c.mergeFiles(tmpName, left, right); err != nil { + return errors.Wrap(err, "merge files") + } + + if err := c.renameAndCleanUp(tmpName, finalName, left, right); err != nil { + return errors.Wrap(err, "rename and clean up files") + } + + c.logger.WithFields(logrus.Fields{ + "action": "hnsw_commit_logger_combine_condensed_logs", + "id": c.id, + "input_first": left, + "input_second": right, + "output": finalName, + }).Info("successfully combined previously condensed commit log files") + + return nil +} + +func (c *CommitLogCombiner) mergeFiles(outName, first, second string) error { + out, err := os.Create(outName) + if err != nil { + return errors.Wrapf(err, "open target file %q", outName) + } + + source1, err := os.Open(first) + if err != nil { + return errors.Wrapf(err, "open first source file %q", first) + } + defer source1.Close() + + source2, err := os.Open(second) + if err != nil { + return errors.Wrapf(err, "open second source file %q", second) + } + defer source2.Close() + + _, err = io.Copy(out, source1) + if err != nil { + return errors.Wrapf(err, "copy first source (%q) into target (%q)", first, + outName) + } + + _, err = io.Copy(out, source2) + if err != nil { + return errors.Wrapf(err, "copy second source (%q) into target (%q)", second, + outName) + } + + err = out.Close() + if err != nil { + return errors.Wrapf(err, "close target file %q", outName) + } + + return nil +} + +func (c *CommitLogCombiner) renameAndCleanUp(tmpName, finalName string, + toDeletes ...string, +) error { + // do the rename before the delete, because if we crash in between we end up + // with duplicate files both with and without the ".condensed" suffix. The + // new (and complete) merged file will not carry the suffix whereas the + // sources will. 
This will look to the corrupted file fixer as if a + // condensing had gone wrong and will delete the the source + + if err := os.Rename(tmpName, finalName); err != nil { + return errors.Wrapf(err, "rename tmp (%q) to final (%q)", tmpName, finalName) + } + + for _, toDelete := range toDeletes { + if err := os.Remove(toDelete); err != nil { + return errors.Wrapf(err, "clean up %q", toDelete) + } + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/commit_log_combiner_integration_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/commit_log_combiner_integration_test.go new file mode 100644 index 0000000000000000000000000000000000000000..030e66d21ca49e2935a185a78348838604454691 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/commit_log_combiner_integration_test.go @@ -0,0 +1,344 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package hnsw + +import ( + "os" + "testing" + + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_CommitlogCombiner(t *testing.T) { + // For the combiner the contents of a commit log file don't actually matter + // so we can put arbitrary data in the files. 
It will only make decisions + // about what should be appended, the actual condensing will be taken care of + // by the condensor + + logger, _ := test.NewNullLogger() + + t.Run("without partitions", func(t *testing.T) { + rootPath := t.TempDir() + + threshold := int64(1000) + id := "combiner_test" + // create commit logger directory + require.Nil(t, os.MkdirAll(commitLogDirectory(rootPath, id), 0o777)) + + name := func(fileName string) string { + return commitLogFileName(rootPath, id, fileName) + } + + t.Run("create several condensed files below the threshold", func(t *testing.T) { + // 4 files of 300 bytes each, with 1000 byte threshold. This lets us verify + // that one and two will be combined, so will three and four. + require.Nil(t, createDummyFile(name("1000.condensed"), []byte("file1\n"), 300)) + require.Nil(t, createDummyFile(name("1001.condensed"), []byte("file2\n"), 300)) + require.Nil(t, createDummyFile(name("1002.condensed"), []byte("file3\n"), 300)) + require.Nil(t, createDummyFile(name("1003.condensed"), []byte("file4\n"), 300)) + require.Nil(t, createDummyFile(name("1004"), []byte("current\n"), 50)) + }) + + t.Run("run combiner", func(t *testing.T) { + _, err := NewCommitLogCombiner(rootPath, id, threshold, logger).Do() + require.Nil(t, err) + }) + + t.Run("we are now left with combined files", func(t *testing.T) { + dir, err := os.Open(commitLogDirectory(rootPath, id)) + require.Nil(t, err) + + fileNames, err := dir.Readdirnames(0) + require.Nil(t, err) + require.Len(t, fileNames, 3) + require.ElementsMatch(t, []string{"1001", "1003", "1004"}, fileNames) + + t.Run("the first file is correctly combined", func(t *testing.T) { + contents, err := os.ReadFile(commitLogFileName(rootPath, id, "1001")) + require.Nil(t, err) + require.Len(t, contents, 600) + assert.Equal(t, contents[0:6], []byte("file1\n")) + assert.Equal(t, contents[300:306], []byte("file2\n")) + }) + + t.Run("the second file is correctly combined", func(t *testing.T) { + contents, err := 
os.ReadFile(commitLogFileName(rootPath, id, "1003")) + require.Nil(t, err) + require.Len(t, contents, 600) + assert.Equal(t, contents[0:6], []byte("file3\n")) + assert.Equal(t, contents[300:306], []byte("file4\n")) + }) + + t.Run("latest file is unchanged", func(t *testing.T) { + contents, err := os.ReadFile(commitLogFileName(rootPath, id, "1004")) + require.Nil(t, err) + require.Len(t, contents, 50) + assert.Equal(t, contents[0:8], []byte("current\n")) + assert.Equal(t, contents[42:], []byte("rrent\ncu")) + }) + }) + }) + + t.Run("with partitions", func(t *testing.T) { + rootPath := t.TempDir() + createLogFiles := func(t *testing.T, id string, commitLogFile func(string) string) { + require.NoError(t, os.MkdirAll(commitLogDirectory(rootPath, id), 0o777)) + require.NoError(t, createDummyFile(commitLogFile("1001.condensed"), []byte("file1\n"), 800)) + require.NoError(t, createDummyFile(commitLogFile("1002.condensed"), []byte("file2\n"), 700)) + require.NoError(t, createDummyFile(commitLogFile("1003.condensed"), []byte("file3\n"), 600)) + require.NoError(t, createDummyFile(commitLogFile("1004.condensed"), []byte("file4\n"), 500)) + require.NoError(t, createDummyFile(commitLogFile("1005.condensed"), []byte("file5\n"), 400)) + require.NoError(t, createDummyFile(commitLogFile("1006.condensed"), []byte("file6\n"), 300)) + require.NoError(t, createDummyFile(commitLogFile("1007.condensed"), []byte("file7\n"), 200)) + require.NoError(t, createDummyFile(commitLogFile("1008.condensed"), []byte("file8\n"), 100)) + require.NoError(t, createDummyFile(commitLogFile("1009"), []byte("current\n"), 50)) + } + assertFilesExist := func(t *testing.T, id string, names ...string) { + dir, err := os.Open(commitLogDirectory(rootPath, id)) + require.Nil(t, err) + + fileNames, err := dir.Readdirnames(0) + require.NoError(t, err) + require.Len(t, fileNames, len(names)) + require.ElementsMatch(t, names, fileNames) + } + assertFileContains := func(t *testing.T, commitLogFile string, expectedSize 
int, expectedContentByOffset map[int]string) { + contents, err := os.ReadFile(commitLogFile) + require.NoError(t, err) + require.Len(t, contents, expectedSize) + for off, cont := range expectedContentByOffset { + bcont := []byte(cont) + assert.Equal(t, contents[off:off+len(bcont)], bcont) + } + } + + t.Run("no partition", func(t *testing.T) { + id := "combiner_test_no_partition" + threshold := 10_000 + commitLogFile := func(name string) string { return commitLogFileName(rootPath, id, name) } + + t.Run("create log files", func(t *testing.T) { + createLogFiles(t, id, commitLogFile) + }) + + t.Run("combine", func(t *testing.T) { + _, err := NewCommitLogCombiner(rootPath, id, int64(threshold), logger).Do() + require.NoError(t, err) + }) + + t.Run("verify combined files", func(t *testing.T) { + assertFilesExist(t, id, "1002", "1004", "1006", "1008", "1009") + + assertFileContains(t, commitLogFile("1002"), 1500, map[int]string{0: "file1\n", 800: "file2\n"}) + assertFileContains(t, commitLogFile("1004"), 1100, map[int]string{0: "file3\n", 600: "file4\n"}) + assertFileContains(t, commitLogFile("1006"), 700, map[int]string{0: "file5\n", 400: "file6\n"}) + assertFileContains(t, commitLogFile("1008"), 300, map[int]string{0: "file7\n", 200: "file8\n"}) + assertFileContains(t, commitLogFile("1009"), 50, map[int]string{0: "current\n", 42: "rrent\ncu"}) + }) + }) + + t.Run("partition 1004+1008", func(t *testing.T) { + id := "combiner_test_partition_1004_1008" + threshold := 10_000 + commitLogFile := func(name string) string { return commitLogFileName(rootPath, id, name) } + + t.Run("create log files", func(t *testing.T) { + createLogFiles(t, id, commitLogFile) + }) + + t.Run("combine", func(t *testing.T) { + _, err := NewCommitLogCombiner(rootPath, id, int64(threshold), logger).Do("1004", "1008") + require.NoError(t, err) + }) + + t.Run("verify combined files", func(t *testing.T) { + assertFilesExist(t, id, "1002", "1004", "1006", "1008", "1009") + + assertFileContains(t, 
commitLogFile("1002"), 1500, map[int]string{0: "file1\n", 800: "file2\n"}) + assertFileContains(t, commitLogFile("1004"), 1100, map[int]string{0: "file3\n", 600: "file4\n"}) + assertFileContains(t, commitLogFile("1006"), 700, map[int]string{0: "file5\n", 400: "file6\n"}) + assertFileContains(t, commitLogFile("1008"), 300, map[int]string{0: "file7\n", 200: "file8\n"}) + assertFileContains(t, commitLogFile("1009"), 50, map[int]string{0: "current\n", 42: "rrent\ncu"}) + }) + }) + + t.Run("partition 1003+1006", func(t *testing.T) { + id := "combiner_test_partition_1003_1006" + threshold := 10_000 + commitLogFile := func(name string) string { return commitLogFileName(rootPath, id, name) } + + t.Run("create log files", func(t *testing.T) { + createLogFiles(t, id, commitLogFile) + }) + + t.Run("combine", func(t *testing.T) { + _, err := NewCommitLogCombiner(rootPath, id, int64(threshold), logger).Do("1003", "1006") + require.NoError(t, err) + }) + + t.Run("verify combined files", func(t *testing.T) { + assertFilesExist(t, id, "1002", "1003.condensed", "1005", "1006.condensed", "1008", "1009") + + assertFileContains(t, commitLogFile("1002"), 1500, map[int]string{0: "file1\n", 800: "file2\n"}) + assertFileContains(t, commitLogFile("1003.condensed"), 600, map[int]string{0: "file3\n"}) + assertFileContains(t, commitLogFile("1005"), 900, map[int]string{0: "file4\n", 500: "file5\n"}) + assertFileContains(t, commitLogFile("1006.condensed"), 300, map[int]string{0: "file6\n"}) + assertFileContains(t, commitLogFile("1008"), 300, map[int]string{0: "file7\n", 200: "file8\n"}) + assertFileContains(t, commitLogFile("1009"), 50, map[int]string{0: "current\n", 42: "rrent\ncu"}) + }) + }) + + t.Run("partition 1003, low threshold", func(t *testing.T) { + id := "combiner_test_partition_1003_1006_low_threshold" + threshold := 1000 + commitLogFile := func(name string) string { return commitLogFileName(rootPath, id, name) } + + t.Run("create log files", func(t *testing.T) { + createLogFiles(t, 
id, commitLogFile) + }) + + t.Run("combine", func(t *testing.T) { + _, err := NewCommitLogCombiner(rootPath, id, int64(threshold), logger).Do("1003") + require.NoError(t, err) + }) + + t.Run("verify combined files", func(t *testing.T) { + assertFilesExist(t, id, "1001.condensed", "1002.condensed", "1003.condensed", "1005", "1007", + "1008.condensed", "1009") + + assertFileContains(t, commitLogFile("1001.condensed"), 800, map[int]string{0: "file1\n"}) + assertFileContains(t, commitLogFile("1002.condensed"), 700, map[int]string{0: "file2\n"}) + assertFileContains(t, commitLogFile("1003.condensed"), 600, map[int]string{0: "file3\n"}) + assertFileContains(t, commitLogFile("1005"), 900, map[int]string{0: "file4\n", 500: "file5\n"}) + assertFileContains(t, commitLogFile("1007"), 500, map[int]string{0: "file6\n", 300: "file7\n"}) + assertFileContains(t, commitLogFile("1008.condensed"), 100, map[int]string{0: "file8\n"}) + assertFileContains(t, commitLogFile("1009"), 50, map[int]string{0: "current\n", 42: "rrent\ncu"}) + }) + }) + + t.Run("partition 1005", func(t *testing.T) { + id := "combiner_test_partition_1005" + threshold := 10_000 + commitLogFile := func(name string) string { return commitLogFileName(rootPath, id, name) } + + t.Run("create log files", func(t *testing.T) { + createLogFiles(t, id, commitLogFile) + }) + + t.Run("combine", func(t *testing.T) { + _, err := NewCommitLogCombiner(rootPath, id, int64(threshold), logger).Do("1005") + require.NoError(t, err) + }) + + t.Run("verify combined files", func(t *testing.T) { + assertFilesExist(t, id, "1002", "1004", "1005.condensed", "1007", "1008.condensed", "1009") + + assertFileContains(t, commitLogFile("1002"), 1500, map[int]string{0: "file1\n", 800: "file2\n"}) + assertFileContains(t, commitLogFile("1004"), 1100, map[int]string{0: "file3\n", 600: "file4\n"}) + assertFileContains(t, commitLogFile("1005.condensed"), 400, map[int]string{0: "file5\n"}) + assertFileContains(t, commitLogFile("1007"), 500, 
map[int]string{0: "file6\n", 300: "file7\n"}) + assertFileContains(t, commitLogFile("1008.condensed"), 100, map[int]string{0: "file8\n"}) + assertFileContains(t, commitLogFile("1009"), 50, map[int]string{0: "current\n", 42: "rrent\ncu"}) + }) + }) + + t.Run("partition 1005, low threshold", func(t *testing.T) { + id := "combiner_test_partition_1005_low_threshold" + threshold := 1200 + commitLogFile := func(name string) string { return commitLogFileName(rootPath, id, name) } + + t.Run("create log files", func(t *testing.T) { + createLogFiles(t, id, commitLogFile) + }) + + t.Run("combine", func(t *testing.T) { + _, err := NewCommitLogCombiner(rootPath, id, int64(threshold), logger).Do("1005") + require.NoError(t, err) + }) + + t.Run("verify combined files", func(t *testing.T) { + assertFilesExist(t, id, "1001.condensed", "1002.condensed", "1004", "1005.condensed", + "1007", "1008.condensed", "1009") + + assertFileContains(t, commitLogFile("1001.condensed"), 800, map[int]string{0: "file1\n"}) + assertFileContains(t, commitLogFile("1002.condensed"), 700, map[int]string{0: "file2\n"}) + assertFileContains(t, commitLogFile("1004"), 1100, map[int]string{0: "file3\n", 600: "file4\n"}) + assertFileContains(t, commitLogFile("1005.condensed"), 400, map[int]string{0: "file5\n"}) + assertFileContains(t, commitLogFile("1007"), 500, map[int]string{0: "file6\n", 300: "file7\n"}) + assertFileContains(t, commitLogFile("1008.condensed"), 100, map[int]string{0: "file8\n"}) + assertFileContains(t, commitLogFile("1009"), 50, map[int]string{0: "current\n", 42: "rrent\ncu"}) + }) + }) + + t.Run("multiple partitions", func(t *testing.T) { + id := "combiner_test_multiple_partitions" + threshold := 10_000 + commitLogFile := func(name string) string { return commitLogFileName(rootPath, id, name) } + + t.Run("create log files", func(t *testing.T) { + createLogFiles(t, id, commitLogFile) + }) + + t.Run("combine", func(t *testing.T) { + _, err := NewCommitLogCombiner(rootPath, id, 
int64(threshold), logger).Do("1001", "1002", + "1003", "1004", "1005", "1006", "1007", "1008", "1009", "1010") + require.NoError(t, err) + }) + + t.Run("verify combined files", func(t *testing.T) { + assertFilesExist(t, id, "1001.condensed", "1002.condensed", "1003.condensed", "1004.condensed", + "1005.condensed", "1006.condensed", "1007.condensed", "1008.condensed", "1009") + + assertFileContains(t, commitLogFile("1001.condensed"), 800, map[int]string{0: "file1\n"}) + assertFileContains(t, commitLogFile("1002.condensed"), 700, map[int]string{0: "file2\n"}) + assertFileContains(t, commitLogFile("1003.condensed"), 600, map[int]string{0: "file3\n"}) + assertFileContains(t, commitLogFile("1004.condensed"), 500, map[int]string{0: "file4\n"}) + assertFileContains(t, commitLogFile("1005.condensed"), 400, map[int]string{0: "file5\n"}) + assertFileContains(t, commitLogFile("1006.condensed"), 300, map[int]string{0: "file6\n"}) + assertFileContains(t, commitLogFile("1007.condensed"), 200, map[int]string{0: "file7\n"}) + assertFileContains(t, commitLogFile("1008.condensed"), 100, map[int]string{0: "file8\n"}) + assertFileContains(t, commitLogFile("1009"), 50, map[int]string{0: "current\n", 42: "rrent\ncu"}) + }) + }) + }) +} + +func createDummyFile(fileName string, content []byte, size int) error { + f, err := os.Create(fileName) + if err != nil { + return err + } + + defer f.Close() + + written := 0 + for { + if size == written { + break + } + + if size-written < len(content) { + content = content[:(size - written)] + } + + n, err := f.Write([]byte(content)) + written += n + + if err != nil { + return err + } + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/commit_logger.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/commit_logger.go new file mode 100644 index 0000000000000000000000000000000000000000..8985b49cc7db219016ca148f9ad348ab89fe084e --- /dev/null +++ 
b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/commit_logger.go @@ -0,0 +1,752 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package hnsw + +import ( + "context" + "fmt" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + "sync" + "time" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/weaviate/weaviate/adapters/repos/db/vector/compressionhelpers" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/commitlog" + "github.com/weaviate/weaviate/adapters/repos/db/vector/multivector" + "github.com/weaviate/weaviate/entities/cyclemanager" + "github.com/weaviate/weaviate/entities/errorcompounder" + "github.com/weaviate/weaviate/usecases/memwatch" +) + +const defaultCommitLogSize = 500 * 1024 * 1024 + +type hnswCommitLogger struct { + // protect against concurrent attempts to write in the underlying file or + // buffer + sync.Mutex + + rootPath string + id string + condensor Condensor + logger logrus.FieldLogger + maxSizeIndividual int64 + maxSizeCombining int64 + commitLogger *commitlog.Logger + + switchLogsCallbackCtrl cyclemanager.CycleCallbackCtrl + maintainLogsCallbackCtrl cyclemanager.CycleCallbackCtrl + maintenanceCallbacks cyclemanager.CycleCallbackGroup + + allocChecker memwatch.AllocChecker + + snapshotLock sync.Mutex + snapshotLogger logrus.FieldLogger + // whether snapshots are disabled + snapshotDisabled bool + // minimum interval to create next snapshot out of last one and new commitlogs, 0 = no periodic snapshots + snapshotCreateInterval time.Duration + // minimal interval to check if next snapshot should be created + snapshotCheckInterval time.Duration + // minimum number of delta commitlogs required to create new snapshot + 
snapshotMinDeltaCommitlogsNumber int + // minimum percentage size of delta commitlogs (compared to last snapshot) required to create new one + snapshotMinDeltaCommitlogsSizePercentage int + // time that last snapshot was created at (based on its name, which is based on last included commitlog name; + // not the actual snapshot file creation time) + snapshotLastCreatedAt time.Time + // time that last check if snapshot should be created was made + snapshotLastCheckedAt time.Time + // partitions mark commitlogs (left ones) that should not be combined with + // logs on the right side (newer ones). + // example: given logs 0001.condensed, 0002.condensed, 0003.condensed and 0004.condensed + // with partition = "0002", only logs [older or equal 0002.condensed] + // or [newer than 0002.condensed] can be combined with each other + // (so 0001+0002 or 0003+0004, NOT 0002+0003) + // partitions are commitlog filenames (no path, no extension) + snapshotPartitions []string +} + +func NewCommitLogger(rootPath, name string, logger logrus.FieldLogger, + maintenanceCallbacks cyclemanager.CycleCallbackGroup, opts ...CommitlogOption, +) (*hnswCommitLogger, error) { + l := &hnswCommitLogger{ + rootPath: rootPath, + id: name, + condensor: NewMemoryCondensor(logger), + logger: logger, + maintenanceCallbacks: maintenanceCallbacks, + + // both can be overwritten using functional options + maxSizeIndividual: defaultCommitLogSize / 5, + maxSizeCombining: defaultCommitLogSize, + + snapshotMinDeltaCommitlogsNumber: 1, + snapshotMinDeltaCommitlogsSizePercentage: 0, + } + + for _, o := range opts { + if err := o(l); err != nil { + return nil, err + } + } + + fd, err := getLatestCommitFileOrCreate(rootPath, name) + if err != nil { + return nil, err + } + l.commitLogger = commitlog.NewLoggerWithFile(fd) + + if err := l.initSnapshotData(); err != nil { + return nil, errors.Wrapf(err, "init snapshot data") + } + + return l, nil +} + +func (l *hnswCommitLogger) InitMaintenance() { + id := func(elems 
...string) string { + elems = append([]string{"commit_logger"}, elems...) + elems = append(elems, l.id) + return strings.Join(elems, "/") + } + + l.switchLogsCallbackCtrl = l.maintenanceCallbacks.Register(id("switch_logs"), l.startSwitchLogs) + l.maintainLogsCallbackCtrl = l.maintenanceCallbacks.Register(id("maintain_logs"), l.startCommitLogsMaintenance) +} + +func commitLogFileName(rootPath, indexName, fileName string) string { + return fmt.Sprintf("%s/%s", commitLogDirectory(rootPath, indexName), fileName) +} + +func commitLogDirectory(rootPath, name string) string { + return fmt.Sprintf("%s/%s.hnsw.commitlog.d", rootPath, name) +} + +func getLatestCommitFileOrCreate(rootPath, name string) (*os.File, error) { + dir := commitLogDirectory(rootPath, name) + err := os.MkdirAll(dir, os.ModePerm) + if err != nil { + return nil, errors.Wrap(err, "create commit logger directory") + } + + fileName, ok, err := getCurrentCommitLogFileName(dir) + if err != nil { + return nil, errors.Wrap(err, "find commit logger file in directory") + } + + if !ok { + // this is a new commit log, initialize with the current time stamp + fileName = fmt.Sprintf("%d", time.Now().Unix()) + } + + fd, err := os.OpenFile(commitLogFileName(rootPath, name, fileName), + os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0o666) + if err != nil { + return nil, errors.Wrap(err, "create commit log file") + } + + return fd, nil +} + +// getCommitFileNames in order, from old to new +func getCommitFileNames(rootPath, id string, createdAfter int64) ([]string, error) { + files, err := getCommitFiles(rootPath, id, createdAfter) + if err != nil { + return nil, err + } + return commitLogFileNames(rootPath, id, files), nil +} + +func getCommitFiles(rootPath, id string, createdAfter int64) ([]os.DirEntry, error) { + dir := commitLogDirectory(rootPath, id) + err := os.MkdirAll(dir, os.ModePerm) + if err != nil { + return nil, errors.Wrap(err, "create commit logger directory") + } + + files, err := os.ReadDir(dir) + if err != nil { 
+ return nil, errors.Wrap(err, "browse commit logger directory") + } + + files = skipTmpScratchOrHiddenFiles(files) + files, err = removeTmpCombiningFiles(dir, files) + if err != nil { + return nil, errors.Wrap(err, "clean up tmp combining files") + } + + if createdAfter > 0 { + files, err = filterNewerCommitLogFiles(files, createdAfter) + if err != nil { + return nil, errors.Wrap(err, "remove old commit files") + } + } + + if len(files) == 0 { + return nil, nil + } + + ec := errorcompounder.New() + sort.Slice(files, func(a, b int) bool { + ts1, err := asTimeStamp(files[a].Name()) + if err != nil { + ec.Add(err) + } + + ts2, err := asTimeStamp(files[b].Name()) + if err != nil { + ec.Add(err) + } + return ts1 < ts2 + }) + if err := ec.ToError(); err != nil { + return nil, err + } + + return files, nil +} + +func commitLogFileNames(rootPath, id string, files []os.DirEntry) []string { + out := make([]string, len(files)) + for i, file := range files { + out[i] = commitLogFileName(rootPath, id, file.Name()) + } + return out +} + +// getCurrentCommitLogFileName returns the fileName and true if a file was +// present. If no file was present, the second arg is false. 
+func getCurrentCommitLogFileName(dirPath string) (string, bool, error) { + files, err := os.ReadDir(dirPath) + if err != nil { + return "", false, errors.Wrap(err, "browse commit logger directory") + } + + if len(files) > 0 { + files = skipTmpScratchOrHiddenFiles(files) + files, err = removeTmpCombiningFiles(dirPath, files) + if err != nil { + return "", false, errors.Wrap(err, "clean up tmp combining files") + } + } + + if len(files) == 0 { + return "", false, nil + } + + ec := errorcompounder.New() + sort.Slice(files, func(a, b int) bool { + ts1, err := asTimeStamp(files[a].Name()) + if err != nil { + ec.Add(err) + } + + ts2, err := asTimeStamp(files[b].Name()) + if err != nil { + ec.Add(err) + } + return ts1 > ts2 + }) + if err := ec.ToError(); err != nil { + return "", false, err + } + + return files[0].Name(), true, nil +} + +func skipTmpScratchOrHiddenFiles(in []os.DirEntry) []os.DirEntry { + out := make([]os.DirEntry, len(in)) + i := 0 + for _, entry := range in { + if strings.HasSuffix(entry.Name(), ".scratch.tmp") { + continue + } + + if strings.HasPrefix(entry.Name(), ".") { + continue + } + + out[i] = entry + i++ + } + return out[:i] +} + +func skipEmptyFiles(in []os.DirEntry) ([]os.DirEntry, error) { + out := make([]os.DirEntry, len(in)) + i := 0 + for _, entry := range in { + info, err := entry.Info() + if err != nil { + return nil, errors.Wrap(err, "get file info") + } + if info.Size() == 0 { + continue + } + out[i] = entry + i++ + } + return out[:i], nil +} + +func removeTmpCombiningFiles(dirPath string, in []os.DirEntry) ([]os.DirEntry, error) { + out := make([]os.DirEntry, len(in)) + i := 0 + + for _, info := range in { + if strings.HasSuffix(info.Name(), ".combined.tmp") { + // a temporary combining file was found which means that the combining + // process never completed, this file is thus considered corrupt (too + // short) and must be deleted. 
The original sources still exist (because + // the only get deleted after the .tmp file is removed), so it's safe to + // delete this without data loss. + + if err := os.Remove(filepath.Join(dirPath, info.Name())); err != nil { + return out, errors.Wrap(err, "remove tmp combining file") + } + continue + } + + out[i] = info + i++ + } + + return out[:i], nil +} + +func filterNewerCommitLogFiles(in []os.DirEntry, createdAfter int64) ([]os.DirEntry, error) { + out := make([]os.DirEntry, len(in)) + i := 0 + for _, entry := range in { + ts, err := asTimeStamp(entry.Name()) + if err != nil { + return nil, errors.Wrapf(err, "read commitlog timestamp %q", entry.Name()) + } + + if ts <= createdAfter { + continue + } + + out[i] = entry + i++ + } + + return out[:i], nil +} + +func asTimeStamp(in string) (int64, error) { + return strconv.ParseInt(strings.TrimSuffix(in, ".condensed"), 10, 64) +} + +type Condensor interface { + Do(filename string) error +} + +type HnswCommitType uint8 // 256 options, plenty of room for future extensions + +const ( + AddNode HnswCommitType = iota + SetEntryPointMaxLevel + AddLinkAtLevel + ReplaceLinksAtLevel + AddTombstone + RemoveTombstone + ClearLinks + DeleteNode + ResetIndex + ClearLinksAtLevel // added in v1.8.0-rc.1, see https://github.com/weaviate/weaviate/issues/1701 + AddLinksAtLevel // added in v1.8.0-rc.1, see https://github.com/weaviate/weaviate/issues/1705 + AddPQ + AddSQ + AddMuvera + AddRQ + AddBRQ +) + +func (t HnswCommitType) String() string { + switch t { + case AddNode: + return "AddNode" + case SetEntryPointMaxLevel: + return "SetEntryPointWithMaxLayer" + case AddLinkAtLevel: + return "AddLinkAtLevel" + case AddLinksAtLevel: + return "AddLinksAtLevel" + case ReplaceLinksAtLevel: + return "ReplaceLinksAtLevel" + case AddTombstone: + return "AddTombstone" + case RemoveTombstone: + return "RemoveTombstone" + case ClearLinks: + return "ClearLinks" + case DeleteNode: + return "DeleteNode" + case ResetIndex: + return "ResetIndex" + 
	case ClearLinksAtLevel:
		return "ClearLinksAtLevel"
	case AddPQ:
		return "AddProductQuantizer"
	case AddSQ:
		return "AddScalarQuantizer"
	case AddMuvera:
		return "AddMuvera"
	case AddRQ:
		return "AddRotationalQuantizer"
	case AddBRQ:
		return "AddBRQCompression"
	}
	return "unknown commit type"
}

// ID returns the identifier of this commit logger (used to derive its
// on-disk directory name).
func (l *hnswCommitLogger) ID() string {
	return l.id
}

// The methods below all follow the same pattern: serialize access via the
// logger's embedded mutex, then delegate to the underlying file-backed
// commit logger.

// AddPQCompression records product-quantization compression data.
func (l *hnswCommitLogger) AddPQCompression(data compressionhelpers.PQData) error {
	l.Lock()
	defer l.Unlock()

	return l.commitLogger.AddPQCompression(data)
}

// AddSQCompression records scalar-quantization compression data.
func (l *hnswCommitLogger) AddSQCompression(data compressionhelpers.SQData) error {
	l.Lock()
	defer l.Unlock()

	return l.commitLogger.AddSQCompression(data)
}

// AddRQCompression records rotational-quantization compression data.
func (l *hnswCommitLogger) AddRQCompression(data compressionhelpers.RQData) error {
	l.Lock()
	defer l.Unlock()

	return l.commitLogger.AddRQCompression(data)
}

// AddMuvera records Muvera encoder data.
func (l *hnswCommitLogger) AddMuvera(data multivector.MuveraData) error {
	l.Lock()
	defer l.Unlock()

	return l.commitLogger.AddMuvera(data)
}

// AddBRQCompression records BRQ compression data.
func (l *hnswCommitLogger) AddBRQCompression(data compressionhelpers.BRQData) error {
	l.Lock()
	defer l.Unlock()

	return l.commitLogger.AddBRQCompression(data)
}

// AddNode adds an empty node
func (l *hnswCommitLogger) AddNode(node *vertex) error {
	l.Lock()
	defer l.Unlock()

	return l.commitLogger.AddNode(node.id, node.level)
}

// SetEntryPointWithMaxLayer records a new graph entry point and its level.
func (l *hnswCommitLogger) SetEntryPointWithMaxLayer(id uint64, level int) error {
	l.Lock()
	defer l.Unlock()

	return l.commitLogger.SetEntryPointWithMaxLayer(id, level)
}

// ReplaceLinksAtLevel records a full replacement of a node's links at the
// given level.
func (l *hnswCommitLogger) ReplaceLinksAtLevel(nodeid uint64, level int, targets []uint64) error {
	l.Lock()
	defer l.Unlock()

	return l.commitLogger.ReplaceLinksAtLevel(nodeid, level, targets)
}

// AddLinkAtLevel records a single additional link for a node at the given
// level.
func (l *hnswCommitLogger) AddLinkAtLevel(nodeid uint64, level int,
	target uint64,
) error {
	l.Lock()
	defer l.Unlock()

	return l.commitLogger.AddLinkAtLevel(nodeid, level, target)
}

// AddTombstone marks a node as deleted (pending cleanup).
func (l *hnswCommitLogger) AddTombstone(nodeid uint64) error {
	l.Lock()
	defer l.Unlock()

	return l.commitLogger.AddTombstone(nodeid)
}

// RemoveTombstone clears a previously recorded tombstone.
func (l *hnswCommitLogger) RemoveTombstone(nodeid uint64) error {
	l.Lock()
	defer l.Unlock()

	return l.commitLogger.RemoveTombstone(nodeid)
}

// ClearLinks records removal of all links of a node across all levels.
func (l *hnswCommitLogger) ClearLinks(nodeid uint64) error {
	l.Lock()
	defer l.Unlock()

	return l.commitLogger.ClearLinks(nodeid)
}

// ClearLinksAtLevel records removal of a node's links at a single level.
func (l *hnswCommitLogger) ClearLinksAtLevel(nodeid uint64, level uint16) error {
	l.Lock()
	defer l.Unlock()

	return l.commitLogger.ClearLinksAtLevel(nodeid, level)
}

// DeleteNode records the removal of a node from the graph.
func (l *hnswCommitLogger) DeleteNode(nodeid uint64) error {
	l.Lock()
	defer l.Unlock()

	return l.commitLogger.DeleteNode(nodeid)
}

// Reset records a full index reset.
func (l *hnswCommitLogger) Reset() error {
	l.Lock()
	defer l.Unlock()

	return l.commitLogger.Reset()
}

// Shutdown waits for ongoing maintenance processes to stop, then cancels their
// scheduling. The caller can be sure that state on disk is immutable after
// calling Shutdown().
func (l *hnswCommitLogger) Shutdown(ctx context.Context) error {
	if l.switchLogsCallbackCtrl != nil {
		if err := l.switchLogsCallbackCtrl.Unregister(ctx); err != nil {
			return errors.Wrap(err, "failed to unregister commitlog switch from maintenance cycle")
		}
	}
	if l.maintainLogsCallbackCtrl != nil {
		if err := l.maintainLogsCallbackCtrl.Unregister(ctx); err != nil {
			return errors.Wrap(err, "failed to unregister commitlog condense from maintenance cycle")
		}
	}
	return nil
}

// RootPath returns the root directory under which this logger stores its
// files.
func (l *hnswCommitLogger) RootPath() string {
	return l.rootPath
}

// startSwitchLogs is the cycle-manager callback that rotates the active
// commit log file when it grows past its size threshold. Errors are logged,
// not returned, so the maintenance cycle keeps running.
func (l *hnswCommitLogger) startSwitchLogs(shouldAbort cyclemanager.ShouldAbortCallback) bool {
	executed, err := l.switchCommitLogs(false)
	if err != nil {
		l.logger.WithError(err).
			WithField("action", "hnsw_commit_log_switch").
+ Error("hnsw commit log switch failed") + } + return executed +} + +func (l *hnswCommitLogger) startCommitLogsMaintenance(shouldAbort cyclemanager.ShouldAbortCallback) bool { + executedCombine, err := l.combineLogs() + if err != nil { + l.logger.WithError(err). + WithField("action", "hnsw_commit_log_combining"). + Error("hnsw commit log maintenance (combining) failed") + } + + executedCondense, err := l.condenseLogs() + if err != nil { + l.logger.WithError(err). + WithField("action", "hnsw_commit_log_condensing"). + Error("hnsw commit log maintenance (condensing) failed") + } + + executedSnapshot, err := l.createSnapshot(shouldAbort) + if err != nil { + l.logger.WithError(err). + WithField("action", "hnsw_snapshot_creating"). + Error("hnsw commit log maintenance (snapshot) failed") + } + + return executedCombine || executedCondense || executedSnapshot +} + +func (l *hnswCommitLogger) SwitchCommitLogs(force bool) error { + _, err := l.switchCommitLogs(force) + return err +} + +func (l *hnswCommitLogger) switchCommitLogs(force bool) (bool, error) { + l.Lock() + defer l.Unlock() + + size, err := l.commitLogger.FileSize() + if err != nil { + return false, err + } + + if size <= l.maxSizeIndividual && !force { + return false, nil + } + + oldFileName, err := l.commitLogger.FileName() + if err != nil { + return false, err + } + + if err := l.commitLogger.Close(); err != nil { + return true, err + } + + // this is a new commit log, initialize with the current time stamp + fileName := fmt.Sprintf("%d", time.Now().Unix()) + + if force { + l.logger.WithField("action", "commit_log_file_switched"). + WithField("id", l.id). + WithField("old_file_name", oldFileName). + WithField("old_file_size", size). + WithField("new_file_name", fileName). + Debug("commit log switched forced") + } else { + l.logger.WithField("action", "commit_log_file_switched"). + WithField("id", l.id). + WithField("old_file_name", oldFileName). + WithField("old_file_size", size). 
+ WithField("new_file_name", fileName). + Info("commit log size crossed threshold, switching to new file") + } + + fd, err := os.OpenFile(commitLogFileName(l.rootPath, l.id, fileName), + os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0o666) + if err != nil { + return true, errors.Wrap(err, "create commit log file") + } + + l.commitLogger = commitlog.NewLoggerWithFile(fd) + + return true, nil +} + +func (l *hnswCommitLogger) condenseLogs() (bool, error) { + files, err := getCommitFileNames(l.rootPath, l.id, 0) + if err != nil { + return false, err + } + + if len(files) <= 1 { + // if there are no files there is nothing to do + // if there is only a single file, it must still be in use, we can't do + // anything yet + return false, nil + } + + // cut off last element, as that's never a candidate + candidates := files[:len(files)-1] + + for _, candidate := range candidates { + if strings.HasSuffix(candidate, ".condensed") { + // don't attempt to condense logs which are already condensed + continue + } + + if l.allocChecker != nil { + // allocChecker is optional, so we can only check this if it's actually set + s, err := os.Stat(candidate) + if err != nil { + return false, fmt.Errorf("stat candidate file %q: %w", candidate, err) + } + + // We're estimating here that the in-mem condensor needs about 1B of + // memory for every byte of data in the log file. This estimate can + // probably be refined. + if err := l.allocChecker.CheckAlloc(s.Size()); err != nil { + l.logger.WithFields(logrus.Fields{ + "action": "hnsw_commit_log_condensing", + "event": "condensing_skipped_oom", + "path": candidate, + "size": s.Size(), + }).WithError(err). 
+ Warnf("skipping hnsw condensing due to memory pressure") + return false, nil + } + + } + + return true, l.condensor.Do(candidate) + } + + return false, nil +} + +func (l *hnswCommitLogger) combineLogs() (bool, error) { + // maxSize is the desired final size, since we assume a lot of redundancy we + // can set the combining threshold higher than the final threshold under the + // assumption that the combined file will be considerably smaller than the + // sum of both input files + threshold := l.logCombiningThreshold() + return NewCommitLogCombiner(l.rootPath, l.id, threshold, l.logger).Do(l.snapshotPartitions...) +} + +// TODO al:snapshot handle should abort +func (l *hnswCommitLogger) createSnapshot(shouldAbort cyclemanager.ShouldAbortCallback) (bool, error) { + if l.snapshotDisabled || l.snapshotCreateInterval <= 0 { + return false, nil + } + + if !time.Now().Add(-l.snapshotCheckInterval).After(l.snapshotLastCheckedAt) { + return false, nil + } + l.snapshotLastCheckedAt = time.Now() + + if !time.Now().Add(-l.snapshotCreateInterval).After(l.snapshotLastCreatedAt) { + return false, nil + } + + created, _, err := l.CreateSnapshot() + return created, err +} + +func (l *hnswCommitLogger) logCombiningThreshold() int64 { + return int64(float64(l.maxSizeCombining) * 1.75) +} + +func (l *hnswCommitLogger) Drop(ctx context.Context) error { + if err := l.commitLogger.Close(); err != nil { + return errors.Wrap(err, "close hnsw commit logger prior to delete") + } + + // stop all goroutines + if err := l.Shutdown(ctx); err != nil { + return errors.Wrap(err, "drop commitlog") + } + + // remove commit log directory if exists + dir := commitLogDirectory(l.rootPath, l.id) + if _, err := os.Stat(dir); err == nil { + err := os.RemoveAll(dir) + if err != nil { + return errors.Wrap(err, "delete commit files directory") + } + } + return nil +} + +func (l *hnswCommitLogger) Flush() error { + l.Lock() + defer l.Unlock() + + return l.commitLogger.Flush() +} diff --git 
a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/commit_logger_buffered_links_logger.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/commit_logger_buffered_links_logger.go new file mode 100644 index 0000000000000000000000000000000000000000..1165b90157f02c5f05d42b4a506b341b0ace87c5 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/commit_logger_buffered_links_logger.go @@ -0,0 +1,66 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package hnsw + +// type bufferedLinksLogger struct { +// base *hnswCommitLogger +// buf *bytes.Buffer +// } + +// func (b *bufferedLinksLogger) ReplaceLinksAtLevel(nodeid uint64, +// level int, targets []uint64) error { +// b.base.writeCommitType(b.buf, ReplaceLinksAtLevel) +// b.base.writeUint64(b.buf, nodeid) +// b.base.writeUint16(b.buf, uint16(level)) +// targetLength := len(targets) +// if targetLength > math.MaxUint16 { +// // TODO: investigate why we get such massive connections +// targetLength = math.MaxUint16 +// b.base.logger.WithField("action", "hnsw_current_commit_log"). +// WithField("id", b.base.id). +// WithField("original_length", len(targets)). +// WithField("maximum_length", targetLength). 
+// Warning("condensor length of connections would overflow uint16, cutting off") +// } +// b.base.writeUint16(b.buf, uint16(targetLength)) +// b.base.writeUint64Slice(b.buf, targets[:targetLength]) + +// return nil +// } + +// func (b *bufferedLinksLogger) AddLinkAtLevel(nodeid uint64, level int, +// target uint64) error { +// ec := &errorCompounder{} +// ec.add(b.base.writeCommitType(b.buf, AddLinkAtLevel)) +// ec.add(b.base.writeUint64(b.buf, nodeid)) +// ec.add(b.base.writeUint16(b.buf, uint16(level))) +// ec.add(b.base.writeUint64(b.buf, target)) + +// if err := ec.toError(); err != nil { +// return errors.Wrapf(err, "write link at level %d->%d (%d) to commit log", +// nodeid, target, level) +// } + +// return nil +// } + +// func (b *bufferedLinksLogger) Close() error { +// b.base.Lock() +// defer b.base.Unlock() + +// _, err := b.base.logWriter.Write(b.buf.Bytes()) +// if err != nil { +// return errors.Wrap(err, "flush link buffer to commit logger") +// } + +// return nil +// } diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/commit_logger_functional_options.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/commit_logger_functional_options.go new file mode 100644 index 0000000000000000000000000000000000000000..6bd10833c8bc06b90fc0bc3855f374566c03fdc8 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/commit_logger_functional_options.go @@ -0,0 +1,76 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package hnsw + +import ( + "time" + + "github.com/weaviate/weaviate/usecases/memwatch" +) + +type CommitlogOption func(l *hnswCommitLogger) error + +func WithCommitlogThreshold(size int64) CommitlogOption { + return func(l *hnswCommitLogger) error { + l.maxSizeIndividual = size + return nil + } +} + +func WithCommitlogThresholdForCombining(size int64) CommitlogOption { + return func(l *hnswCommitLogger) error { + l.maxSizeCombining = size + return nil + } +} + +func WithAllocChecker(mm memwatch.AllocChecker) CommitlogOption { + return func(l *hnswCommitLogger) error { + l.allocChecker = mm + return nil + } +} + +func WithCondensor(condensor Condensor) CommitlogOption { + return func(l *hnswCommitLogger) error { + l.condensor = condensor + return nil + } +} + +func WithSnapshotDisabled(disabled bool) CommitlogOption { + return func(l *hnswCommitLogger) error { + l.snapshotDisabled = disabled + return nil + } +} + +func WithSnapshotCreateInterval(interval time.Duration) CommitlogOption { + return func(l *hnswCommitLogger) error { + l.snapshotCreateInterval = interval + return nil + } +} + +func WithSnapshotMinDeltaCommitlogsNumer(number int) CommitlogOption { + return func(l *hnswCommitLogger) error { + l.snapshotMinDeltaCommitlogsNumber = number + return nil + } +} + +func WithSnapshotMinDeltaCommitlogsSizePercentage(percentage int) CommitlogOption { + return func(l *hnswCommitLogger) error { + l.snapshotMinDeltaCommitlogsSizePercentage = percentage + return nil + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/commit_logger_noop.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/commit_logger_noop.go new file mode 100644 index 0000000000000000000000000000000000000000..dfe0aa12d71cb60872dc456f623e5f60decc2447 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/commit_logger_noop.go @@ -0,0 +1,137 @@ +// _ _ +// __ _____ __ ___ ___ __ 
_| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package hnsw + +import ( + "context" + + "github.com/weaviate/weaviate/adapters/repos/db/vector/compressionhelpers" + "github.com/weaviate/weaviate/adapters/repos/db/vector/multivector" +) + +// NoopCommitLogger implements the CommitLogger interface, but does not +// actually write anything to disk +type NoopCommitLogger struct{} + +func (n *NoopCommitLogger) ID() string { + return "" +} + +func (n *NoopCommitLogger) AddPQCompression(data compressionhelpers.PQData) error { + return nil +} + +func (n *NoopCommitLogger) AddSQCompression(data compressionhelpers.SQData) error { + return nil +} + +func (n *NoopCommitLogger) AddRQCompression(data compressionhelpers.RQData) error { + return nil +} + +func (n *NoopCommitLogger) AddMuvera(data multivector.MuveraData) error { + return nil +} + +func (n *NoopCommitLogger) AddBRQCompression(data compressionhelpers.BRQData) error { + return nil +} + +func (n *NoopCommitLogger) AddNode(node *vertex) error { + return nil +} + +func (n *NoopCommitLogger) Flush() error { + return nil +} + +func (n *NoopCommitLogger) SetEntryPointWithMaxLayer(id uint64, level int) error { + return nil +} + +func (n *NoopCommitLogger) AddLinkAtLevel(nodeid uint64, level int, target uint64) error { + return nil +} + +func (n *NoopCommitLogger) ReplaceLinksAtLevel(nodeid uint64, level int, targets []uint64) error { + return nil +} + +func (n *NoopCommitLogger) AddTombstone(nodeid uint64) error { + return nil +} + +func (n *NoopCommitLogger) RemoveTombstone(nodeid uint64) error { + return nil +} + +func (n *NoopCommitLogger) DeleteNode(nodeid uint64) error { + return nil +} + +func (n *NoopCommitLogger) ClearLinks(nodeid uint64) error { + return nil +} + +func (n *NoopCommitLogger) 
ClearLinksAtLevel(nodeid uint64, level uint16) error { + return nil +} + +func (n *NoopCommitLogger) Reset() error { + return nil +} + +func (n *NoopCommitLogger) Drop(ctx context.Context) error { + return nil +} + +func (n *NoopCommitLogger) Shutdown(context.Context) error { + return nil +} + +func (n *NoopCommitLogger) CreateSnapshot() (bool, int64, error) { + return false, 0, nil +} + +func (n *NoopCommitLogger) CreateAndLoadSnapshot() (*DeserializationResult, int64, error) { + return nil, 0, nil +} + +func (n *NoopCommitLogger) LoadSnapshot() (*DeserializationResult, int64, error) { + return nil, 0, nil +} + +func MakeNoopCommitLogger() (CommitLogger, error) { + return &NoopCommitLogger{}, nil +} + +func (n *NoopCommitLogger) NewBufferedLinksLogger() BufferedLinksLogger { + return n // return self as it does not do anything anyway +} + +func (n *NoopCommitLogger) Close() error { + return nil +} + +func (n *NoopCommitLogger) StartSwitchLogs() chan struct{} { + return make(chan struct{}) +} + +func (n *NoopCommitLogger) RootPath() string { + return "" +} + +func (n *NoopCommitLogger) SwitchCommitLogs(force bool) error { + return nil +} + +func (n *NoopCommitLogger) InitMaintenance() {} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/commit_logger_snapshot.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/commit_logger_snapshot.go new file mode 100644 index 0000000000000000000000000000000000000000..9830e881d87421baf4d1255ff3d393791dbd59db --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/commit_logger_snapshot.go @@ -0,0 +1,1618 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
//
//  CONTACT: hello@weaviate.io
//

package hnsw

import (
	"bufio"
	"encoding/binary"
	"fmt"
	"hash"
	"hash/crc32"
	"io"
	"math"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"github.com/weaviate/weaviate/adapters/repos/db/vector/compressionhelpers"
	"github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/packedconn"
	"github.com/weaviate/weaviate/adapters/repos/db/vector/multivector"
	"github.com/weaviate/weaviate/entities/diskio"
	enterrors "github.com/weaviate/weaviate/entities/errors"
)

const (
	// checkpointChunkSize is the number of nodes covered per checkpoint entry
	checkpointChunkSize = 100_000
	snapshotConcurrency = 8 // number of goroutines handling snapshot's checkpoints reading
	snapshotDirSuffix   = ".hnsw.snapshot.d"
	// snapshotCheckInterval is the default upper bound on how often the
	// maintenance cycle checks whether a snapshot is due
	snapshotCheckInterval = 10 * time.Minute
)

// Compression/encoder type tags persisted inside snapshot files.
// Values start at 1 and must never be reordered.
const (
	SnapshotCompressionTypePQ = iota + 1
	SnapshotCompressionTypeSQ
	SnapshotEncoderTypeMuvera
	SnapshotCompressionTypeRQ
	SnapshotCompressionTypeBRQ
)

// version of the snapshot file format
const (
	snapshotVersionV1 = 1 // initial version
	snapshotVersionV2 = 2 // added packed connections support
)

// snapshotName strips the ".snapshot"/".snapshot.checkpoints" suffix from a
// snapshot (or checkpoints) path, leaving the timestamp-derived base name.
func snapshotName(path string) string {
	base := filepath.Base(path)
	return strings.TrimSuffix(strings.TrimSuffix(base, ".snapshot"), ".snapshot.checkpoints")
}

// snapshotTimestamp parses the creation timestamp encoded in a snapshot
// file's name.
func snapshotTimestamp(path string) (int64, error) {
	return asTimeStamp(snapshotName(path))
}

// snapshotDirectory returns the directory holding all snapshots for the
// given logger id.
func snapshotDirectory(rootPath, name string) string {
	return filepath.Join(rootPath, name+snapshotDirSuffix)
}

// Loads state of last available snapshot. Returns nil if no snapshot was found.
func (l *hnswCommitLogger) LoadSnapshot() (state *DeserializationResult, createdAt int64, err error) {
	l.snapshotLock.Lock()
	defer l.snapshotLock.Unlock()

	logger, onFinish := l.setupSnapshotLogger(logrus.Fields{"method": "load_snapshot"})
	// onFinish logs success/failure together with the elapsed time; err is
	// captured via the named return
	defer func() { onFinish(err) }()

	snapshotPath, createdAt, err := l.getLastSnapshot()
	if err != nil {
		return nil, 0, errors.Wrapf(err, "get last snapshot")
	}
	if snapshotPath == "" {
		logger.Debug("no last snapshot found")
		return nil, 0, nil
	}
	logger.WithField("snapshot", snapshotPath).Debug("last snapshot found")

	state, err = l.readSnapshot(snapshotPath)
	if err != nil {
		// handleReadSnapshotError cleans up the unreadable snapshot; the
		// error it returns may be nil (suppressed), in which case the caller
		// sees "no snapshot" rather than a failure
		return nil, 0, l.handleReadSnapshotError(logger, snapshotPath, createdAt, err)
	}
	return state, createdAt, nil
}

// Creates a snapshot of the commit log. Returns if snapshot was actually created.
// The snapshot is created from the last snapshot and commitlog files created after,
// or from the entire commit log if there is no previous snapshot.
// The snapshot state contains all but last commitlog (may still be in use and mutable).
func (l *hnswCommitLogger) CreateSnapshot() (created bool, createdAt int64, err error) {
	l.snapshotLock.Lock()
	defer l.snapshotLock.Unlock()

	logger, onFinish := l.setupSnapshotLogger(logrus.Fields{"method": "create_snapshot"})
	defer func() { onFinish(err) }()

	// load=false: the resulting state is only used to decide whether a
	// snapshot was produced, it is not returned to the caller
	state, createdAt, err := l.createAndOptionallyLoadSnapshot(logger, false)
	return state != nil, createdAt, err
}

// CreateAndLoadSnapshot works like CreateSnapshot, but it will always load the
// last snapshot. It is used at startup to automatically create a snapshot
// while loading the commit log, to avoid having to load the commit log again.
+func (l *hnswCommitLogger) CreateAndLoadSnapshot() (state *DeserializationResult, createdAt int64, err error) { + l.snapshotLock.Lock() + defer l.snapshotLock.Unlock() + + logger, onFinish := l.setupSnapshotLogger(logrus.Fields{"method": "create_and_load_snapshot"}) + defer func() { onFinish(err) }() + + return l.createAndOptionallyLoadSnapshot(logger, true) +} + +func (l *hnswCommitLogger) setupSnapshotLogger(fields logrus.Fields) (logger logrus.FieldLogger, onFinish func(err error)) { + logger = l.snapshotLogger.WithFields(fields) + started := time.Now() + + logger.Debug("started") + return logger, func(err error) { + l := logger.WithField("took", time.Since(started)) + if err != nil { + l.WithError(err).Errorf("finished with err") + } else { + l.Debug("finished") + } + } +} + +func (l *hnswCommitLogger) createAndOptionallyLoadSnapshot(logger logrus.FieldLogger, load bool, +) (*DeserializationResult, int64, error) { + lastSnapshotPath, lastCreatedAt, err := l.getLastSnapshot() + if err != nil { + return nil, 0, errors.Wrapf(err, "get last snapshot") + } + + state, path, createdAt, err := l.createAndOptionallyLoadSnapshotOnLastOne(logger, load, lastSnapshotPath, lastCreatedAt) + if path != "" { + l.snapshotLastCreatedAt = time.Now() + l.snapshotPartitions = []string{snapshotName(path)} + } + return state, createdAt, err +} + +func (l *hnswCommitLogger) createAndOptionallyLoadSnapshotOnLastOne(logger logrus.FieldLogger, + load bool, snapshotPath string, createdAt int64, +) (*DeserializationResult, string, int64, error) { + commitlogPaths, err := l.getDeltaCommitlogs(createdAt) + if err != nil { + return nil, "", 0, errors.Wrapf(err, "get delta commitlogs") + } + + // skip allocCheck on forced loading + shouldCreateSnapshot := l.shouldCreateSnapshot(logger, snapshotPath, commitlogPaths, load) + + var state *DeserializationResult + if load || shouldCreateSnapshot { + if snapshotPath != "" { + logger.WithField("snapshot", snapshotPath).Debug("last snapshot found") + 
+ state, err = l.readSnapshot(snapshotPath) + if err != nil { + if err = l.handleReadSnapshotError(logger, snapshotPath, createdAt, err); err != nil { + return nil, "", 0, errors.Wrapf(err, "read snapshot") + } + // call again without last snapshot + return l.createAndOptionallyLoadSnapshotOnLastOne(logger, load, "", 0) + } + } else { + logger.Debug("no last snapshot found") + } + } + + if !shouldCreateSnapshot { + return state, "", createdAt, nil + } + + newState, err := loadCommitLoggerState(l.logger, commitlogPaths, state, nil) + if err != nil { + return nil, "", 0, errors.Wrapf(err, "apply delta commitlogs") + } + if newState == nil { + return nil, "", 0, errors.New("empty state") + } + + ln := len(commitlogPaths) + newSnapshotPath := l.snapshotFileName(commitlogPaths[ln-1]) + newCreatedAt, err := snapshotTimestamp(newSnapshotPath) + if err != nil { + return nil, "", 0, errors.Wrapf(err, "get new snapshot timestamp") + } + if err := l.writeSnapshot(newState, newSnapshotPath); err != nil { + return nil, "", 0, errors.Wrapf(err, "write new snapshot") + } + logger.WithFields(logrus.Fields{ + "delta_commitlogs": ln, + "last_snapshot": snapshotPath, + "snapshot": newSnapshotPath, + }).Info("new snapshot created") + + if err = l.cleanupSnapshots(newCreatedAt); err != nil { + return newState, newSnapshotPath, newCreatedAt, errors.Wrapf(err, "cleanup previous snapshot") + } + + return newState, newSnapshotPath, newCreatedAt, nil +} + +func (l *hnswCommitLogger) shouldCreateSnapshot(logger logrus.FieldLogger, + lastSnapshotPath string, deltaCommitlogPaths []string, skipAllocCheck bool, +) bool { + if ln := len(deltaCommitlogPaths); ln < l.snapshotMinDeltaCommitlogsNumber { + logger.Debugf("not enough delta commitlogs (%d of required %d)", ln, l.snapshotMinDeltaCommitlogsNumber) + return false + } + + // calculate sizes only if needed + snapshotSize := int64(0) + commitlogsSize := int64(0) + if (!skipAllocCheck && l.allocChecker != nil) || + 
(l.snapshotMinDeltaCommitlogsSizePercentage > 0 && lastSnapshotPath != "") { + snapshotSize = l.calcSnapshotSize(lastSnapshotPath) + commitlogsSize = l.calcCommitlogsSize(deltaCommitlogPaths...) + } + + if l.snapshotMinDeltaCommitlogsSizePercentage > 0 && snapshotSize > 0 { + percentage := float32(commitlogsSize) * 100 / float32(snapshotSize) + if percentage < float32(l.snapshotMinDeltaCommitlogsSizePercentage) { + logger.Debugf("too small delta commitlogs size (%.2f%% of required %d%% of snapshot size)", percentage, l.snapshotMinDeltaCommitlogsSizePercentage) + return false + } + } + + if !skipAllocCheck && l.allocChecker != nil { + requiredSize := snapshotSize + commitlogsSize + if err := l.allocChecker.CheckAlloc(requiredSize); err != nil { + logger.WithField("size", requiredSize). + WithError(err). + Warnf("skipping hnsw snapshot due to memory pressure") + return false + } + } + return true +} + +func (l *hnswCommitLogger) initSnapshotData() error { + dirs := strings.Split(filepath.Clean(l.rootPath), string(os.PathSeparator)) + if ln := len(dirs); ln > 2 { + dirs = dirs[ln-2:] + } + snapshotLogger := l.logger.WithFields(logrus.Fields{ + "action": "hnsw_commit_log_snapshot", + "id": l.id, + "path": filepath.Join(dirs...), + }) + fields := logrus.Fields{"enabled": !l.snapshotDisabled} + + defer func() { + snapshotLogger.WithFields(fields).Debug("snapshot config") + }() + + snapshotPath, createdAt, err := l.getLastSnapshot() + if err != nil { + return errors.Wrapf(err, "get last snapshot") + } + l.snapshotPartitions = []string{} + if snapshotPath != "" { + l.snapshotPartitions = append(l.snapshotPartitions, snapshotName(snapshotPath)) + } + + fields["last_snapshot"] = snapshotPath + fields["partitions"] = l.snapshotPartitions + + if !l.snapshotDisabled { + if err := os.MkdirAll(snapshotDirectory(l.rootPath, l.id), 0o755); err != nil { + return errors.Wrapf(err, "make snapshot directory") + } + + l.snapshotLogger = snapshotLogger + if l.snapshotCreateInterval > 0 { 
+ l.snapshotLastCreatedAt = time.Unix(createdAt, 0) + l.snapshotLastCheckedAt = time.Now() + l.snapshotCheckInterval = min(snapshotCheckInterval, l.snapshotCreateInterval) + + fields["last_created_at"] = l.snapshotLastCreatedAt + fields["last_checked_at"] = l.snapshotLastCheckedAt + fields["check_interval"] = l.snapshotCheckInterval + } + + fields["create_interval"] = l.snapshotCreateInterval + } + return nil +} + +func (l *hnswCommitLogger) handleReadSnapshotError(logger logrus.FieldLogger, + snapshotPath string, createdAt int64, err error, +) error { + logger.WithField("snapshot", snapshotPath). + WithError(err). + Warn("snapshot can not be read, cleanup") + + if err := l.cleanupSnapshots(createdAt + 1); err != nil { + logger.WithField("snapshot", snapshotPath). + WithError(err). + Warn("cleaning snapshots") + } + + // suppress error + return nil +} + +// if file size can not be read, it is skipped +func (l *hnswCommitLogger) calcSnapshotSize(snapshotPath string) int64 { + if snapshotPath == "" { + return 0 + } + + totalSize := int64(0) + if info, err := os.Stat(snapshotPath); err == nil { + totalSize += info.Size() + } + if info, err := os.Stat(snapshotPath + ".checkpoints"); err == nil { + totalSize += info.Size() + } + return totalSize +} + +// if file size can not be read, it is skipped +func (l *hnswCommitLogger) calcCommitlogsSize(commitLogPaths ...string) int64 { + if len(commitLogPaths) == 0 { + return 0 + } + + totalSize := int64(0) + for i := range commitLogPaths { + if info, err := os.Stat(commitLogPaths[i]); err == nil { + totalSize += info.Size() + } + } + return totalSize +} + +func (l *hnswCommitLogger) snapshotFileName(commitLogFileName string) string { + path := strings.TrimSuffix(commitLogFileName, ".condensed") + ".snapshot" + return strings.Replace(path, ".hnsw.commitlog.d", snapshotDirSuffix, 1) +} + +// read the directory and find the latest snapshot file +func (l *hnswCommitLogger) getLastSnapshot() (path string, createdAt int64, err error) 
{ + snapshotDir := snapshotDirectory(l.rootPath, l.id) + + entries, err := os.ReadDir(snapshotDir) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + // no snapshot directory, no snapshot + return "", 0, nil + } + return "", 0, errors.Wrapf(err, "read snapshot directory %q", snapshotDir) + } + + for i := len(entries) - 1; i >= 0; i-- { + entry := entries[i] + + if entry.IsDir() { + continue + } + if !strings.HasSuffix(entry.Name(), ".snapshot") { + // not a snapshot file + continue + } + + createdAt, err = snapshotTimestamp(entry.Name()) + if err != nil { + return "", 0, errors.Wrapf(err, "get snapshot timestamp") + } + return filepath.Join(snapshotDir, entry.Name()), createdAt, nil + } + + // no snapshot found + return "", 0, nil +} + +func (l *hnswCommitLogger) getDeltaCommitlogs(createdAfter int64) (paths []string, err error) { + files, err := getCommitFiles(l.rootPath, l.id, createdAfter) + if err != nil { + return nil, err + } + // skip last file, may still be in use and mutable + if ln := len(files); ln > 1 { + files = files[:ln-1] + } else { + return []string{}, nil + } + files, err = skipEmptyFiles(files) + if err != nil { + return nil, err + } + return commitLogFileNames(l.rootPath, l.id, files), nil +} + +// cleanupSnapshots removes all snapshots, checkpoints and temporary files older than the given timestamp. +func (l *hnswCommitLogger) cleanupSnapshots(before int64) error { + snapshotDir := snapshotDirectory(l.rootPath, l.id) + + files, err := os.ReadDir(snapshotDir) + if err != nil { + return errors.Wrapf(err, "read snapshot directory %q", snapshotDir) + } + for _, file := range files { + name := file.Name() + + if strings.HasSuffix(name, ".snapshot.tmp") { + // a temporary snapshot file was found which means that a previous + // snapshoting process never completed, we can safely remove it. 
+ err := os.Remove(filepath.Join(snapshotDir, name)) + if err != nil { + return errors.Wrapf(err, "remove tmp snapshot file %q", name) + } + } + + if strings.HasSuffix(name, ".snapshot") { + tmstr := strings.TrimSuffix(name, ".snapshot") + i, err := strconv.ParseInt(tmstr, 10, 64) + if err != nil { + return errors.Wrapf(err, "parse snapshot time") + } + + if i < before { + err := os.Remove(filepath.Join(snapshotDir, name)) + if err != nil { + return errors.Wrapf(err, "remove snapshot file %q", name) + } + } + } + + if strings.HasSuffix(name, ".snapshot.checkpoints") { + tmstr := strings.TrimSuffix(name, ".snapshot.checkpoints") + i, err := strconv.ParseInt(tmstr, 10, 64) + if err != nil { + return errors.Wrapf(err, "parse checkpoints time") + } + + if i < before { + err := os.Remove(filepath.Join(snapshotDir, name)) + if err != nil { + return errors.Wrapf(err, "remove checkpoints file %q", name) + } + } + } + } + + return nil +} + +func loadCommitLoggerState(logger logrus.FieldLogger, fileNames []string, state *DeserializationResult, metrics *Metrics) (*DeserializationResult, error) { + start := time.Now() + defer func() { + logger.WithField("commitlog_files", len(fileNames)). + WithField("took", time.Since(start)). 
+ Debug("commit log files loaded") + }() + var err error + + fileNames, err = NewCorruptedCommitLogFixer().Do(fileNames) + if err != nil { + return nil, errors.Wrap(err, "corrupted commit log fixer") + } + + for i, fileName := range fileNames { + beforeIndividual := time.Now() + + err = func() error { + fd, err := os.Open(fileName) + if err != nil { + return errors.Wrapf(err, "open commit log %q for reading", fileName) + } + defer fd.Close() + + info, err := fd.Stat() + if err != nil { + errors.Wrapf(err, "get commit log %qsize", fileName) + } + if info.Size() == 0 { + // nothing to do + return nil + } + + var fdMetered io.Reader = fd + if metrics != nil { + fdMetered = diskio.NewMeteredReader(fd, + metrics.TrackStartupReadCommitlogDiskIO) + } + fdBuf := bufio.NewReaderSize(fdMetered, 512*1024) + + var valid int + state, valid, err = NewDeserializer(logger).Do(fdBuf, state, false) + if err != nil { + if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) { + // we need to check for both EOF or UnexpectedEOF, as we don't know where + // the commit log got corrupted, a field ending that weset a longer + // encoding for would return EOF, whereas a field read with binary.Read + // with a fixed size would return UnexpectedEOF. From our perspective both + // are unexpected. + + logger.WithField("action", "hnsw_load_commit_log_corruption"). + WithField("path", fileName). + Error("write-ahead-log ended abruptly, some elements may not have been recovered") + + // we need to truncate the file to its valid length! 
+ if err := os.Truncate(fileName, int64(valid)); err != nil { + return errors.Wrapf(err, "truncate corrupt commit log %q", fileName) + } + } else { + // only return an actual error on non-EOF errors, otherwise we'll end + // up in a startup crashloop + return errors.Wrapf(err, "deserialize commit log %q", fileName) + } + } + return nil + }() + if err != nil { + return nil, err + } + + if metrics != nil { + metrics.StartupProgress(float64(i+1) / float64(len(fileNames))) + metrics.TrackStartupIndividual(beforeIndividual) + } + } + + return state, nil +} + +func (l *hnswCommitLogger) writeSnapshot(state *DeserializationResult, filename string) error { + tmpSnapshotFileName := fmt.Sprintf("%s.tmp", filename) + checkPointsFileName := fmt.Sprintf("%s.checkpoints", filename) + + // check if checkpoints with the same name already exist + if _, err := os.Stat(checkPointsFileName); err == nil { + l.logger.WithField("action", "write_snapshot"). + WithField("path", checkPointsFileName). + Info("writing new snapshot with same name as last snapshot, deleting checkpoints file") + + err = os.Remove(checkPointsFileName) + if err != nil { + return errors.Wrap(err, "remove existing checkpoints file") + } + } + + snap, err := os.OpenFile(tmpSnapshotFileName, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0o666) + if err != nil { + return errors.Wrapf(err, "create snapshot file %q", tmpSnapshotFileName) + } + defer snap.Close() + + // compute the checksum of the snapshot file + w := bufio.NewWriter(snap) + + // write the snapshot to the file + checkpoints, err := l.writeStateTo(state, w) + if err != nil { + return errors.Wrapf(err, "writing snapshot file %q", tmpSnapshotFileName) + } + + // flush the buffered writer + err = w.Flush() + if err != nil { + return errors.Wrapf(err, "flushing snapshot file %q", tmpSnapshotFileName) + } + + // sync the file to disk + err = snap.Sync() + if err != nil { + return errors.Wrapf(err, "fsync snapshot file %q", tmpSnapshotFileName) + } + + // close the file + 
err = snap.Close() + if err != nil { + return errors.Wrapf(err, "close snapshot file %q", tmpSnapshotFileName) + } + + // write the checkpoints to a separate file + err = writeCheckpoints(checkPointsFileName, checkpoints) + if err != nil { + return errors.Wrap(err, "write checkpoints file") + } + + // rename the temporary snapshot file to the final name + err = os.Rename(tmpSnapshotFileName, filename) + if err != nil { + return errors.Wrapf(err, "rename snapshot file %q", tmpSnapshotFileName) + } + + return nil +} + +func (l *hnswCommitLogger) readSnapshot(path string) (*DeserializationResult, error) { + start := time.Now() + defer func() { + l.logger.WithField("snapshot", path).WithField("took", time.Since(start).String()).Info("snapshot loaded") + }() + + checkpoints, err := readCheckpoints(path) + if err != nil { + // if for any reason the checkpoints file is not found or corrupted + // we need to remove the snapshot file and create a new one from the commit log. + _ = os.Remove(path) + cpPath := path + ".checkpoints" + _ = os.Remove(cpPath) + + l.logger.WithField("action", "hnsw_remove_corrupt_snapshot"). + WithField("path", path). + WithError(err). + Error("checkpoints file not found or corrupted, removing snapshot files") + + return nil, errors.Wrapf(err, "read checkpoints of snapshot '%s'", path) + } + + state, err := l.readStateFrom(path, checkpoints) + if err != nil { + // if for any reason the snapshot file is not found or corrupted + // we need to remove the snapshot file and create a new one from the commit log. + _ = os.Remove(path) + cpPath := path + ".checkpoints" + _ = os.Remove(cpPath) + + l.logger.WithField("action", "hnsw_remove_corrupt_snapshot"). + WithField("path", path). + WithError(err). 
+ Error("snapshot file not found or corrupted, removing snapshot files") + return nil, errors.Wrapf(err, "read state of snapshot '%s'", path) + } + + return state, nil +} + +// returns checkpoints which can be used as parallelizatio hints +func (l *hnswCommitLogger) writeStateTo(state *DeserializationResult, wr io.Writer) ([]Checkpoint, error) { + offset, err := l.writeMetadataTo(state, wr) + if err != nil { + return nil, err + } + + var checkpoints []Checkpoint + // start at the very first node + checkpoints = append(checkpoints, Checkpoint{NodeID: 0, Offset: uint64(offset)}) + + nonNilNodes := 0 + + hasher := crc32.NewIEEE() + w := io.MultiWriter(wr, hasher) + + for i, n := range state.Nodes { + if n == nil { + // nil node + if err := writeByte(w, 0); err != nil { + return nil, err + } + offset += writeByteSize + continue + } + + _, hasATombstone := state.Tombstones[n.id] + _, tombstoneIsCleaned := state.TombstonesDeleted[n.id] + + if hasATombstone && tombstoneIsCleaned { + // if the node has been deleted but its tombstone has been cleaned up + // we can write a nil node + if err := writeByte(w, 0); err != nil { + return nil, err + } + offset += writeByteSize + continue + } + + if nonNilNodes%checkpointChunkSize == 0 && nonNilNodes > 0 { + checkpoints[len(checkpoints)-1].Hash = hasher.Sum32() + hasher.Reset() + checkpoints = append(checkpoints, Checkpoint{NodeID: uint64(i), Offset: uint64(offset)}) + } + + if hasATombstone { + if err := writeByte(w, 1); err != nil { + return nil, err + } + } else { + if err := writeByte(w, 2); err != nil { + return nil, err + } + } + offset += writeByteSize + + if err := writeUint32(w, uint32(n.level)); err != nil { + return nil, err + } + offset += writeUint32Size + + connData := n.connections.Data() + if err := writeUint32(w, uint32(len(connData))); err != nil { + return nil, err + } + offset += writeUint32Size + + _, err = w.Write(connData) + if err != nil { + return nil, errors.Wrapf(err, "write connections data for node %d", 
		n.id)
		}
		offset += len(connData)

		nonNilNodes++
	}

	// compute last checkpoint hash
	checkpoints[len(checkpoints)-1].Hash = hasher.Sum32()

	// add a dummy checkpoint to mark the end of the file
	checkpoints = append(checkpoints, Checkpoint{NodeID: math.MaxInt64, Offset: uint64(offset)})

	return checkpoints, nil
}

// writeMetadataTo writes the snapshot header (version, entrypoint, level,
// compression and encoder configuration, node count, checksum) to w and
// returns the number of bytes written.
func (l *hnswCommitLogger) writeMetadataTo(state *DeserializationResult, w io.Writer) (offset int, err error) {
	// every header byte is also fed to the CRC32 hasher; its sum is
	// appended as the trailing metadata checksum
	hasher := crc32.NewIEEE()
	w = io.MultiWriter(w, hasher)

	// version
	offset = 0
	if err := writeByte(w, snapshotVersionV2); err != nil {
		return 0, err
	}
	offset += writeByteSize

	if err := writeUint64(w, state.Entrypoint); err != nil {
		return 0, err
	}
	offset += writeUint64Size

	if err := writeUint16(w, state.Level); err != nil {
		return 0, err
	}
	offset += writeUint16Size

	isCompressed := state.Compressed

	if err := writeBool(w, isCompressed); err != nil {
		return 0, err
	}
	offset += writeByteSize

	if state.Compressed && state.CompressionPQData != nil { // PQ
		// first byte is the compression type
		if err := writeByte(w, byte(SnapshotCompressionTypePQ)); err != nil {
			return 0, err
		}
		offset += writeByteSize

		if err := writeUint16(w, state.CompressionPQData.Dimensions); err != nil {
			return 0, err
		}
		offset += writeUint16Size

		if err := writeUint16(w, state.CompressionPQData.Ks); err != nil {
			return 0, err
		}
		offset += writeUint16Size

		if err := writeUint16(w, state.CompressionPQData.M); err != nil {
			return 0, err
		}
		offset += writeUint16Size

		if err := writeByte(w, byte(state.CompressionPQData.EncoderType)); err != nil {
			return 0, err
		}
		offset += writeByteSize

		if err := writeByte(w, state.CompressionPQData.EncoderDistribution); err != nil {
			return 0, err
		}
		offset += writeByteSize

		if err := writeBool(w, state.CompressionPQData.UseBitsEncoding); err != nil {
			return
0, err + } + offset += writeByteSize + + for _, encoder := range state.CompressionPQData.Encoders { + if n, err := w.Write(encoder.ExposeDataForRestore()); err != nil { + return 0, err + } else { + offset += n + } + } + } else if state.Compressed && state.CompressionSQData != nil { // SQ + // first byte is the compression type + if err := writeByte(w, byte(SnapshotCompressionTypeSQ)); err != nil { + return 0, err + } + offset += writeByteSize + + if err := writeUint16(w, state.CompressionSQData.Dimensions); err != nil { + return 0, err + } + offset += writeUint16Size + + if err := writeUint32(w, math.Float32bits(state.CompressionSQData.A)); err != nil { + return 0, err + } + offset += writeUint32Size + + if err := writeUint32(w, math.Float32bits(state.CompressionSQData.B)); err != nil { + return 0, err + } + offset += writeUint32Size + + } else if state.Compressed && state.CompressionRQData != nil { // RQ + // first byte is the compression type + if err := writeByte(w, byte(SnapshotCompressionTypeRQ)); err != nil { + return 0, err + } + offset += writeByteSize + + if err := writeUint32(w, state.CompressionRQData.InputDim); err != nil { + return 0, err + } + offset += writeUint32Size + + if err := writeUint32(w, state.CompressionRQData.Bits); err != nil { + return 0, err + } + offset += writeUint32Size + + if err := writeUint32(w, state.CompressionRQData.Rotation.OutputDim); err != nil { + return 0, err + } + offset += writeUint32Size + + if err := writeUint32(w, state.CompressionRQData.Rotation.Rounds); err != nil { + return 0, err + } + offset += writeUint32Size + + for _, swap := range state.CompressionRQData.Rotation.Swaps { + for _, dim := range swap { + if err := writeUint16(w, dim.I); err != nil { + return 0, err + } + offset += writeUint16Size + + if err := writeUint16(w, dim.J); err != nil { + return 0, err + } + offset += writeUint16Size + } + } + + for _, sign := range state.CompressionRQData.Rotation.Signs { + for _, dim := range sign { + if err := 
writeFloat32(w, dim); err != nil { + return 0, err + } + offset += writeFloat32Size + } + } + + } else if state.Compressed && state.CompressionBRQData != nil { // BRQ + // first byte is the compression type + if err := writeByte(w, byte(SnapshotCompressionTypeBRQ)); err != nil { + return 0, err + } + offset += writeByteSize + + if err := writeUint32(w, state.CompressionBRQData.InputDim); err != nil { + return 0, err + } + offset += writeUint32Size + + if err := writeUint32(w, state.CompressionBRQData.Rotation.OutputDim); err != nil { + return 0, err + } + offset += writeUint32Size + + if err := writeUint32(w, state.CompressionBRQData.Rotation.Rounds); err != nil { + return 0, err + } + offset += writeUint32Size + + for _, swap := range state.CompressionBRQData.Rotation.Swaps { + for _, dim := range swap { + if err := writeUint16(w, dim.I); err != nil { + return 0, err + } + offset += writeUint16Size + + if err := writeUint16(w, dim.J); err != nil { + return 0, err + } + offset += writeUint16Size + } + } + + for _, sign := range state.CompressionBRQData.Rotation.Signs { + for _, dim := range sign { + if err := writeFloat32(w, dim); err != nil { + return 0, err + } + offset += writeFloat32Size + } + } + + for _, rounding := range state.CompressionBRQData.Rounding { + if err := writeFloat32(w, rounding); err != nil { + return 0, err + } + offset += writeFloat32Size + } + } + + isEncoded := state.MuveraEnabled + + if err := writeBool(w, isEncoded); err != nil { + return 0, err + } + offset += writeByteSize + + if state.MuveraEnabled && state.EncoderMuvera != nil { // Muvera + // first byte is the encoder type + if err := writeByte(w, byte(SnapshotEncoderTypeMuvera)); err != nil { + return 0, err + } + offset += writeByteSize + + if err := writeUint32(w, state.EncoderMuvera.Dimensions); err != nil { + return 0, err + } + offset += writeUint32Size + + if err := writeUint32(w, state.EncoderMuvera.KSim); err != nil { + return 0, err + } + offset += writeUint32Size + + if 
err := writeUint32(w, state.EncoderMuvera.NumClusters); err != nil { + return 0, err + } + offset += writeUint32Size + + if err := writeUint32(w, state.EncoderMuvera.DProjections); err != nil { + return 0, err + } + offset += writeUint32Size + + if err := writeUint32(w, state.EncoderMuvera.Repetitions); err != nil { + return 0, err + } + offset += writeUint32Size + + for _, gaussian := range state.EncoderMuvera.Gaussians { + for _, cluster := range gaussian { + for _, el := range cluster { + if err := writeUint32(w, math.Float32bits(el)); err != nil { + return 0, err + } + offset += writeUint32Size + } + } + } + + for _, matrix := range state.EncoderMuvera.S { + for _, vector := range matrix { + for _, el := range vector { + if err := writeUint32(w, math.Float32bits(el)); err != nil { + return 0, err + } + offset += writeUint32Size + } + } + } + } + + if err := writeUint32(w, uint32(len(state.Nodes))); err != nil { + return 0, err + } + offset += writeUint32Size + + // write checksum of the metadata + if err := binary.Write(w, binary.LittleEndian, hasher.Sum32()); err != nil { + return 0, err + } + offset += writeUint32Size + + return offset, nil +} + +func (l *hnswCommitLogger) readStateFrom(filename string, checkpoints []Checkpoint) (*DeserializationResult, error) { + res := &DeserializationResult{ + NodesDeleted: make(map[uint64]struct{}), + Tombstones: make(map[uint64]struct{}), + TombstonesDeleted: make(map[uint64]struct{}), + LinksReplaced: make(map[uint64]map[uint16]struct{}), + } + + f, err := os.Open(filename) + if err != nil { + return nil, errors.Wrapf(err, "open snapshot file %q", filename) + } + defer f.Close() + + hasher := crc32.NewIEEE() + // start with a single-threaded reader until we make it the nodes section + r := bufio.NewReader(f) + + var b [8]byte + + _, err = ReadAndHash(r, hasher, b[:1]) // version + if err != nil { + return nil, errors.Wrapf(err, "read version") + } + version := int(b[0]) + if version < 0 || version > snapshotVersionV2 { 
+ return nil, fmt.Errorf("unsupported snapshot version %d", version) + } + + _, err = ReadAndHash(r, hasher, b[:8]) // entrypoint + if err != nil { + return nil, errors.Wrapf(err, "read entrypoint") + } + res.Entrypoint = binary.LittleEndian.Uint64(b[:8]) + + _, err = ReadAndHash(r, hasher, b[:2]) // level + if err != nil { + return nil, errors.Wrapf(err, "read level") + } + res.Level = binary.LittleEndian.Uint16(b[:2]) + + _, err = ReadAndHash(r, hasher, b[:1]) // isEncoded + if err != nil { + return nil, errors.Wrapf(err, "read compressed") + } + isCompressed := b[0] == 1 + + // Compressed data + if isCompressed { + _, err = ReadAndHash(r, hasher, b[:1]) // encoding type + if err != nil { + return nil, errors.Wrapf(err, "read compressed") + } + + switch b[0] { + case SnapshotEncoderTypeMuvera: // legacy Muvera snapshot + return nil, errors.New("discarding v1 Muvera snapshot") + case SnapshotCompressionTypePQ: + res.Compressed = true + _, err = ReadAndHash(r, hasher, b[:2]) // PQData.Dimensions + if err != nil { + return nil, errors.Wrapf(err, "read PQData.Dimensions") + } + dims := binary.LittleEndian.Uint16(b[:2]) + + _, err = ReadAndHash(r, hasher, b[:2]) // PQData.Ks + if err != nil { + return nil, errors.Wrapf(err, "read PQData.Ks") + } + ks := binary.LittleEndian.Uint16(b[:2]) + + _, err = ReadAndHash(r, hasher, b[:2]) // PQData.M + if err != nil { + return nil, errors.Wrapf(err, "read PQData.M") + } + m := binary.LittleEndian.Uint16(b[:2]) + + _, err = ReadAndHash(r, hasher, b[:1]) // PQData.EncoderType + if err != nil { + return nil, errors.Wrapf(err, "read PQData.EncoderType") + } + encoderType := compressionhelpers.Encoder(b[0]) + + _, err = ReadAndHash(r, hasher, b[:1]) // PQData.EncoderDistribution + if err != nil { + return nil, errors.Wrapf(err, "read PQData.EncoderDistribution") + } + dist := b[0] + + _, err = ReadAndHash(r, hasher, b[:1]) // PQData.UseBitsEncoding + if err != nil { + return nil, errors.Wrapf(err, "read PQData.UseBitsEncoding") + } 
+ useBitsEncoding := b[0] == 1 + + encoder := compressionhelpers.Encoder(encoderType) + + res.CompressionPQData = &compressionhelpers.PQData{ + Dimensions: dims, + EncoderType: encoder, + Ks: ks, + M: m, + EncoderDistribution: dist, + UseBitsEncoding: useBitsEncoding, + } + + var encoderReader func(r io.Reader, res *compressionhelpers.PQData, i uint16) (compressionhelpers.PQEncoder, error) + + switch encoder { + case compressionhelpers.UseTileEncoder: + encoderReader = ReadTileEncoder + case compressionhelpers.UseKMeansEncoder: + encoderReader = ReadKMeansEncoder + default: + return nil, errors.New("unsuported encoder type") + } + + for i := uint16(0); i < m; i++ { + encoder, err := encoderReader(io.TeeReader(r, hasher), res.CompressionPQData, i) + if err != nil { + return nil, err + } + res.CompressionPQData.Encoders = append(res.CompressionPQData.Encoders, encoder) + } + case SnapshotCompressionTypeSQ: + res.Compressed = true + _, err = ReadAndHash(r, hasher, b[:2]) // SQData.Dimensions + if err != nil { + return nil, errors.Wrapf(err, "read SQData.Dimensions") + } + dims := binary.LittleEndian.Uint16(b[:2]) + + _, err = ReadAndHash(r, hasher, b[:4]) // SQData.A + if err != nil { + return nil, errors.Wrapf(err, "read SQData.A") + } + a := math.Float32frombits(binary.LittleEndian.Uint32(b[:4])) + + _, err = ReadAndHash(r, hasher, b[:4]) // SQData.B + if err != nil { + return nil, errors.Wrapf(err, "read SQData.B") + } + b := math.Float32frombits(binary.LittleEndian.Uint32(b[:4])) + + res.CompressionSQData = &compressionhelpers.SQData{ + Dimensions: dims, + A: a, + B: b, + } + case SnapshotCompressionTypeRQ: + res.Compressed = true + _, err = ReadAndHash(r, hasher, b[:4]) // RQData.InputDim + if err != nil { + return nil, errors.Wrapf(err, "read RQData.Dimension") + } + inputDim := binary.LittleEndian.Uint32(b[:4]) + + _, err = ReadAndHash(r, hasher, b[:4]) // RQData.Bits + if err != nil { + return nil, errors.Wrapf(err, "read RQData.Bits") + } + bits := 
binary.LittleEndian.Uint32(b[:4]) + + _, err = ReadAndHash(r, hasher, b[:4]) // RQData.Rotation.OutputDim + if err != nil { + return nil, errors.Wrapf(err, "read RQData.Rotation.OutputDim") + } + outputDim := binary.LittleEndian.Uint32(b[:4]) + + _, err = ReadAndHash(r, hasher, b[:4]) // RQData.Rotation.Rounds + if err != nil { + return nil, errors.Wrapf(err, "read RQData.Rotation.Rounds") + } + rounds := binary.LittleEndian.Uint32(b[:4]) + + swaps := make([][]compressionhelpers.Swap, rounds) + for i := uint32(0); i < rounds; i++ { + swaps[i] = make([]compressionhelpers.Swap, outputDim/2) + for j := uint32(0); j < outputDim/2; j++ { + _, err = ReadAndHash(r, hasher, b[:2]) // RQData.Rotation.Swaps[i][j].I + if err != nil { + return nil, errors.Wrapf(err, "read RQData.Rotation.Swaps[i][j].I") + } + swaps[i][j].I = binary.LittleEndian.Uint16(b[:2]) + + _, err = ReadAndHash(r, hasher, b[:2]) // RQData.Rotation.Swaps[i][j].J + if err != nil { + return nil, errors.Wrapf(err, "read RQData.Rotation.Swaps[i][j].J") + } + swaps[i][j].J = binary.LittleEndian.Uint16(b[:2]) + } + } + + signs := make([][]float32, rounds) + + for i := uint32(0); i < rounds; i++ { + signs[i] = make([]float32, outputDim) + for j := uint32(0); j < outputDim; j++ { + _, err = ReadAndHash(r, hasher, b[:4]) // RQData.Rotation.Signs[i][j] + if err != nil { + return nil, errors.Wrapf(err, "read RQData.Rotation.Signs[i][j]") + } + signs[i][j] = math.Float32frombits(binary.LittleEndian.Uint32(b[:4])) + } + } + + res.CompressionRQData = &compressionhelpers.RQData{ + InputDim: inputDim, + Bits: bits, + Rotation: compressionhelpers.FastRotation{ + OutputDim: outputDim, + Rounds: rounds, + Swaps: swaps, + Signs: signs, + }, + } + case SnapshotCompressionTypeBRQ: + res.Compressed = true + _, err = ReadAndHash(r, hasher, b[:4]) // BRQData.InputDim + if err != nil { + return nil, errors.Wrapf(err, "read BRQData.InputDim") + } + inputDim := binary.LittleEndian.Uint32(b[:4]) + + _, err = ReadAndHash(r, hasher, 
b[:4]) // BRQData.Rotation.OutputDim + if err != nil { + return nil, errors.Wrapf(err, "read BRQData.Rotation.OutputDim") + } + outputDim := binary.LittleEndian.Uint32(b[:4]) + + _, err = ReadAndHash(r, hasher, b[:4]) // BRQData.Rotation.Rounds + if err != nil { + return nil, errors.Wrapf(err, "read BRQData.Rotation.Rounds") + } + rounds := binary.LittleEndian.Uint32(b[:4]) + + swaps := make([][]compressionhelpers.Swap, rounds) + + for i := uint32(0); i < rounds; i++ { + swaps[i] = make([]compressionhelpers.Swap, outputDim/2) + for j := uint32(0); j < outputDim/2; j++ { + _, err = ReadAndHash(r, hasher, b[:2]) // BRQData.Rotation.Swaps[i][j].I + if err != nil { + return nil, errors.Wrapf(err, "read BRQData.Rotation.Swaps[i][j].I") + } + swaps[i][j].I = binary.LittleEndian.Uint16(b[:2]) + + _, err = ReadAndHash(r, hasher, b[:2]) // BRQData.Rotation.Swaps[i][j].J + if err != nil { + return nil, errors.Wrapf(err, "read BRQData.Rotation.Swaps[i][j].J") + } + swaps[i][j].J = binary.LittleEndian.Uint16(b[:2]) + } + } + + signs := make([][]float32, rounds) + + for i := uint32(0); i < rounds; i++ { + signs[i] = make([]float32, outputDim) + for j := uint32(0); j < outputDim; j++ { + _, err = ReadAndHash(r, hasher, b[:4]) // BRQData.Rotation.Signs[i][j] + if err != nil { + return nil, errors.Wrapf(err, "read BRQData.Rotation.Signs[i][j]") + } + signs[i][j] = math.Float32frombits(binary.LittleEndian.Uint32(b[:4])) + } + } + + rounding := make([]float32, outputDim) + + for i := uint32(0); i < outputDim; i++ { + _, err = ReadAndHash(r, hasher, b[:4]) // BRQData.Rounding[i] + if err != nil { + return nil, errors.Wrapf(err, "read BRQData.Rounding[i]") + } + rounding[i] = math.Float32frombits(binary.LittleEndian.Uint32(b[:4])) + } + + res.CompressionBRQData = &compressionhelpers.BRQData{ + InputDim: inputDim, + Rotation: compressionhelpers.FastRotation{ + OutputDim: outputDim, + Rounds: rounds, + Swaps: swaps, + Signs: signs, + }, + Rounding: rounding, + } + default: + return nil, 
fmt.Errorf("unsupported compression type %d", b[0]) + } + } + + isEncoded := false + if version >= 2 { + _, err = ReadAndHash(r, hasher, b[:1]) // isEncoded + if err != nil { + return nil, errors.Wrapf(err, "read isEncoded") + } + isEncoded = b[0] == 1 + } + + if isEncoded { + _, err = ReadAndHash(r, hasher, b[:1]) // encoding type + if err != nil { + return nil, errors.Wrapf(err, "read encoding type") + } + switch b[0] { + case SnapshotEncoderTypeMuvera: + _, err = ReadAndHash(r, hasher, b[:4]) // Muvera.Dimensions + if err != nil { + return nil, errors.Wrapf(err, "read Muvera.Dimensions") + } + dims := binary.LittleEndian.Uint32(b[:4]) + + _, err = ReadAndHash(r, hasher, b[:4]) // Muvera.KSim + if err != nil { + return nil, errors.Wrapf(err, "read Muvera.KSim") + } + kSim := binary.LittleEndian.Uint32(b[:4]) + + _, err = ReadAndHash(r, hasher, b[:4]) // Muvera.NumClusters + if err != nil { + return nil, errors.Wrapf(err, "read Muvera.NumClusters") + } + numClusters := binary.LittleEndian.Uint32(b[:4]) + + _, err = ReadAndHash(r, hasher, b[:4]) // Muvera.DProjections + if err != nil { + return nil, errors.Wrapf(err, "read Muvera.DProjections") + } + dProjections := binary.LittleEndian.Uint32(b[:4]) + + _, err = ReadAndHash(r, hasher, b[:4]) // Muvera.Repetitions + if err != nil { + return nil, errors.Wrapf(err, "read Muvera.Repetitions") + } + repetitions := binary.LittleEndian.Uint32(b[:4]) + + gaussians := make([][][]float32, repetitions) + for i := uint32(0); i < repetitions; i++ { + gaussians[i] = make([][]float32, kSim) + for j := uint32(0); j < kSim; j++ { + gaussians[i][j] = make([]float32, dims) + for k := uint32(0); k < dims; k++ { + _, err = ReadAndHash(r, hasher, b[:4]) + if err != nil { + return nil, errors.Wrapf(err, "read Muvera.Gaussians") + } + bits := binary.LittleEndian.Uint32(b[:4]) + gaussians[i][j][k] = math.Float32frombits(bits) + } + } + } + + s := make([][][]float32, repetitions) + for i := uint32(0); i < repetitions; i++ { + s[i] = 
make([][]float32, dProjections) + for j := uint32(0); j < dProjections; j++ { + s[i][j] = make([]float32, dims) + for k := uint32(0); k < dims; k++ { + _, err = ReadAndHash(r, hasher, b[:4]) + if err != nil { + return nil, errors.Wrapf(err, "read Muvera.Gaussians") + } + bits := binary.LittleEndian.Uint32(b[:4]) + s[i][j][k] = math.Float32frombits(bits) + } + } + } + + res.MuveraEnabled = true + res.EncoderMuvera = &multivector.MuveraData{ + Dimensions: dims, + NumClusters: numClusters, + KSim: kSim, + DProjections: dProjections, + Repetitions: repetitions, + Gaussians: gaussians, + S: s, + } + default: + return nil, fmt.Errorf("unsupported encoder type %d", b[0]) + } + } + + _, err = ReadAndHash(r, hasher, b[:4]) // nodes + if err != nil { + return nil, errors.Wrapf(err, "read nodes count") + } + nodesCount := int(binary.LittleEndian.Uint32(b[:4])) + + res.Nodes = make([]*vertex, nodesCount) + + // read metadata checksum + _, err = io.ReadFull(r, b[:4]) // checksum + if err != nil { + return nil, errors.Wrapf(err, "read checksum") + } + + // check checksum + checksum := binary.LittleEndian.Uint32(b[:4]) + actualChecksum := hasher.Sum32() + if checksum != actualChecksum { + return nil, fmt.Errorf("invalid checksum: expected %d, got %d", checksum, actualChecksum) + } + + var mu sync.Mutex + + eg := enterrors.NewErrorGroupWrapper(l.logger) + eg.SetLimit(snapshotConcurrency) + for cpPos, cp := range checkpoints { + if cpPos == len(checkpoints)-1 { + // last checkpoint, no need to read + break + } + + start := int(cp.Offset) + end := int(checkpoints[cpPos+1].Offset) + + eg.Go(func() error { + var b [8]byte + var read int + + currNodeID := cp.NodeID + sr := io.NewSectionReader(f, int64(start), int64(end-start)) + hasher := crc32.NewIEEE() + r := bufio.NewReader(io.TeeReader(sr, hasher)) + + for read < end-start { + n, err := io.ReadFull(r, b[:1]) // node existence + if err != nil { + return errors.Wrapf(err, "read node existence") + } + read += n + if b[0] == 0 { + // 
nil node + currNodeID++ + continue + } + + node := &vertex{id: currNodeID} + + if b[0] == 1 { + mu.Lock() + res.Tombstones[node.id] = struct{}{} + mu.Unlock() + } else if b[0] != 2 { + return fmt.Errorf("unsupported node existence state") + } + + n, err = io.ReadFull(r, b[:4]) // level + if err != nil { + return errors.Wrapf(err, "read node level") + } + read += n + node.level = int(binary.LittleEndian.Uint32(b[:4])) + + n, err = io.ReadFull(r, b[:4]) // connections count + if err != nil { + return errors.Wrapf(err, "read node connections count") + } + read += n + connCount := int(binary.LittleEndian.Uint32(b[:4])) + + if connCount > 0 { + if version < snapshotVersionV2 { + pconn, err := packedconn.NewWithMaxLayer(uint8(connCount)) + if err != nil { + return errors.Wrapf(err, "create packed connections for node %d", node.id) + } + + for l := uint8(0); l < uint8(connCount); l++ { + n, err = io.ReadFull(r, b[:4]) // connections count at level + if err != nil { + return errors.Wrapf(err, "read node connections count at level") + } + read += n + connCountAtLevel := uint64(binary.LittleEndian.Uint32(b[:4])) + + if connCountAtLevel > 0 { + for c := uint64(0); c < connCountAtLevel; c++ { + n, err = io.ReadFull(r, b[:8]) // connection at level + if err != nil { + return errors.Wrapf(err, "read node connection at level") + } + connID := binary.LittleEndian.Uint64(b[:8]) + pconn.InsertAtLayer(connID, l) + read += n + } + } + } + + node.connections = pconn + } else { + // read the connections data + connData := make([]byte, connCount) + n, err = io.ReadFull(r, connData) + if err != nil { + return errors.Wrapf(err, "read node connections data") + } + read += n + + node.connections = packedconn.NewWithData(connData) + } + } + + mu.Lock() + res.Nodes[currNodeID] = node + mu.Unlock() + currNodeID++ + } + + // check checksum of checkpoint + if cp.Hash != hasher.Sum32() { + return fmt.Errorf("invalid checksum for checkpoint %d: expected %d, got %d", cpPos, cp.Hash, hasher.Sum32()) 
+ } + + return nil + }) + } + + err = eg.Wait() + if err != nil { + return nil, err + } + + return res, nil +} + +func ReadAndHash(r io.Reader, hasher hash.Hash, buf []byte) (int, error) { + n, err := io.ReadFull(r, buf) + if err != nil { + return n, err + } + _, err = hasher.Write(buf) + if err != nil { + return n, err + } + return n, nil +} + +type Checkpoint struct { + NodeID uint64 + Offset uint64 + Hash uint32 +} + +func writeCheckpoints(fileName string, checkpoints []Checkpoint) error { + checkpointFile, err := os.OpenFile(fileName, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0o666) + if err != nil { + return fmt.Errorf("open new checkpoint file for writing: %w", err) + } + defer checkpointFile.Close() + + // 0-4: checksum + // 4+: checkpoints (20 bytes each) + buffer := make([]byte, 4+len(checkpoints)*20) + offset := 4 + + for _, cp := range checkpoints { + binary.LittleEndian.PutUint64(buffer[offset:offset+8], cp.NodeID) + offset += 8 + binary.LittleEndian.PutUint64(buffer[offset:offset+8], cp.Offset) + offset += 8 + binary.LittleEndian.PutUint32(buffer[offset:offset+4], cp.Hash) + offset += 4 + } + + checksum := crc32.ChecksumIEEE(buffer[4:]) + binary.LittleEndian.PutUint32(buffer[:4], checksum) + + _, err = checkpointFile.Write(buffer) + if err != nil { + return fmt.Errorf("write checkpoint file: %w", err) + } + + return checkpointFile.Sync() +} + +func readCheckpoints(snapshotFileName string) (checkpoints []Checkpoint, err error) { + cpfn := snapshotFileName + ".checkpoints" + + cpFile, err := os.Open(cpfn) + if err != nil { + return nil, err + } + defer cpFile.Close() + + buf, err := io.ReadAll(cpFile) + if err != nil { + return nil, err + } + if len(buf) < 4 { + return nil, fmt.Errorf("corrupted checkpoint file %q", cpfn) + } + + checksum := binary.LittleEndian.Uint32(buf[:4]) + actualChecksum := crc32.ChecksumIEEE(buf[4:]) + if checksum != actualChecksum { + return nil, fmt.Errorf("corrupted checkpoint file %q, checksum mismatch", cpfn) + } + + checkpoints = 
make([]Checkpoint, 0, len(buf[4:])/20) + for i := 4; i < len(buf); i += 20 { + id := binary.LittleEndian.Uint64(buf[i : i+8]) + offset := binary.LittleEndian.Uint64(buf[i+8 : i+16]) + hash := binary.LittleEndian.Uint32(buf[i+16 : i+20]) + checkpoints = append(checkpoints, Checkpoint{NodeID: id, Offset: offset, Hash: hash}) + } + + return checkpoints, nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/commit_logger_snapshot_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/commit_logger_snapshot_test.go new file mode 100644 index 0000000000000000000000000000000000000000..aa4e63745762ca494fcc8ece974e284dfeefc469 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/commit_logger_snapshot_test.go @@ -0,0 +1,1447 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package hnsw + +import ( + "fmt" + "os" + "path/filepath" + "sync" + "testing" + + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/repos/db/vector/compressionhelpers" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/commitlog" + "github.com/weaviate/weaviate/adapters/repos/db/vector/multivector" + "github.com/weaviate/weaviate/entities/cyclemanager" +) + +func createTestCommitLoggerForSnapshotsWithOpts(t *testing.T, rootDir, id string, opts ...CommitlogOption) *hnswCommitLogger { + options := []CommitlogOption{ + WithCommitlogThreshold(1000), + WithCommitlogThresholdForCombining(200), + WithCondensor(&fakeCondensor{}), + WithSnapshotDisabled(false), + } + options = append(options, opts...) 
+ + commitLogDir := commitLogDirectory(rootDir, id) + cl, err := NewCommitLogger(rootDir, id, logrus.New(), cyclemanager.NewCallbackGroupNoop(), options...) + require.NoError(t, err) + + // commit logger always creates an empty file if there is no data, remove it first + files, err := os.ReadDir(commitLogDir) + require.NoError(t, err) + for _, file := range files { + err = os.Remove(fmt.Sprintf("%s/%s", commitLogDir, file.Name())) + require.NoError(t, err) + } + + return cl +} + +func createTestCommitLoggerForSnapshots(t *testing.T, rootDir, id string) *hnswCommitLogger { + return createTestCommitLoggerForSnapshotsWithOpts(t, rootDir, id) +} + +func createCommitlogTestData(t *testing.T, dir string, filenameSizes ...any) { + // create the files with the specified sizes + for i := 0; i < len(filenameSizes); i += 2 { + filename := fmt.Sprintf("%s/%s", dir, filenameSizes[i]) + size := filenameSizes[i+1].(int) + cl := commitlog.NewLogger(filename) + + generateFakeCommitLogData(t, cl, int64(size)) + + err := cl.Close() + require.NoError(t, err) + } +} + +var generateFakeCommitLogData = func(t *testing.T, cl *commitlog.Logger, size int64) { + var err error + + i := 0 + for { + if i > 0 && i%5 == 0 { + err = cl.DeleteNode(uint64(i - 1)) + } else { + err = cl.AddNode(uint64(i), levelForDummyVertex(i)) + } + require.NoError(t, err) + + err = cl.Flush() + require.NoError(t, err) + + fsize, err := cl.FileSize() + require.NoError(t, err) + + if fsize >= size { + break + } + + i++ + } +} + +func createCommitlogAndSnapshotTestData(t *testing.T, cl *hnswCommitLogger, commitlogNameSizes ...any) { + t.Helper() + + require.GreaterOrEqual(t, len(commitlogNameSizes), 4, "at least 2 commitlog files are required to create snapshot") + + clDir := commitLogDirectory(cl.rootPath, cl.id) + createCommitlogTestData(t, clDir, commitlogNameSizes...) 
+ + created, _, err := cl.CreateSnapshot() + require.NoError(t, err) + require.True(t, created) +} + +func readDir(t *testing.T, dir string) []string { + files, err := os.ReadDir(dir) + require.NoError(t, err) + + var result []string + for _, item := range files { + if item.IsDir() { + continue + } + result = append(result, item.Name()) + } + return result +} + +func TestCreateSnapshot(t *testing.T) { + tests := []struct { + name string + setup []any + expected []string + created bool + }{ + { + name: "empty directory", + setup: []any{}, + }, + { + name: "single file", + setup: []any{"1000", 1000}, + }, + { + name: "many non-condensed files", + setup: []any{"1000", 1000, "1001", 1000, "1002", 1000, "1003", 1000}, + expected: []string{"1002.snapshot", "1002.snapshot.checkpoints"}, + created: true, + }, + { + name: "small condensed files", + setup: []any{"1000.condensed", 100, "1001.condensed", 100, "1002.condensed", 100, "1003.condensed", 100}, + expected: []string{"1002.snapshot", "1002.snapshot.checkpoints"}, + created: true, + }, + { + name: "bigger condensed files", + setup: []any{"1000.condensed", 200, "1001.condensed", 200, "1002.condensed", 200, "1003.condensed", 200}, + expected: []string{"1002.snapshot", "1002.snapshot.checkpoints"}, + created: true, + }, + { + name: "not enough condensed files", + setup: []any{"1000.condensed", 1000}, + }, + { + name: "enough condensed files", + setup: []any{"1000.condensed", 1000, "1001.condensed", 1000}, + expected: []string{"1000.snapshot", "1000.snapshot.checkpoints"}, + created: true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + dir := t.TempDir() + id := "main" + cl := createTestCommitLoggerForSnapshots(t, dir, id) + createCommitlogTestData(t, commitLogDirectory(dir, id), test.setup...) 
+ + created, _, err := cl.CreateSnapshot() + require.NoError(t, err) + require.Equal(t, test.created, created) + require.Equal(t, test.expected, readDir(t, snapshotDirectory(dir, id))) + }) + } +} + +func TestCreateSnapshotWithExistingState(t *testing.T) { + dir := t.TempDir() + id := "main" + clDir := commitLogDirectory(dir, id) + sDir := snapshotDirectory(dir, id) + + cl := createTestCommitLoggerForSnapshots(t, dir, id) + createCommitlogTestData(t, clDir, "1000.condensed", 200, "1001.condensed", 200, "1002.condensed", 200, "1003.condensed", 200) + + // create snapshot + created, _, err := cl.CreateSnapshot() + require.NoError(t, err) + require.True(t, created) + + files := readDir(t, sDir) + require.Equal(t, []string{"1002.snapshot", "1002.snapshot.checkpoints"}, files) + + // add new files + createCommitlogTestData(t, clDir, "1004", 1000, "1005", 5) + + // create snapshot, should create it + created, _, err = cl.CreateSnapshot() + require.NoError(t, err) + require.True(t, created) + files = readDir(t, sDir) + require.Equal(t, []string{"1004.snapshot", "1004.snapshot.checkpoints"}, files) + + // simulate file condensation + err = os.Rename(filepath.Join(clDir, "1004"), filepath.Join(clDir, "1004.condensed")) + require.NoError(t, err) + + // create snapshot, should not create it (no new commitlogs) + created, _, err = cl.CreateSnapshot() + require.NoError(t, err) + require.False(t, created) + files = readDir(t, sDir) + require.Equal(t, []string{"1004.snapshot", "1004.snapshot.checkpoints"}, files) + + // simulate file condensation + err = os.Rename(filepath.Join(clDir, "1005"), filepath.Join(clDir, "1005.condensed")) + require.NoError(t, err) + + // create snapshot, should not create it (no new commitlogs) + created, _, err = cl.CreateSnapshot() + require.NoError(t, err) + require.False(t, created) + files = readDir(t, sDir) + require.Equal(t, []string{"1004.snapshot", "1004.snapshot.checkpoints"}, files) + + // add new files + createCommitlogTestData(t, clDir, 
"1006", 5) + + // create snapshot, should create it + created, _, err = cl.CreateSnapshot() + require.NoError(t, err) + require.True(t, created) + files = readDir(t, sDir) + require.Equal(t, []string{"1005.snapshot", "1005.snapshot.checkpoints"}, files) + + // simulate file condensation + err = os.Rename(filepath.Join(clDir, "1006"), filepath.Join(clDir, "1006.condensed")) + require.NoError(t, err) + + // create snapshot, should not create it (no new files) + created, _, err = cl.CreateSnapshot() + require.NoError(t, err) + require.False(t, created) + files = readDir(t, sDir) + require.Equal(t, []string{"1005.snapshot", "1005.snapshot.checkpoints"}, files) + + // add new files + createCommitlogTestData(t, clDir, "1007", 5) + + // create snapshot, should create it + created, _, err = cl.CreateSnapshot() + require.NoError(t, err) + require.True(t, created) + files = readDir(t, sDir) + require.Equal(t, []string{"1006.snapshot", "1006.snapshot.checkpoints"}, files) +} + +func TestCreateSnapshotCrashRecovery(t *testing.T) { + t.Run("crash before renaming from .tmp to .snapshot", func(t *testing.T) { + dir := t.TempDir() + id := "main" + cl := createTestCommitLoggerForSnapshots(t, dir, id) + clDir := commitLogDirectory(dir, id) + sDir := snapshotDirectory(dir, id) + os.MkdirAll(sDir, os.ModePerm) + + createCommitlogTestData(t, clDir, "1000.condensed", 1000, "1001.condensed", 1000, "1002.condensed", 1000) + + // simulate shutdown before snapshot renaming + createCommitlogTestData(t, sDir, "1000.snapshot.tmp", 1000) + + // create snapshot + created, _, err := cl.CreateSnapshot() + require.NoError(t, err) + require.True(t, created) + files := readDir(t, sDir) + require.Equal(t, []string{"1001.snapshot", "1001.snapshot.checkpoints"}, files) + }) + + t.Run("missing checkpoints", func(t *testing.T) { + dir := t.TempDir() + id := "main" + cl := createTestCommitLoggerForSnapshots(t, dir, id) + clDir := commitLogDirectory(dir, id) + sDir := snapshotDirectory(dir, id) + + 
createCommitlogTestData(t, clDir, "1000.condensed", 1000, "1001.condensed", 1000, "1002.condensed", 1000, + "1003", 1000) + + // missing checkpoints + createCommitlogTestData(t, sDir, "1000.snapshot", 1000) + + // create snapshot should still work + created, _, err := cl.CreateSnapshot() + require.NoError(t, err) + require.True(t, created) + files := readDir(t, sDir) + require.Equal(t, []string{"1002.snapshot", "1002.snapshot.checkpoints"}, files) + }) + + t.Run("corrupt snapshot", func(t *testing.T) { + dir := t.TempDir() + id := "main" + cl := createTestCommitLoggerForSnapshots(t, dir, id) + clDir := commitLogDirectory(dir, id) + sDir := snapshotDirectory(dir, id) + + createCommitlogTestData(t, clDir, "1000.condensed", 1000, "1001.condensed", 1000, "1002.condensed", 1000) + + // create snapshot + created, _, err := cl.CreateSnapshot() + require.NoError(t, err) + require.NotNil(t, created) + files := readDir(t, sDir) + require.Equal(t, []string{"1001.snapshot", "1001.snapshot.checkpoints"}, files) + + // corrupt the snapshot + err = os.WriteFile(filepath.Join(sDir, "1001.snapshot"), []byte("corrupt"), 0o644) + require.NoError(t, err) + + // add new files + createCommitlogTestData(t, clDir, "1003.condensed", 1000, "1004.condensed", 1000, "1005.condensed", 1000) + + // create snapshot should still work + created, _, err = cl.CreateSnapshot() + require.NoError(t, err) + require.True(t, created) + files = readDir(t, sDir) + require.Equal(t, []string{"1004.snapshot", "1004.snapshot.checkpoints"}, files) + }) + + t.Run("corrupt checkpoints", func(t *testing.T) { + dir := t.TempDir() + id := "main" + cl := createTestCommitLoggerForSnapshots(t, dir, id) + clDir := commitLogDirectory(dir, id) + sDir := snapshotDirectory(dir, id) + + createCommitlogTestData(t, clDir, "1000.condensed", 1000, "1001.condensed", 1000, "1002.condensed", 1000) + + // create snapshot + created, _, err := cl.CreateSnapshot() + require.NoError(t, err) + require.NotNil(t, created) + files := 
readDir(t, sDir) + require.Equal(t, []string{"1001.snapshot", "1001.snapshot.checkpoints"}, files) + + // corrupt the checkpoints + err = os.WriteFile(filepath.Join(sDir, "1001.snapshot.checkpoints"), []byte("corrupt"), 0o644) + require.NoError(t, err) + + // add new files + createCommitlogTestData(t, clDir, "1003.condensed", 1000, "1004.condensed", 1000, "1005.condensed", 1000) + + // create snapshot should still work + created, _, err = cl.CreateSnapshot() + require.NoError(t, err) + require.True(t, created) + files = readDir(t, sDir) + require.Equal(t, []string{"1004.snapshot", "1004.snapshot.checkpoints"}, files) + }) + + t.Run("outdated checkpoints", func(t *testing.T) { + dir := t.TempDir() + id := "main" + cl := createTestCommitLoggerForSnapshots(t, dir, id) + clDir := commitLogDirectory(dir, id) + sDir := snapshotDirectory(dir, id) + + createCommitlogTestData(t, clDir, "1000.condensed", 1000, "1001.condensed", 1000, "1002.condensed", 1000) + + // create snapshot + created, _, err := cl.CreateSnapshot() + require.NoError(t, err) + require.NotNil(t, created) + files := readDir(t, sDir) + require.Equal(t, []string{"1001.snapshot", "1001.snapshot.checkpoints"}, files) + + // copy the checkpoints file to a different file + oldCp, err := os.ReadFile(filepath.Join(sDir, "1001.snapshot.checkpoints")) + require.NoError(t, err) + + // add new files + createCommitlogTestData(t, clDir, "1003.condensed", 1500, "1004.condensed", 2500, "1005.condensed", 3000) + + // create new snapshot + created, _, err = cl.CreateSnapshot() + require.NoError(t, err) + require.True(t, created) + files = readDir(t, sDir) + require.Equal(t, []string{"1004.snapshot", "1004.snapshot.checkpoints"}, files) + + // restore the old checkpoints file + err = os.WriteFile(filepath.Join(sDir, "1004.snapshot.checkpoints"), oldCp, 0o644) + require.NoError(t, err) + + // load the snapshot + state, createdAt, err := cl.LoadSnapshot() + require.NoError(t, err) + require.Nil(t, state) + require.Zero(t, 
createdAt) + files = readDir(t, sDir) + require.Zero(t, files) + }) +} + +func TestCreateAndLoadSnapshot(t *testing.T) { + t.Run("create and load snapshot", func(t *testing.T) { + dir := t.TempDir() + id := "main" + cl := createTestCommitLoggerForSnapshots(t, dir, id) + clDir := commitLogDirectory(dir, id) + sDir := snapshotDirectory(dir, id) + + createCommitlogTestData(t, clDir, "1000.condensed", 1000) + + // try to create a snapshot, should not create it + // because there is not enough data + state, createdAt, err := cl.CreateAndLoadSnapshot() + require.NoError(t, err) + require.Nil(t, state) + require.Zero(t, createdAt) + files := readDir(t, sDir) + require.Empty(t, files) + + // add new files + createCommitlogTestData(t, clDir, "1001.condensed", 1000) + + // create new snapshot + state, createdAt, err = cl.CreateAndLoadSnapshot() + require.NoError(t, err) + require.NotNil(t, state) + require.NotZero(t, createdAt) + files = readDir(t, sDir) + require.ElementsMatch(t, []string{"1000.snapshot", "1000.snapshot.checkpoints"}, files) + + // add new files + createCommitlogTestData(t, clDir, "1002.condensed", 1000) + + // create new snapshot + state, createdAt, err = cl.CreateAndLoadSnapshot() + require.NoError(t, err) + require.NotNil(t, state) + require.NotZero(t, createdAt) + files = readDir(t, sDir) + require.ElementsMatch(t, []string{"1001.snapshot", "1001.snapshot.checkpoints"}, files) + + // try again, should not create a new snapshot + // but should return the existing one + state, createdAt, err = cl.CreateAndLoadSnapshot() + require.NoError(t, err) + require.NotNil(t, state) + require.NotZero(t, createdAt) + files = readDir(t, sDir) + require.ElementsMatch(t, []string{"1001.snapshot", "1001.snapshot.checkpoints"}, files) + }) + + t.Run("empty snapshot", func(t *testing.T) { + dir := t.TempDir() + id := "main" + cl := createTestCommitLoggerForSnapshots(t, dir, id) + clDir := commitLogDirectory(dir, id) + sDir := snapshotDirectory(dir, id) + + 
createCommitlogTestData(t, clDir, "1000.condensed", 1000, "1001.condensed", 1000, "1002.condensed", 1000) + + // create snapshot + created, createdAt, err := cl.CreateSnapshot() + require.NoError(t, err) + require.True(t, created) + require.NotZero(t, createdAt) + files := readDir(t, sDir) + require.Equal(t, []string{"1001.snapshot", "1001.snapshot.checkpoints"}, files) + + // empty the snapshot + err = os.WriteFile(filepath.Join(sDir, "1001.snapshot"), []byte(""), 0o644) + require.NoError(t, err) + + // create snapshot again + state, createdAt, err := cl.CreateAndLoadSnapshot() + require.NoError(t, err) + require.NotNil(t, state) + require.NotZero(t, createdAt) + files = readDir(t, sDir) + require.Equal(t, []string{"1001.snapshot", "1001.snapshot.checkpoints"}, files) + // snapshot has content now + info, err := os.Stat(filepath.Join(sDir, "1001.snapshot")) + require.NoError(t, err) + require.Less(t, int64(0), info.Size()) + }) +} + +func TestCreateSnapshot_NextOne(t *testing.T) { + s1982 := 1200 // commitlog of size 1200 makes snapshot of size s1982 + + tests := []struct { + name string + setup []any + delta []any + deltaNumber int + deltaSizePercentage int + allocCheckerOOM bool + expectedFiles []string + expectedCreated bool + }{ + // number of delta files + { + name: "no new commitlogs (1 required)", + setup: []any{"1000.condensed", 1000, "1001", 1000}, + delta: []any{}, + deltaNumber: 1, + expectedFiles: []string{"1000.snapshot", "1000.snapshot.checkpoints"}, + expectedCreated: false, + }, + { + name: "1 new commitlog (1 required)", + setup: []any{"1000.condensed", 1000, "1001", 1000}, + delta: []any{"1002", 1000}, + deltaNumber: 1, + expectedFiles: []string{"1001.snapshot", "1001.snapshot.checkpoints"}, + expectedCreated: true, + }, + { + name: "2 new commitlogs (1 required)", + setup: []any{"1000.condensed", 1000, "1001", 1000}, + delta: []any{"1002", 1000, "1003", 1000}, + deltaNumber: 1, + expectedFiles: []string{"1002.snapshot", 
"1002.snapshot.checkpoints"}, + expectedCreated: true, + }, + { + name: "2 new commitlogs (3 required)", + setup: []any{"1000.condensed", 1000, "1001", 1000}, + delta: []any{"1002.condensed", 1000, "1003", 1000}, + deltaNumber: 3, + expectedFiles: []string{"1000.snapshot", "1000.snapshot.checkpoints"}, + expectedCreated: false, + }, + { + name: "3 new commitlogs (3 required)", + setup: []any{"1000.condensed", 1000, "1001", 1000}, + delta: []any{"1002.condensed", 1000, "1003.condensed", 1000, "1004", 1000}, + deltaNumber: 3, + expectedFiles: []string{"1003.snapshot", "1003.snapshot.checkpoints"}, + expectedCreated: true, + }, + { + name: "4 new commitlogs (3 required)", + setup: []any{"1000.condensed", 1000, "1001", 1000}, + delta: []any{"1002.condensed", 1000, "1003.condensed", 1000, "1004.condensed", 1000, "1005", 1000}, + deltaNumber: 3, + expectedFiles: []string{"1004.snapshot", "1004.snapshot.checkpoints"}, + expectedCreated: true, + }, + + // size % of delta files + { + name: "too small delta size (required 5%)", + setup: []any{"1000.condensed", s1982, "1001", 90}, + delta: []any{"1002", 1200}, + deltaSizePercentage: 5, + expectedFiles: []string{"1000.snapshot", "1000.snapshot.checkpoints"}, + expectedCreated: false, + }, + { + name: "too small delta size, multiple files (required 5%)", + setup: []any{"1000.condensed", s1982, "1001", 30}, + delta: []any{"1002.condensed", 30, "1003.condensed", 30, "1004", 1200}, + deltaSizePercentage: 5, + expectedFiles: []string{"1000.snapshot", "1000.snapshot.checkpoints"}, + expectedCreated: false, + }, + { + name: "big enough delta size (required 5%)", + setup: []any{"1000.condensed", s1982, "1001", 110}, + delta: []any{"1002", 1200}, + deltaSizePercentage: 5, + expectedFiles: []string{"1001.snapshot", "1001.snapshot.checkpoints"}, + expectedCreated: true, + }, + { + name: "big enough delta size, multiple files (required 5%)", + setup: []any{"1000.condensed", s1982, "1001", 35}, + delta: []any{"1002.condensed", 35, 
"1003.condensed", 35, "1004", 1200}, + deltaSizePercentage: 5, + expectedFiles: []string{"1003.snapshot", "1003.snapshot.checkpoints"}, + expectedCreated: true, + }, + { + name: "too small delta size (required 125%)", + setup: []any{"1000.condensed", s1982, "1001", 1500}, + delta: []any{"1002", 1100}, + deltaSizePercentage: 125, + expectedFiles: []string{"1000.snapshot", "1000.snapshot.checkpoints"}, + expectedCreated: false, + }, + { + name: "too small delta size, multiple files (required 125%)", + setup: []any{"1000.condensed", s1982, "1001", 820}, + delta: []any{"1002.condensed", 820, "1003.condensed", 750, "1004", 1200}, + deltaSizePercentage: 125, + expectedFiles: []string{"1000.snapshot", "1000.snapshot.checkpoints"}, + expectedCreated: false, + }, + { + name: "big enough delta size (required 110%)", + setup: []any{"1000.condensed", s1982, "1001", 2510}, + delta: []any{"1002", 1200}, + deltaSizePercentage: 110, + expectedFiles: []string{"1001.snapshot", "1001.snapshot.checkpoints"}, + expectedCreated: true, + }, + { + name: "big enough delta size, multiple files (required 110%)", + setup: []any{"1000.condensed", s1982, "1001", 830}, + delta: []any{"1002.condensed", 830, "1003.condensed", 830, "1004", 1200}, + deltaSizePercentage: 110, + expectedFiles: []string{"1003.snapshot", "1003.snapshot.checkpoints"}, + expectedCreated: true, + }, + + // number + size % of delta files + allocChecker + // NOTE: data in commitlogs is duplicated, so final snapshot size made out of multiple commitlogs + // will effectively be the same as size of snaptshot created just from biggest commitlog + { + name: "too few delta commitlogs, too small delta size", + setup: []any{"1000.condensed", s1982, "1001", 1010}, + delta: []any{"1002", 1000}, + deltaNumber: 2, + deltaSizePercentage: 75, + expectedFiles: []string{"1000.snapshot", "1000.snapshot.checkpoints"}, + expectedCreated: false, + }, + { + name: "too small delta size", + setup: []any{"1000.condensed", s1982, "1001", 1010}, + 
delta: []any{"1002", 1000}, + deltaNumber: 1, + deltaSizePercentage: 75, + expectedFiles: []string{"1000.snapshot", "1000.snapshot.checkpoints"}, + expectedCreated: false, + }, + { + name: "too few delta commitlogs", + setup: []any{"1000.condensed", s1982, "1001", 1010}, + delta: []any{"1002", 1000}, + deltaNumber: 2, + deltaSizePercentage: 50, + expectedFiles: []string{"1000.snapshot", "1000.snapshot.checkpoints"}, + expectedCreated: false, + }, + { + name: "enough delta commit logs, enough delta size", + setup: []any{"1000.condensed", s1982, "1001", 1010}, + delta: []any{"1002", 1000}, + deltaNumber: 1, + deltaSizePercentage: 40, + expectedFiles: []string{"1001.snapshot", "1001.snapshot.checkpoints"}, + expectedCreated: true, + }, + { + name: "enough delta commit logs, enough delta size, but oom", + setup: []any{"1000.condensed", s1982, "1001", 1010}, + delta: []any{"1002", 1000}, + deltaNumber: 1, + deltaSizePercentage: 40, + allocCheckerOOM: true, + expectedFiles: []string{"1000.snapshot", "1000.snapshot.checkpoints"}, + expectedCreated: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + dir := t.TempDir() + id := "main" + cl := createTestCommitLoggerForSnapshots(t, dir, id) + createCommitlogAndSnapshotTestData(t, cl, test.setup...) + + if len(test.delta) > 0 { + createCommitlogTestData(t, commitLogDirectory(dir, id), test.delta...) 
+ } + + // overwrite settings for next snapshot creation + if test.allocCheckerOOM { + cl.allocChecker = &fakeAllocChecker{shouldErr: true} + } + cl.snapshotMinDeltaCommitlogsNumber = test.deltaNumber + cl.snapshotMinDeltaCommitlogsSizePercentage = test.deltaSizePercentage + + created, _, err := cl.CreateSnapshot() + require.NoError(t, err) + require.Equal(t, test.expectedCreated, created) + require.Equal(t, test.expectedFiles, readDir(t, snapshotDirectory(dir, id))) + }) + } +} + +func TestCreateAndLoadSnapshot_NextOne(t *testing.T) { + s1982 := 1200 // commitlog of size 1200 makes snapshot of size 1982 + + tests := []struct { + name string + setup []any + delta []any + deltaNumber int + deltaSizePercentage int + allocCheckerOOM bool + expectedFiles []string + expectedCreated bool + }{ + // number of delta files + { + name: "no new commitlogs (1 required)", + setup: []any{"1000.condensed", 1000, "1001", 1000}, + delta: []any{}, + deltaNumber: 1, + expectedFiles: []string{"1000.snapshot", "1000.snapshot.checkpoints"}, + expectedCreated: false, + }, + { + name: "1 new commitlog (1 required)", + setup: []any{"1000.condensed", 1000, "1001", 1000}, + delta: []any{"1002", 1000}, + deltaNumber: 1, + expectedFiles: []string{"1001.snapshot", "1001.snapshot.checkpoints"}, + expectedCreated: true, + }, + { + name: "2 new commitlogs (1 required)", + setup: []any{"1000.condensed", 1000, "1001", 1000}, + delta: []any{"1002", 1000, "1003", 1000}, + deltaNumber: 1, + expectedFiles: []string{"1002.snapshot", "1002.snapshot.checkpoints"}, + expectedCreated: true, + }, + { + name: "2 new commitlogs (3 required)", + setup: []any{"1000.condensed", 1000, "1001", 1000}, + delta: []any{"1002.condensed", 1000, "1003", 1000}, + deltaNumber: 3, + expectedFiles: []string{"1000.snapshot", "1000.snapshot.checkpoints"}, + expectedCreated: false, + }, + { + name: "3 new commitlogs (3 required)", + setup: []any{"1000.condensed", 1000, "1001", 1000}, + delta: []any{"1002.condensed", 1000, 
"1003.condensed", 1000, "1004", 1000}, + deltaNumber: 3, + expectedFiles: []string{"1003.snapshot", "1003.snapshot.checkpoints"}, + expectedCreated: true, + }, + { + name: "4 new commitlogs (3 required)", + setup: []any{"1000.condensed", 1000, "1001", 1000}, + delta: []any{"1002.condensed", 1000, "1003.condensed", 1000, "1004.condensed", 1000, "1005", 1000}, + deltaNumber: 3, + expectedFiles: []string{"1004.snapshot", "1004.snapshot.checkpoints"}, + expectedCreated: true, + }, + + // size % of delta files + { + name: "too small delta size (required 5%)", + setup: []any{"1000.condensed", s1982, "1001", 90}, + delta: []any{"1002", 1200}, + deltaSizePercentage: 5, + expectedFiles: []string{"1000.snapshot", "1000.snapshot.checkpoints"}, + expectedCreated: false, + }, + { + name: "too small delta size, multiple files (required 5%)", + setup: []any{"1000.condensed", s1982, "1001", 30}, + delta: []any{"1002.condensed", 30, "1003.condensed", 30, "1004", 1200}, + deltaSizePercentage: 5, + expectedFiles: []string{"1000.snapshot", "1000.snapshot.checkpoints"}, + expectedCreated: false, + }, + { + name: "big enough delta size (required 5%)", + setup: []any{"1000.condensed", s1982, "1001", 110}, + delta: []any{"1002", 1200}, + deltaSizePercentage: 5, + expectedFiles: []string{"1001.snapshot", "1001.snapshot.checkpoints"}, + expectedCreated: true, + }, + { + name: "big enough delta size, multiple files (required 5%)", + setup: []any{"1000.condensed", s1982, "1001", 35}, + delta: []any{"1002.condensed", 35, "1003.condensed", 35, "1004", 1200}, + deltaSizePercentage: 5, + expectedFiles: []string{"1003.snapshot", "1003.snapshot.checkpoints"}, + expectedCreated: true, + }, + { + name: "too small delta size (required 125%)", + setup: []any{"1000.condensed", s1982, "1001", 2450}, + delta: []any{"1002", 1100}, + deltaSizePercentage: 125, + expectedFiles: []string{"1000.snapshot", "1000.snapshot.checkpoints"}, + expectedCreated: false, + }, + { + name: "too small delta size, multiple 
files (required 125%)", + setup: []any{"1000.condensed", s1982, "1001", 750}, + delta: []any{"1002.condensed", 750, "1003.condensed", 750, "1004", 1200}, + deltaSizePercentage: 125, + expectedFiles: []string{"1000.snapshot", "1000.snapshot.checkpoints"}, + expectedCreated: false, + }, + { + name: "big enough delta size (required 110%)", + setup: []any{"1000.condensed", s1982, "1001", 2510}, + delta: []any{"1002", 1200}, + deltaSizePercentage: 110, + expectedFiles: []string{"1001.snapshot", "1001.snapshot.checkpoints"}, + expectedCreated: true, + }, + { + name: "big enough delta size, multiple files (required 110%)", + setup: []any{"1000.condensed", s1982, "1001", 830}, + delta: []any{"1002.condensed", 830, "1003.condensed", 830, "1004", 1200}, + deltaSizePercentage: 110, + expectedFiles: []string{"1003.snapshot", "1003.snapshot.checkpoints"}, + expectedCreated: true, + }, + + // number + size % of delta files + allocChecker + // NOTE: data in commitlogs is duplicated, so final snapshot size made out of multiple commitlogs + // will effectively be the same as size of snaptshot created just from biggest commitlog + { + name: "too few delta commitlogs, too small delta size", + setup: []any{"1000.condensed", s1982, "1001", 1010}, + delta: []any{"1002", 1000}, + deltaNumber: 2, + deltaSizePercentage: 75, + expectedFiles: []string{"1000.snapshot", "1000.snapshot.checkpoints"}, + expectedCreated: false, + }, + { + name: "too small delta size", + setup: []any{"1000.condensed", s1982, "1001", 1010}, + delta: []any{"1002", 1000}, + deltaNumber: 1, + deltaSizePercentage: 75, + expectedFiles: []string{"1000.snapshot", "1000.snapshot.checkpoints"}, + expectedCreated: false, + }, + { + name: "too few delta commitlogs", + setup: []any{"1000.condensed", s1982, "1001", 1010}, + delta: []any{"1002", 1000}, + deltaNumber: 2, + deltaSizePercentage: 50, + expectedFiles: []string{"1000.snapshot", "1000.snapshot.checkpoints"}, + expectedCreated: false, + }, + { + name: "enough delta 
commit logs, enough delta size", + setup: []any{"1000.condensed", s1982, "1001", 1010}, + delta: []any{"1002", 1000}, + deltaNumber: 1, + deltaSizePercentage: 40, + expectedFiles: []string{"1001.snapshot", "1001.snapshot.checkpoints"}, + expectedCreated: true, + }, + { + name: "enough delta commit logs, enough delta size, oom is ignored", + setup: []any{"1000.condensed", s1982, "1001", 1010}, + delta: []any{"1002", 1000}, + deltaNumber: 1, + deltaSizePercentage: 40, + allocCheckerOOM: true, + expectedFiles: []string{"1001.snapshot", "1001.snapshot.checkpoints"}, + expectedCreated: true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + dir := t.TempDir() + id := "main" + cl := createTestCommitLoggerForSnapshots(t, dir, id) + createCommitlogAndSnapshotTestData(t, cl, test.setup...) + + if len(test.delta) > 0 { + createCommitlogTestData(t, commitLogDirectory(dir, id), test.delta...) + } + + // overwrite settings for next snapshot creation + if test.allocCheckerOOM { + cl.allocChecker = &fakeAllocChecker{shouldErr: true} + } + cl.snapshotMinDeltaCommitlogsNumber = test.deltaNumber + cl.snapshotMinDeltaCommitlogsSizePercentage = test.deltaSizePercentage + + state, createdAt, err := cl.CreateAndLoadSnapshot() + require.NoError(t, err) + require.NotNil(t, state) + require.Equal(t, test.expectedFiles, readDir(t, snapshotDirectory(dir, id))) + // All examples have snapshot 1000 to start with. 
+ // If new one is created it would be named after newer commitlogs + if test.expectedCreated { + require.NotEqual(t, int64(1000), createdAt) + } else { + require.Equal(t, int64(1000), createdAt) + } + }) + } +} + +func TestMetadataWriteAndRestore(t *testing.T) { + t.Run("v1 metadata - basic fields only", func(t *testing.T) { + // Create a basic state with no compression/encoding + state := &DeserializationResult{ + Entrypoint: 23, + Level: 42, + Compressed: false, + Nodes: make([]*vertex, 100), + } + + dir := t.TempDir() + id := "test" + cl := createTestCommitLoggerForSnapshots(t, dir, id) + + // Write snapshot to a temporary file + snapshotPath := filepath.Join(snapshotDirectory(dir, id), "test.snapshot") + err := cl.writeSnapshot(state, snapshotPath) + require.NoError(t, err) + + // Read snapshot back + restoredState, err := cl.readSnapshot(snapshotPath) + require.NoError(t, err) + + // Verify all fields match + require.Equal(t, state.Entrypoint, restoredState.Entrypoint) + require.Equal(t, state.Level, restoredState.Level) + require.Equal(t, state.Compressed, restoredState.Compressed) + require.Equal(t, len(state.Nodes), len(restoredState.Nodes)) + require.False(t, restoredState.MuveraEnabled) + require.Nil(t, restoredState.CompressionPQData) + require.Nil(t, restoredState.CompressionSQData) + require.Nil(t, restoredState.CompressionRQData) + require.Nil(t, restoredState.CompressionBRQData) + require.Nil(t, restoredState.EncoderMuvera) + }) + + t.Run("v2 metadata - basic fields only", func(t *testing.T) { + // Create a basic state with no compression/encoding + state := &DeserializationResult{ + Entrypoint: 43, + Level: 15, + Compressed: false, + Nodes: make([]*vertex, 200), + } + + dir := t.TempDir() + id := "test" + cl := createTestCommitLoggerForSnapshots(t, dir, id) + + // Write snapshot to a temporary file + snapshotPath := filepath.Join(snapshotDirectory(dir, id), "test.snapshot") + err := cl.writeSnapshot(state, snapshotPath) + require.NoError(t, err) + 
+ // Read snapshot back + restoredState, err := cl.readSnapshot(snapshotPath) + require.NoError(t, err) + + // Verify all fields match + require.Equal(t, state.Entrypoint, restoredState.Entrypoint) + require.Equal(t, state.Level, restoredState.Level) + require.Equal(t, state.Compressed, restoredState.Compressed) + require.Equal(t, len(state.Nodes), len(restoredState.Nodes)) + require.False(t, restoredState.MuveraEnabled) + require.Nil(t, restoredState.CompressionPQData) + require.Nil(t, restoredState.CompressionSQData) + require.Nil(t, restoredState.CompressionRQData) + require.Nil(t, restoredState.CompressionBRQData) + require.Nil(t, restoredState.EncoderMuvera) + }) + + t.Run("v2 metadata - with PQ compression", func(t *testing.T) { + // Create state with PQ compression + state := &DeserializationResult{ + Entrypoint: 99, + Level: 7, + Compressed: true, + Nodes: make([]*vertex, 150), + CompressionPQData: &compressionhelpers.PQData{ + Dimensions: 128, + Ks: 256, + M: 8, + EncoderType: compressionhelpers.UseTileEncoder, + EncoderDistribution: 1, + UseBitsEncoding: true, + Encoders: make([]compressionhelpers.PQEncoder, 8), + }, + } + + // Create actual encoders for testing + for i := 0; i < 8; i++ { + state.CompressionPQData.Encoders[i] = compressionhelpers.NewTileEncoder(8, i, compressionhelpers.EncoderDistribution(1)) + } + + dir := t.TempDir() + id := "test" + cl := createTestCommitLoggerForSnapshots(t, dir, id) + + // Write snapshot to a temporary file + snapshotPath := filepath.Join(snapshotDirectory(dir, id), "test.snapshot") + err := cl.writeSnapshot(state, snapshotPath) + require.NoError(t, err) + + // Read snapshot back + restoredState, err := cl.readSnapshot(snapshotPath) + require.NoError(t, err) + + // Verify all fields match + require.Equal(t, state.Entrypoint, restoredState.Entrypoint) + require.Equal(t, state.Level, restoredState.Level) + require.Equal(t, state.Compressed, restoredState.Compressed) + require.Equal(t, len(state.Nodes), 
len(restoredState.Nodes)) + require.NotNil(t, restoredState.CompressionPQData) + require.Equal(t, state.CompressionPQData.Dimensions, restoredState.CompressionPQData.Dimensions) + require.Equal(t, state.CompressionPQData.Ks, restoredState.CompressionPQData.Ks) + require.Equal(t, state.CompressionPQData.M, restoredState.CompressionPQData.M) + require.Equal(t, state.CompressionPQData.EncoderType, restoredState.CompressionPQData.EncoderType) + require.Equal(t, state.CompressionPQData.EncoderDistribution, restoredState.CompressionPQData.EncoderDistribution) + require.Equal(t, state.CompressionPQData.UseBitsEncoding, restoredState.CompressionPQData.UseBitsEncoding) + require.Equal(t, len(state.CompressionPQData.Encoders), len(restoredState.CompressionPQData.Encoders)) + }) + + t.Run("v2 metadata - with SQ compression", func(t *testing.T) { + // Create state with SQ compression + state := &DeserializationResult{ + Entrypoint: 120, + Level: 12, + Compressed: true, + Nodes: make([]*vertex, 300), + CompressionSQData: &compressionhelpers.SQData{ + Dimensions: 64, + A: 1.5, + B: 2.7, + }, + } + + dir := t.TempDir() + id := "test" + cl := createTestCommitLoggerForSnapshots(t, dir, id) + + // Write snapshot to a temporary file + snapshotPath := filepath.Join(snapshotDirectory(dir, id), "test.snapshot") + err := cl.writeSnapshot(state, snapshotPath) + require.NoError(t, err) + + // Read snapshot back + restoredState, err := cl.readSnapshot(snapshotPath) + require.NoError(t, err) + + // Verify all fields match + require.Equal(t, state.Entrypoint, restoredState.Entrypoint) + require.Equal(t, state.Level, restoredState.Level) + require.Equal(t, state.Compressed, restoredState.Compressed) + require.Equal(t, len(state.Nodes), len(restoredState.Nodes)) + require.NotNil(t, restoredState.CompressionSQData) + require.Equal(t, state.CompressionSQData.Dimensions, restoredState.CompressionSQData.Dimensions) + require.Equal(t, state.CompressionSQData.A, restoredState.CompressionSQData.A) + 
require.Equal(t, state.CompressionSQData.B, restoredState.CompressionSQData.B) + }) + + t.Run("v2 metadata - with RQ compression", func(t *testing.T) { + // Create state with RQ compression + state := &DeserializationResult{ + Entrypoint: 212, + Level: 5, + Compressed: true, + Nodes: make([]*vertex, 250), + CompressionRQData: &compressionhelpers.RQData{ + InputDim: 8, + Bits: 8, + Rotation: compressionhelpers.FastRotation{ + OutputDim: 8, + Rounds: 1, + Swaps: [][]compressionhelpers.Swap{ + { + {I: 0, J: 1}, + {I: 2, J: 3}, + {I: 4, J: 5}, + {I: 6, J: 7}, + }, + }, + Signs: [][]float32{ + {1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0}, + }, + }, + }, + } + + dir := t.TempDir() + id := "test" + cl := createTestCommitLoggerForSnapshots(t, dir, id) + + // Write snapshot to a temporary file + snapshotPath := filepath.Join(snapshotDirectory(dir, id), "test.snapshot") + err := cl.writeSnapshot(state, snapshotPath) + require.NoError(t, err) + + // Read snapshot back + restoredState, err := cl.readSnapshot(snapshotPath) + require.NoError(t, err) + + // Verify all fields match + require.Equal(t, state.Compressed, true) + require.Equal(t, state.Entrypoint, restoredState.Entrypoint) + require.Equal(t, state.Level, restoredState.Level) + require.Equal(t, state.Compressed, restoredState.Compressed) + require.Equal(t, len(state.Nodes), len(restoredState.Nodes)) + require.NotNil(t, restoredState.CompressionRQData) + require.Equal(t, state.CompressionRQData.InputDim, restoredState.CompressionRQData.InputDim) + require.Equal(t, state.CompressionRQData.Bits, restoredState.CompressionRQData.Bits) + require.Equal(t, state.CompressionRQData.Rotation.OutputDim, restoredState.CompressionRQData.Rotation.OutputDim) + require.Equal(t, state.CompressionRQData.Rotation.Rounds, restoredState.CompressionRQData.Rotation.Rounds) + require.Equal(t, len(state.CompressionRQData.Rotation.Swaps), len(restoredState.CompressionRQData.Rotation.Swaps)) + require.Equal(t, 
len(state.CompressionRQData.Rotation.Signs), len(restoredState.CompressionRQData.Rotation.Signs)) + require.Equal(t, state.CompressionRQData.Rotation.Swaps[0][0].I, restoredState.CompressionRQData.Rotation.Swaps[0][0].I) + require.Equal(t, state.CompressionRQData.Rotation.Swaps[0][0].J, restoredState.CompressionRQData.Rotation.Swaps[0][0].J) + require.Equal(t, state.CompressionRQData.Rotation.Signs[0][0], restoredState.CompressionRQData.Rotation.Signs[0][0]) + }) + + t.Run("v2 metadata - with Muvera encoding", func(t *testing.T) { + // Create state with Muvera encoding + state := &DeserializationResult{ + Entrypoint: 172, + Level: 8, + MuveraEnabled: true, + Nodes: make([]*vertex, 180), + EncoderMuvera: &multivector.MuveraData{ + Dimensions: 8, + KSim: 2, + NumClusters: 4, + DProjections: 1, + Repetitions: 1, + Gaussians: [][][]float32{ + { + make([]float32, 8), + make([]float32, 8), + }, + }, + S: [][][]float32{ + { + make([]float32, 8), + }, + }, + }, + } + + // Initialize with some values + for i := 0; i < 8; i++ { + state.EncoderMuvera.Gaussians[0][0][i] = float32(i) * 0.1 + state.EncoderMuvera.S[0][0][i] = float32(i) * 0.2 + } + + dir := t.TempDir() + id := "test" + cl := createTestCommitLoggerForSnapshots(t, dir, id) + + // Write snapshot to a temporary file + snapshotPath := filepath.Join(snapshotDirectory(dir, id), "test.snapshot") + err := cl.writeSnapshot(state, snapshotPath) + require.NoError(t, err) + + // Read snapshot back + restoredState, err := cl.readSnapshot(snapshotPath) + require.NoError(t, err) + + // Verify all fields match + require.Equal(t, state.Compressed, false) + require.Equal(t, state.Entrypoint, restoredState.Entrypoint) + require.Equal(t, state.Level, restoredState.Level) + require.Equal(t, state.MuveraEnabled, restoredState.MuveraEnabled) + require.Equal(t, len(state.Nodes), len(restoredState.Nodes)) + require.NotNil(t, restoredState.EncoderMuvera) + require.Equal(t, state.EncoderMuvera.Dimensions, 
restoredState.EncoderMuvera.Dimensions) + require.Equal(t, state.EncoderMuvera.KSim, restoredState.EncoderMuvera.KSim) + require.Equal(t, state.EncoderMuvera.NumClusters, restoredState.EncoderMuvera.NumClusters) + require.Equal(t, state.EncoderMuvera.DProjections, restoredState.EncoderMuvera.DProjections) + require.Equal(t, state.EncoderMuvera.Repetitions, restoredState.EncoderMuvera.Repetitions) + require.Equal(t, len(state.EncoderMuvera.Gaussians), len(restoredState.EncoderMuvera.Gaussians)) + require.Equal(t, len(state.EncoderMuvera.S), len(restoredState.EncoderMuvera.S)) + require.Equal(t, state.EncoderMuvera.Gaussians[0][0][0], restoredState.EncoderMuvera.Gaussians[0][0][0]) + require.Equal(t, state.EncoderMuvera.S[0][0][0], restoredState.EncoderMuvera.S[0][0][0]) + }) + + t.Run("v2 metadata - with BRQ compression", func(t *testing.T) { + // Create state with BRQ compression + state := &DeserializationResult{ + Entrypoint: 212, + Level: 5, + Compressed: true, + Nodes: make([]*vertex, 250), + CompressionBRQData: &compressionhelpers.BRQData{ + InputDim: 8, + Rotation: compressionhelpers.FastRotation{ + OutputDim: 8, + Rounds: 1, + Swaps: [][]compressionhelpers.Swap{ + { + {I: 0, J: 1}, + {I: 2, J: 3}, + {I: 4, J: 5}, + {I: 6, J: 7}, + }, + }, + Signs: [][]float32{ + {1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0}, + }, + }, + Rounding: []float32{0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8}, + }, + } + + dir := t.TempDir() + id := "test" + cl := createTestCommitLoggerForSnapshots(t, dir, id) + + // Write snapshot to a temporary file + snapshotPath := filepath.Join(snapshotDirectory(dir, id), "test.snapshot") + err := cl.writeSnapshot(state, snapshotPath) + require.NoError(t, err) + + // Read snapshot back + restoredState, err := cl.readSnapshot(snapshotPath) + require.NoError(t, err) + + // Verify all fields match + require.Equal(t, state.Compressed, true) + require.Equal(t, state.Entrypoint, restoredState.Entrypoint) + require.Equal(t, state.Level, restoredState.Level) 
+ require.Equal(t, state.Compressed, restoredState.Compressed) + require.Equal(t, len(state.Nodes), len(restoredState.Nodes)) + require.NotNil(t, restoredState.CompressionBRQData) + require.Equal(t, state.CompressionBRQData.InputDim, restoredState.CompressionBRQData.InputDim) + require.Equal(t, state.CompressionBRQData.Rotation.OutputDim, restoredState.CompressionBRQData.Rotation.OutputDim) + require.Equal(t, state.CompressionBRQData.Rotation.Rounds, restoredState.CompressionBRQData.Rotation.Rounds) + require.Equal(t, len(state.CompressionBRQData.Rotation.Swaps), len(restoredState.CompressionBRQData.Rotation.Swaps)) + require.Equal(t, len(state.CompressionBRQData.Rotation.Signs), len(restoredState.CompressionBRQData.Rotation.Signs)) + require.Equal(t, state.CompressionBRQData.Rotation.Swaps[0][0].I, restoredState.CompressionBRQData.Rotation.Swaps[0][0].I) + require.Equal(t, state.CompressionBRQData.Rotation.Swaps[0][0].J, restoredState.CompressionBRQData.Rotation.Swaps[0][0].J) + require.Equal(t, state.CompressionBRQData.Rotation.Signs[0][0], restoredState.CompressionBRQData.Rotation.Signs[0][0]) + require.Equal(t, state.CompressionBRQData.Rounding[0], restoredState.CompressionBRQData.Rounding[0]) + }) + + t.Run("v2 metadata - with Muvera encoding and SQ compression", func(t *testing.T) { + // Create state with Muvera encoding + state := &DeserializationResult{ + Entrypoint: 172, + Level: 8, + Compressed: true, + MuveraEnabled: true, + Nodes: make([]*vertex, 180), + CompressionSQData: &compressionhelpers.SQData{ + Dimensions: 64, + A: 1.5, + B: 2.7, + }, + EncoderMuvera: &multivector.MuveraData{ + Dimensions: 8, + KSim: 2, + NumClusters: 4, + DProjections: 1, + Repetitions: 1, + Gaussians: [][][]float32{ + { + make([]float32, 8), + make([]float32, 8), + }, + }, + S: [][][]float32{ + { + make([]float32, 8), + }, + }, + }, + } + + // Initialize with some values + for i := 0; i < 8; i++ { + state.EncoderMuvera.Gaussians[0][0][i] = float32(i) * 0.1 + 
state.EncoderMuvera.S[0][0][i] = float32(i) * 0.2 + } + + dir := t.TempDir() + id := "test" + cl := createTestCommitLoggerForSnapshots(t, dir, id) + + // Write snapshot to a temporary file + snapshotPath := filepath.Join(snapshotDirectory(dir, id), "test.snapshot") + err := cl.writeSnapshot(state, snapshotPath) + require.NoError(t, err) + + // Read snapshot back + restoredState, err := cl.readSnapshot(snapshotPath) + require.NoError(t, err) + + // Verify all fields match + require.Equal(t, state.Compressed, true) + require.Equal(t, state.Entrypoint, restoredState.Entrypoint) + require.Equal(t, state.Level, restoredState.Level) + require.Equal(t, state.CompressionSQData.Dimensions, restoredState.CompressionSQData.Dimensions) + require.Equal(t, state.CompressionSQData.A, restoredState.CompressionSQData.A) + require.Equal(t, state.CompressionSQData.B, restoredState.CompressionSQData.B) + require.Equal(t, state.MuveraEnabled, restoredState.MuveraEnabled) + require.Equal(t, len(state.Nodes), len(restoredState.Nodes)) + require.NotNil(t, restoredState.EncoderMuvera) + require.Equal(t, state.EncoderMuvera.Dimensions, restoredState.EncoderMuvera.Dimensions) + require.Equal(t, state.EncoderMuvera.KSim, restoredState.EncoderMuvera.KSim) + require.Equal(t, state.EncoderMuvera.NumClusters, restoredState.EncoderMuvera.NumClusters) + require.Equal(t, state.EncoderMuvera.DProjections, restoredState.EncoderMuvera.DProjections) + require.Equal(t, state.EncoderMuvera.Repetitions, restoredState.EncoderMuvera.Repetitions) + require.Equal(t, len(state.EncoderMuvera.Gaussians), len(restoredState.EncoderMuvera.Gaussians)) + require.Equal(t, len(state.EncoderMuvera.S), len(restoredState.EncoderMuvera.S)) + require.Equal(t, state.EncoderMuvera.Gaussians[0][0][0], restoredState.EncoderMuvera.Gaussians[0][0][0]) + require.Equal(t, state.EncoderMuvera.S[0][0][0], restoredState.EncoderMuvera.S[0][0][0]) + }) + + t.Run("v2 metadata - compression is supported", func(t *testing.T) { + // Create 
state with compression (v2 supports compression) + state := &DeserializationResult{ + Entrypoint: 33, + Level: 10, + Compressed: false, + Nodes: make([]*vertex, 100), + } + + dir := t.TempDir() + id := "test" + cl := createTestCommitLoggerForSnapshots(t, dir, id) + + // Write snapshot to a temporary file + snapshotPath := filepath.Join(snapshotDirectory(dir, id), "test.snapshot") + err := cl.writeSnapshot(state, snapshotPath) + require.NoError(t, err) + + // Read snapshot back + restoredState, err := cl.readSnapshot(snapshotPath) + require.NoError(t, err) + require.Equal(t, state.Compressed, restoredState.Compressed) + }) + + t.Run("invalid version should fail", func(t *testing.T) { + state := &DeserializationResult{ + Entrypoint: 29, + Level: 3, + Compressed: false, + Nodes: make([]*vertex, 50), + } + + dir := t.TempDir() + id := "test" + cl := createTestCommitLoggerForSnapshots(t, dir, id) + + // Write snapshot to a temporary file + snapshotPath := filepath.Join(snapshotDirectory(dir, id), "test.snapshot") + err := cl.writeSnapshot(state, snapshotPath) + require.NoError(t, err) + + // Corrupt the version byte to make it invalid + data, err := os.ReadFile(snapshotPath) + require.NoError(t, err) + data[0] = 99 // invalid version + + // Write corrupted data back + err = os.WriteFile(snapshotPath, data, 0o644) + require.NoError(t, err) + + // Read should fail due to invalid version + _, err = cl.readSnapshot(snapshotPath) + require.Error(t, err) + require.Contains(t, err.Error(), "unsupported snapshot version 99") + }) + + t.Run("checksum validation", func(t *testing.T) { + state := &DeserializationResult{ + Entrypoint: 28, + Level: 6, + Compressed: false, + Nodes: make([]*vertex, 75), + } + + dir := t.TempDir() + id := "test" + cl := createTestCommitLoggerForSnapshots(t, dir, id) + + // Write snapshot to a temporary file + snapshotPath := filepath.Join(snapshotDirectory(dir, id), "test.snapshot") + err := cl.writeSnapshot(state, snapshotPath) + require.NoError(t, 
err) + + // Corrupt the data by changing a byte + data, err := os.ReadFile(snapshotPath) + require.NoError(t, err) + data[5] = data[5] ^ 0xFF // flip bits + + // Write corrupted data back + err = os.WriteFile(snapshotPath, data, 0o644) + require.NoError(t, err) + + // Read should fail due to checksum mismatch + _, err = cl.readSnapshot(snapshotPath) + require.Error(t, err) + require.Contains(t, err.Error(), "invalid checksum") + }) +} + +func TestCommitLogger_Snapshot_Race(t *testing.T) { + dir := t.TempDir() + id := "main" + cl := createTestCommitLoggerForSnapshots(t, dir, id) + clDir := commitLogDirectory(dir, id) + sDir := snapshotDirectory(dir, id) + os.MkdirAll(sDir, os.ModePerm) + + createCommitlogTestData(t, clDir, "1000.condensed", 1000, "1001.condensed", 1000, "1002.condensed", 1000) + + var wg sync.WaitGroup + for range 5 { + wg.Add(1) + go func() { + defer wg.Done() + // create snapshot + _, _, err := cl.CreateSnapshot() + require.NoError(t, err) + files := readDir(t, sDir) + require.Equal(t, []string{"1001.snapshot", "1001.snapshot.checkpoints"}, files) + }() + } + + wg.Wait() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/commit_logger_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/commit_logger_test.go new file mode 100644 index 0000000000000000000000000000000000000000..d38a4386c3da90841989cabb36ac064c9c6fbba6 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/commit_logger_test.go @@ -0,0 +1,205 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package hnsw + +import ( + "context" + "fmt" + "os" + "testing" + "time" + + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/cyclemanager" +) + +type MockDirEntry struct { + name string + isDir bool +} + +func (d MockDirEntry) Name() string { + return d.name +} + +func (d MockDirEntry) IsDir() bool { + return d.isDir +} + +func (d MockDirEntry) Type() os.FileMode { + return os.ModePerm +} + +func (d MockDirEntry) Info() (os.FileInfo, error) { + return nil, nil +} + +func TestRemoveTmpScratchOrHiddenFiles(t *testing.T) { + entries := []os.DirEntry{ + MockDirEntry{name: "1682473161", isDir: false}, + MockDirEntry{name: ".nfs6b46801cd962afbc00000005", isDir: false}, + MockDirEntry{name: ".mystery-folder", isDir: false}, + MockDirEntry{name: "1682473161.condensed", isDir: false}, + MockDirEntry{name: "1682473161.scratch.tmp", isDir: false}, + } + + expected := []os.DirEntry{ + MockDirEntry{name: "1682473161", isDir: false}, + MockDirEntry{name: "1682473161.condensed", isDir: false}, + } + + result := skipTmpScratchOrHiddenFiles(entries) + + if len(result) != len(expected) { + t.Errorf("Expected %d entries, got %d", len(expected), len(result)) + } + + for i, entry := range result { + if entry.Name() != expected[i].Name() { + t.Errorf("Expected entry %d to be %s, got %s", i, expected[i].Name(), entry.Name()) + } + } +} + +func TestCondenseLoop(t *testing.T) { + scratchDir := t.TempDir() + commitLogDir := createCondensorTestData(t, scratchDir) + createTestCommitLoggerWithOptions(t, scratchDir, "main", WithCondensor(&fakeCondensor{})) + + assert.EventuallyWithT(t, func(t *assert.CollectT) { + files, err := os.ReadDir(commitLogDir) + assert.Nil(t, err) + + // all existing files should be condensed, but there is of course also an + // active log, so we expect 2 files in total + assert.Len(t, files, 2) + + fileNames := 
make([]string, 0, len(files)) + for _, file := range files { + fileNames = append(fileNames, file.Name()) + } + + assert.ElementsMatch(t, []string{"1003.condensed", "1004"}, fileNames) + }, 5*time.Second, 50*time.Millisecond, "Condense loop did not run") +} + +func TestCondenseLoop_WithAllocChecker(t *testing.T) { + scratchDir := t.TempDir() + commitLogDir := createCondensorTestData(t, scratchDir) + createTestCommitLoggerWithOptions(t, scratchDir, "main", + WithCondensor(&fakeCondensor{}), WithAllocChecker(&fakeAllocChecker{})) + + assert.EventuallyWithT(t, func(t *assert.CollectT) { + files, err := os.ReadDir(commitLogDir) + assert.Nil(t, err) + + // all existing files should be condensed, but there is of course also an + // active log, so we expect 2 files in total + assert.Len(t, files, 2) + + fileNames := make([]string, 0, len(files)) + for _, file := range files { + fileNames = append(fileNames, file.Name()) + } + + assert.ElementsMatch(t, []string{"1003.condensed", "1004"}, fileNames) + }, 5*time.Second, 50*time.Millisecond, "Condense loop did not run") +} + +func TestCondenseLoop_WithAllocChecker_OOM(t *testing.T) { + scratchDir := t.TempDir() + commitLogDir := createCondensorTestData(t, scratchDir) + createTestCommitLoggerWithOptions(t, scratchDir, "main", + WithCondensor(&fakeCondensor{}), WithAllocChecker(&fakeAllocChecker{shouldErr: true})) + + // Wait 6 commit log cycles (50 ms) + time.Sleep(300 * time.Millisecond) + + // Ensure that files 1002, 1003, and 1004 still exist and have not been + // condensed due to the OOM checker, we ignore 1001.condensed and 1002.condensed + // as combining can still occur when OOM + files, err := os.ReadDir(commitLogDir) + assert.Nil(t, err) + assert.Len(t, files, 4) + + fileNames := make([]string, len(files)) + for i, file := range files { + fileNames[i] = file.Name() + } + + for _, expected := range []string{"1002", "1003", "1004"} { + assert.Contains(t, fileNames, expected) + } +} + +type fakeCondensor struct{} + 
+func (f fakeCondensor) Do(fileName string) error { + os.Rename(fileName, fmt.Sprintf("%s.condensed", fileName)) + return nil +} + +func createCondensorTestData(t *testing.T, scratchDir string) string { + commitLogDir := fmt.Sprintf("%s/main.hnsw.commitlog.d", scratchDir) + + os.MkdirAll(commitLogDir, os.ModePerm) + + // create dummy data + _, err := os.Create(fmt.Sprintf("%s/1000.condensed", commitLogDir)) + require.Nil(t, err) + _, err = os.Create(fmt.Sprintf("%s/1001.condensed", commitLogDir)) + require.Nil(t, err) + _, err = os.Create(fmt.Sprintf("%s/1002", commitLogDir)) + require.Nil(t, err) + _, err = os.Create(fmt.Sprintf("%s/1003", commitLogDir)) + require.Nil(t, err) + _, err = os.Create(fmt.Sprintf("%s/1004", commitLogDir)) // active log + require.Nil(t, err) + + return commitLogDir +} + +func createTestCommitLoggerWithOptions(t *testing.T, scratchDir string, name string, options ...CommitlogOption) *hnswCommitLogger { + logger, _ := test.NewNullLogger() + cbg := cyclemanager.NewCallbackGroup("test", logger, 10) + ticker := cyclemanager.NewLinearTicker(50*time.Millisecond, 60*time.Millisecond, 1) + cm := cyclemanager.NewManager(ticker, cbg.CycleCallback, logger) + cl, err := NewCommitLogger(scratchDir, name, logger, cbg, options...) 
+ require.Nil(t, err) + cl.InitMaintenance() + cm.Start() + + t.Cleanup(func() { + cl.Shutdown(context.Background()) + cm.Stop(context.Background()) + }) + + return cl +} + +type fakeAllocChecker struct { + shouldErr bool +} + +func (f fakeAllocChecker) CheckAlloc(sizeInBytes int64) error { + if f.shouldErr { + return fmt.Errorf("can't allocate %d bytes", sizeInBytes) + } + return nil +} + +func (f fakeAllocChecker) CheckMappingAndReserve(numberMappings int64, reservationTimeInS int) error { + return nil +} +func (f fakeAllocChecker) Refresh(updateMappings bool) {} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/commitlog/bufiowriter.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/commitlog/bufiowriter.go new file mode 100644 index 0000000000000000000000000000000000000000..1b344609b4deb7dbcab851a1130c3884c8e1611f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/commitlog/bufiowriter.go @@ -0,0 +1,186 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package commitlog + +import ( + "io" + "os" + "unicode/utf8" +) + +const ( + defaultBufSize = 4096 +) + +// bufWriter implements buffering for an *os.File object. +// If an error occurs writing to a bufWriter, no more data will be +// accepted and all subsequent writes, and Flush, will return the error. +// After all data has been written, the client should call the +// Flush method to guarantee all data has been forwarded to +// the underlying *os.File. +type bufWriter struct { + err error + buf []byte + n int + wr *os.File +} + +// NewWriterSize returns a new Writer whose buffer has at least the specified +// size. 
If the argument *os.File is already a Writer with large enough +// size, it returns the underlying Writer. +func NewWriterSize(w *os.File, size int) *bufWriter { + if size <= 0 { + size = defaultBufSize + } + return &bufWriter{ + buf: make([]byte, size), + wr: w, + } +} + +// NewWriter returns a new Writer whose buffer has the default size. +func NewWriter(w *os.File) *bufWriter { + return NewWriterSize(w, defaultBufSize) +} + +// Size returns the size of the underlying buffer in bytes. +func (b *bufWriter) Size() int { return len(b.buf) } + +// Reset discards any unflushed buffered data, clears any error, and +// resets b to write its output to w. +func (b *bufWriter) Reset(w *os.File) { + b.err = nil + b.n = 0 + b.wr = w +} + +// Flush writes any buffered data to the underlying *os.File. +func (b *bufWriter) Flush() error { + if b.err != nil { + return b.err + } + if b.n == 0 { + return nil + } + n, err := b.wr.Write(b.buf[0:b.n]) + if n < b.n && err == nil { + err = io.ErrShortWrite + } + if err != nil { + if n > 0 && n < b.n { + copy(b.buf[0:b.n-n], b.buf[n:b.n]) + } + b.n -= n + b.err = err + return err + } + b.n = 0 + return nil +} + +// Available returns how many bytes are unused in the buffer. +func (b *bufWriter) Available() int { return len(b.buf) - b.n } + +// Buffered returns the number of bytes that have been written into the current buffer. +func (b *bufWriter) Buffered() int { return b.n } + +// Write writes the contents of p into the buffer. +// It returns the number of bytes written. +// If nn < len(p), it also returns an error explaining +// why the write is short. +func (b *bufWriter) Write(p []byte) (nn int, err error) { + for len(p) > b.Available() && b.err == nil { + var n int + if b.Buffered() == 0 { + // Large write, empty buffer. + // Write directly from p to avoid copy. 
+ n, b.err = b.wr.Write(p) + } else { + n = copy(b.buf[b.n:], p) + b.n += n + b.Flush() + } + nn += n + p = p[n:] + } + if b.err != nil { + return nn, b.err + } + n := copy(b.buf[b.n:], p) + b.n += n + nn += n + return nn, nil +} + +// WriteByte writes a single byte. +func (b *bufWriter) WriteByte(c byte) error { + if b.err != nil { + return b.err + } + if b.Available() <= 0 && b.Flush() != nil { + return b.err + } + b.buf[b.n] = c + b.n++ + return nil +} + +// WriteRune writes a single Unicode code point, returning +// the number of bytes written and any error. +func (b *bufWriter) WriteRune(r rune) (size int, err error) { + if r < utf8.RuneSelf { + err = b.WriteByte(byte(r)) + if err != nil { + return 0, err + } + return 1, nil + } + if b.err != nil { + return 0, b.err + } + n := b.Available() + if n < utf8.UTFMax { + if b.Flush(); b.err != nil { + return 0, b.err + } + n = b.Available() + if n < utf8.UTFMax { + // Can only happen if buffer is silly small. + return b.WriteString(string(r)) + } + } + size = utf8.EncodeRune(b.buf[b.n:], r) + b.n += size + return size, nil +} + +// WriteString writes a string. +// It returns the number of bytes written. +// If the count is less than len(s), it also returns an error explaining +// why the write is short. 
+func (b *bufWriter) WriteString(s string) (int, error) { + nn := 0 + for len(s) > b.Available() && b.err == nil { + n := copy(b.buf[b.n:], s) + b.n += n + nn += n + s = s[n:] + b.Flush() + } + if b.err != nil { + return nn, b.err + } + n := copy(b.buf[b.n:], s) + b.n += n + nn += n + return nn, nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/commitlog/logger.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/commitlog/logger.go new file mode 100644 index 0000000000000000000000000000000000000000..a4737c9c8b2b81ce11876606c96c8f3929090ffd --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/commitlog/logger.go @@ -0,0 +1,362 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package commitlog + +import ( + "bytes" + "encoding/binary" + "math" + "os" + + "github.com/pkg/errors" + "github.com/weaviate/weaviate/adapters/repos/db/vector/compressionhelpers" + "github.com/weaviate/weaviate/adapters/repos/db/vector/multivector" +) + +type Logger struct { + file *os.File + bufw *bufWriter +} + +// TODO: these are duplicates with the hnsw package, unify them +type HnswCommitType uint8 // 256 options, plenty of room for future extensions + +// TODO: these are duplicates with the hnsw package, unify them +const ( + AddNode HnswCommitType = iota + SetEntryPointMaxLevel + AddLinkAtLevel + ReplaceLinksAtLevel + AddTombstone + RemoveTombstone + ClearLinks + DeleteNode + ResetIndex + ClearLinksAtLevel // added in v1.8.0-rc.1, see https://github.com/weaviate/weaviate/issues/1701 + AddLinksAtLevel // added in v1.8.0-rc.1, see https://github.com/weaviate/weaviate/issues/1705 + AddPQ + AddSQ + AddMuvera + AddRQ + AddBRQ +) + +func NewLogger(fileName string) 
*Logger { + file, err := os.Create(fileName) + if err != nil { + panic(err) + } + + return &Logger{file: file, bufw: NewWriter(file)} +} + +func NewLoggerWithFile(file *os.File) *Logger { + return &Logger{file: file, bufw: NewWriterSize(file, 32*1024)} +} + +func (l *Logger) SetEntryPointWithMaxLayer(id uint64, level int) error { + toWrite := make([]byte, 11) + toWrite[0] = byte(SetEntryPointMaxLevel) + binary.LittleEndian.PutUint64(toWrite[1:9], id) + binary.LittleEndian.PutUint16(toWrite[9:11], uint16(level)) + _, err := l.bufw.Write(toWrite) + return err +} + +func (l *Logger) AddNode(id uint64, level int) error { + toWrite := make([]byte, 11) + toWrite[0] = byte(AddNode) + binary.LittleEndian.PutUint64(toWrite[1:9], id) + binary.LittleEndian.PutUint16(toWrite[9:11], uint16(level)) + _, err := l.bufw.Write(toWrite) + return err +} + +func (l *Logger) AddPQCompression(data compressionhelpers.PQData) error { + toWrite := make([]byte, 10) + toWrite[0] = byte(AddPQ) + binary.LittleEndian.PutUint16(toWrite[1:3], data.Dimensions) + toWrite[3] = byte(data.EncoderType) + binary.LittleEndian.PutUint16(toWrite[4:6], data.Ks) + binary.LittleEndian.PutUint16(toWrite[6:8], data.M) + toWrite[8] = data.EncoderDistribution + if data.UseBitsEncoding { + toWrite[9] = 1 + } else { + toWrite[9] = 0 + } + + for _, encoder := range data.Encoders { + toWrite = append(toWrite, encoder.ExposeDataForRestore()...) 
+ } + _, err := l.bufw.Write(toWrite) + return err +} + +func (l *Logger) AddSQCompression(data compressionhelpers.SQData) error { + toWrite := make([]byte, 11) + toWrite[0] = byte(AddSQ) + binary.LittleEndian.PutUint32(toWrite[1:], math.Float32bits(data.A)) + binary.LittleEndian.PutUint32(toWrite[5:], math.Float32bits(data.B)) + binary.LittleEndian.PutUint16(toWrite[9:], data.Dimensions) + _, err := l.bufw.Write(toWrite) + return err +} + +func (l *Logger) AddRQCompression(data compressionhelpers.RQData) error { + swapSize := 2 * data.Rotation.Rounds * (data.Rotation.OutputDim / 2) * 2 + signSize := 4 * data.Rotation.Rounds * data.Rotation.OutputDim + var buf bytes.Buffer + buf.Grow(17 + int(swapSize) + int(signSize)) + + buf.WriteByte(byte(AddRQ)) // 1 + binary.Write(&buf, binary.LittleEndian, data.InputDim) // 4 input dim + binary.Write(&buf, binary.LittleEndian, data.Bits) // 4 bits + binary.Write(&buf, binary.LittleEndian, data.Rotation.OutputDim) // 4 rotation - output dim + binary.Write(&buf, binary.LittleEndian, data.Rotation.Rounds) // 4 rotation - rounds + + for _, swap := range data.Rotation.Swaps { + for _, dim := range swap { + binary.Write(&buf, binary.LittleEndian, dim.I) + binary.Write(&buf, binary.LittleEndian, dim.J) + } + } + + for _, sign := range data.Rotation.Signs { + for _, dim := range sign { + binary.Write(&buf, binary.LittleEndian, dim) + } + } + + _, err := l.bufw.Write(buf.Bytes()) + return err +} + +func (l *Logger) AddMuvera(data multivector.MuveraData) error { + gSize := 4 * data.Repetitions * data.KSim * data.Dimensions + dSize := 4 * data.Repetitions * data.DProjections * data.Dimensions + var buf bytes.Buffer + buf.Grow(21 + int(gSize) + int(dSize)) + + buf.WriteByte(byte(AddMuvera)) // 1 + binary.Write(&buf, binary.LittleEndian, data.KSim) // 4 + binary.Write(&buf, binary.LittleEndian, data.NumClusters) // 4 + binary.Write(&buf, binary.LittleEndian, data.Dimensions) // 4 + binary.Write(&buf, binary.LittleEndian, 
data.DProjections) // 4 + binary.Write(&buf, binary.LittleEndian, data.Repetitions) // 4 + + for _, gaussian := range data.Gaussians { + for _, cluster := range gaussian { + for _, el := range cluster { + binary.Write(&buf, binary.LittleEndian, math.Float32bits(el)) + } + } + } + + for _, matrix := range data.S { + for _, vector := range matrix { + for _, el := range vector { + binary.Write(&buf, binary.LittleEndian, math.Float32bits(el)) + } + } + } + + _, err := l.bufw.Write(buf.Bytes()) + return err +} + +func (l *Logger) AddBRQCompression(data compressionhelpers.BRQData) error { + swapSize := 2 * data.Rotation.Rounds * (data.Rotation.OutputDim / 2) * 2 + signSize := 4 * data.Rotation.Rounds * data.Rotation.OutputDim + roundingSize := 4 * data.Rotation.OutputDim + var buf bytes.Buffer + buf.Grow(13 + int(swapSize) + int(signSize) + int(roundingSize)) + + buf.WriteByte(byte(AddBRQ)) // 1 + binary.Write(&buf, binary.LittleEndian, data.InputDim) // 4 input dim + binary.Write(&buf, binary.LittleEndian, data.Rotation.OutputDim) // 4 rotation - output dim + binary.Write(&buf, binary.LittleEndian, data.Rotation.Rounds) // 4 rotation - rounds + + for _, swap := range data.Rotation.Swaps { + for _, dim := range swap { + binary.Write(&buf, binary.LittleEndian, dim.I) + binary.Write(&buf, binary.LittleEndian, dim.J) + } + } + + for _, sign := range data.Rotation.Signs { + for _, dim := range sign { + binary.Write(&buf, binary.LittleEndian, dim) + } + } + + for _, rounding := range data.Rounding { + binary.Write(&buf, binary.LittleEndian, rounding) + } + + _, err := l.bufw.Write(buf.Bytes()) + return err +} + +func (l *Logger) AddLinkAtLevel(id uint64, level int, target uint64) error { + toWrite := make([]byte, 19) + toWrite[0] = byte(AddLinkAtLevel) + binary.LittleEndian.PutUint64(toWrite[1:9], id) + binary.LittleEndian.PutUint16(toWrite[9:11], uint16(level)) + binary.LittleEndian.PutUint64(toWrite[11:19], target) + _, err := l.bufw.Write(toWrite) + return err +} + +func 
(l *Logger) AddLinksAtLevel(id uint64, level int, targets []uint64) error { + toWrite := make([]byte, 13+len(targets)*8) + toWrite[0] = byte(AddLinksAtLevel) + binary.LittleEndian.PutUint64(toWrite[1:9], id) + binary.LittleEndian.PutUint16(toWrite[9:11], uint16(level)) + binary.LittleEndian.PutUint16(toWrite[11:13], uint16(len(targets))) + for i, target := range targets { + offsetStart := 13 + i*8 + offsetEnd := offsetStart + 8 + binary.LittleEndian.PutUint64(toWrite[offsetStart:offsetEnd], target) + } + _, err := l.bufw.Write(toWrite) + return err +} + +// chunks links in increments of 8, so that we never have to allocate a dynamic +// []byte size which would be guaranteed to escape to the heap +func (l *Logger) ReplaceLinksAtLevel(id uint64, level int, targets []uint64) error { + headers := make([]byte, 13) + headers[0] = byte(ReplaceLinksAtLevel) + binary.LittleEndian.PutUint64(headers[1:9], id) + binary.LittleEndian.PutUint16(headers[9:11], uint16(level)) + binary.LittleEndian.PutUint16(headers[11:13], uint16(len(targets))) + _, err := l.bufw.Write(headers) + if err != nil { + return errors.Wrap(err, "write headers") + } + + i := 0 + // chunks of 8 + buf := make([]byte, 64) + for i < len(targets) { + if i != 0 && i%8 == 0 { + if _, err := l.bufw.Write(buf); err != nil { + return errors.Wrap(err, "write link chunk") + } + } + + pos := i % 8 + start := pos * 8 + end := start + 8 + binary.LittleEndian.PutUint64(buf[start:end], targets[i]) + + i++ + } + + // remainder + if i != 0 { + start := 0 + end := i % 8 * 8 + if end == 0 { + end = 64 + } + + if _, err := l.bufw.Write(buf[start:end]); err != nil { + return errors.Wrap(err, "write link remainder") + } + } + + return nil +} + +func (l *Logger) AddTombstone(id uint64) error { + toWrite := make([]byte, 9) + toWrite[0] = byte(AddTombstone) + binary.LittleEndian.PutUint64(toWrite[1:9], id) + _, err := l.bufw.Write(toWrite) + return err +} + +func (l *Logger) RemoveTombstone(id uint64) error { + toWrite := 
make([]byte, 9) + toWrite[0] = byte(RemoveTombstone) + binary.LittleEndian.PutUint64(toWrite[1:9], id) + _, err := l.bufw.Write(toWrite) + return err +} + +func (l *Logger) ClearLinks(id uint64) error { + toWrite := make([]byte, 9) + toWrite[0] = byte(ClearLinks) + binary.LittleEndian.PutUint64(toWrite[1:9], id) + _, err := l.bufw.Write(toWrite) + return err +} + +func (l *Logger) ClearLinksAtLevel(id uint64, level uint16) error { + toWrite := make([]byte, 11) + toWrite[0] = byte(ClearLinksAtLevel) + binary.LittleEndian.PutUint64(toWrite[1:9], id) + binary.LittleEndian.PutUint16(toWrite[9:11], level) + _, err := l.bufw.Write(toWrite) + return err +} + +func (l *Logger) DeleteNode(id uint64) error { + toWrite := make([]byte, 9) + toWrite[0] = byte(DeleteNode) + binary.LittleEndian.PutUint64(toWrite[1:9], id) + _, err := l.bufw.Write(toWrite) + return err +} + +func (l *Logger) Reset() error { + toWrite := make([]byte, 1) + toWrite[0] = byte(ResetIndex) + _, err := l.bufw.Write(toWrite) + return err +} + +func (l *Logger) FileSize() (int64, error) { + i, err := l.file.Stat() + if err != nil { + return -1, err + } + + return i.Size(), nil +} + +func (l *Logger) FileName() (string, error) { + i, err := l.file.Stat() + if err != nil { + return "", err + } + + return i.Name(), nil +} + +func (l *Logger) Flush() error { + return l.bufw.Flush() +} + +func (l *Logger) Close() error { + if err := l.bufw.Flush(); err != nil { + return err + } + + if err := l.file.Close(); err != nil { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/commitlog/logger_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/commitlog/logger_test.go new file mode 100644 index 0000000000000000000000000000000000000000..f4c9aa186fc91a9d292040d6536d4336b3505462 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/commitlog/logger_test.go @@ -0,0 +1,178 @@ +// _ _ +// __ _____ __ 
___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package commitlog + +import ( + "os" + "testing" +) + +func BenchmarkSetEntryPoint(b *testing.B) { + defer os.Remove("./testfile") + ids := make([]uint64, 100) + levels := make([]int, 100) + + l := NewLogger("./testfile") + + b.ReportAllocs() + + for j := 0; j < b.N; j++ { + for i := 0; i < 100; i++ { + l.SetEntryPointWithMaxLayer(ids[i], levels[i]) + } + } +} + +func BenchmarkAddNode(b *testing.B) { + defer os.Remove("./testfile") + ids := make([]uint64, 100) + levels := make([]int, 100) + + l := NewLogger("./testfile") + + b.ReportAllocs() + + for j := 0; j < b.N; j++ { + for i := 0; i < 100; i++ { + l.AddNode(ids[i], levels[i]) + } + } +} + +func BenchmarkAddLinkAtLevel(b *testing.B) { + defer os.Remove("./testfile") + ids := make([]uint64, 100) + levels := make([]int, 100) + links := make([]uint64, 100) + + l := NewLogger("./testfile") + + b.ReportAllocs() + + for j := 0; j < b.N; j++ { + for i := 0; i < 100; i++ { + l.AddLinkAtLevel(ids[i], levels[i], links[i]) + } + } +} + +func BenchmarkReplaceLinksAtLevel32(b *testing.B) { + defer os.Remove("./testfile") + ids := make([]uint64, 100) + levels := make([]int, 100) + links := make([][]uint64, 100) + for i := range links { + links[i] = make([]uint64, 32) + } + + l := NewLogger("./testfile") + + b.ReportAllocs() + + for j := 0; j < b.N; j++ { + for i := 0; i < 100; i++ { + l.ReplaceLinksAtLevel(ids[i], levels[i], links[i]) + } + } +} + +func BenchmarkReplaceLinksAtLevel33(b *testing.B) { + defer os.Remove("./testfile") + ids := make([]uint64, 100) + levels := make([]int, 100) + links := make([][]uint64, 100) + for i := range links { + links[i] = make([]uint64, 33) + } + + l := NewLogger("./testfile") + + b.ReportAllocs() + + for j := 0; j < b.N; j++ { + 
for i := 0; i < 100; i++ { + l.ReplaceLinksAtLevel(ids[i], levels[i], links[i]) + } + } +} + +func BenchmarkAddTombstone(b *testing.B) { + defer os.Remove("./testfile") + ids := make([]uint64, 100) + + l := NewLogger("./testfile") + + b.ReportAllocs() + + for j := 0; j < b.N; j++ { + for i := 0; i < 100; i++ { + l.AddTombstone(ids[i]) + } + } +} + +func BenchmarkRemoveTombstone(b *testing.B) { + defer os.Remove("./testfile") + ids := make([]uint64, 100) + + l := NewLogger("./testfile") + + b.ReportAllocs() + + for j := 0; j < b.N; j++ { + for i := 0; i < 100; i++ { + l.AddTombstone(ids[i]) + } + } +} + +func BenchmarkClearLinks(b *testing.B) { + defer os.Remove("./testfile") + ids := make([]uint64, 100) + + l := NewLogger("./testfile") + + b.ReportAllocs() + + for j := 0; j < b.N; j++ { + for i := 0; i < 100; i++ { + l.ClearLinks(ids[i]) + } + } +} + +func BenchmarkDeleteNode(b *testing.B) { + ids := make([]uint64, 100) + + l := NewLogger("./testfile") + + b.ReportAllocs() + + for j := 0; j < b.N; j++ { + for i := 0; i < 100; i++ { + l.DeleteNode(ids[i]) + } + } +} + +func BenchmarkReset(b *testing.B) { + defer os.Remove("./testfile") + l := NewLogger("./testfile") + + b.ReportAllocs() + + for j := 0; j < b.N; j++ { + for i := 0; i < 100; i++ { + l.Reset() + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/compress.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/compress.go new file mode 100644 index 0000000000000000000000000000000000000000..eae565ac91c11da0bfb64ee57794879622bd7874 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/compress.go @@ -0,0 +1,162 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package hnsw + +import ( + "context" + "errors" + "fmt" + + "github.com/weaviate/weaviate/adapters/repos/db/vector/common" + "github.com/weaviate/weaviate/adapters/repos/db/vector/compressionhelpers" + "github.com/weaviate/weaviate/entities/storobj" + ent "github.com/weaviate/weaviate/entities/vectorindex/hnsw" +) + +func (h *hnsw) compress(cfg ent.UserConfig) error { + if !cfg.PQ.Enabled && !cfg.BQ.Enabled && !cfg.SQ.Enabled && !cfg.RQ.Enabled { + return nil + } + h.compressActionLock.Lock() + defer h.compressActionLock.Unlock() + data := h.cache.All() + singleVector := !h.multivector.Load() || h.muvera.Load() + if cfg.PQ.Enabled || cfg.SQ.Enabled { + if h.isEmpty() { + return errors.New("compress command cannot be executed before inserting some data") + } + cleanData := make([][]float32, 0, len(data)) + sampler := common.NewSparseFisherYatesIterator(len(data)) + for !sampler.IsDone() { + // Sparse Fisher Yates sampling algorithm to choose random element + sampledIndex := sampler.Next() + if sampledIndex == nil { + break + } + // Rather than just taking the cache dump at face value, let's explicitly + // request the vectors. Otherwise we would miss any vector that's currently + // not in the cache, for example because the cache is not hot yet after a + // restart. 
+ p, err := h.cache.Get(context.Background(), uint64(*sampledIndex)) + if err != nil { + var e storobj.ErrNotFound + if errors.As(err, &e) { + // already deleted, ignore + continue + } else { + return fmt.Errorf("unexpected error obtaining vectors for fitting: %w", err) + } + } + + if p == nil { + // already deleted, ignore + continue + } + + cleanData = append(cleanData, p) + if len(cleanData) >= cfg.PQ.TrainingLimit { + break + } + } + if cfg.PQ.Enabled { + dims := int(h.dims) + + if cfg.PQ.Segments <= 0 { + cfg.PQ.Segments = common.CalculateOptimalSegments(dims) + h.pqConfig.Segments = cfg.PQ.Segments + } + + var err error + if singleVector { + h.compressor, err = compressionhelpers.NewHNSWPQCompressor( + cfg.PQ, h.distancerProvider, dims, 1e12, h.logger, cleanData, h.store, + h.allocChecker) + } else { + h.compressor, err = compressionhelpers.NewHNSWPQMultiCompressor( + cfg.PQ, h.distancerProvider, dims, 1e12, h.logger, cleanData, h.store, + h.allocChecker) + } + if err != nil { + h.pqConfig.Enabled = false + return fmt.Errorf("compressing vectors: %w", err) + } + } else if cfg.SQ.Enabled { + var err error + if singleVector { + h.compressor, err = compressionhelpers.NewHNSWSQCompressor( + h.distancerProvider, 1e12, h.logger, cleanData, h.store, + h.allocChecker) + } else { + h.compressor, err = compressionhelpers.NewHNSWSQMultiCompressor( + h.distancerProvider, 1e12, h.logger, cleanData, h.store, + h.allocChecker) + } + if err != nil { + h.sqConfig.Enabled = false + return fmt.Errorf("compressing vectors: %w", err) + } + } + h.compressor.PersistCompression(h.commitLog) + } else if cfg.BQ.Enabled { + var err error + if singleVector { + h.compressor, err = compressionhelpers.NewBQCompressor( + h.distancerProvider, 1e12, h.logger, h.store, h.allocChecker) + } else { + h.compressor, err = compressionhelpers.NewBQMultiCompressor( + h.distancerProvider, 1e12, h.logger, h.store, h.allocChecker) + } + if err != nil { + return err + } + } else if cfg.RQ.Enabled { + var 
err error + h.trackRQOnce.Do(func() { + if singleVector { + h.compressor, err = compressionhelpers.NewRQCompressor( + h.distancerProvider, 1e12, h.logger, h.store, h.allocChecker, int(h.rqConfig.Bits), int(h.dims)) + } else { + h.compressor, err = compressionhelpers.NewRQMultiCompressor( + h.distancerProvider, 1e12, h.logger, h.store, h.allocChecker, int(h.rqConfig.Bits), int(h.dims)) + } + if err == nil { + h.rqConfig.RescoreLimit = cfg.RQ.RescoreLimit + h.compressor.PersistCompression(h.commitLog) + } + }) + if err != nil { + return err + } + } + if singleVector { + compressionhelpers.Concurrently(h.logger, uint64(len(data)), + func(index uint64) { + if data[index] == nil { + return + } + h.compressor.Preload(index, data[index]) + }) + } else { + compressionhelpers.Concurrently(h.logger, uint64(len(data)), + func(index uint64) { + if len(data[index]) == 0 { + return + } + docID, relativeID := h.cache.GetKeys(index) + h.compressor.PreloadPassage(index, docID, relativeID, data[index]) + }) + } + + h.compressed.Store(true) + h.cache.Drop() + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/compress_deletes_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/compress_deletes_test.go new file mode 100644 index 0000000000000000000000000000000000000000..9b0b4023f8f8ec6f617b25d65b58532870d948aa --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/compress_deletes_test.go @@ -0,0 +1,187 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +//go:build !race + +package hnsw + +import ( + "context" + "fmt" + "os" + "testing" + + "github.com/sirupsen/logrus/hooks/test" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/repos/db/vector/common" + "github.com/weaviate/weaviate/adapters/repos/db/vector/compressionhelpers" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer" + "github.com/weaviate/weaviate/adapters/repos/db/vector/testinghelpers" + "github.com/weaviate/weaviate/entities/cyclemanager" + "github.com/weaviate/weaviate/entities/storobj" + ent "github.com/weaviate/weaviate/entities/vectorindex/hnsw" +) + +func Test_NoRaceCompressDoesNotCrash(t *testing.T) { + logger, _ := test.NewNullLogger() + efConstruction := 64 + ef := 32 + maxNeighbors := 32 + dimensions := 20 + vectors_size := 10000 + queries_size := 100 + k := 100 + ctx := context.Background() + delete_indices := make([]uint64, 0, 1000) + for i := 0; i < 1000; i++ { + delete_indices = append(delete_indices, uint64(i+10)) + } + delete_indices = append(delete_indices, uint64(1)) + + vectors, queries := testinghelpers.RandomVecs(vectors_size, queries_size, dimensions) + distancer := distancer.NewL2SquaredProvider() + + uc := ent.UserConfig{} + uc.MaxConnections = maxNeighbors + uc.EFConstruction = efConstruction + uc.EF = ef + uc.VectorCacheMaxObjects = 10e12 + uc.PQ = ent.PQConfig{Enabled: true, Encoder: ent.PQEncoder{Type: "title", Distribution: "normal"}} + + index, _ := New(Config{ + RootPath: t.TempDir(), + ID: "recallbenchmark", + MakeCommitLoggerThunk: MakeNoopCommitLogger, + DistanceProvider: distancer, + VectorForIDThunk: func(ctx context.Context, id uint64) ([]float32, error) { + if int(id) >= len(vectors) { + return nil, storobj.NewErrNotFoundf(id, "out of range") + } + return vectors[int(id)], nil + }, + TempVectorForIDThunk: func(ctx context.Context, id uint64, container *common.VectorSlice) 
([]float32, error) { + copy(container.Slice, vectors[int(id)]) + return container.Slice, nil + }, + }, uc, cyclemanager.NewCallbackGroupNoop(), testinghelpers.NewDummyStore(t)) + defer index.Shutdown(context.Background()) + assert.Nil(t, compressionhelpers.ConcurrentlyWithError(logger, uint64(len(vectors)), func(id uint64) error { + return index.Add(ctx, uint64(id), vectors[id]) + })) + index.Delete(delete_indices...) + + cfg := ent.PQConfig{ + Enabled: true, + Encoder: ent.PQEncoder{ + Type: ent.PQEncoderTypeKMeans, + Distribution: ent.PQEncoderDistributionLogNormal, + }, + Segments: dimensions, + Centroids: 256, + } + uc.PQ = cfg + index.compress(uc) + for _, v := range queries { + _, _, err := index.SearchByVector(ctx, v, k, nil) + assert.Nil(t, err) + } +} + +func TestHnswPqNilVectors(t *testing.T) { + dimensions := 20 + vectors_size := 10_000 + queries_size := 10 + logger, _ := test.NewNullLogger() + vectors, _ := testinghelpers.RandomVecs(vectors_size, queries_size, dimensions) + ctx := context.Background() + + // set some vectors to nil + for i := range vectors { + if i == 500 { + vectors[i] = nil + } + } + + userConfig := ent.UserConfig{ + MaxConnections: 30, + EFConstruction: 64, + EF: 32, + + // The actual size does not matter for this test, but if it defaults to + // zero it will constantly think it's full and needs to be deleted - even + // after just being deleted, so make sure to use a positive number here. 
+ VectorCacheMaxObjects: 1000000, + } + + rootPath := "doesnt-matter-as-committlogger-is-mocked-out" + defer func(path string) { + err := os.RemoveAll(path) + if err != nil { + fmt.Println(err) + } + }(rootPath) + + index, err := New(Config{ + RootPath: rootPath, + ID: "nil-vector-test", + MakeCommitLoggerThunk: MakeNoopCommitLogger, + DistanceProvider: distancer.NewCosineDistanceProvider(), + VectorForIDThunk: func(ctx context.Context, id uint64) ([]float32, error) { + vec := vectors[int(id)] + if vec == nil { + return nil, storobj.NewErrNotFoundf(id, "nil vec") + } + return vec, nil + }, + TempVectorForIDThunk: TempVectorForIDThunk(vectors), + }, userConfig, cyclemanager.NewCallbackGroupNoop(), testinghelpers.NewDummyStore(t)) + + require.NoError(t, err) + + compressionhelpers.Concurrently(logger, uint64(len(vectors)/2), func(id uint64) { + if vectors[id] == nil { + return + } + + err := index.Add(ctx, uint64(id), vectors[id]) + require.Nil(t, err) + }) + + userConfig.PQ = ent.PQConfig{ + Enabled: true, + Encoder: ent.PQEncoder{ + Type: ent.PQEncoderTypeTile, + Distribution: ent.PQEncoderDistributionLogNormal, + }, + BitCompression: false, + Segments: dimensions, + Centroids: 256, + } + + ch := make(chan error) + err = index.UpdateUserConfig(userConfig, func() { + close(ch) + }) + require.NoError(t, err) + + <-ch + start := uint64(len(vectors) / 2) + compressionhelpers.Concurrently(logger, uint64(len(vectors)/2), func(id uint64) { + if vectors[id+start] == nil { + return + } + + err = index.Add(ctx, uint64(id)+start, vectors[id+start]) + require.Nil(t, err) + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/compress_recall_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/compress_recall_test.go new file mode 100644 index 0000000000000000000000000000000000000000..4f4a8bfec6a6011513cdfd8a6f2eb34d1c056e4a --- /dev/null +++ 
b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/compress_recall_test.go @@ -0,0 +1,145 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +//go:build !race + +package hnsw_test + +import ( + "context" + "fmt" + "os" + "sync" + "testing" + "time" + + "github.com/sirupsen/logrus/hooks/test" + + "github.com/stretchr/testify/assert" + "github.com/weaviate/weaviate/adapters/repos/db/vector/common" + "github.com/weaviate/weaviate/adapters/repos/db/vector/compressionhelpers" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer" + "github.com/weaviate/weaviate/adapters/repos/db/vector/testinghelpers" + "github.com/weaviate/weaviate/entities/cyclemanager" + "github.com/weaviate/weaviate/entities/storobj" + ent "github.com/weaviate/weaviate/entities/vectorindex/hnsw" +) + +func distanceWrapper(provider distancer.Provider) func(x, y []float32) float32 { + return func(x, y []float32) float32 { + dist, _ := provider.SingleDist(x, y) + return dist + } +} + +func Test_NoRaceCompressionRecall(t *testing.T) { + ctx := context.Background() + path := t.TempDir() + + efConstruction := 64 + ef := 64 + maxNeighbors := 32 + segments := 4 + dimensions := 64 + vectors_size := 10000 + queries_size := 100 + fmt.Println("Sift1M PQ") + before := time.Now() + vectors, queries := testinghelpers.RandomVecs(vectors_size, queries_size, dimensions) + testinghelpers.Normalize(vectors) + testinghelpers.Normalize(queries) + k := 100 + + logger, _ := test.NewNullLogger() + + distancers := []distancer.Provider{ + distancer.NewL2SquaredProvider(), + distancer.NewCosineDistanceProvider(), + distancer.NewDotProductProvider(), + } + + for _, distancer := range distancers 
{ + truths := make([][]uint64, queries_size) + compressionhelpers.Concurrently(logger, uint64(len(queries)), func(i uint64) { + truths[i], _ = testinghelpers.BruteForce(logger, vectors, queries[i], k, distanceWrapper(distancer)) + }) + fmt.Printf("generating data took %s\n", time.Since(before)) + + uc := ent.UserConfig{ + MaxConnections: maxNeighbors, + EFConstruction: efConstruction, + EF: ef, + VectorCacheMaxObjects: 10e12, + } + index, _ := hnsw.New(hnsw.Config{ + RootPath: path, + ID: "recallbenchmark", + MakeCommitLoggerThunk: hnsw.MakeNoopCommitLogger, + ClassName: "clasRecallBenchmark", + ShardName: "shardRecallBenchmark", + DistanceProvider: distancer, + VectorForIDThunk: func(ctx context.Context, id uint64) ([]float32, error) { + if int(id) >= len(vectors) { + return nil, storobj.NewErrNotFoundf(id, "out of range") + } + return vectors[int(id)], nil + }, + TempVectorForIDThunk: func(ctx context.Context, id uint64, container *common.VectorSlice) ([]float32, error) { + copy(container.Slice, vectors[int(id)]) + return container.Slice, nil + }, + }, uc, cyclemanager.NewCallbackGroupNoop(), testinghelpers.NewDummyStore(t)) + init := time.Now() + compressionhelpers.Concurrently(logger, uint64(vectors_size), func(id uint64) { + index.Add(ctx, id, vectors[id]) + }) + before = time.Now() + fmt.Println("Start compressing...") + uc.PQ = ent.PQConfig{ + Enabled: true, + Segments: dimensions / segments, + Centroids: 256, + Encoder: ent.NewDefaultUserConfig().PQ.Encoder, + } + uc.EF = 256 + wg := sync.WaitGroup{} + wg.Add(1) + index.UpdateUserConfig(uc, func() { + fmt.Printf("Time to compress: %s\n", time.Since(before)) + fmt.Printf("Building the index took %s\n", time.Since(init)) + + var relevant uint64 + var retrieved int + + var querying time.Duration = 0 + compressionhelpers.Concurrently(logger, uint64(len(queries)), func(i uint64) { + before = time.Now() + results, _, _ := index.SearchByVector(ctx, queries[i], k, nil) + querying += time.Since(before) + retrieved 
+= k + relevant += testinghelpers.MatchesInLists(truths[i], results) + }) + + recall := float32(relevant) / float32(retrieved) + latency := float32(querying.Microseconds()) / float32(queries_size) + fmt.Println(recall, latency) + assert.True(t, recall > 0.9) + + err := os.RemoveAll(path) + if err != nil { + fmt.Println(err) + } + wg.Done() + }) + wg.Wait() + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/compress_sift_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/compress_sift_test.go new file mode 100644 index 0000000000000000000000000000000000000000..81fd0190a4675438bf78e96bc5e85b119173b199 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/compress_sift_test.go @@ -0,0 +1,614 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +//go:build benchmarkSiftRecall +// +build benchmarkSiftRecall + +package hnsw_test + +import ( + "context" + "fmt" + "io/ioutil" + "math" + "os" + "strconv" + "strings" + "sync" + "testing" + "time" + + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv" + "github.com/weaviate/weaviate/adapters/repos/db/vector/compressionhelpers" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer" + "github.com/weaviate/weaviate/adapters/repos/db/vector/testinghelpers" + "github.com/weaviate/weaviate/entities/cyclemanager" + ent "github.com/weaviate/weaviate/entities/vectorindex/hnsw" +) + +func distanceWrapper(provider distancer.Provider) func(x, y []float32) float32 { + return func(x, y []float32) float32 { + dist, _ := provider.SingleDist(x, y) + return dist + } +} + +const rootPath = "doesnt-matter-as-committlogger-is-mocked-out" + +func TestRecall(t *testing.T) { + defer func(path string) { + err := os.RemoveAll(path) + if err != nil { + fmt.Println(err) + } + }(rootPath) + fmt.Println("Sift1MPQKMeans 10K/1K") + efConstruction := 64 + ef := 32 + maxNeighbors := 32 + dimensions := 128 + vectors_size := 200000 + queries_size := 100 + switch_at := vectors_size + before := time.Now() + vectors, queries := testinghelpers.RandomVecs(vectors_size, queries_size, dimensions) + k := 10 + distancer := distancer.NewL2SquaredProvider() + fmt.Printf("generating data took %s\n", time.Since(before)) + + uc := ent.UserConfig{} + uc.MaxConnections = maxNeighbors + uc.EFConstruction = efConstruction + uc.EF = ef + uc.VectorCacheMaxObjects = 10e12 + + index, _ := hnsw.New(hnsw.Config{ + RootPath: rootPath, + ID: "recallbenchmark", + MakeCommitLoggerThunk: hnsw.MakeNoopCommitLogger, + DistanceProvider: distancer, + VectorForIDThunk: func(ctx 
context.Context, id uint64) ([]float32, error) { + return vectors[int(id)], nil + }, + }, uc, newDummyStore(t)) + init := time.Now() + compressionhelpers.Concurrently(uint64(switch_at), func(_, id uint64, _ *sync.Mutex) { + index.Add(uint64(id), vectors[id]) + if id%1000 == 0 { + fmt.Println(id, time.Since(before)) + } + }) + before = time.Now() + uc.PQ.Enabled = true + index.UpdateUserConfig(uc, func() {}) /*should have configuration.pr.enabled = true*/ + fmt.Printf("Time to compress: %s", time.Since(before)) + fmt.Println() + compressionhelpers.Concurrently(uint64(vectors_size-switch_at), func(_, id uint64, _ *sync.Mutex) { + idx := switch_at + int(id) + index.Add(uint64(idx), vectors[idx]) + if id%1000 == 0 { + fmt.Println(idx, time.Since(before)) + } + }) + fmt.Printf("Building the index took %s\n", time.Since(init)) + + lastRecall := float32(0.0) + for _, currentEF := range []int{32, 64, 128, 256, 512} { + uc.EF = currentEF + index.UpdateUserConfig(uc, func() {}) + fmt.Println(currentEF) + var relevant uint64 + var retrieved int + + var querying time.Duration = 0 + for i := 0; i < len(queries); i++ { + truth := testinghelpers.BruteForce(vectors, queries[i], k, distanceWrapper(distancer)) + before = time.Now() + results, _, _ := index.SearchByVector(queries[i], k, nil) + querying += time.Since(before) + retrieved += k + relevant += testinghelpers.MatchesInLists(truth, results) + } + + recall := float32(relevant) / float32(retrieved) + assert.True(t, recall > float32(lastRecall)) + lastRecall = recall + } + assert.True(t, lastRecall > 0.95) +} + +func TestHnswPqGist(t *testing.T) { + defer func(path string) { + err := os.RemoveAll(path) + if err != nil { + fmt.Println(err) + } + }(rootPath) + params := [][]int{ + //{64, 64, 32}, + {128, 128, 64}, + {256, 256, 128}, + {512, 512, 256}, + } + dimensions := 960 + vectors_size := 1000000 + queries_size := 1000 + switch_at := 200000 + + before := time.Now() + vectors, queries := testinghelpers.ReadVecs(vectors_size, 
queries_size, dimensions, "gist", "../diskAnn/generated_testdata") + testinghelpers.Normalize(vectors) + testinghelpers.Normalize(queries) + for i, v := range vectors { + for j, x := range v { + if math.IsNaN(float64(x)) { + fmt.Println(i, j, v, x) + } + } + } + k := 100 + distancer := distancer.NewCosineDistanceProvider() + truths := testinghelpers.BuildTruths(queries_size, vectors_size, queries, vectors, k, distanceWrapper(distancer), "../diskAnn/generated_testdata/gist/cosine") + fmt.Printf("generating data took %s\n", time.Since(before)) + for segmentRate := 3; segmentRate < 4; segmentRate++ { + fmt.Println(segmentRate) + fmt.Println() + for i := 0; i < len(params); i++ { + efConstruction := params[i][0] + ef := params[i][1] + maxNeighbors := params[i][2] + + uc := ent.UserConfig{ + MaxConnections: maxNeighbors, + EFConstruction: efConstruction, + EF: ef, + VectorCacheMaxObjects: 10e12, + PQ: ent.PQConfig{ + Enabled: false, + Segments: dimensions / int(math.Pow(2, float64(segmentRate))), + Encoder: ent.PQEncoder{ + Type: ent.PQEncoderTypeKMeans, + Distribution: ent.PQEncoderDistributionLogNormal, + }, + }, + } + index, _ := hnsw.New(hnsw.Config{ + RootPath: rootPath, + ID: "recallbenchmark", + MakeCommitLoggerThunk: hnsw.MakeNoopCommitLogger, + DistanceProvider: distancer, + VectorForIDThunk: func(ctx context.Context, id uint64) ([]float32, error) { + return vectors[int(id)], nil + }, + }, uc, newDummyStore(t)) + init := time.Now() + total := 200000 + compressionhelpers.Concurrently(uint64(switch_at), func(_, id uint64, _ *sync.Mutex) { + total++ + if total%100000 == 0 { + fmt.Println(total) + } + index.Add(uint64(id), vectors[id]) + }) + before = time.Now() + uc.PQ.Enabled = true + index.UpdateUserConfig(uc, func() {}) + fmt.Printf("Time to compress: %s", time.Since(before)) + fmt.Println() + compressionhelpers.Concurrently(uint64(vectors_size-switch_at), func(_, id uint64, _ *sync.Mutex) { + idx := switch_at + int(id) + index.Add(uint64(idx), vectors[idx]) + 
}) + fmt.Printf("Building the index took %s\n", time.Since(init)) + var relevant uint64 + var retrieved int + + var querying time.Duration = 0 + compressionhelpers.Concurrently(uint64(len(queries)), func(_, i uint64, _ *sync.Mutex) { + before = time.Now() + results, _, _ := index.SearchByVector(queries[i], k, nil) + querying += time.Since(before) + retrieved += k + relevant += testinghelpers.MatchesInLists(truths[i], results) + }) + + recall := float32(relevant) / float32(retrieved) + latency := float32(querying.Microseconds()) / float32(queries_size) + fmt.Println(recall, latency) + assert.True(t, recall > 0.9) + assert.True(t, latency < 100000) + } + } +} + +/* +10K +128 segments, 16 centroids -> 5.280255291s 0.90662 387.505 +64 segments, 256 centroids -> 6.585159916s 0.9326827 410.413 + +100K +128 segments, 16 centroids -> 1m17.634662125s 0.88258 692.081 +64 segments, 256 centroids -> 1m29.259369458s 0.92157 575.844 + +100000 +128 +Building the index took 47.7846745s +0.92627 664.66 + +{64, 64, 32, 256, 0}, + + {64, 64, 32, 1024, 1}, + {64, 64, 32, 4096, 1}, + {64, 64, 32, 16384, 1}, + {64, 64, 32, 65536, 1}, + +generating data took 2.072473792s +0 +Time to compress: 5m39.750884042s +Building the index took 16m30.632114542s +0.91401 747.82 +1 +Time to compress: 13m12.011102334s +Building the index took 28m12.879802125s +0.89564 1041.358 +2 +Time to compress: 58m15.252058416s +Building the index took 1h37m10.217039334s +0.90836 2299.629 +3 +Time to compress: 3h59m39.032524584s +Building the index took 5h54m55.046038916s +0.91295 4786.8 + +generating data took 2.119674416s +0 +Start compressing... +Time to compress: 1m3.037853584s +Building the index took 3m53.429316209s +0.40992 169.937 +1 +Start compressing... +Time to compress: 2m4.653952334s +Building the index took 5m44.454856667s +0.46251252 207.299 +2 +Start compressing... +Time to compress: 4m7.585857584s +Building the index took 8m36.2004675s +0.50494 293.362 +3 +Start compressing... 
+Time to compress: 8m16.4155035s +Building the index took 14m37.089003166s +0.54421 390.49 +4 +Start compressing... +Time to compress: 16m32.313318708s +Building the index took 26m46.66661125s +0.57827 442.589 +*/ +func TestHnswPqSift(t *testing.T) { + defer func(path string) { + err := os.RemoveAll(path) + if err != nil { + fmt.Println(err) + } + }(rootPath) + params := [][]int{ + {64, 64, 32, 256, 3}, + {64, 64, 32, 512, 3}, + {64, 64, 32, 1024, 3}, + {64, 64, 32, 2048, 3}, + {64, 64, 32, 4096, 3}, + {64, 64, 32, 65536, 3}, + } + dimensions := 128 + vectors_size := 1000000 + queries_size := 1000 + switch_at := 200000 + fmt.Println("Sift1M PQ") + before := time.Now() + vectors, queries := testinghelpers.ReadVecs(vectors_size, queries_size, dimensions, "sift", "../diskAnn/generated_testdata") + k := 100 + distancer := distancer.NewL2SquaredProvider() + truths := testinghelpers.BuildTruths(queries_size, vectors_size, queries, vectors, k, distanceWrapper(distancer), "../diskAnn/generated_testdata") + fmt.Printf("generating data took %s\n", time.Since(before)) + for i := 0; i < len(params); i++ { + fmt.Println(i) + efConstruction := params[i][0] + ef := params[i][1] + maxNeighbors := params[i][2] + centroids := params[i][3] + segmentRate := params[i][4] + if centroids > switch_at { + fmt.Println("Increasing switch at...") + switch_at = 650000 + } + + uc := ent.UserConfig{ + MaxConnections: maxNeighbors, + EFConstruction: efConstruction, + EF: ef, + PQ: ent.PQConfig{ + Enabled: false, + Segments: dimensions / int(math.Pow(2, float64(segmentRate))), + Encoder: ent.PQEncoder{ + Type: ent.PQEncoderTypeTile, + Distribution: ent.PQEncoderDistributionLogNormal, + }, + }, + VectorCacheMaxObjects: 10e12, + } + index, _ := hnsw.New(hnsw.Config{ + RootPath: rootPath, + ID: "recallbenchmark", + MakeCommitLoggerThunk: hnsw.MakeNoopCommitLogger, + DistanceProvider: distancer, + VectorForIDThunk: func(ctx context.Context, id uint64) ([]float32, error) { + return vectors[int(id)], 
nil + }, + }, uc, newDummyStore(t)) + init := time.Now() + compressionhelpers.Concurrently(uint64(switch_at), func(_, id uint64, _ *sync.Mutex) { + index.Add(uint64(id), vectors[id]) + }) + before = time.Now() + fmt.Println("Start compressing...") + + cfg := ent.PQConfig{ + Enabled: true, + Segments: dimensions / int(math.Pow(2, float64(segmentRate))), + Centroids: centroids, + BitCompression: false, + Encoder: ent.PQEncoder{ + Type: ent.PQEncoderTypeKMeans, + Distribution: ent.PQEncoderDistributionLogNormal, + }, + } + + index.Compress(cfg) /*should have configuration.compressed = true*/ + fmt.Printf("Time to compress: %s", time.Since(before)) + fmt.Println() + compressionhelpers.Concurrently(uint64(vectors_size-switch_at), func(_, id uint64, _ *sync.Mutex) { + idx := switch_at + int(id) + + index.Add(uint64(idx), vectors[idx]) + }) + fmt.Printf("Building the index took %s\n", time.Since(init)) + + var relevant uint64 + var retrieved int + + var querying time.Duration = 0 + compressionhelpers.Concurrently(uint64(len(queries)), func(_, i uint64, _ *sync.Mutex) { + before = time.Now() + results, _, _ := index.SearchByVector(queries[i], k, nil) + querying += time.Since(before) + retrieved += k + relevant += testinghelpers.MatchesInLists(truths[i], results) + }) + + recall := float32(relevant) / float32(retrieved) + latency := float32(querying.Microseconds()) / float32(queries_size) + fmt.Println(recall, latency) + assert.True(t, recall > 0.9) + assert.True(t, latency < 100000) + } +} + +func TestHnswPqSiftDeletes(t *testing.T) { + defer func(path string) { + err := os.RemoveAll(path) + if err != nil { + fmt.Println(err) + } + }(rootPath) + params := [][]int{ + {64, 64, 32}, + } + dimensions := 128 + vectors_size := 10000 + queries_size := 1000 + switch_at := 2000 + fmt.Println("Sift1M PQ Deletes") + before := time.Now() + vectors, queries := testinghelpers.ReadVecs(vectors_size, queries_size, dimensions, "sift", "../diskAnn/generated_testdata") + k := 100 + distancer 
:= distancer.NewL2SquaredProvider() + truths := testinghelpers.BuildTruths(queries_size, vectors_size, queries, vectors, k, distanceWrapper(distancer), "../diskAnn/generated_testdata") + fmt.Printf("generating data took %s\n", time.Since(before)) + for segmentRate := 0; segmentRate < 1; segmentRate++ { + fmt.Println(segmentRate) + fmt.Println() + for i := 0; i < len(params); i++ { + efConstruction := params[i][0] + ef := params[i][1] + maxNeighbors := params[i][2] + + uc := ent.UserConfig{ + MaxConnections: maxNeighbors, + EFConstruction: efConstruction, + EF: ef, + PQ: ent.PQConfig{ + Enabled: false, + Segments: dimensions / int(math.Pow(2, float64(segmentRate))), + Encoder: ent.PQEncoder{ + Type: "tile", + Distribution: "log-normal", + }, + }, + VectorCacheMaxObjects: 10e12, + } + index, _ := hnsw.New(hnsw.Config{ + RootPath: rootPath, + ID: "recallbenchmark", + MakeCommitLoggerThunk: hnsw.MakeNoopCommitLogger, + DistanceProvider: distancer, + VectorForIDThunk: func(ctx context.Context, id uint64) ([]float32, error) { + return vectors[int(id)], nil + }, + }, uc, newDummyStore(t)) + init := time.Now() + compressionhelpers.Concurrently(uint64(switch_at), func(_, id uint64, _ *sync.Mutex) { + index.Add(uint64(id), vectors[id]) + }) + before = time.Now() + uc.PQ.Enabled = true + index.UpdateUserConfig(uc, func() {}) /*should have configuration.compressed = true*/ + fmt.Printf("Time to compress: %s", time.Since(before)) + fmt.Println() + compressionhelpers.Concurrently(uint64(vectors_size-switch_at), func(_, id uint64, _ *sync.Mutex) { + idx := switch_at + int(id) + index.Add(uint64(idx), vectors[idx]) + }) + fmt.Printf("Building the index took %s\n", time.Since(init)) + var relevant uint64 + var retrieved int + + var querying time.Duration = 0 + compressionhelpers.Concurrently(uint64(len(queries)), func(_, i uint64, _ *sync.Mutex) { + before = time.Now() + results, _, _ := index.SearchByVector(queries[i], k, nil) + querying += time.Since(before) + retrieved += k + 
relevant += testinghelpers.MatchesInLists(truths[i], results) + }) + + recall := float32(relevant) / float32(retrieved) + latency := float32(querying.Microseconds()) / float32(queries_size) + fmt.Println(recall, latency) + assert.True(t, recall > 0.9) + assert.True(t, latency < 100000) + } + } +} + +func TestHnswPqDeepImage(t *testing.T) { + defer func(path string) { + err := os.RemoveAll(path) + if err != nil { + fmt.Println(err) + } + }(rootPath) + vectors_size := 9990000 + queries_size := 1000 + vectors := parseFromTxt("../diskAnn/generated_testdata/deep-image/train.txt", vectors_size) + queries := parseFromTxt("../diskAnn/generated_testdata/deep-image/test.txt", queries_size) + dimensions := 96 + + params := [][]int{ + {64, 64, 32}, + {128, 128, 64}, + {256, 256, 128}, + {512, 512, 256}, + } + switch_at := 1000000 + + fmt.Println("Sift1MPQKMeans 10K/10K") + before := time.Now() + k := 100 + distancer := distancer.NewL2SquaredProvider() + truths := testinghelpers.BuildTruths(queries_size, vectors_size, queries, vectors, k, distanceWrapper(distancer), "../diskAnn/generated_testdata/deep-image") + fmt.Printf("generating data took %s\n", time.Since(before)) + for segmentRate := 1; segmentRate < 4; segmentRate++ { + fmt.Println(segmentRate) + fmt.Println() + for i := 0; i < len(params); i++ { + efConstruction := params[i][0] + ef := params[i][1] + maxNeighbors := params[i][2] + + uc := ent.UserConfig{ + MaxConnections: maxNeighbors, + EFConstruction: efConstruction, + EF: ef, + PQ: ent.PQConfig{ + Enabled: false, + Segments: dimensions / int(math.Pow(2, float64(segmentRate))), + Encoder: ent.PQEncoder{ + Type: ent.PQEncoderTypeKMeans, + Distribution: ent.PQEncoderDistributionNormal, + }, + }, + VectorCacheMaxObjects: 10e12, + } + index, _ := hnsw.New(hnsw.Config{ + RootPath: rootPath, + ID: "recallbenchmark", + MakeCommitLoggerThunk: hnsw.MakeNoopCommitLogger, + DistanceProvider: distancer, + VectorForIDThunk: func(ctx context.Context, id uint64) ([]float32, error) 
{ + return vectors[int(id)], nil + }, + }, uc, newDummyStore(t)) + init := time.Now() + compressionhelpers.Concurrently(uint64(switch_at), func(_, id uint64, _ *sync.Mutex) { + index.Add(uint64(id), vectors[id]) + }) + before = time.Now() + uc.PQ.Enabled = true + index.UpdateUserConfig(uc, func() {}) + fmt.Printf("Time to compress: %s", time.Since(before)) + fmt.Println() + compressionhelpers.Concurrently(uint64(vectors_size-switch_at), func(_, id uint64, _ *sync.Mutex) { + idx := switch_at + int(id) + index.Add(uint64(idx), vectors[idx]) + }) + fmt.Printf("Building the index took %s\n", time.Since(init)) + var relevant uint64 + var retrieved int + + var querying time.Duration = 0 + compressionhelpers.Concurrently(uint64(len(queries)), func(_, i uint64, _ *sync.Mutex) { + before = time.Now() + results, _, _ := index.SearchByVector(queries[i], k, nil) + querying += time.Since(before) + retrieved += k + relevant += testinghelpers.MatchesInLists(truths[i], results) + }) + + recall := float32(relevant) / float32(retrieved) + latency := float32(querying.Microseconds()) / float32(queries_size) + fmt.Println(recall, latency) + assert.True(t, recall > 0.9) + assert.True(t, latency < 100000) + } + } +} + +func parseFromTxt(file string, size int) [][]float32 { + content, _ := ioutil.ReadFile(file) + strContent := string(content) + testArray := strings.Split(strContent, "\n") + test := make([][]float32, 0, len(testArray)) + for j := 0; j < size; j++ { + elementArray := strings.Split(testArray[j], " ") + test = append(test, make([]float32, len(elementArray))) + for i := range elementArray { + f, _ := strconv.ParseFloat(elementArray[i], 16) + test[j][i] = float32(f) + } + } + return test +} + +func newDummyStore(t *testing.T) *lsmkv.Store { + logger, _ := test.NewNullLogger() + storeDir := t.TempDir() + store, err := lsmkv.New(storeDir, storeDir, logger, nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop()) + require.Nil(t, err) + return store +} diff 
--git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/compress_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/compress_test.go new file mode 100644 index 0000000000000000000000000000000000000000..6066707754138182d7feb98e6d00183c0d980c57 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/compress_test.go @@ -0,0 +1,90 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package hnsw + +import ( + "context" + "testing" + + "github.com/sirupsen/logrus/hooks/test" + + "github.com/stretchr/testify/assert" + "github.com/weaviate/weaviate/adapters/repos/db/vector/common" + "github.com/weaviate/weaviate/adapters/repos/db/vector/compressionhelpers" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer" + "github.com/weaviate/weaviate/adapters/repos/db/vector/testinghelpers" + "github.com/weaviate/weaviate/entities/cyclemanager" + "github.com/weaviate/weaviate/entities/storobj" + ent "github.com/weaviate/weaviate/entities/vectorindex/hnsw" +) + +func Test_NoRaceCompressReturnsErrorWhenNotEnoughData(t *testing.T) { + efConstruction := 64 + ef := 32 + maxNeighbors := 32 + dimensions := 200 + vectors_size := 10 + vectors, _ := testinghelpers.RandomVecs(vectors_size, 0, dimensions) + distancer := distancer.NewL2SquaredProvider() + logger, _ := test.NewNullLogger() + ctx := context.Background() + + uc := ent.UserConfig{} + uc.MaxConnections = maxNeighbors + uc.EFConstruction = efConstruction + uc.EF = ef + uc.VectorCacheMaxObjects = 10e12 + uc.PQ = ent.PQConfig{ + Enabled: false, + Encoder: ent.PQEncoder{ + Type: ent.PQEncoderTypeKMeans, + Distribution: ent.PQEncoderDistributionLogNormal, + }, + TrainingLimit: 5, + Segments: dimensions, + 
Centroids: 256, + } + + index, _ := New(Config{ + RootPath: t.TempDir(), + ID: "recallbenchmark", + MakeCommitLoggerThunk: MakeNoopCommitLogger, + DistanceProvider: distancer, + VectorForIDThunk: func(ctx context.Context, id uint64) ([]float32, error) { + if int(id) >= len(vectors) { + return nil, storobj.NewErrNotFoundf(id, "out of range") + } + return vectors[int(id)], nil + }, + TempVectorForIDThunk: func(ctx context.Context, id uint64, container *common.VectorSlice) ([]float32, error) { + copy(container.Slice, vectors[int(id)]) + return container.Slice, nil + }, + }, uc, cyclemanager.NewCallbackGroupNoop(), testinghelpers.NewDummyStore(t)) + defer index.Shutdown(context.Background()) + assert.Nil(t, compressionhelpers.ConcurrentlyWithError(logger, uint64(len(vectors)), func(id uint64) error { + return index.Add(ctx, uint64(id), vectors[id]) + })) + + cfg := ent.PQConfig{ + Enabled: true, + Encoder: ent.PQEncoder{ + Type: ent.PQEncoderTypeKMeans, + Distribution: ent.PQEncoderDistributionLogNormal, + }, + Segments: dimensions, + Centroids: 256, + } + uc.PQ = cfg + err := index.compress(uc) + assert.NotNil(t, err) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/compression_tests/fixtures/restart-from-zero-segments/1234567 b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/compression_tests/fixtures/restart-from-zero-segments/1234567 new file mode 100644 index 0000000000000000000000000000000000000000..f8f98fed7d57f6fe85b10bd9af653cd8f232e49d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/compression_tests/fixtures/restart-from-zero-segments/1234567 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:80ebd4e58a85cfda9aac102cda31a61e625849dc39371e3714e5a2920982b01b +size 106452 diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/condensor.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/condensor.go new 
file mode 100644 index 0000000000000000000000000000000000000000..c97efd48d9477aebcccde8f8b9adeccb005201d8 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/condensor.go @@ -0,0 +1,467 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package hnsw + +import ( + "bufio" + "bytes" + "encoding/binary" + "fmt" + "io" + "math" + "os" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/weaviate/weaviate/adapters/repos/db/vector/compressionhelpers" + "github.com/weaviate/weaviate/adapters/repos/db/vector/multivector" + "github.com/weaviate/weaviate/entities/errorcompounder" +) + +type MemoryCondensor struct { + newLogFile *os.File + newLog *bufWriter + logger logrus.FieldLogger +} + +func (c *MemoryCondensor) Do(fileName string) error { + c.logger.WithField("action", "hnsw_condensing").Infof("start hnsw condensing") + defer c.logger.WithField("action", "hnsw_condensing_complete").Infof("completed hnsw condensing") + + fd, err := os.Open(fileName) + if err != nil { + return errors.Wrap(err, "open commit log to be condensed") + } + defer fd.Close() + fdBuf := bufio.NewReaderSize(fd, 256*1024) + + res, _, err := NewDeserializer(c.logger).Do(fdBuf, nil, true) + if err != nil { + return errors.Wrap(err, "read commit log to be condensed") + } + + newLogFile, err := os.OpenFile(fmt.Sprintf("%s.condensed", fileName), + os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0o666) + if err != nil { + return errors.Wrap(err, "open new commit log file for writing") + } + + c.newLogFile = newLogFile + + c.newLog = NewWriterSize(c.newLogFile, 1*1024*1024) + + if res.Compressed { + if res.CompressionPQData != nil { + if err := c.AddPQCompression(*res.CompressionPQData); err != nil { + return fmt.Errorf("write 
pq data: %w", err) + } + } else if res.CompressionSQData != nil { + if err := c.AddSQCompression(*res.CompressionSQData); err != nil { + return fmt.Errorf("write sq data: %w", err) + } + } else if res.CompressionRQData != nil { + if err := c.AddRQCompression(*res.CompressionRQData); err != nil { + return fmt.Errorf("write rq data: %w", err) + } + } else if res.CompressionBRQData != nil { + if err := c.AddBRQCompression(*res.CompressionBRQData); err != nil { + return fmt.Errorf("write brq data: %w", err) + } + } else { + return errors.Wrap(err, "unavailable compression data") + } + } + if res.MuveraEnabled { + if err := c.AddMuvera(*res.EncoderMuvera); err != nil { + return fmt.Errorf("write muvera data: %w", err) + } + } + + for i := len(res.Nodes) - 1; i >= 0; i-- { + node := res.Nodes[i] + if node == nil { + // nil nodes occur when we've grown, but not inserted anything yet + continue + } + + if node.level > 0 { + // nodes are implicitly added when they are first linked, if the level is + // not zero we know this node was new. 
If the level is zero it doesn't + // matter if it gets added explicitly or implicitly + if err := c.AddNode(node); err != nil { + return errors.Wrapf(err, "write node %d to commit log", node.id) + } + } + + iter := node.connections.Iterator() + for iter.Next() { + level, links := iter.Current() + if res.ReplaceLinks(node.id, uint16(level)) { + if err := c.SetLinksAtLevel(node.id, int(level), links); err != nil { + return errors.Wrapf(err, + "write links for node %d at level %d to commit log", node.id, level) + } + } else { + if err := c.AddLinksAtLevel(node.id, uint16(level), links); err != nil { + return errors.Wrapf(err, + "write links for node %d at level %d to commit log", node.id, level) + } + } + } + } + + if res.EntrypointChanged { + if err := c.SetEntryPointWithMaxLayer(res.Entrypoint, + int(res.Level)); err != nil { + return errors.Wrap(err, "write entrypoint to commit log") + } + } + + for ts := range res.Tombstones { + // If the tombstone was later removed, consolidate the two operations into a noop + if _, ok := res.TombstonesDeleted[ts]; ok { + continue + } + + if err := c.AddTombstone(ts); err != nil { + return errors.Wrapf(err, + "write tombstone for node %d to commit log", ts) + } + } + + for rmts := range res.TombstonesDeleted { + // If the tombstone was added previously, consolidate the two operations into a noop + if _, ok := res.Tombstones[rmts]; ok { + continue + } + + if err := c.RemoveTombstone(rmts); err != nil { + return errors.Wrapf(err, + "write removed tombstone for node %d to commit log", rmts) + } + } + + for nodesDeleted := range res.NodesDeleted { + if err := c.DeleteNode(nodesDeleted); err != nil { + return errors.Wrapf(err, + "write deleted node %d to commit log", nodesDeleted) + } + } + + if err := c.newLog.Flush(); err != nil { + return errors.Wrap(err, "close new commit log") + } + + if err := c.newLogFile.Close(); err != nil { + return errors.Wrap(err, "close new commit log") + } + + if err := os.Remove(fileName); err != nil { 
+ return errors.Wrap(err, "cleanup old (uncondensed) commit log") + } + + return nil +} + +const writeUint64Size = 8 + +func writeUint64(w io.Writer, in uint64) error { + var b [writeUint64Size]byte + binary.LittleEndian.PutUint64(b[:], in) + _, err := w.Write(b[:]) + return err +} + +const writeUint32Size = 4 + +func writeUint32(w io.Writer, in uint32) error { + var b [writeUint32Size]byte + binary.LittleEndian.PutUint32(b[:], in) + _, err := w.Write(b[:]) + return err +} + +const writeUint16Size = 2 + +func writeUint16(w io.Writer, in uint16) error { + var b [writeUint16Size]byte + binary.LittleEndian.PutUint16(b[:], in) + _, err := w.Write(b[:]) + return err +} + +const writeFloat32Size = 4 + +func writeFloat32(w io.Writer, in float32) error { + var b [writeFloat32Size]byte + binary.LittleEndian.PutUint32(b[:], math.Float32bits(in)) + _, err := w.Write(b[:]) + return err +} + +const writeByteSize = 1 + +func writeByte(w io.Writer, in byte) error { + var b [writeByteSize]byte + b[0] = in + _, err := w.Write(b[:]) + return err +} + +const writeBoolSize = 1 + +func writeBool(w io.Writer, in bool) error { + var b [writeBoolSize]byte + if in { + b[0] = 1 + } + _, err := w.Write(b[:]) + return err +} + +const writeCommitTypeSize = 1 + +func writeCommitType(w io.Writer, in HnswCommitType) error { + var b [writeCommitTypeSize]byte + b[0] = byte(in) + _, err := w.Write(b[:]) + return err +} + +func writeUint64Slice(w io.Writer, in []uint64) error { + for _, v := range in { + err := writeUint64(w, v) + if err != nil { + return err + } + } + + return nil +} + +// AddNode adds an empty node +func (c *MemoryCondensor) AddNode(node *vertex) error { + ec := errorcompounder.New() + ec.Add(writeCommitType(c.newLog, AddNode)) + ec.Add(writeUint64(c.newLog, node.id)) + ec.Add(writeUint16(c.newLog, uint16(node.level))) + + return ec.ToError() +} + +func (c *MemoryCondensor) DeleteNode(id uint64) error { + ec := errorcompounder.New() + ec.Add(writeCommitType(c.newLog, DeleteNode)) + 
ec.Add(writeUint64(c.newLog, id)) + return ec.ToError() +} + +func (c *MemoryCondensor) SetLinksAtLevel(nodeid uint64, level int, targets []uint64) error { + ec := errorcompounder.New() + ec.Add(writeCommitType(c.newLog, ReplaceLinksAtLevel)) + ec.Add(writeUint64(c.newLog, nodeid)) + ec.Add(writeUint16(c.newLog, uint16(level))) + + targetLength := len(targets) + if targetLength > math.MaxUint16 { + // TODO: investigate why we get such massive connections + targetLength = math.MaxUint16 + c.logger.WithField("action", "condense_commit_log"). + WithField("original_length", len(targets)). + WithField("maximum_length", targetLength). + Warning("condensor length of connections would overflow uint16, cutting off") + } + ec.Add(writeUint16(c.newLog, uint16(targetLength))) + ec.Add(writeUint64Slice(c.newLog, targets[:targetLength])) + + return ec.ToError() +} + +func (c *MemoryCondensor) AddLinksAtLevel(nodeid uint64, level uint16, targets []uint64) error { + toWrite := make([]byte, 13+len(targets)*8) + toWrite[0] = byte(AddLinksAtLevel) + binary.LittleEndian.PutUint64(toWrite[1:9], nodeid) + binary.LittleEndian.PutUint16(toWrite[9:11], uint16(level)) + binary.LittleEndian.PutUint16(toWrite[11:13], uint16(len(targets))) + for i, target := range targets { + offsetStart := 13 + i*8 + offsetEnd := offsetStart + 8 + binary.LittleEndian.PutUint64(toWrite[offsetStart:offsetEnd], target) + } + _, err := c.newLog.Write(toWrite) + return err +} + +func (c *MemoryCondensor) AddLinkAtLevel(nodeid uint64, level uint16, target uint64) error { + ec := errorcompounder.New() + ec.Add(writeCommitType(c.newLog, AddLinkAtLevel)) + ec.Add(writeUint64(c.newLog, nodeid)) + ec.Add(writeUint16(c.newLog, uint16(level))) + ec.Add(writeUint64(c.newLog, target)) + return ec.ToError() +} + +func (c *MemoryCondensor) SetEntryPointWithMaxLayer(id uint64, level int) error { + ec := errorcompounder.New() + ec.Add(writeCommitType(c.newLog, SetEntryPointMaxLevel)) + ec.Add(writeUint64(c.newLog, id)) + 
ec.Add(writeUint16(c.newLog, uint16(level))) + return ec.ToError() +} + +func (c *MemoryCondensor) AddTombstone(nodeid uint64) error { + ec := errorcompounder.New() + ec.Add(writeCommitType(c.newLog, AddTombstone)) + ec.Add(writeUint64(c.newLog, nodeid)) + return ec.ToError() +} + +func (c *MemoryCondensor) RemoveTombstone(nodeid uint64) error { + ec := errorcompounder.New() + ec.Add(writeCommitType(c.newLog, RemoveTombstone)) + ec.Add(writeUint64(c.newLog, nodeid)) + return ec.ToError() +} + +func (c *MemoryCondensor) AddPQCompression(data compressionhelpers.PQData) error { + toWrite := make([]byte, 10) + toWrite[0] = byte(AddPQ) + binary.LittleEndian.PutUint16(toWrite[1:3], data.Dimensions) + toWrite[3] = byte(data.EncoderType) + binary.LittleEndian.PutUint16(toWrite[4:6], data.Ks) + binary.LittleEndian.PutUint16(toWrite[6:8], data.M) + toWrite[8] = data.EncoderDistribution + if data.UseBitsEncoding { + toWrite[9] = 1 + } else { + toWrite[9] = 0 + } + + for _, encoder := range data.Encoders { + toWrite = append(toWrite, encoder.ExposeDataForRestore()...) 
+ } + _, err := c.newLog.Write(toWrite) + return err +} + +func (c *MemoryCondensor) AddSQCompression(data compressionhelpers.SQData) error { + toWrite := make([]byte, 11) + toWrite[0] = byte(AddSQ) + binary.LittleEndian.PutUint32(toWrite[1:], math.Float32bits(data.A)) + binary.LittleEndian.PutUint32(toWrite[5:], math.Float32bits(data.B)) + binary.LittleEndian.PutUint16(toWrite[9:], data.Dimensions) + _, err := c.newLog.Write(toWrite) + return err +} + +func (c *MemoryCondensor) AddRQCompression(data compressionhelpers.RQData) error { + swapSize := 2 * data.Rotation.Rounds * (data.Rotation.OutputDim / 2) * 2 + signSize := 4 * data.Rotation.Rounds * data.Rotation.OutputDim + var buf bytes.Buffer + buf.Grow(17 + int(swapSize) + int(signSize)) + + buf.WriteByte(byte(AddRQ)) // 1 + binary.Write(&buf, binary.LittleEndian, data.InputDim) // 4 input dim + binary.Write(&buf, binary.LittleEndian, data.Bits) // 4 bits + binary.Write(&buf, binary.LittleEndian, data.Rotation.OutputDim) // 4 rotation - output dim + binary.Write(&buf, binary.LittleEndian, data.Rotation.Rounds) // 4 rotation - rounds + + for _, swap := range data.Rotation.Swaps { + for _, dim := range swap { + binary.Write(&buf, binary.LittleEndian, dim.I) + binary.Write(&buf, binary.LittleEndian, dim.J) + } + } + + for _, sign := range data.Rotation.Signs { + for _, dim := range sign { + binary.Write(&buf, binary.LittleEndian, dim) + } + } + + _, err := c.newLog.Write(buf.Bytes()) + return err +} + +func (c *MemoryCondensor) AddMuvera(data multivector.MuveraData) error { + gSize := 4 * data.Repetitions * data.KSim * data.Dimensions + dSize := 4 * data.Repetitions * data.DProjections * data.Dimensions + var buf bytes.Buffer + buf.Grow(21 + int(gSize) + int(dSize)) + + buf.WriteByte(byte(AddMuvera)) // 1 + binary.Write(&buf, binary.LittleEndian, data.KSim) // 4 + binary.Write(&buf, binary.LittleEndian, data.NumClusters) // 4 + binary.Write(&buf, binary.LittleEndian, data.Dimensions) // 4 + binary.Write(&buf, 
binary.LittleEndian, data.DProjections) // 4 + binary.Write(&buf, binary.LittleEndian, data.Repetitions) // 4 + + i := 0 + for _, gaussian := range data.Gaussians { + for _, cluster := range gaussian { + for _, el := range cluster { + binary.Write(&buf, binary.LittleEndian, math.Float32bits(el)) + i++ + } + } + } + + i = 0 + for _, matrix := range data.S { + for _, vector := range matrix { + for _, el := range vector { + binary.Write(&buf, binary.LittleEndian, math.Float32bits(el)) + i++ + } + } + } + + _, err := c.newLog.Write(buf.Bytes()) + return err +} + +func (c *MemoryCondensor) AddBRQCompression(data compressionhelpers.BRQData) error { + swapSize := 2 * data.Rotation.Rounds * (data.Rotation.OutputDim / 2) * 2 + signSize := 4 * data.Rotation.Rounds * data.Rotation.OutputDim + roundingSize := 4 * data.Rotation.OutputDim + var buf bytes.Buffer + buf.Grow(13 + int(swapSize) + int(signSize) + int(roundingSize)) + + buf.WriteByte(byte(AddBRQ)) // 1 + binary.Write(&buf, binary.LittleEndian, data.InputDim) // 4 input dim + binary.Write(&buf, binary.LittleEndian, data.Rotation.OutputDim) // 4 rotation - output dim + binary.Write(&buf, binary.LittleEndian, data.Rotation.Rounds) // 4 rotation - rounds + + for _, swap := range data.Rotation.Swaps { + for _, dim := range swap { + binary.Write(&buf, binary.LittleEndian, dim.I) + binary.Write(&buf, binary.LittleEndian, dim.J) + } + } + + for _, sign := range data.Rotation.Signs { + for _, dim := range sign { + binary.Write(&buf, binary.LittleEndian, dim) + } + } + + for _, rounding := range data.Rounding { + binary.Write(&buf, binary.LittleEndian, rounding) + } + + _, err := c.newLog.Write(buf.Bytes()) + return err +} + +func NewMemoryCondensor(logger logrus.FieldLogger) *MemoryCondensor { + return &MemoryCondensor{logger: logger} +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/condensor_integration_test.go 
b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/condensor_integration_test.go new file mode 100644 index 0000000000000000000000000000000000000000..d8960e77bda5194574515babb47509bae3d74aa2 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/condensor_integration_test.go @@ -0,0 +1,1046 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest +// +build integrationTest + +package hnsw + +import ( + "bufio" + "context" + "os" + "strings" + "testing" + + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/repos/db/vector/compressionhelpers" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/packedconn" + "github.com/weaviate/weaviate/adapters/repos/db/vector/multivector" + "github.com/weaviate/weaviate/entities/cyclemanager" +) + +func TestCondensor(t *testing.T) { + rootPath := t.TempDir() + ctx := context.Background() + + logger, _ := test.NewNullLogger() + uncondensed, err := NewCommitLogger(rootPath, "uncondensed", logger, + cyclemanager.NewCallbackGroupNoop()) + require.Nil(t, err) + defer uncondensed.Shutdown(ctx) + + perfect, err := NewCommitLogger(rootPath, "perfect", logger, + cyclemanager.NewCallbackGroupNoop()) + require.Nil(t, err) + defer perfect.Shutdown(ctx) + + t.Run("add redundant data to the original log", func(t *testing.T) { + uncondensed.AddNode(&vertex{id: 0, level: 3}) + uncondensed.AddNode(&vertex{id: 1, level: 3}) + uncondensed.AddNode(&vertex{id: 2, level: 3}) + uncondensed.AddNode(&vertex{id: 3, level: 3}) + + // below are some pointless connection replacements, we expect that most of + // these will be gone after 
condensing, this gives us a good way of testing + // whether they're really gone + for level := 0; level <= 3; level++ { + uncondensed.ReplaceLinksAtLevel(0, level, []uint64{1, 2, 3}) + uncondensed.ReplaceLinksAtLevel(0, level, []uint64{1, 2}) + uncondensed.ReplaceLinksAtLevel(0, level, []uint64{1}) + uncondensed.ReplaceLinksAtLevel(0, level, []uint64{2}) + uncondensed.ReplaceLinksAtLevel(0, level, []uint64{3}) + uncondensed.ReplaceLinksAtLevel(0, level, []uint64{2, 3}) + uncondensed.ReplaceLinksAtLevel(0, level, []uint64{1, 2, 3}) + uncondensed.ReplaceLinksAtLevel(1, level, []uint64{0, 2, 3}) + uncondensed.ReplaceLinksAtLevel(1, level, []uint64{0, 2}) + uncondensed.ReplaceLinksAtLevel(1, level, []uint64{0}) + uncondensed.ReplaceLinksAtLevel(1, level, []uint64{2}) + uncondensed.ReplaceLinksAtLevel(1, level, []uint64{3}) + uncondensed.ReplaceLinksAtLevel(1, level, []uint64{2, 3}) + uncondensed.ReplaceLinksAtLevel(1, level, []uint64{0, 2, 3}) + uncondensed.ReplaceLinksAtLevel(2, level, []uint64{0, 1, 3}) + uncondensed.ReplaceLinksAtLevel(2, level, []uint64{0, 1}) + uncondensed.ReplaceLinksAtLevel(2, level, []uint64{0}) + uncondensed.ReplaceLinksAtLevel(2, level, []uint64{1}) + uncondensed.ReplaceLinksAtLevel(2, level, []uint64{3}) + uncondensed.ReplaceLinksAtLevel(2, level, []uint64{1, 3}) + uncondensed.ReplaceLinksAtLevel(2, level, []uint64{0, 1, 3}) + uncondensed.ReplaceLinksAtLevel(3, level, []uint64{0, 1, 2}) + uncondensed.ReplaceLinksAtLevel(3, level, []uint64{0, 1}) + uncondensed.ReplaceLinksAtLevel(3, level, []uint64{0}) + uncondensed.ReplaceLinksAtLevel(3, level, []uint64{1}) + uncondensed.ReplaceLinksAtLevel(3, level, []uint64{2}) + uncondensed.ReplaceLinksAtLevel(3, level, []uint64{1, 2}) + uncondensed.ReplaceLinksAtLevel(3, level, []uint64{0, 1, 2}) + } + uncondensed.SetEntryPointWithMaxLayer(3, 3) + uncondensed.AddTombstone(2) + + require.Nil(t, uncondensed.Flush()) + }) + + t.Run("create a hypothetical perfect log", func(t *testing.T) { + 
perfect.AddNode(&vertex{id: 0, level: 3}) + perfect.AddNode(&vertex{id: 1, level: 3}) + perfect.AddNode(&vertex{id: 2, level: 3}) + perfect.AddNode(&vertex{id: 3, level: 3}) + + // below are some pointless connection replacements, we expect that most of + // these will be gone after condensing, this gives us a good way of testing + // whether they're really gone + for level := 0; level <= 3; level++ { + perfect.ReplaceLinksAtLevel(0, level, []uint64{1, 2, 3}) + perfect.ReplaceLinksAtLevel(1, level, []uint64{0, 2, 3}) + perfect.ReplaceLinksAtLevel(2, level, []uint64{0, 1, 3}) + perfect.ReplaceLinksAtLevel(3, level, []uint64{0, 1, 2}) + } + perfect.SetEntryPointWithMaxLayer(3, 3) + perfect.AddTombstone(2) + + require.Nil(t, perfect.Flush()) + }) + + t.Run("condense the original and verify against the perfect one", func(t *testing.T) { + input, ok, err := getCurrentCommitLogFileName(commitLogDirectory(rootPath, "uncondensed")) + require.Nil(t, err) + require.True(t, ok) + + err = NewMemoryCondensor(logger).Do(commitLogFileName(rootPath, "uncondensed", input)) + require.Nil(t, err) + + control, ok, err := getCurrentCommitLogFileName( + commitLogDirectory(rootPath, "perfect")) + require.Nil(t, err) + require.True(t, ok) + + actual, ok, err := getCurrentCommitLogFileName( + commitLogDirectory(rootPath, "uncondensed")) + require.Nil(t, err) + require.True(t, ok) + + assert.True(t, strings.HasSuffix(actual, ".condensed"), + "commit log is now saved as condensed") + + controlStat, err := os.Stat(commitLogFileName(rootPath, "perfect", control)) + require.Nil(t, err) + + actualStat, err := os.Stat(commitLogFileName(rootPath, "uncondensed", actual)) + require.Nil(t, err) + + assert.Equal(t, controlStat.Size(), actualStat.Size()) + + // dumpIndexFromCommitLog(t, commitLogFileName(rootPath, "uncondensed", actual)) + // dumpIndexFromCommitLog(t, commitLogFileName(rootPath, "perfect", control)) + }) +} + +func TestCondensorAppendNodeLinks(t *testing.T) { + rootPath := t.TempDir() 
+ ctx := context.Background() + + logger, _ := test.NewNullLogger() + uncondensed1, err := NewCommitLogger(rootPath, "uncondensed1", logger, + cyclemanager.NewCallbackGroupNoop()) + require.Nil(t, err) + defer uncondensed1.Shutdown(ctx) + + uncondensed2, err := NewCommitLogger(rootPath, "uncondensed2", logger, + cyclemanager.NewCallbackGroupNoop()) + require.Nil(t, err) + defer uncondensed2.Shutdown(ctx) + + control, err := NewCommitLogger(rootPath, "control", logger, + cyclemanager.NewCallbackGroupNoop()) + require.Nil(t, err) + defer control.Shutdown(ctx) + + t.Run("add data to the first log", func(t *testing.T) { + uncondensed1.AddLinkAtLevel(0, 0, 1) + uncondensed1.AddLinkAtLevel(0, 0, 2) + uncondensed1.AddLinkAtLevel(0, 0, 3) + + require.Nil(t, uncondensed1.Flush()) + }) + + t.Run("append data to the second log", func(t *testing.T) { + uncondensed2.AddLinkAtLevel(0, 0, 4) + uncondensed2.AddLinkAtLevel(0, 0, 5) + uncondensed2.AddLinkAtLevel(0, 0, 6) + + require.Nil(t, uncondensed2.Flush()) + }) + + t.Run("create a control log", func(t *testing.T) { + control.AddNode(&vertex{id: 0, level: 0}) + control.ReplaceLinksAtLevel(0, 0, []uint64{1, 2, 3, 4, 5, 6}) + + require.Nil(t, control.Flush()) + }) + + t.Run("condense both logs and verify the contents against the control", func(t *testing.T) { + input, ok, err := getCurrentCommitLogFileName(commitLogDirectory(rootPath, "uncondensed1")) + require.Nil(t, err) + require.True(t, ok) + + err = NewMemoryCondensor(logger).Do(commitLogFileName(rootPath, "uncondensed1", input)) + require.Nil(t, err) + + input, ok, err = getCurrentCommitLogFileName(commitLogDirectory(rootPath, "uncondensed2")) + require.Nil(t, err) + require.True(t, ok) + + err = NewMemoryCondensor(logger).Do(commitLogFileName(rootPath, "uncondensed2", input)) + require.Nil(t, err) + + control, ok, err := getCurrentCommitLogFileName( + commitLogDirectory(rootPath, "control")) + require.Nil(t, err) + require.True(t, ok) + + condensed1, ok, err := 
getCurrentCommitLogFileName( + commitLogDirectory(rootPath, "uncondensed1")) + require.Nil(t, err) + require.True(t, ok) + + condensed2, ok, err := getCurrentCommitLogFileName( + commitLogDirectory(rootPath, "uncondensed2")) + require.Nil(t, err) + require.True(t, ok) + + assert.True(t, strings.HasSuffix(condensed1, ".condensed"), + "commit log is now saved as condensed") + assert.True(t, strings.HasSuffix(condensed2, ".condensed"), + "commit log is now saved as condensed") + + assertIndicesFromCommitLogsMatch(t, commitLogFileName(rootPath, "control", control), + []string{ + commitLogFileName(rootPath, "uncondensed1", condensed1), + commitLogFileName(rootPath, "uncondensed2", condensed2), + }) + }) +} + +// This test was added as part of +// https://github.com/weaviate/weaviate/issues/1868 to rule out that +// replace links broken across two independent commit logs. It turned out that +// this was green and not the cause for the bug. The bug could be reproduced +// with the new test added in index_too_many_links_bug_integration_test.go. +// Nevertheless it makes sense to keep this test around as this might have been +// a potential cause as well and by having this test, we can prevent a +// regression. 
+func TestCondensorReplaceNodeLinks(t *testing.T) { + rootPath := t.TempDir() + ctx := context.Background() + + logger, _ := test.NewNullLogger() + uncondensed1, err := NewCommitLogger(rootPath, "uncondensed1", logger, + cyclemanager.NewCallbackGroupNoop()) + require.Nil(t, err) + defer uncondensed1.Shutdown(ctx) + + uncondensed2, err := NewCommitLogger(rootPath, "uncondensed2", logger, + cyclemanager.NewCallbackGroupNoop()) + require.Nil(t, err) + defer uncondensed2.Shutdown(ctx) + + control, err := NewCommitLogger(rootPath, "control", logger, + cyclemanager.NewCallbackGroupNoop()) + require.Nil(t, err) + defer control.Shutdown(ctx) + + t.Run("add data to the first log", func(t *testing.T) { + uncondensed1.AddNode(&vertex{id: 0, level: 1}) + uncondensed1.AddLinkAtLevel(0, 0, 1) + uncondensed1.AddLinkAtLevel(0, 0, 2) + uncondensed1.AddLinkAtLevel(0, 0, 3) + uncondensed1.AddLinkAtLevel(0, 1, 1) + uncondensed1.AddLinkAtLevel(0, 1, 2) + + require.Nil(t, uncondensed1.Flush()) + }) + + t.Run("replace all data from previous log", func(t *testing.T) { + uncondensed2.AddLinkAtLevel(0, 0, 10) + uncondensed2.ReplaceLinksAtLevel(0, 0, []uint64{4, 5, 6}) + uncondensed2.AddLinkAtLevel(0, 0, 7) + uncondensed2.ReplaceLinksAtLevel(0, 1, []uint64{8}) + + require.Nil(t, uncondensed2.Flush()) + }) + + t.Run("create a control log", func(t *testing.T) { + control.AddNode(&vertex{id: 0, level: 1}) + control.ReplaceLinksAtLevel(0, 0, []uint64{4, 5, 6, 7}) + control.ReplaceLinksAtLevel(0, 1, []uint64{8}) + + require.Nil(t, control.Flush()) + }) + + t.Run("condense both logs and verify the contents against the control", func(t *testing.T) { + input, ok, err := getCurrentCommitLogFileName(commitLogDirectory(rootPath, "uncondensed1")) + require.Nil(t, err) + require.True(t, ok) + + err = NewMemoryCondensor(logger).Do(commitLogFileName(rootPath, "uncondensed1", input)) + require.Nil(t, err) + + input, ok, err = getCurrentCommitLogFileName(commitLogDirectory(rootPath, "uncondensed2")) + 
require.Nil(t, err) + require.True(t, ok) + + err = NewMemoryCondensor(logger).Do(commitLogFileName(rootPath, "uncondensed2", input)) + require.Nil(t, err) + + control, ok, err := getCurrentCommitLogFileName( + commitLogDirectory(rootPath, "control")) + require.Nil(t, err) + require.True(t, ok) + + condensed1, ok, err := getCurrentCommitLogFileName( + commitLogDirectory(rootPath, "uncondensed1")) + require.Nil(t, err) + require.True(t, ok) + + condensed2, ok, err := getCurrentCommitLogFileName( + commitLogDirectory(rootPath, "uncondensed2")) + require.Nil(t, err) + require.True(t, ok) + + assert.True(t, strings.HasSuffix(condensed1, ".condensed"), + "commit log is now saved as condensed") + assert.True(t, strings.HasSuffix(condensed2, ".condensed"), + "commit log is now saved as condensed") + + assertIndicesFromCommitLogsMatch(t, commitLogFileName(rootPath, "control", control), + []string{ + commitLogFileName(rootPath, "uncondensed1", condensed1), + commitLogFileName(rootPath, "uncondensed2", condensed2), + }) + }) +} + +// This test was added as part of the investigation and fixing of +// https://github.com/weaviate/weaviate/issues/1868. We used the new +// (higher level) test in index_too_many_links_bug_integration_test.go to +// reproduce the problem without knowing what causes it. Eventually we came to +// the conclusion that "ClearLinksAtLevel" was not propagated correctly across +// two independently condensed commit logs. While the higher-level test already +// makes sure that the bug is gone and prevents regressions, this test was +// still added to test the broken (now fixed) behavior in relative isolation. 
+func TestCondensorClearLinksAtLevel(t *testing.T) { + rootPath := t.TempDir() + ctx := context.Background() + + logger, _ := test.NewNullLogger() + uncondensed1, err := NewCommitLogger(rootPath, "uncondensed1", logger, + cyclemanager.NewCallbackGroupNoop()) + require.Nil(t, err) + defer uncondensed1.Shutdown(ctx) + + uncondensed2, err := NewCommitLogger(rootPath, "uncondensed2", logger, + cyclemanager.NewCallbackGroupNoop()) + require.Nil(t, err) + defer uncondensed2.Shutdown(ctx) + + control, err := NewCommitLogger(rootPath, "control", logger, + cyclemanager.NewCallbackGroupNoop()) + require.Nil(t, err) + defer control.Shutdown(ctx) + + t.Run("add data to the first log", func(t *testing.T) { + uncondensed1.AddNode(&vertex{id: 0, level: 1}) + uncondensed1.AddLinkAtLevel(0, 0, 1) + uncondensed1.AddLinkAtLevel(0, 0, 2) + uncondensed1.AddLinkAtLevel(0, 0, 3) + uncondensed1.AddLinkAtLevel(0, 1, 1) + uncondensed1.AddLinkAtLevel(0, 1, 2) + + require.Nil(t, uncondensed1.Flush()) + }) + + t.Run("replace all data from previous log", func(t *testing.T) { + uncondensed2.AddLinkAtLevel(0, 0, 10) + uncondensed2.ClearLinksAtLevel(0, 0) + uncondensed2.AddLinkAtLevel(0, 0, 4) + uncondensed2.AddLinkAtLevel(0, 0, 5) + uncondensed2.AddLinkAtLevel(0, 0, 6) + uncondensed2.AddLinkAtLevel(0, 0, 7) + uncondensed2.ClearLinksAtLevel(0, 1) + uncondensed2.AddLinkAtLevel(0, 1, 8) + + require.Nil(t, uncondensed2.Flush()) + }) + + t.Run("create a control log", func(t *testing.T) { + control.AddNode(&vertex{id: 0, level: 1}) + control.ReplaceLinksAtLevel(0, 0, []uint64{4, 5, 6, 7}) + control.ReplaceLinksAtLevel(0, 1, []uint64{8}) + + require.Nil(t, control.Flush()) + }) + + t.Run("condense both logs and verify the contents against the control", func(t *testing.T) { + input, ok, err := getCurrentCommitLogFileName(commitLogDirectory(rootPath, "uncondensed1")) + require.Nil(t, err) + require.True(t, ok) + + err = NewMemoryCondensor(logger).Do(commitLogFileName(rootPath, "uncondensed1", input)) + 
require.Nil(t, err) + + input, ok, err = getCurrentCommitLogFileName(commitLogDirectory(rootPath, "uncondensed2")) + require.Nil(t, err) + require.True(t, ok) + + err = NewMemoryCondensor(logger).Do(commitLogFileName(rootPath, "uncondensed2", input)) + require.Nil(t, err) + + control, ok, err := getCurrentCommitLogFileName( + commitLogDirectory(rootPath, "control")) + require.Nil(t, err) + require.True(t, ok) + + condensed1, ok, err := getCurrentCommitLogFileName( + commitLogDirectory(rootPath, "uncondensed1")) + require.Nil(t, err) + require.True(t, ok) + + condensed2, ok, err := getCurrentCommitLogFileName( + commitLogDirectory(rootPath, "uncondensed2")) + require.Nil(t, err) + require.True(t, ok) + + assert.True(t, strings.HasSuffix(condensed1, ".condensed"), + "commit log is now saved as condensed") + assert.True(t, strings.HasSuffix(condensed2, ".condensed"), + "commit log is now saved as condensed") + + assertIndicesFromCommitLogsMatch(t, commitLogFileName(rootPath, "control", control), + []string{ + commitLogFileName(rootPath, "uncondensed1", condensed1), + commitLogFileName(rootPath, "uncondensed2", condensed2), + }) + }) +} + +func TestCondensorTombstones(t *testing.T) { + rootPath := t.TempDir() + ctx := context.Background() + + logger, _ := test.NewNullLogger() + uncondensed1, err := NewCommitLogger(rootPath, "uncondensed1", logger, + cyclemanager.NewCallbackGroupNoop()) + require.Nil(t, err) + defer uncondensed1.Shutdown(ctx) + + uncondensed2, err := NewCommitLogger(rootPath, "uncondensed2", logger, + cyclemanager.NewCallbackGroupNoop()) + require.Nil(t, err) + defer uncondensed2.Shutdown(ctx) + + control, err := NewCommitLogger(rootPath, "control", logger, + cyclemanager.NewCallbackGroupNoop()) + require.Nil(t, err) + defer control.Shutdown(ctx) + + t.Run("add tombstone data", func(t *testing.T) { + uncondensed1.AddNode(&vertex{id: 0, level: 1}) + uncondensed1.AddNode(&vertex{id: 1, level: 1}) + uncondensed1.AddNode(&vertex{id: 2, level: 1}) + 
uncondensed1.AddNode(&vertex{id: 3, level: 1}) + + uncondensed1.RemoveTombstone(0) + uncondensed1.AddTombstone(1) + uncondensed1.RemoveTombstone(1) + uncondensed1.AddTombstone(2) + + require.Nil(t, uncondensed1.Flush()) + }) + + t.Run("remove all tombstones except the first", func(t *testing.T) { + uncondensed2.RemoveTombstone(2) + uncondensed2.AddTombstone(3) + uncondensed2.RemoveTombstone(3) + + require.Nil(t, uncondensed2.Flush()) + }) + + t.Run("create a control log", func(t *testing.T) { + control.AddNode(&vertex{id: 0, level: 1}) + control.AddNode(&vertex{id: 1, level: 1}) + control.AddNode(&vertex{id: 2, level: 1}) + control.AddNode(&vertex{id: 3, level: 1}) + + control.RemoveTombstone(0) + + require.Nil(t, control.Flush()) + }) + + t.Run("condense both logs and verify the contents against the control", func(t *testing.T) { + input, ok, err := getCurrentCommitLogFileName(commitLogDirectory(rootPath, "uncondensed1")) + require.Nil(t, err) + require.True(t, ok) + + err = NewMemoryCondensor(logger).Do(commitLogFileName(rootPath, "uncondensed1", input)) + require.Nil(t, err) + + input, ok, err = getCurrentCommitLogFileName(commitLogDirectory(rootPath, "uncondensed2")) + require.Nil(t, err) + require.True(t, ok) + + err = NewMemoryCondensor(logger).Do(commitLogFileName(rootPath, "uncondensed2", input)) + require.Nil(t, err) + + control, ok, err := getCurrentCommitLogFileName( + commitLogDirectory(rootPath, "control")) + require.Nil(t, err) + require.True(t, ok) + + condensed1, ok, err := getCurrentCommitLogFileName( + commitLogDirectory(rootPath, "uncondensed1")) + require.Nil(t, err) + require.True(t, ok) + + condensed2, ok, err := getCurrentCommitLogFileName( + commitLogDirectory(rootPath, "uncondensed2")) + require.Nil(t, err) + require.True(t, ok) + + assert.True(t, strings.HasSuffix(condensed1, ".condensed"), + "commit log is now saved as condensed") + assert.True(t, strings.HasSuffix(condensed2, ".condensed"), + "commit log is now saved as condensed") + + 
assertIndicesFromCommitLogsMatch(t, commitLogFileName(rootPath, "control", control), + []string{ + commitLogFileName(rootPath, "uncondensed1", condensed1), + commitLogFileName(rootPath, "uncondensed2", condensed2), + }) + }) +} + +func TestCondensorPhantom(t *testing.T) { + rootPath := t.TempDir() + ctx := context.Background() + + logger, _ := test.NewNullLogger() + uncondensed1, err := NewCommitLogger(rootPath, "uncondensed1", logger, + cyclemanager.NewCallbackGroupNoop()) + require.Nil(t, err) + defer uncondensed1.Shutdown(ctx) + + uncondensed2, err := NewCommitLogger(rootPath, "uncondensed2", logger, + cyclemanager.NewCallbackGroupNoop()) + require.Nil(t, err) + defer uncondensed2.Shutdown(ctx) + + control, err := NewCommitLogger(rootPath, "control", logger, + cyclemanager.NewCallbackGroupNoop()) + require.Nil(t, err) + defer control.Shutdown(ctx) + + t.Run("add node via replace links", func(t *testing.T) { + uncondensed1.ReplaceLinksAtLevel(0, 0, []uint64{1, 2, 3}) + require.Nil(t, uncondensed1.Flush()) + }) + + t.Run("start tombstone job, delete node, remove tombstone", func(t *testing.T) { + uncondensed1.AddTombstone(0) + uncondensed2.DeleteNode(0) + uncondensed2.RemoveTombstone(0) + require.Nil(t, uncondensed2.Flush()) + }) + + t.Run("create a control log", func(t *testing.T) { + require.Nil(t, control.Flush()) + }) + + t.Run("condense both logs and verify the contents against the control", func(t *testing.T) { + input, ok, err := getCurrentCommitLogFileName(commitLogDirectory(rootPath, "uncondensed1")) + require.Nil(t, err) + require.True(t, ok) + + err = NewMemoryCondensor(logger).Do(commitLogFileName(rootPath, "uncondensed1", input)) + require.Nil(t, err) + + input, ok, err = getCurrentCommitLogFileName(commitLogDirectory(rootPath, "uncondensed2")) + require.Nil(t, err) + require.True(t, ok) + + err = NewMemoryCondensor(logger).Do(commitLogFileName(rootPath, "uncondensed2", input)) + require.Nil(t, err) + + control, ok, err := getCurrentCommitLogFileName( 
+ commitLogDirectory(rootPath, "control")) + require.Nil(t, err) + require.True(t, ok) + + condensed1, ok, err := getCurrentCommitLogFileName( + commitLogDirectory(rootPath, "uncondensed1")) + require.Nil(t, err) + require.True(t, ok) + + condensed2, ok, err := getCurrentCommitLogFileName( + commitLogDirectory(rootPath, "uncondensed2")) + require.Nil(t, err) + require.True(t, ok) + + assert.True(t, strings.HasSuffix(condensed1, ".condensed"), + "commit log is now saved as condensed") + assert.True(t, strings.HasSuffix(condensed2, ".condensed"), + "commit log is now saved as condensed") + + assertIndicesFromCommitLogsMatch(t, commitLogFileName(rootPath, "control", control), + []string{ + commitLogFileName(rootPath, "uncondensed1", condensed1), + commitLogFileName(rootPath, "uncondensed2", condensed2), + }) + }) +} + +func TestCondensorWithoutEntrypoint(t *testing.T) { + rootPath := t.TempDir() + ctx := context.Background() + + logger, _ := test.NewNullLogger() + uncondensed, err := NewCommitLogger(rootPath, "uncondensed", logger, + cyclemanager.NewCallbackGroupNoop()) + require.Nil(t, err) + defer uncondensed.Shutdown(ctx) + + t.Run("add data, but do not set an entrypoint", func(t *testing.T) { + uncondensed.AddNode(&vertex{id: 0, level: 3}) + + require.Nil(t, uncondensed.Flush()) + }) + + t.Run("condense the original and verify it doesn't overwrite the EP", func(t *testing.T) { + input, ok, err := getCurrentCommitLogFileName(commitLogDirectory(rootPath, "uncondensed")) + require.Nil(t, err) + require.True(t, ok) + + err = NewMemoryCondensor(logger).Do(commitLogFileName(rootPath, "uncondensed", input)) + require.Nil(t, err) + + actual, ok, err := getCurrentCommitLogFileName( + commitLogDirectory(rootPath, "uncondensed")) + require.Nil(t, err) + require.True(t, ok) + + assert.True(t, strings.HasSuffix(actual, ".condensed"), + "commit log is now saved as condensed") + + initialState := DeserializationResult{ + Nodes: nil, + Entrypoint: 17, + Level: 3, + } + fd, err := 
os.Open(commitLogFileName(rootPath, "uncondensed", actual)) + require.Nil(t, err) + + bufr := bufio.NewReader(fd) + res, _, err := NewDeserializer(logger).Do(bufr, &initialState, false) + require.Nil(t, err) + + conns, _ := packedconn.NewWithMaxLayer(3) + assert.Contains(t, res.Nodes, &vertex{id: 0, level: 3, connections: conns}) + assert.Equal(t, uint64(17), res.Entrypoint) + assert.Equal(t, uint16(3), res.Level) + }) +} + +func TestCondensorWithPQInformation(t *testing.T) { + rootPath := t.TempDir() + ctx := context.Background() + + logger, _ := test.NewNullLogger() + uncondensed, err := NewCommitLogger(rootPath, "uncondensed", logger, + cyclemanager.NewCallbackGroupNoop()) + require.Nil(t, err) + defer uncondensed.Shutdown(ctx) + + encoders := []compressionhelpers.PQEncoder{ + compressionhelpers.NewKMeansEncoderWithCenters( + 4, + 2, + 0, + [][]float32{{1, 2}, {3, 4}, {5, 6}, {7, 8}}, + ), + compressionhelpers.NewKMeansEncoderWithCenters( + 4, + 2, + 1, + [][]float32{{8, 7}, {6, 5}, {4, 3}, {2, 1}}, + ), + compressionhelpers.NewKMeansEncoderWithCenters( + 4, + 2, + 2, + [][]float32{{1, 2}, {3, 4}, {5, 6}, {7, 8}}, + ), + } + + t.Run("add pq info", func(t *testing.T) { + uncondensed.AddPQCompression(compressionhelpers.PQData{ + Ks: 4, + M: 3, + Dimensions: 6, + EncoderType: compressionhelpers.UseKMeansEncoder, + EncoderDistribution: uint8(0), + Encoders: encoders, + UseBitsEncoding: false, + }) + + require.Nil(t, uncondensed.Flush()) + }) + + t.Run("condense the original and verify the PQ info is present", func(t *testing.T) { + input, ok, err := getCurrentCommitLogFileName(commitLogDirectory(rootPath, "uncondensed")) + require.Nil(t, err) + require.True(t, ok) + + err = NewMemoryCondensor(logger).Do(commitLogFileName(rootPath, "uncondensed", input)) + require.Nil(t, err) + + actual, ok, err := getCurrentCommitLogFileName( + commitLogDirectory(rootPath, "uncondensed")) + require.Nil(t, err) + require.True(t, ok) + + assert.True(t, strings.HasSuffix(actual, 
".condensed"), + "commit log is now saved as condensed") + + initialState := DeserializationResult{} + fd, err := os.Open(commitLogFileName(rootPath, "uncondensed", actual)) + require.Nil(t, err) + + bufr := bufio.NewReader(fd) + res, _, err := NewDeserializer(logger).Do(bufr, &initialState, false) + require.Nil(t, err) + + assert.True(t, res.Compressed) + expected := compressionhelpers.PQData{ + Ks: 4, + M: 3, + Dimensions: 6, + EncoderType: compressionhelpers.UseKMeansEncoder, + EncoderDistribution: uint8(0), + Encoders: encoders, + UseBitsEncoding: false, + } + + assert.Equal(t, expected, *res.CompressionPQData) + }) +} + +func TestCondensorWithMUVERAInformation(t *testing.T) { + rootPath := t.TempDir() + ctx := context.Background() + + logger, _ := test.NewNullLogger() + uncondensed, err := NewCommitLogger(rootPath, "uncondensed", logger, + cyclemanager.NewCallbackGroupNoop()) + require.Nil(t, err) + defer uncondensed.Shutdown(ctx) + + gaussians := [][][]float32{ + { + {1, 2, 3, 4, 5}, // cluster 1 + {1, 2, 3, 4, 5}, // cluster 2 + }, // rep 1 + { + {5, 6, 7, 8, 9}, // cluster 1 + {5, 6, 7, 8, 9}, // cluster 2 + }, // rep 2 + } // (repetitions, kSim, dimensions) + + s := [][][]float32{ + { + {-1, 1, 1, -1, 1}, // dprojection 1 + {1, -1, 1, 1, -1}, // dprojection 2 + }, // rep 1 + { + {-1, 1, 1, -1, 1}, // dprojection 1 + {1, -1, 1, 1, -1}, // dprojection 2 + }, // rep 2 + } // (repetitions, dProjections, dimensions) + + t.Run("add muvera info", func(t *testing.T) { + uncondensed.AddMuvera(multivector.MuveraData{ + KSim: 2, + NumClusters: 4, + Dimensions: 5, + DProjections: 2, + Repetitions: 2, + Gaussians: gaussians, + S: s, + }) + + require.Nil(t, uncondensed.Flush()) + }) + + t.Run("condense the original and verify the MUVERA info is present", func(t *testing.T) { + input, ok, err := getCurrentCommitLogFileName(commitLogDirectory(rootPath, "uncondensed")) + require.Nil(t, err) + require.True(t, ok) + + err = 
NewMemoryCondensor(logger).Do(commitLogFileName(rootPath, "uncondensed", input)) + require.Nil(t, err) + + actual, ok, err := getCurrentCommitLogFileName( + commitLogDirectory(rootPath, "uncondensed")) + require.Nil(t, err) + require.True(t, ok) + + assert.True(t, strings.HasSuffix(actual, ".condensed"), + "commit log is now saved as condensed") + + initialState := DeserializationResult{} + fd, err := os.Open(commitLogFileName(rootPath, "uncondensed", actual)) + require.Nil(t, err) + + bufr := bufio.NewReader(fd) + res, _, err := NewDeserializer(logger).Do(bufr, &initialState, false) + require.Nil(t, err) + + assert.True(t, res.MuveraEnabled) + expected := multivector.MuveraData{ + KSim: 2, + NumClusters: 4, + Dimensions: 5, + DProjections: 2, + Repetitions: 2, + Gaussians: gaussians, + S: s, + } + + assert.Equal(t, expected, *res.EncoderMuvera) + }) +} + +func TestCondensorWithRQ8Information(t *testing.T) { + rootPath := t.TempDir() + ctx := context.Background() + + logger, _ := test.NewNullLogger() + uncondensed, err := NewCommitLogger(rootPath, "uncondensed", logger, + cyclemanager.NewCallbackGroupNoop()) + require.Nil(t, err) + defer uncondensed.Shutdown(ctx) + + rqData := compressionhelpers.RQData{ + InputDim: 10, + Bits: 8, + Rotation: compressionhelpers.FastRotation{ + OutputDim: 4, + Rounds: 5, + Swaps: [][]compressionhelpers.Swap{ + { + {I: 0, J: 2}, + {I: 1, J: 3}, + }, + { + {I: 4, J: 6}, + {I: 5, J: 7}, + }, + { + {I: 8, J: 10}, + {I: 9, J: 11}, + }, + { + {I: 12, J: 14}, + {I: 13, J: 15}, + }, + { + {I: 16, J: 18}, + {I: 17, J: 19}, + }, + }, + Signs: [][]float32{ + {1, -1, 1, -1}, + {1, -1, 1, -1}, + {1, -1, 1, -1}, + {1, -1, 1, -1}, + {1, -1, 1, -1}, + }, + }, + } + + t.Run("add rotational quantization info", func(t *testing.T) { + uncondensed.AddRQCompression(rqData) + + require.Nil(t, uncondensed.Flush()) + }) + + t.Run("condense the original and verify the RQ info is present", func(t *testing.T) { + input, ok, err := 
getCurrentCommitLogFileName(commitLogDirectory(rootPath, "uncondensed")) + require.Nil(t, err) + require.True(t, ok) + + err = NewMemoryCondensor(logger).Do(commitLogFileName(rootPath, "uncondensed", input)) + require.Nil(t, err) + + actual, ok, err := getCurrentCommitLogFileName( + commitLogDirectory(rootPath, "uncondensed")) + require.Nil(t, err) + require.True(t, ok) + + assert.True(t, strings.HasSuffix(actual, ".condensed"), + "commit log is now saved as condensed") + + initialState := DeserializationResult{} + fd, err := os.Open(commitLogFileName(rootPath, "uncondensed", actual)) + require.Nil(t, err) + + bufr := bufio.NewReader(fd) + res, _, err := NewDeserializer(logger).Do(bufr, &initialState, false) + require.Nil(t, err) + + assert.True(t, res.Compressed) + expected := rqData + + assert.Equal(t, expected, *res.CompressionRQData) + }) +} + +func TestCondensorWithRQ1Information(t *testing.T) { + rootPath := t.TempDir() + ctx := context.Background() + + logger, _ := test.NewNullLogger() + uncondensed, err := NewCommitLogger(rootPath, "uncondensed", logger, + cyclemanager.NewCallbackGroupNoop()) + require.Nil(t, err) + defer uncondensed.Shutdown(ctx) + + brqData := compressionhelpers.BRQData{ + InputDim: 10, + Rotation: compressionhelpers.FastRotation{ + OutputDim: 4, + Rounds: 5, + Swaps: [][]compressionhelpers.Swap{ + { + {I: 0, J: 2}, + {I: 1, J: 3}, + }, + { + {I: 4, J: 6}, + {I: 5, J: 7}, + }, + { + {I: 8, J: 10}, + {I: 9, J: 11}, + }, + { + {I: 12, J: 14}, + {I: 13, J: 15}, + }, + { + {I: 16, J: 18}, + {I: 17, J: 19}, + }, + }, + Signs: [][]float32{ + {1, -1, 1, -1}, + {1, -1, 1, -1}, + {1, -1, 1, -1}, + {1, -1, 1, -1}, + {1, -1, 1, -1}, + }, + }, + Rounding: []float32{0.1, 0.2, 0.3, 0.4}, + } + + t.Run("add binary rotational quantization info", func(t *testing.T) { + uncondensed.AddBRQCompression(brqData) + + require.Nil(t, uncondensed.Flush()) + }) + + t.Run("condense the original and verify the BRQ info is present", func(t *testing.T) { + input, ok, 
err := getCurrentCommitLogFileName(commitLogDirectory(rootPath, "uncondensed")) + require.Nil(t, err) + require.True(t, ok) + + err = NewMemoryCondensor(logger).Do(commitLogFileName(rootPath, "uncondensed", input)) + require.Nil(t, err) + + actual, ok, err := getCurrentCommitLogFileName( + commitLogDirectory(rootPath, "uncondensed")) + require.Nil(t, err) + require.True(t, ok) + + assert.True(t, strings.HasSuffix(actual, ".condensed"), + "commit log is now saved as condensed") + + initialState := DeserializationResult{} + fd, err := os.Open(commitLogFileName(rootPath, "uncondensed", actual)) + require.Nil(t, err) + + bufr := bufio.NewReader(fd) + res, _, err := NewDeserializer(logger).Do(bufr, &initialState, false) + require.Nil(t, err) + + assert.True(t, res.Compressed) + expected := brqData + + assert.Equal(t, expected, *res.CompressionBRQData) + }) +} + +func assertIndicesFromCommitLogsMatch(t *testing.T, fileNameControl string, + fileNames []string, +) { + control := readFromCommitLogs(t, fileNameControl) + actual := readFromCommitLogs(t, fileNames...) 
+ + assert.Equal(t, control, actual) +} + +func readFromCommitLogs(t *testing.T, fileNames ...string) *hnsw { + var res *DeserializationResult + + for _, fileName := range fileNames { + fd, err := os.Open(fileName) + require.Nil(t, err) + + bufr := bufio.NewReader(fd) + logger, _ := test.NewNullLogger() + res, _, err = NewDeserializer(logger).Do(bufr, res, false) + require.Nil(t, err) + } + + return &hnsw{ + nodes: removeTrailingNilNodes(res.Nodes), + currentMaximumLayer: int(res.Level), + entryPointID: res.Entrypoint, + tombstones: res.Tombstones, + } +} + +// just a test helper to make the output easier to compare, remove all trailing +// nil nodes by starting from the last and stopping as soon as a node is not +// nil +func removeTrailingNilNodes(in []*vertex) []*vertex { + pos := len(in) - 1 + + for pos >= 0 { + if in[pos] != nil { + break + } + + pos-- + } + + return in[:pos+1] +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/condensor_mmap.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/condensor_mmap.go new file mode 100644 index 0000000000000000000000000000000000000000..0fffbd857f6631bf80a1cbd0f7eaf8b6ab261ffb --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/condensor_mmap.go @@ -0,0 +1,86 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package hnsw + +import ( + "io" + "os" + + "github.com/pkg/errors" +) + +type MmapCondensor struct { + connectionsPerLevel int +} + +func NewMmapCondensor(connectionsPerLevel int) *MmapCondensor { + return &MmapCondensor{connectionsPerLevel: connectionsPerLevel} +} + +func (c *MmapCondensor) Do(fileName string) error { + fd, err := os.Open(fileName) + if err != nil { + return errors.Wrap(err, "open commit log to be condensed") + } + defer fd.Close() + + index, err := c.analyze(fd) + if err != nil { + return errors.Wrap(err, "analyze commit log and build index") + } + + index.calculateOffsets() + + // "rewind" file so we can read it again, this time into the mmap file + if _, err := fd.Seek(0, io.SeekStart); err != nil { + return errors.Wrap(err, "rewind uncondensed") + } + + if err := c.read(fd, index, fileName+".scratch.tmp"); err != nil { + return errors.Wrap(err, "read uncondensed into mmap file") + } + + return nil +} + +func (c *MmapCondensor) analyze(file *os.File) (mmapIndex, error) { + return newMmapCondensorAnalyzer(c.connectionsPerLevel).Do(file) +} + +func (c *MmapCondensor) read(source *os.File, index mmapIndex, + targetName string, +) error { + return newMmapCondensorReader().Do(source, index, targetName) +} + +func (mi *mmapIndex) calculateOffsets() { + for i := range mi.nodes { + if i == 0 { + // offset for the first element is 0, nothing to do + continue + } + + // we now have the guarantee that elem i-1 exists + mi.nodes[i].offset = mi.nodes[i-1].offset + uint64(mi.nodes[i-1].Size(mi.connectionsPerLevel)) + } +} + +// Size can only return a useful result if offsets have been calculated prior +// to calling Size() +func (mi *mmapIndex) Size() int { + if len(mi.nodes) == 0 { + return -1 + } + + return int(mi.nodes[len(mi.nodes)-1].offset) + + mi.nodes[len(mi.nodes)-1].Size(mi.connectionsPerLevel) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/condensor_mmap_analyzer.go 
//                           _       _
//  __      _____  __ ___   ___  __ _| |_ ___
//  \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
//   \ V  V /  __/ (_| |\ V /| | (_| | ||  __/
//    \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
//
//  Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
//
//  CONTACT: hello@weaviate.io
//

package hnsw

import (
	"bufio"
	"encoding/binary"
	"io"
	"os"
	"sort"

	"github.com/pkg/errors"
)

// mmapIndex is an index of all nodes observed in a commit log, kept sorted
// ascending by node id. For every node it records the highest level the node
// was seen at. It is produced by MmapCondensorAnalyzer as a first pass over
// the log, so that a later pass can work with pre-computed per-node sizes.
type mmapIndex struct {
	nodes               []mmapIndexNode // invariant: sorted ascending by id
	connectionsPerLevel int
}

// UpsertNodeMaxLevel records that node exists with at least the given level.
// An existing node's maxLevel is only ever raised, never lowered. Lookup is
// O(log n) via binary search; a fresh insert is O(n) because of the shift.
func (mi *mmapIndex) UpsertNodeMaxLevel(node uint64, level uint16) {
	// position of the first element with id >= node
	n := sort.Search(len(mi.nodes), func(a int) bool {
		return mi.nodes[a].id >= node
	})

	if n < len(mi.nodes) && mi.nodes[n].id == node {
		// update
		if mi.nodes[n].maxLevel < level {
			mi.nodes[n].maxLevel = level
		}
	} else {
		// insert

		// See https://github.com/golang/go/wiki/SliceTricks#insert
		mi.nodes = append(mi.nodes, mmapIndexNode{})
		copy(mi.nodes[n+1:], mi.nodes[n:])
		mi.nodes[n].id = node
		mi.nodes[n].maxLevel = level
	}
}

// DeleteNode is intentionally left as a no-op here.
// NOTE(review): presumably the analyzer keeps deleted nodes so their space is
// still accounted for in the rewritten log — confirm against the mmap
// condensor's second pass before relying on this.
func (mi *mmapIndex) DeleteNode(node uint64) {
}

// mmapIndexNode describes one node in the index: its id, its byte offset in
// the rewritten target (filled in later; zero until then), and the highest
// level observed for it.
type mmapIndexNode struct {
	id       uint64
	offset   uint64
	maxLevel uint16
}

// Size returns the space budgeted for this node given the index's
// connections-per-level setting.
// NOTE(review): the trailing comment says "level 0 has 2x connections", but
// the formula reserves connectionsPerLevel for every one of the maxLevel+1
// levels without doubling level 0, and does not multiply by the 8-byte width
// of a connection — verify the intended unit/accounting before use.
func (n mmapIndexNode) Size(connectionsPerLevel int) int {
	return int(n.maxLevel)*2 + // overhead for uint16 length indicators
		connectionsPerLevel*int(n.maxLevel+1) // level 0 has 2x connections
}

// MmapCondensorAnalyzer reads an hnsw commit log once and builds an mmapIndex
// of all nodes it mentions. It does not retain link data — it only tracks
// which nodes exist and their maximum level.
type MmapCondensorAnalyzer struct {
	reader              *bufio.Reader
	connectionsPerLevel int
	index               mmapIndex
}

// newMmapCondensorAnalyzer creates an analyzer for logs written with the
// given connections-per-level setting.
func newMmapCondensorAnalyzer(connectionsPerLevel int) *MmapCondensorAnalyzer {
	return &MmapCondensorAnalyzer{connectionsPerLevel: connectionsPerLevel}
}

// Do scans the entire commit log file and returns the resulting index. On
// error the partially built index is returned alongside the error.
func (a *MmapCondensorAnalyzer) Do(file *os.File) (mmapIndex, error) {
	a.reader = bufio.NewReaderSize(file, 1024*1024)

	a.index = mmapIndex{
		connectionsPerLevel: a.connectionsPerLevel,
		nodes:               make([]mmapIndexNode, 0, 10000),
	}

	if err := a.loop(); err != nil {
		return a.index, err
	}

	return a.index, nil
}

// loop reads commit-log entries until EOF, dispatching each entry type to the
// matching Read* method. A clean EOF terminates the loop successfully; any
// other error aborts the scan.
func (a *MmapCondensorAnalyzer) loop() error {
	for {
		ct, err := a.ReadCommitType(a.reader)
		if err != nil {
			if errors.Is(err, io.EOF) {
				break
			}

			return err
		}

		switch ct {
		case AddNode:
			err = a.ReadNode(a.reader)
		case SetEntryPointMaxLevel:
			err = a.ReadEP(a.reader)
		case AddLinkAtLevel:
			err = a.ReadLink(a.reader)
		case ReplaceLinksAtLevel:
			err = a.ReadLinks(a.reader)
		case AddTombstone:
			err = a.ReadAddTombstone(a.reader)
		case RemoveTombstone:
			err = a.ReadRemoveTombstone(a.reader)
		case ClearLinks:
			err = a.ReadClearLinks(a.reader)
		case DeleteNode:
			err = a.ReadDeleteNode(a.reader)
		case ResetIndex:
			// a reset wipes everything seen so far
			a.index.nodes = make([]mmapIndexNode, 0, 10000)
		default:
			err = errors.Errorf("unrecognized commit type %d", ct)
		}
		if err != nil {
			// do not return nil, err, because the err could be a recoverable one
			return err
		}
	}

	return nil
}

// ReadNode consumes an AddNode entry (8-byte id + 2-byte level) and records
// the node in the index.
func (a *MmapCondensorAnalyzer) ReadNode(r io.Reader) error {
	id, err := a.readUint64(r)
	if err != nil {
		return err
	}

	level, err := a.readUint16(r)
	if err != nil {
		return err
	}

	a.index.UpsertNodeMaxLevel(id, level)
	return nil
}

// ReadEP skips a SetEntryPointMaxLevel entry (10 bytes); the entrypoint does
// not affect the size index.
func (a *MmapCondensorAnalyzer) ReadEP(r io.Reader) error {
	// TODO: is this an issue because of bufio Read vs ReadFull?
	_, err := io.CopyN(io.Discard, r, 10)
	return err
}

// ReadLink consumes an AddLinkAtLevel entry: source id and level are recorded
// in the index, the 8-byte target id is discarded.
func (a *MmapCondensorAnalyzer) ReadLink(r io.Reader) error {
	source, err := a.readUint64(r)
	if err != nil {
		return err
	}

	level, err := a.readUint16(r)
	if err != nil {
		return err
	}

	// TODO: is this an issue because of bufio Read vs ReadFull?
	_, err = io.CopyN(io.Discard, r, 8)
	if err != nil {
		return err
	}
	a.index.UpsertNodeMaxLevel(source, level)

	return nil
}

// ReadLinks consumes a ReplaceLinksAtLevel entry: source id and level are
// recorded, then the length-prefixed list of 8-byte targets is discarded.
func (a *MmapCondensorAnalyzer) ReadLinks(r io.Reader) error {
	source, err := a.readUint64(r)
	if err != nil {
		return err
	}

	level, err := a.readUint16(r)
	if err != nil {
		return err
	}

	length, err := a.readUint16(r)
	if err != nil {
		return err
	}

	a.index.UpsertNodeMaxLevel(source, level)

	// TODO: is this an issue because of bufio Read vs ReadFull?
	_, err = io.CopyN(io.Discard, r, 8*int64(length))
	if err != nil {
		return err
	}

	return nil
}

// ReadAddTombstone skips an AddTombstone entry (8-byte id); tombstones do not
// affect node sizes.
func (a *MmapCondensorAnalyzer) ReadAddTombstone(r io.Reader) error {
	// TODO: is this an issue because of bufio Read vs ReadFull?
	_, err := io.CopyN(io.Discard, r, 8)
	return err
}

// ReadRemoveTombstone skips a RemoveTombstone entry (8-byte id).
func (a *MmapCondensorAnalyzer) ReadRemoveTombstone(r io.Reader) error {
	// TODO: is this an issue because of bufio Read vs ReadFull?
	_, err := io.CopyN(io.Discard, r, 8)
	return err
}

// ReadClearLinks skips a ClearLinks entry (8-byte id). Clearing links does
// not shrink the maximum level already recorded for the node.
func (a *MmapCondensorAnalyzer) ReadClearLinks(r io.Reader) error {
	// TODO: is this an issue because of bufio Read vs ReadFull?
	_, err := io.CopyN(io.Discard, r, 8)
	return err
}

// ReadDeleteNode consumes a DeleteNode entry and forwards the id to the index
// (currently a no-op there, see mmapIndex.DeleteNode).
func (a *MmapCondensorAnalyzer) ReadDeleteNode(r io.Reader) error {
	id, err := a.readUint64(r)
	if err != nil {
		return err
	}

	a.index.DeleteNode(id)
	return nil
}

// readUint64 reads exactly 8 little-endian bytes from r.
func (a *MmapCondensorAnalyzer) readUint64(r io.Reader) (uint64, error) {
	var value uint64
	tmpBuf := make([]byte, 8)
	_, err := io.ReadFull(r, tmpBuf)
	if err != nil {
		return 0, errors.Wrap(err, "failed to read uint64")
	}

	value = binary.LittleEndian.Uint64(tmpBuf)

	return value, nil
}

// readUint16 reads exactly 2 little-endian bytes from r.
func (a *MmapCondensorAnalyzer) readUint16(r io.Reader) (uint16, error) {
	var value uint16
	tmpBuf := make([]byte, 2)
	_, err := io.ReadFull(r, tmpBuf)
	if err != nil {
		return 0, errors.Wrap(err, "failed to read uint16")
	}

	value = binary.LittleEndian.Uint16(tmpBuf)

	return value, nil
}

// ReadCommitType reads the single-byte entry-type tag that precedes every
// commit-log entry. An io.EOF here marks a clean end of the log.
func (a *MmapCondensorAnalyzer) ReadCommitType(r io.Reader) (HnswCommitType, error) {
	tmpBuf := make([]byte, 1)
	if _, err := io.ReadFull(r, tmpBuf); err != nil {
		return 0, errors.Wrap(err, "failed to read commit type")
	}

	return HnswCommitType(tmpBuf[0]), nil
}
//                           _       _
//  __      _____  __ ___   ___  __ _| |_ ___
//  \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
//   \ V  V /  __/ (_| |\ V /| | (_| | ||  __/
//    \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
//
//  Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
//
//  CONTACT: hello@weaviate.io
//

package hnsw

import (
	"os"
	"strings"
	"testing"

	"github.com/sirupsen/logrus/hooks/test"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/weaviate/weaviate/entities/cyclemanager"
)

// TestMmapCondensor builds a deliberately redundant commit log and a
// hand-written "perfect" log with the same end state, condenses the redundant
// one, and asserts the condensed file matches the perfect one in size.
// Currently skipped — the mmap condensor is not finished (its reader loop is
// still a stub).
func TestMmapCondensor(t *testing.T) {
	t.Skip() // TODO

	rootPath := t.TempDir()

	logger, _ := test.NewNullLogger()
	uncondensed, err := NewCommitLogger(rootPath, "uncondensed", logger,
		cyclemanager.NewCallbackGroupNoop())
	require.Nil(t, err)

	perfect, err := NewCommitLogger(rootPath, "perfect", logger,
		cyclemanager.NewCallbackGroupNoop())
	require.Nil(t, err)

	t.Run("add redundant data to the original log", func(t *testing.T) {
		uncondensed.AddNode(&vertex{id: 0, level: 3})
		uncondensed.AddNode(&vertex{id: 1, level: 3})
		uncondensed.AddNode(&vertex{id: 2, level: 3})
		uncondensed.AddNode(&vertex{id: 3, level: 3})

		// below are some pointless connection replacements, we expect that most of
		// these will be gone after condensing, this gives us a good way of testing
		// whether they're really gone
		for level := 0; level <= 3; level++ {
			uncondensed.ReplaceLinksAtLevel(0, level, []uint64{1, 2, 3})
			uncondensed.ReplaceLinksAtLevel(0, level, []uint64{1, 2})
			uncondensed.ReplaceLinksAtLevel(0, level, []uint64{1})
			uncondensed.ReplaceLinksAtLevel(0, level, []uint64{2})
			uncondensed.ReplaceLinksAtLevel(0, level, []uint64{3})
			uncondensed.ReplaceLinksAtLevel(0, level, []uint64{2, 3})
			uncondensed.ReplaceLinksAtLevel(0, level, []uint64{1, 2, 3})
			uncondensed.ReplaceLinksAtLevel(1, level, []uint64{0, 2, 3})
			uncondensed.ReplaceLinksAtLevel(1, level, []uint64{0, 2})
			uncondensed.ReplaceLinksAtLevel(1, level, []uint64{0})
			uncondensed.ReplaceLinksAtLevel(1, level, []uint64{2})
			uncondensed.ReplaceLinksAtLevel(1, level, []uint64{3})
			uncondensed.ReplaceLinksAtLevel(1, level, []uint64{2, 3})
			uncondensed.ReplaceLinksAtLevel(1, level, []uint64{0, 2, 3})
			uncondensed.ReplaceLinksAtLevel(2, level, []uint64{0, 1, 3})
			uncondensed.ReplaceLinksAtLevel(2, level, []uint64{0, 1})
			uncondensed.ReplaceLinksAtLevel(2, level, []uint64{0})
			uncondensed.ReplaceLinksAtLevel(2, level, []uint64{1})
			uncondensed.ReplaceLinksAtLevel(2, level, []uint64{3})
			uncondensed.ReplaceLinksAtLevel(2, level, []uint64{1, 3})
			uncondensed.ReplaceLinksAtLevel(2, level, []uint64{0, 1, 3})
			uncondensed.ReplaceLinksAtLevel(3, level, []uint64{0, 1, 2})
			uncondensed.ReplaceLinksAtLevel(3, level, []uint64{0, 1})
			uncondensed.ReplaceLinksAtLevel(3, level, []uint64{0})
			uncondensed.ReplaceLinksAtLevel(3, level, []uint64{1})
			uncondensed.ReplaceLinksAtLevel(3, level, []uint64{2})
			uncondensed.ReplaceLinksAtLevel(3, level, []uint64{1, 2})
			uncondensed.ReplaceLinksAtLevel(3, level, []uint64{0, 1, 2})
		}
		uncondensed.SetEntryPointWithMaxLayer(3, 3)
		uncondensed.AddTombstone(2)

		require.Nil(t, uncondensed.Flush())
	})

	t.Run("create a hypothetical perfect log", func(t *testing.T) {
		perfect.AddNode(&vertex{id: 0, level: 3})
		perfect.AddNode(&vertex{id: 1, level: 3})
		perfect.AddNode(&vertex{id: 2, level: 3})
		perfect.AddNode(&vertex{id: 3, level: 3})

		// only the final state of each node's links, exactly once — this is
		// what a perfectly condensed log would contain
		for level := 0; level <= 3; level++ {
			perfect.ReplaceLinksAtLevel(0, level, []uint64{1, 2, 3})
			perfect.ReplaceLinksAtLevel(1, level, []uint64{0, 2, 3})
			perfect.ReplaceLinksAtLevel(2, level, []uint64{0, 1, 3})
			perfect.ReplaceLinksAtLevel(3, level, []uint64{0, 1, 2})
		}
		perfect.SetEntryPointWithMaxLayer(3, 3)
		perfect.AddTombstone(2)

		require.Nil(t, perfect.Flush())
	})

	t.Run("condense the original and verify against the perfect one", func(t *testing.T) {
		input, ok, err := getCurrentCommitLogFileName(commitLogDirectory(rootPath, "uncondensed"))
		require.Nil(t, err)
		require.True(t, ok)

		err = NewMmapCondensor(3).Do(commitLogFileName(rootPath, "uncondensed", input))
		require.Nil(t, err)

		control, ok, err := getCurrentCommitLogFileName(
			commitLogDirectory(rootPath, "perfect"))
		require.Nil(t, err)
		require.True(t, ok)

		actual, ok, err := getCurrentCommitLogFileName(
			commitLogDirectory(rootPath, "uncondensed"))
		require.Nil(t, err)
		require.True(t, ok)

		assert.True(t, strings.HasSuffix(actual, ".condensed"),
			"commit log is now saved as condensed")

		// equal file size is used as a proxy for "no redundant entries left"
		controlStat, err := os.Stat(commitLogFileName(rootPath, "perfect", control))
		require.Nil(t, err)

		actualStat, err := os.Stat(commitLogFileName(rootPath, "uncondensed", actual))
		require.Nil(t, err)

		assert.Equal(t, controlStat.Size(), actualStat.Size())

		// dumpIndexFromCommitLog(t, commitLogFileName(rootPath, "uncondensed", actual))
		// dumpIndexFromCommitLog(t, commitLogFileName(rootPath, "perfect", control))
	})
}

// func TestCondensorWithoutEntrypoint(t *testing.T) {
// 	rand.Seed(time.Now().UnixNano())
// 	rootPath := t.TempDir()

// 	logger, _ := test.NewNullLogger()
// 	uncondensed, err := NewCommitLogger(rootPath, "uncondensed", logger,
// 		cyclemanager.NewCallbackGroupNoop())
// 	require.Nil(t, err)

// 	t.Run("add data, but do not set an entrypoint", func(t *testing.T) {
// 		uncondensed.AddNode(&vertex{id: 0, level: 3})

// 		require.Nil(t, uncondensed.Flush())
// 	})

// 	t.Run("condense the original and verify it doesn't overwrite the EP", func(t *testing.T) {
// 		input, ok, err := getCurrentCommitLogFileName(commitLogDirectory(rootPath, "uncondensed"))
// 		require.Nil(t, err)
// 		require.True(t, ok)

// 		err = NewMemoryCondensor2(logger).Do(commitLogFileName(rootPath, "uncondensed", input))
// 		require.Nil(t, err)

// 		actual, ok, err := getCurrentCommitLogFileName(
// 			commitLogDirectory(rootPath, "uncondensed"))
// 		require.Nil(t, err)
// 		require.True(t, ok)

// 		assert.True(t, strings.HasSuffix(actual, ".condensed"),
// 			"commit log is now saved as condensed")

// 		initialState := DeserializationResult{
// 			Nodes:      nil,
// 			Entrypoint: 17,
// 			Level:      3,
// 		}
// 		fd, err := os.Open(commitLogFileName(rootPath, "uncondensed", actual))
// 		require.Nil(t, err)

// 		bufr := bufio.NewReader(fd)
// 		res, err := NewDeserializer(logger).Do(bufr, &initialState)
// 		require.Nil(t, err)

// 		assert.Contains(t, res.Nodes, &vertex{id: 0, level: 3, connections: map[int][]uint64{}})
// 		assert.Equal(t, uint64(17), res.Entrypoint)
// 		assert.Equal(t, uint16(3), res.Level)

// 	})
// }
+// +// CONTACT: hello@weaviate.io +// + +package hnsw + +import ( + "bufio" + "os" + + "github.com/edsrzf/mmap-go" + "github.com/pkg/errors" +) + +type MmapCondensorReader struct { + reader *bufio.Reader + target []byte +} + +func newMmapCondensorReader() *MmapCondensorReader { + return &MmapCondensorReader{} +} + +func (r *MmapCondensorReader) Do(source *os.File, index mmapIndex, targetName string) error { + r.reader = bufio.NewReaderSize(source, 1024*1024) + + scratchFile, err := os.Create(targetName) + if err != nil { + return err + } + + size := index.Size() + if err := scratchFile.Truncate(int64(index.Size())); err != nil { + return errors.Wrap(err, "truncate scratch file to size") + } + + mmapSpace, err := mmap.MapRegion(scratchFile, size, mmap.COPY, 0, 0) + if err != nil { + return errors.Wrap(err, "mmap scratch file") + } + + r.target = mmapSpace + + if err := r.loop(); err != nil { + return err + } + + if err := mmapSpace.Unmap(); err != nil { + return errors.Wrap(err, "munmap scratch file") + } + + if err := scratchFile.Close(); err != nil { + return errors.Wrap(err, "close scratch file") + } + + return nil +} + +func (r *MmapCondensorReader) loop() error { + // TODO: iterate through commit log + // TODO: get offset for specific part + // TODO: write into target at correct position + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/condensor_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/condensor_test.go new file mode 100644 index 0000000000000000000000000000000000000000..198cbeaa0a6d01d5ec3fa110f2535d5a4828935f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/condensor_test.go @@ -0,0 +1,68 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package hnsw + +import ( + _ "fmt" + "math/rand" + "testing" + + "github.com/sirupsen/logrus/hooks/test" +) + +func BenchmarkCondensor2NewUint64Write(b *testing.B) { + b.StopTimer() + logger, _ := test.NewNullLogger() + c := NewMemoryCondensor(logger) + c.newLog = NewWriterSize(c.newLogFile, 1*1024*1024) + b.StartTimer() + for i := 0; i < b.N; i++ { + writeUint64(c.newLog, rand.Uint64()) + } +} + +func BenchmarkCondensor2NewUint16Write(b *testing.B) { + b.StopTimer() + logger, _ := test.NewNullLogger() + c := NewMemoryCondensor(logger) + c.newLog = NewWriterSize(c.newLogFile, 1*1024*1024) + b.StartTimer() + for i := 0; i < b.N; i++ { + writeUint16(c.newLog, uint16(rand.Uint32())) + } +} + +func BenchmarkCondensor2WriteCommitType(b *testing.B) { + b.StopTimer() + logger, _ := test.NewNullLogger() + c := NewMemoryCondensor(logger) + c.newLog = NewWriterSize(c.newLogFile, 1*1024*1024) + b.StartTimer() + for i := 0; i < b.N; i++ { + writeCommitType(c.newLog, HnswCommitType(1)) + } +} + +func BenchmarkCondensor2WriteUint64Slice(b *testing.B) { + b.StopTimer() + logger, _ := test.NewNullLogger() + c := NewMemoryCondensor(logger) + c.newLog = NewWriterSize(c.newLogFile, 1*1024*1024) + testInts := make([]uint64, 100) + for i := 0; i < 100; i++ { + testInts[i] = rand.Uint64() + } + b.StartTimer() + for i := 0; i < b.N; i++ { + writeUint64Slice(c.newLog, testInts) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/config.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/config.go new file mode 100644 index 0000000000000000000000000000000000000000..3aa005e9377ebd9dd94a99b279db8c28cbb92016 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/config.go @@ -0,0 +1,113 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| 
//                           _       _
//  __      _____  __ ___   ___  __ _| |_ ___
//  \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
//   \ V  V /  __/ (_| |\ V /| | (_| | ||  __/
//    \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
//
//  Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
//
//  CONTACT: hello@weaviate.io
//

package hnsw

import (
	"context"

	"github.com/sirupsen/logrus"
	"github.com/weaviate/weaviate/adapters/repos/db/vector/common"
	"github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer"
	"github.com/weaviate/weaviate/entities/errorcompounder"
	"github.com/weaviate/weaviate/usecases/memwatch"
	"github.com/weaviate/weaviate/usecases/monitoring"
)

// Config for a new HSNW index, this contains information that is derived
// internally, e.g. by the shard. All User-settable config is specified in
// Config.UserConfig
type Config struct {
	// internal
	RootPath                     string
	ID                           string
	MakeCommitLoggerThunk        MakeCommitLogger
	VectorForIDThunk             common.VectorForID[float32]
	MultiVectorForIDThunk        common.VectorForID[[]float32]
	TempVectorForIDThunk         common.TempVectorForID[float32]
	TempMultiVectorForIDThunk    common.TempVectorForID[[]float32]
	Logger                       logrus.FieldLogger
	DistanceProvider             distancer.Provider
	PrometheusMetrics            *monitoring.PrometheusMetrics
	AllocChecker                 memwatch.AllocChecker
	WaitForCachePrefill          bool
	FlatSearchConcurrency        int
	AcornFilterRatio             float64
	DisableSnapshots             bool
	SnapshotOnStartup            bool
	LazyLoadSegments             bool
	WriteSegmentInfoIntoFileName bool
	WriteMetadataFilesEnabled    bool

	// metadata for monitoring
	ShardName string
	ClassName string

	VisitedListPoolMaxSize int
}

// Validate checks the minimal set of required fields (id, root path, commit
// logger thunk, vector thunk, distance provider) and reports all violations
// at once via an error compounder rather than failing on the first one.
func (c Config) Validate() error {
	ec := errorcompounder.New()

	if c.ID == "" {
		ec.Addf("id cannot be empty")
	}

	if c.RootPath == "" {
		ec.Addf("rootPath cannot be empty")
	}

	if c.MakeCommitLoggerThunk == nil {
		ec.Addf("makeCommitLoggerThunk cannot be nil")
	}

	if c.VectorForIDThunk == nil {
		ec.Addf("vectorForIDThunk cannot be nil")
	}

	if c.DistanceProvider == nil {
		ec.Addf("distancerProvider cannot be nil")
	}

	return ec.ToError()
}

// NewVectorForIDThunk binds a target-vector name to a vector lookup function,
// returning a thunk that no longer needs the target vector passed explicitly.
func NewVectorForIDThunk[T float32 | []float32](targetVector string, fn func(ctx context.Context, id uint64, targetVector string) ([]T, error)) common.VectorForID[T] {
	t := common.TargetVectorForID[T]{
		TargetVector:     targetVector,
		VectorForIDThunk: fn,
	}
	return t.VectorForID
}

// NewMultiVectorForIDThunk is the multi-vector ([][]float32) counterpart of
// NewVectorForIDThunk.
func NewMultiVectorForIDThunk(targetVector string, fn func(ctx context.Context, id uint64, targetVector string) ([][]float32, error)) common.VectorForID[[]float32] {
	t := common.TargetVectorForID[[]float32]{
		TargetVector:     targetVector,
		VectorForIDThunk: fn,
	}
	return t.VectorForID
}

// NewTempVectorForIDThunk binds a target-vector name to a temp-vector lookup
// (one that writes into a caller-supplied container).
func NewTempVectorForIDThunk[T float32 | []float32](targetVector string, fn func(ctx context.Context, indexID uint64, container *common.VectorSlice, targetVector string) ([]T, error)) common.TempVectorForID[T] {
	t := common.TargetTempVectorForID[T]{
		TargetVector:         targetVector,
		TempVectorForIDThunk: fn,
	}
	return t.TempVectorForID
}

// NewTempMultiVectorForIDThunk is the multi-vector counterpart of
// NewTempVectorForIDThunk.
func NewTempMultiVectorForIDThunk(targetVector string, fn func(ctx context.Context, indexID uint64, container *common.VectorSlice, targetVector string) ([][]float32, error)) common.TempVectorForID[[]float32] {
	t := common.TargetTempVectorForID[[]float32]{
		TargetVector:         targetVector,
		TempVectorForIDThunk: fn,
	}
	return t.TempVectorForID
}
//                           _       _
//  __      _____  __ ___   ___  __ _| |_ ___
//  \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
//   \ V  V /  __/ (_| |\ V /| | (_| | ||  __/
//    \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
//
//  Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
//
//  CONTACT: hello@weaviate.io
//

package hnsw

import (
	"context"
	"testing"

	"github.com/pkg/errors"
	"github.com/stretchr/testify/assert"
	"github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer"
)

// Test_ValidConfig asserts that a fully populated config passes validation.
func Test_ValidConfig(t *testing.T) {
	err := validConfig().Validate()
	assert.Nil(t, err)
}

// Test_InValidConfig blanks out one required field at a time and asserts the
// exact validation message produced for it.
func Test_InValidConfig(t *testing.T) {
	type test struct {
		config      func() Config
		expectedErr error
	}

	tests := []test{
		{
			config: func() Config {
				v := validConfig()
				v.ID = ""
				return v
			},
			expectedErr: errors.Errorf("id cannot be empty"),
		},
		{
			config: func() Config {
				v := validConfig()
				v.RootPath = ""
				return v
			},
			expectedErr: errors.Errorf("rootPath cannot be empty"),
		},
		{
			config: func() Config {
				v := validConfig()
				v.MakeCommitLoggerThunk = nil
				return v
			},
			expectedErr: errors.Errorf("makeCommitLoggerThunk cannot be nil"),
		},
		{
			config: func() Config {
				v := validConfig()
				v.VectorForIDThunk = nil
				return v
			},
			expectedErr: errors.Errorf("vectorForIDThunk cannot be nil"),
		},
	}

	for _, test := range tests {
		t.Run(test.expectedErr.Error(), func(t *testing.T) {
			err := test.config().Validate()
			assert.Equal(t, test.expectedErr.Error(), err.Error())
		})
	}
}

// validConfig returns the minimal config that satisfies Config.Validate; the
// per-case tests mutate a copy of it.
func validConfig() Config {
	return Config{
		RootPath:              "some path",
		ID:                    "someid",
		MakeCommitLoggerThunk: func() (CommitLogger, error) { return nil, nil },
		VectorForIDThunk:      func(context.Context, uint64) ([]float32, error) { return nil, nil },
		DistanceProvider:      distancer.NewCosineDistanceProvider(),
	}
}
//                           _       _
//  __      _____  __ ___   ___  __ _| |_ ___
//  \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
//   \ V  V /  __/ (_| |\ V /| | (_| | ||  __/
//    \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
//
//  Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
//
//  CONTACT: hello@weaviate.io
//

package hnsw

import (
	"os"
	"sync/atomic"

	entcfg "github.com/weaviate/weaviate/entities/config"
	enterrors "github.com/weaviate/weaviate/entities/errors"

	"github.com/pkg/errors"
	"github.com/weaviate/weaviate/entities/schema/config"

	ent "github.com/weaviate/weaviate/entities/vectorindex/hnsw"
)

// ValidateUserConfigUpdate rejects an update that changes any immutable
// user-config parameter. Both arguments must be hnsw ent.UserConfig values;
// anything else is an error.
func ValidateUserConfigUpdate(initial, updated config.VectorIndexConfig) error {
	initialParsed, ok := initial.(ent.UserConfig)
	if !ok {
		return errors.Errorf("initial is not UserConfig, but %T", initial)
	}

	updatedParsed, ok := updated.(ent.UserConfig)
	if !ok {
		return errors.Errorf("updated is not UserConfig, but %T", updated)
	}

	immutableFields := []immutableParameter{
		{
			name:     "efConstruction",
			accessor: func(c ent.UserConfig) interface{} { return c.EFConstruction },
		},
		{
			name:     "maxConnections",
			accessor: func(c ent.UserConfig) interface{} { return c.MaxConnections },
		},
		{
			// NOTE: There isn't a technical reason for this to be immutable, it
			// simply hasn't been implemented yet. It would require to stop the
			// current timer and start a new one. Certainly possible, but let's see
			// if anyone actually needs this before implementing it.
			name:     "cleanupIntervalSeconds",
			accessor: func(c ent.UserConfig) interface{} { return c.CleanupIntervalSeconds },
		},
		{
			name:     "distance",
			accessor: func(c ent.UserConfig) interface{} { return c.Distance },
		},
		{
			name:     "multivector enabled",
			accessor: func(c ent.UserConfig) interface{} { return c.Multivector.Enabled },
		},
		{
			name:     "muvera enabled",
			accessor: func(c ent.UserConfig) interface{} { return c.Multivector.MuveraConfig.Enabled },
		},
		{
			name:     "skipDefaultQuantization",
			accessor: func(c ent.UserConfig) interface{} { return c.SkipDefaultQuantization },
		},
		{
			name:     "trackDefaultQuantization",
			accessor: func(c ent.UserConfig) interface{} { return c.TrackDefaultQuantization },
		},
	}

	for _, u := range immutableFields {
		if err := validateImmutableField(u, initialParsed, updatedParsed); err != nil {
			return err
		}
	}

	return nil
}

// immutableParameter pairs a user-facing parameter name with an accessor that
// extracts its value from a UserConfig, for generic immutability checks.
type immutableParameter struct {
	accessor func(c ent.UserConfig) interface{}
	name     string
}

// validateImmutableField errors if the parameter's value differs between the
// previous and the next config (compared via interface{} equality).
func validateImmutableField(u immutableParameter,
	previous, next ent.UserConfig,
) error {
	oldField := u.accessor(previous)
	newField := u.accessor(next)
	if oldField != newField {
		return errors.Errorf("%s is immutable: attempted change from \"%v\" to \"%v\"",
			u.name, oldField, newField)
	}

	return nil
}

// UpdateUserConfig applies a validated config update to a live index.
// The callback must be invoked exactly once on every path — either directly
// here, or deferred to the compression routine when an upgrade is kicked off.
func (h *hnsw) UpdateUserConfig(updated config.VectorIndexConfig, callback func()) error {
	parsed, ok := updated.(ent.UserConfig)
	if !ok {
		callback()
		return errors.Errorf("config is not UserConfig, but %T", updated)
	}

	// Store automatically as a lock here would be very expensive, this value is
	// read on every single user-facing search, which can be highly concurrent
	atomic.StoreInt64(&h.ef, int64(parsed.EF))
	atomic.StoreInt64(&h.efMin, int64(parsed.DynamicEFMin))
	atomic.StoreInt64(&h.efMax, int64(parsed.DynamicEFMax))
	atomic.StoreInt64(&h.efFactor, int64(parsed.DynamicEFFactor))
	atomic.StoreInt64(&h.flatSearchCutoff, int64(parsed.FlatSearchCutoff))

	h.acornSearch.Store(parsed.FilterStrategy == ent.FilterStrategyAcorn)

	// nothing compression-related requested: done
	if !parsed.PQ.Enabled && !parsed.BQ.Enabled && !parsed.SQ.Enabled && !parsed.RQ.Enabled {
		callback()
		return nil
	}

	// check if rq bits is immutable
	if h.rqConfig.Enabled && parsed.RQ.Enabled {
		if parsed.RQ.Bits != h.rqConfig.Bits {
			callback()
			return errors.Errorf("rq bits is immutable: attempted change from \"%v\" to \"%v\"",
				h.rqConfig.Bits, parsed.RQ.Bits)
		}
	}

	h.pqConfig = parsed.PQ
	h.sqConfig = parsed.SQ
	h.bqConfig = parsed.BQ
	h.rqConfig = parsed.RQ
	if asyncEnabled() {
		callback()
		return nil
	}

	if !h.compressed.Load() {
		// the compression will fire the callback once it's complete
		return h.Upgrade(callback)
	} else {
		h.compressor.SetCacheMaxSize(int64(parsed.VectorCacheMaxObjects))
		callback()
		return nil
	}
}

// asyncEnabled reports whether async indexing is switched on via the
// ASYNC_INDEXING environment variable.
func asyncEnabled() bool {
	return entcfg.Enabled(os.Getenv("ASYNC_INDEXING"))
}

// Upgrade validates the PQ/RQ configs and then launches the compression in a
// background goroutine. The callback is fired on validation failure here, or
// by compressThenCallback once compression finishes.
func (h *hnsw) Upgrade(callback func()) error {
	h.logger.WithField("action", "compress").Info("switching to compressed vectors")

	err := ent.ValidatePQConfig(h.pqConfig)
	if err != nil {
		callback()
		return err
	}

	err = ent.ValidateRQConfig(h.rqConfig)
	if err != nil {
		callback()
		return err
	}

	enterrors.GoWrapper(func() { h.compressThenCallback(callback) }, h.logger)

	return nil
}

// compressThenCallback runs the compression with the currently stored
// quantizer configs and guarantees the callback fires afterwards, success or
// not. Compression errors are logged, not returned (we're in a goroutine).
func (h *hnsw) compressThenCallback(callback func()) {
	defer callback()

	uc := ent.UserConfig{
		PQ: h.pqConfig,
		BQ: h.bqConfig,
		SQ: h.sqConfig,
		RQ: h.rqConfig,
	}
	if err := h.compress(uc); err != nil {
		h.logger.Error(err)
		return
	}
	h.logger.WithField("action", "compress").Info("vector compression complete")
}
//                           _       _
//  __      _____  __ ___   ___  __ _| |_ ___
//  \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
//   \ V  V /  __/ (_| |\ V /| | (_| | ||  __/
//    \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
//
//  Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
//
//  CONTACT: hello@weaviate.io
//

package hnsw

import (
	"testing"

	"github.com/pkg/errors"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	schemaConfig "github.com/weaviate/weaviate/entities/schema/config"
	ent "github.com/weaviate/weaviate/entities/vectorindex/hnsw"
)

// TestUserConfigUpdates table-tests ValidateUserConfigUpdate: immutable
// parameters must produce the exact documented error message, mutable ones
// must pass with a nil error.
func TestUserConfigUpdates(t *testing.T) {
	t.Run("various immutable and mutable fields", func(t *testing.T) {
		type test struct {
			name          string
			initial       schemaConfig.VectorIndexConfig
			update        schemaConfig.VectorIndexConfig
			expectedError error
		}

		tests := []test{
			{
				name:    "attempting to change ef construction",
				initial: ent.UserConfig{EFConstruction: 64},
				update:  ent.UserConfig{EFConstruction: 128},
				expectedError: errors.Errorf(
					"efConstruction is immutable: " +
						"attempted change from \"64\" to \"128\""),
			},
			{
				name:    "attempting to change max connections",
				initial: ent.UserConfig{MaxConnections: 10},
				update:  ent.UserConfig{MaxConnections: 15},
				expectedError: errors.Errorf(
					"maxConnections is immutable: " +
						"attempted change from \"10\" to \"15\""),
			},
			{
				name:    "attempting to change cleanup interval seconds",
				initial: ent.UserConfig{CleanupIntervalSeconds: 60},
				update:  ent.UserConfig{CleanupIntervalSeconds: 90},
				expectedError: errors.Errorf(
					"cleanupIntervalSeconds is immutable: " +
						"attempted change from \"60\" to \"90\""),
			},
			{
				name:    "attempting to change distance",
				initial: ent.UserConfig{Distance: "cosine"},
				update:  ent.UserConfig{Distance: "l2-squared"},
				expectedError: errors.Errorf(
					"distance is immutable: " +
						"attempted change from \"cosine\" to \"l2-squared\""),
			},
			{
				name:    "attempting to change skipDefaultQuantization",
				initial: ent.UserConfig{SkipDefaultQuantization: true},
				update:  ent.UserConfig{SkipDefaultQuantization: false},
				expectedError: errors.Errorf(
					"skipDefaultQuantization is immutable: " +
						"attempted change from \"true\" to \"false\""),
			},
			{
				name:    "attempting to change trackDefaultQuantization",
				initial: ent.UserConfig{TrackDefaultQuantization: true},
				update:  ent.UserConfig{TrackDefaultQuantization: false},
				expectedError: errors.Errorf(
					"trackDefaultQuantization is immutable: " +
						"attempted change from \"true\" to \"false\""),
			},
			{
				name: "attempting to change multivector",
				initial: ent.UserConfig{Multivector: ent.MultivectorConfig{
					Enabled: false,
				}},
				update: ent.UserConfig{Multivector: ent.MultivectorConfig{
					Enabled: true,
				}},
				expectedError: errors.Errorf(
					"multivector enabled is immutable: " +
						"attempted change from \"false\" to \"true\""),
			},
			{
				name: "attempting to change muvera",
				initial: ent.UserConfig{Multivector: ent.MultivectorConfig{
					Enabled: true,
				}},
				update: ent.UserConfig{Multivector: ent.MultivectorConfig{
					Enabled: true,
					MuveraConfig: ent.MuveraConfig{
						Enabled: true,
					},
				}},
				expectedError: errors.Errorf(
					"muvera enabled is immutable: " +
						"attempted change from \"false\" to \"true\""),
			},
			{
				name:          "changing ef",
				initial:       ent.UserConfig{EF: 100},
				update:        ent.UserConfig{EF: -1},
				expectedError: nil,
			},
			{
				name: "changing other mutable settings",
				initial: ent.UserConfig{
					VectorCacheMaxObjects: 700,
					FlatSearchCutoff:      800,
				},
				update: ent.UserConfig{
					VectorCacheMaxObjects: 730,
					FlatSearchCutoff:      830,
				},
				expectedError: nil,
			},
			{
				name: "attempting to change dynamic ef settings",
				initial: ent.UserConfig{
					DynamicEFMin:    100,
					DynamicEFMax:    200,
					DynamicEFFactor: 5,
				},
				update: ent.UserConfig{
					DynamicEFMin:    101,
					DynamicEFMax:    201,
					DynamicEFFactor: 6,
				},
				expectedError: nil,
			},
			{
				name: "setting bq compression on",
				initial: ent.UserConfig{
					BQ: ent.BQConfig{
						Enabled: false,
					},
				},
				update: ent.UserConfig{
					BQ: ent.BQConfig{
						Enabled: true,
					},
				},
				expectedError: nil,
			},
		}

		for _, test := range tests {
			t.Run(test.name, func(t *testing.T) {
				err := ValidateUserConfigUpdate(test.initial, test.update)
				if test.expectedError == nil {
					assert.Nil(t, err)
				} else {
					require.NotNil(t, err, "update validation must error")
					assert.Equal(t, test.expectedError.Error(), err.Error())
				}
			})
		}
	})
}
+// +// CONTACT: hello@weaviate.io +// + +package hnsw + +import ( + "os" + "strings" + + "github.com/pkg/errors" +) + +// CorruptCommitLogFixer helps identify potentially corrupt commit logs and +// tries to mitigate the problem +type CorruptCommitLogFixer struct{} + +func NewCorruptedCommitLogFixer() *CorruptCommitLogFixer { + return &CorruptCommitLogFixer{} +} + +// Do tries to delete files that could be corrupt and removes them from the +// returned list, indicating that the index should no longer try to read them +// +// A file is considered corrupt if it has the .condensed suffix - yet there is +// a file with the same name without that suffix. This would indicate that +// trying to condense the file has somehow failed or been interrupted, as a +// successful condensing would have been succeeded by the removal of the +// original file. We thus assume the file must be corrupted, and delete it, so +// that the original will be used instead. +func (fixer *CorruptCommitLogFixer) Do(fileNames []string) ([]string, error) { + out := make([]string, len(fileNames)) + + i := 0 + for _, fileName := range fileNames { + if !strings.HasSuffix(fileName, ".condensed") { + // has no suffix, so it can never be considered corrupt + out[i] = fileName + i++ + continue + } + + // this file has a suffix, check if one without the suffix exists as well + if !fixer.listContains(fileNames, strings.TrimSuffix(fileName, ".condensed")) { + // does not seem corrupt, proceed + out[i] = fileName + i++ + continue + } + + // we have found a corrupt file, delete it and do not append it to the list + if err := os.Remove(fileName); err != nil { + return out, errors.Wrapf(err, "delete corrupt commit log file %q", fileName) + } + } + + return out[:i], nil +} + +func (fixer *CorruptCommitLogFixer) listContains(haystack []string, + needle string, +) bool { + for _, hay := range haystack { + if hay == needle { + return true + } + } + + return false +} diff --git 
a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/datasets/neurips23/clustered_runbook.yaml b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/datasets/neurips23/clustered_runbook.yaml new file mode 100644 index 0000000000000000000000000000000000000000..368fc19e2529d9365f526ac5e0229d380ecbc302 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/datasets/neurips23/clustered_runbook.yaml @@ -0,0 +1,392 @@ +random-xs-clustered: + max_pts: 10000 + 1: + operation: "insert" + start: 0 + end: 326 + 2: + operation: "search" + 3: + operation: "insert" + start: 326 + end: 596 + 4: + operation: "search" + 5: + operation: "insert" + start: 596 + end: 945 + 6: + operation: "search" + 7: + operation: "insert" + start: 945 + end: 1323 + 8: + operation: "search" + 9: + operation: "insert" + start: 1323 + end: 1623 + 10: + operation: "search" + 11: + operation: "insert" + start: 1623 + end: 1986 + 12: + operation: "search" + 13: + operation: "insert" + start: 1986 + end: 2199 + 14: + operation: "search" + 15: + operation: "insert" + start: 2199 + end: 2576 + 16: + operation: "search" + 17: + operation: "insert" + start: 2576 + end: 2921 + 18: + operation: "search" + 19: + operation: "insert" + start: 2921 + end: 3252 + 20: + operation: "search" + 21: + operation: "insert" + start: 3252 + end: 3530 + 22: + operation: "search" + 23: + operation: "insert" + start: 3530 + end: 3866 + 24: + operation: "search" + 25: + operation: "insert" + start: 3866 + end: 4150 + 26: + operation: "search" + 27: + operation: "insert" + start: 4150 + end: 4434 + 28: + operation: "search" + 29: + operation: "insert" + start: 4434 + end: 4707 + 30: + operation: "search" + 31: + operation: "insert" + start: 4707 + end: 5073 + 32: + operation: "search" + 33: + operation: "insert" + start: 5073 + end: 5404 + 34: + operation: "search" + 35: + operation: "insert" + start: 5404 + end: 5718 + 36: + operation: "search" + 37: + operation: 
"insert" + start: 5718 + end: 6072 + 38: + operation: "search" + 39: + operation: "insert" + start: 6072 + end: 6338 + 40: + operation: "search" + 41: + operation: "insert" + start: 6338 + end: 6613 + 42: + operation: "search" + 43: + operation: "insert" + start: 6613 + end: 6908 + 44: + operation: "search" + 45: + operation: "insert" + start: 6908 + end: 7115 + 46: + operation: "search" + 47: + operation: "insert" + start: 7115 + end: 7452 + 48: + operation: "search" + 49: + operation: "insert" + start: 7452 + end: 7717 + 50: + operation: "search" + 51: + operation: "insert" + start: 7717 + end: 8065 + 52: + operation: "search" + 53: + operation: "insert" + start: 8065 + end: 8313 + 54: + operation: "search" + 55: + operation: "insert" + start: 8313 + end: 8698 + 56: + operation: "search" + 57: + operation: "insert" + start: 8698 + end: 9011 + 58: + operation: "search" + 59: + operation: "insert" + start: 9011 + end: 9307 + 60: + operation: "search" + 61: + operation: "insert" + start: 9307 + end: 9651 + 62: + operation: "search" + 63: + operation: "insert" + start: 9651 + end: 10000 + 64: + operation: "search" + gt_url: "https://comp21storage.blob.core.windows.net/publiccontainer/comp23/clustered_data/random-xs-clustered/clustered_runboook.yaml" +msturing-10M-clustered: + max_pts: 10000000 + # On Azure D8lds v5, runtime: 14 minutes + # recall@10: 0.9247 for "R":64, "L":50, "insert_threads":16, "consolidate_threads":16 "Ls":100, "T":16 + 1: + operation: "insert" + start: 0 + end: 255771 + 2: + operation: "search" + 3: + operation: "insert" + start: 255771 + end: 491965 + 4: + operation: "search" + 5: + operation: "insert" + start: 491965 + end: 824781 + 6: + operation: "search" + 7: + operation: "insert" + start: 824781 + end: 1081209 + 8: + operation: "search" + 9: + operation: "insert" + start: 1081209 + end: 1568760 + 10: + operation: "search" + 11: + operation: "insert" + start: 1568760 + end: 1959174 + 12: + operation: "search" + 13: + operation: "insert" + 
start: 1959174 + end: 2404186 + 14: + operation: "search" + 15: + operation: "insert" + start: 2404186 + end: 2798660 + 16: + operation: "search" + 17: + operation: "insert" + start: 2798660 + end: 3082959 + 18: + operation: "search" + 19: + operation: "insert" + start: 3082959 + end: 3480554 + 20: + operation: "search" + 21: + operation: "insert" + start: 3480554 + end: 3910930 + 22: + operation: "search" + 23: + operation: "insert" + start: 3910930 + end: 4194870 + 24: + operation: "search" + 25: + operation: "insert" + start: 4194870 + end: 4652840 + 26: + operation: "search" + 27: + operation: "insert" + start: 4652840 + end: 4872616 + 28: + operation: "search" + 29: + operation: "insert" + start: 4872616 + end: 5184725 + 30: + operation: "search" + 31: + operation: "insert" + start: 5184725 + end: 5629098 + 32: + operation: "search" + 33: + operation: "insert" + start: 5629098 + end: 6023119 + 34: + operation: "search" + 35: + operation: "insert" + start: 6023119 + end: 6292969 + 36: + operation: "search" + 37: + operation: "insert" + start: 6292969 + end: 6508987 + 38: + operation: "search" + 39: + operation: "insert" + start: 6508987 + end: 6767675 + 40: + operation: "search" + 41: + operation: "insert" + start: 6767675 + end: 7000498 + 42: + operation: "search" + 43: + operation: "insert" + start: 7000498 + end: 7263856 + 44: + operation: "search" + 45: + operation: "insert" + start: 7263856 + end: 7485517 + 46: + operation: "search" + 47: + operation: "insert" + start: 7485517 + end: 7739934 + 48: + operation: "search" + 49: + operation: "insert" + start: 7739934 + end: 8055691 + 50: + operation: "search" + 51: + operation: "insert" + start: 8055691 + end: 8381008 + 52: + operation: "search" + 53: + operation: "insert" + start: 8381008 + end: 8750107 + 54: + operation: "search" + 55: + operation: "insert" + start: 8750107 + end: 8942969 + 56: + operation: "search" + 57: + operation: "insert" + start: 8942969 + end: 9223315 + 58: + operation: "search" + 59: 
+ operation: "insert" + start: 9223315 + end: 9508781 + 60: + operation: "search" + 61: + operation: "insert" + start: 9508781 + end: 9722747 + 62: + operation: "search" + 63: + operation: "insert" + start: 9722747 + end: 10000000 + 64: + operation: "search" + gt_url: "https://comp21storage.blob.core.windows.net/publiccontainer/comp23/clustered_data/msturing-10M-clustered/clustered_runboook.yaml" diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/datasets/neurips23/simple_runbook.yaml b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/datasets/neurips23/simple_runbook.yaml new file mode 100644 index 0000000000000000000000000000000000000000..21986c129cc133a7001821fa0048e449bb712329 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/datasets/neurips23/simple_runbook.yaml @@ -0,0 +1,110 @@ +random-xs: + max_pts: 10000 + 1: + operation: "insert" + start: 0 + end: 10000 + 2: + operation: "search" + 3: + operation: "delete" + start: 0 + end: 5000 + 4: + operation: "search" + 5: + operation: "insert" + start: 0 + end: 5000 + 6: + operation: "search" + gt_url: "https://comp21storage.blob.core.windows.net/publiccontainer/comp23/str_gt/random10000/10000/simple_runbook.yaml" +msturing-10M: + max_pts: 10000000 + # On Azure D8lds v5 with "R":50, "L":50, "insert_threads":16, "consolidate_threads":16 + # ~28 mins run time "Ls":100, "search_threads":16, average recall@10: 0.892 + 1: + operation: "insert" + start: 0 + end: 10000000 + 2: + operation: "search" + 3: + operation: "delete" + start: 0 + end: 5000000 + 4: + operation: "search" + 5: + operation: "insert" + start: 0 + end: 5000000 + 6: + operation: "search" + gt_url: "https://comp21storage.blob.core.windows.net/publiccontainer/comp23/str_gt/MSTuringANNS/10000000/simple_runbook.yaml" +msturing-1M: + max_pts: 1000000 + # On Azure D8lds v5 with "R":50, "L":50, "insert_threads":16, "consolidate_threads":16 + # ~3.5 mins run time "Ls":300, 
"search_threads":16, average recall@10: 0.906 + # ~2 mins run time "Ls":100, "search_threads":16, average recall@10: 0.958 + 1: + operation: "insert" + start: 0 + end: 1000000 + 2: + operation: "search" + 3: + operation: "delete" + start: 0 + end: 500000 + 4: + operation: "search" + 5: + operation: "insert" + start: 0 + end: 500000 + 6: + operation: "search" + gt_url: "https://comp21storage.blob.core.windows.net/publiccontainer/comp23/str_gt/MSTuringANNS/1000000/simple_runbook.yaml" +msspacev-10M: + max_pts: 10000000 + 1: + operation: "insert" + start: 0 + end: 10000000 + 2: + operation: "search" + 3: + operation: "delete" + start: 0 + end: 5000000 + 4: + operation: "search" + 5: + operation: "insert" + start: 0 + end: 5000000 + 6: + operation: "search" + gt_url: "https://comp21storage.blob.core.windows.net/publiccontainer/comp23/str_gt/MSSPACEV1B/10000000/simple_runbook.yaml" +msspacev-1M: + max_pts: 1000000 + 1: + operation: "insert" + start: 0 + end: 1000000 + 2: + operation: "search" + 3: + operation: "delete" + start: 0 + end: 500000 + 4: + operation: "search" + 5: + operation: "insert" + start: 0 + end: 500000 + 6: + operation: "search" + gt_url: "https://comp21storage.blob.core.windows.net/publiccontainer/comp23/str_gt/MSSPACEV1B/1000000/simple_runbook.yaml" \ No newline at end of file diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/debug.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/debug.go new file mode 100644 index 0000000000000000000000000000000000000000..2bec5fc573962a3077d5dd2166d110a7a81b5f27 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/debug.go @@ -0,0 +1,227 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package hnsw + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/weaviate/weaviate/adapters/repos/db/vector/common" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/packedconn" + "github.com/weaviate/weaviate/entities/cyclemanager" + ent "github.com/weaviate/weaviate/entities/vectorindex/hnsw" +) + +// Dump to stdout for debugging purposes +func (h *hnsw) Dump(labels ...string) { + if len(labels) > 0 { + fmt.Printf("--------------------------------------------------\n") + fmt.Printf("-- %s\n", strings.Join(labels, ", ")) + } + fmt.Printf("--------------------------------------------------\n") + fmt.Printf("ID: %s\n", h.id) + fmt.Printf("Entrypoint: %d\n", h.entryPointID) + fmt.Printf("Max Level: %d\n", h.currentMaximumLayer) + fmt.Printf("Tombstones %v\n", h.tombstones) + fmt.Printf("\nNodes and Connections:\n") + for _, node := range h.nodes { + if node == nil { + continue + } + + fmt.Printf(" Node %d (level %d)\n", node.id, node.level) + iter := node.connections.Iterator() + for iter.Next() { + level, conns := iter.Current() + fmt.Printf(" Level %d: Connections: %v\n", level, conns) + } + } + + fmt.Printf("--------------------------------------------------\n") +} + +// DumpJSON to stdout for debugging purposes +func (h *hnsw) DumpJSON(labels ...string) { + dump := JSONDump{ + Labels: labels, + ID: h.id, + Entrypoint: h.entryPointID, + CurrentMaximumLayer: h.currentMaximumLayer, + Tombstones: h.tombstones, + } + for _, node := range h.nodes { + if node == nil { + continue + } + + dumpNode := JSONDumpNode{ + ID: node.id, + Level: node.level, + Connections: node.connections.GetAllLayers(), + } + dump.Nodes = append(dump.Nodes, dumpNode) + } + + out, err := json.Marshal(dump) + if err != nil { + fmt.Println(err) + } + fmt.Printf("%s\n", string(out)) +} + +type JSONDump struct { + Labels []string `json:"labels"` + ID string 
`json:"id"` + Entrypoint uint64 `json:"entrypoint"` + CurrentMaximumLayer int `json:"currentMaximumLayer"` + Tombstones map[uint64]struct{} `json:"tombstones"` + Nodes []JSONDumpNode `json:"nodes"` +} + +type JSONDumpNode struct { + ID uint64 `json:"id"` + Level int `json:"level"` + Connections [][]uint64 `json:"connections"` +} + +type JSONDumpMap struct { + Labels []string `json:"labels"` + ID string `json:"id"` + Entrypoint uint64 `json:"entrypoint"` + CurrentMaximumLayer int `json:"currentMaximumLayer"` + Tombstones map[uint64]struct{} `json:"tombstones"` + Nodes []JSONDumpNodeMap `json:"nodes"` +} + +type JSONDumpNodeMap struct { + ID uint64 `json:"id"` + Level int `json:"level"` + Connections map[int][]uint64 `json:"connections"` +} + +func NewFromJSONDump(dumpBytes []byte, vecForID common.VectorForID[float32]) (*hnsw, error) { + var dump JSONDump + err := json.Unmarshal(dumpBytes, &dump) + if err != nil { + return nil, err + } + + index, err := New(Config{ + RootPath: "doesnt-matter-as-committlogger-is-mocked-out", + ID: dump.ID, + MakeCommitLoggerThunk: MakeNoopCommitLogger, + DistanceProvider: distancer.NewCosineDistanceProvider(), + VectorForIDThunk: vecForID, + }, ent.UserConfig{ + MaxConnections: 30, + EFConstruction: 128, + }, cyclemanager.NewCallbackGroupNoop(), nil) + if err != nil { + return nil, err + } + + index.currentMaximumLayer = dump.CurrentMaximumLayer + index.entryPointID = dump.Entrypoint + index.tombstones = dump.Tombstones + + for _, n := range dump.Nodes { + connections, err := packedconn.NewWithElements(n.Connections) + if err != nil { + return nil, err + } + index.nodes[n.ID] = &vertex{ + id: n.ID, + level: n.Level, + connections: connections, + } + } + + return index, nil +} + +func NewFromJSONDumpMap(dumpBytes []byte, vecForID common.VectorForID[float32]) (*hnsw, error) { + var dump JSONDumpMap + err := json.Unmarshal(dumpBytes, &dump) + if err != nil { + return nil, err + } + + index, err := New(Config{ + RootPath: 
"doesnt-matter-as-committlogger-is-mocked-out", + ID: dump.ID, + MakeCommitLoggerThunk: MakeNoopCommitLogger, + DistanceProvider: distancer.NewCosineDistanceProvider(), + VectorForIDThunk: vecForID, + }, ent.UserConfig{ + MaxConnections: 30, + EFConstruction: 128, + }, cyclemanager.NewCallbackGroupNoop(), nil) + if err != nil { + return nil, err + } + + index.currentMaximumLayer = dump.CurrentMaximumLayer + index.entryPointID = dump.Entrypoint + index.tombstones = dump.Tombstones + + for _, n := range dump.Nodes { + connections, err := packedconn.NewWithMaxLayer(uint8(len(n.Connections) - 1)) + if err != nil { + return nil, err + } + index.nodes[n.ID] = &vertex{ + id: n.ID, + level: n.Level, + connections: connections, + } + for level, conns := range n.Connections { + index.nodes[n.ID].connections.ReplaceLayer(uint8(level), conns) + } + } + + return index, nil +} + +// was added as part of +// https://github.com/weaviate/weaviate/issues/1868 for debugging. It +// is not currently in use anywhere as it is somewhat costly, it would lock the +// entire graph and iterate over every node which would lead to disruptions in +// production. However, keeping this method around may be valuable for future +// investigations where the amount of links may be a problem. 
+func (h *hnsw) ValidateLinkIntegrity() { + h.RLock() + defer h.RUnlock() + + for i, node := range h.nodes { + if node == nil { + continue + } + + iter := node.connections.Iterator() + for iter.Next() { + level, conns := iter.Current() + m := h.maximumConnections + if level == 0 { + m = h.maximumConnectionsLayerZero + } + + if len(conns) > m { + h.logger.Warnf("node %d at level %d has %d connections", i, level, len(conns)) + } + + } + } + + h.logger.Infof("completed link integrity check") +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/delete.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/delete.go new file mode 100644 index 0000000000000000000000000000000000000000..f28b795ac5f4634ef030b3e0bd779510c7bec6d1 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/delete.go @@ -0,0 +1,895 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package hnsw + +import ( + "context" + "encoding/binary" + "fmt" + "math" + "os" + "runtime" + "runtime/debug" + "strconv" + "sync" + "sync/atomic" + "time" + + "github.com/sirupsen/logrus" + enterrors "github.com/weaviate/weaviate/entities/errors" + entsentry "github.com/weaviate/weaviate/entities/sentry" + + "github.com/pkg/errors" + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/adapters/repos/db/vector/cache" + "github.com/weaviate/weaviate/adapters/repos/db/vector/compressionhelpers" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/packedconn" + "github.com/weaviate/weaviate/entities/cyclemanager" + "github.com/weaviate/weaviate/entities/storobj" +) + +type breakCleanUpTombstonedNodesFunc func() bool + +// Delete attaches a tombstone to an item so it can be periodically cleaned up +// later and the edges reassigned +func (h *hnsw) Delete(ids ...uint64) error { + h.compressActionLock.RLock() + defer h.compressActionLock.RUnlock() + + h.deleteVsInsertLock.Lock() + defer h.deleteVsInsertLock.Unlock() + + h.deleteLock.Lock() + defer h.deleteLock.Unlock() + + before := time.Now() + defer h.metrics.TrackDelete(before, "total") + + if err := h.addTombstone(ids...); err != nil { + return err + } + + for _, id := range ids { + h.metrics.DeleteVector() + + // Adding a tombstone might not be enough in some cases, if the tombstoned + // entry was the entrypoint this might lead to issues for following inserts: + // On a nearly empty graph the entrypoint might be the only viable element to + // connect to, however, because the entrypoint itself is tombstones + // connections to it are impossible. So, unless we find a new entrypoint, + // subsequent inserts might end up isolated (without edges) in the graph. + // This is especially true if the tombstoned entrypoint is the only node in + // the graph. In this case we must reset the graph, so it acts like an empty + // one. 
Otherwise we'd insert the next id and have only one possible node to + // connect it to (the entrypoint). With that one being tombstoned, the new + // node would be guaranteed to have zero edges + + node := h.nodeByID(id) + if node == nil { + // node was already deleted/cleaned up + continue + } + + if h.getEntrypoint() == id { + beforeDeleteEP := time.Now() + defer h.metrics.TrackDelete(beforeDeleteEP, "delete_entrypoint") + + denyList := h.tombstonesAsDenyList() + if onlyNode, err := h.resetIfOnlyNode(node, denyList); err != nil { + return errors.Wrap(err, "reset index") + } else if !onlyNode { + if err := h.deleteEntrypoint(node, denyList); err != nil { + return errors.Wrap(err, "delete entrypoint") + } + } + } + } + + return nil +} + +func (h *hnsw) DeleteMulti(docIDs ...uint64) error { + before := time.Now() + defer h.metrics.TrackDelete(before, "total") + + if h.muvera.Load() { + return h.Delete(docIDs...) + } + + for _, docID := range docIDs { + h.RLock() + vecIDs := h.docIDVectors[docID] + h.RUnlock() + + for _, id := range vecIDs { + if err := h.Delete(id); err != nil { + return err + } + idBytes := make([]byte, 8) + binary.BigEndian.PutUint64(idBytes, id) + if err := h.store.Bucket(h.id + "_mv_mappings").Delete(idBytes); err != nil { + return errors.Wrap(err, fmt.Sprintf("failed to delete %s_mv_mappings from the bucket", h.id)) + } + } + h.Lock() + delete(h.docIDVectors, docID) + h.Unlock() + + } + + return nil +} + +func (h *hnsw) resetIfEmpty() (empty bool, err error) { + h.resetLock.Lock() + defer h.resetLock.Unlock() + h.Lock() + defer h.Unlock() + h.tombstoneLock.Lock() + defer h.tombstoneLock.Unlock() + + empty = func() bool { + h.shardedNodeLocks.RLock(h.entryPointID) + defer h.shardedNodeLocks.RUnlock(h.entryPointID) + + return h.isEmptyUnlocked() + }() + // It can happen that between calls of isEmptyUnlocked and resetUnlocked + // values of h.nodes will change (due to locks being RUnlocked and Locked again) + // This is acceptable in order to 
avoid long Locking of all striped locks + if empty { + h.shardedNodeLocks.LockAll() + defer h.shardedNodeLocks.UnlockAll() + + return true, h.resetUnlocked() + } + return false, nil +} + +func (h *hnsw) resetIfOnlyNode(needle *vertex, denyList helpers.AllowList) (onlyNode bool, err error) { + h.resetLock.Lock() + defer h.resetLock.Unlock() + h.Lock() + defer h.Unlock() + h.tombstoneLock.Lock() + defer h.tombstoneLock.Unlock() + + onlyNode = func() bool { + h.shardedNodeLocks.RLockAll() + defer h.shardedNodeLocks.RUnlockAll() + + return h.isOnlyNodeUnlocked(needle, denyList) + }() + // It can happen that between calls of isOnlyNodeUnlocked and resetUnlocked + // values of h.nodes will change (due to locks being RUnlocked and Locked again) + // This is acceptable in order to avoid long Locking of all striped locks + if onlyNode { + h.shardedNodeLocks.LockAll() + defer h.shardedNodeLocks.UnlockAll() + + return true, h.resetUnlocked() + } + return false, nil +} + +func (h *hnsw) resetUnlocked() error { + h.resetCtxCancel() + resetCtx, resetCtxCancel := context.WithCancel(context.Background()) + h.resetCtx = resetCtx + h.resetCtxCancel = resetCtxCancel + + h.entryPointID = 0 + h.currentMaximumLayer = 0 + h.initialInsertOnce = &sync.Once{} + h.nodes = make([]*vertex, cache.InitialSize) + h.tombstones = make(map[uint64]struct{}) + + return h.commitLog.Reset() +} + +func (h *hnsw) tombstonesAsDenyList() helpers.AllowList { + deleteList := helpers.NewAllowList() + h.tombstoneLock.Lock() + defer h.tombstoneLock.Unlock() + + tombstones := h.tombstones + for id := range tombstones { + deleteList.Insert(id) + } + + return deleteList +} + +func (h *hnsw) getEntrypoint() uint64 { + h.RLock() + defer h.RUnlock() + + return h.entryPointID +} + +func (h *hnsw) copyTombstonesToAllowList(breakCleanUpTombstonedNodes breakCleanUpTombstonedNodesFunc) (ok bool, deleteList helpers.AllowList) { + h.resetLock.Lock() + defer h.resetLock.Unlock() + + if breakCleanUpTombstonedNodes() { + return 
false, nil + } + + h.RLock() + lenOfNodes := uint64(len(h.nodes)) + h.RUnlock() + + h.tombstoneLock.Lock() + defer h.tombstoneLock.Unlock() + + deleteList = helpers.NewAllowList() + + // First of all, check if we even have enough tombstones to justify a + // cleanup. Cleaning up tombstones requires iteration over every possible + // node in the graph which also includes locking portions of the graph, so we + // want to make sure we have enough tombstones to justify the cleanup. + numberOfTombstones := int64(len(h.tombstones)) + if numberOfTombstones == 0 { + return false, nil + } + + if numberOfTombstones < minTombstonesPerCycle() { + h.logger.WithFields(logrus.Fields{ + "action": "tombstone_cleanup_skipped", + "class": h.className, + "shard": h.shardName, + "tombstones_total": numberOfTombstones, + "tombstones_min": minTombstonesPerCycle(), + "tombstones_max": maxTombstonesPerCycle(), + }).Debugf("class %s: shard %s: skipping tombstone cleanup, not enough tombstones", h.className, h.shardName) + return false, nil + } + + // If we have a very high number of tombstones, we run into scaling issues. + // Simply operations, such as copying lists require a lot of memory + // allocations, etc. + // + // In addition, the cycle would run too long, preventing feedback. With a + // hard limit like this, we do risk that we create connections that will need + // to be touched again in the next cycle, but this is a tradeoff we're + // willing to make. 
+ + elementsOnList := int64(0) + for id := range h.tombstones { + if elementsOnList >= maxTombstonesPerCycle() { + // we've reached the limit of tombstones we want to process in one + break + } + + if lenOfNodes <= id { + // we're trying to delete an id outside the possible range, nothing to do + continue + } + + deleteList.Insert(id) + elementsOnList++ + } + + return true, deleteList +} + +// CleanUpTombstonedNodes removes nodes with a tombstone and reassigns +// edges that were previously pointing to the tombstoned nodes +func (h *hnsw) CleanUpTombstonedNodes(shouldAbort cyclemanager.ShouldAbortCallback) error { + _, err := h.cleanUpTombstonedNodes(shouldAbort) + return err +} + +func (h *hnsw) cleanUpTombstonedNodes(shouldAbort cyclemanager.ShouldAbortCallback) (bool, error) { + if !h.tombstoneCleanupRunning.CompareAndSwap(false, true) { + return false, errors.New("tombstone cleanup already running") + } + defer h.tombstoneCleanupRunning.Store(false) + + h.compressActionLock.RLock() + defer h.compressActionLock.RUnlock() + defer func() { + err := recover() + if err != nil { + entsentry.Recover(err) + h.logger.WithField("panic", err).Errorf("class %s: tombstone cleanup panicked", h.className) + debug.PrintStack() + } + }() + + start := time.Now() + + h.resetLock.Lock() + resetCtx := h.resetCtx + h.resetLock.Unlock() + + breakCleanUpTombstonedNodes := func() bool { + return resetCtx.Err() != nil || shouldAbort() + } + + executed := false + ok, deleteList := h.copyTombstonesToAllowList(breakCleanUpTombstonedNodes) + if !ok { + return executed, nil + } + + h.metrics.StartCleanup(tombstoneDeletionConcurrency()) + defer h.metrics.EndCleanup(tombstoneDeletionConcurrency()) + + h.metrics.SetTombstoneDeleteListSize(deleteList.Len()) + + h.tombstoneLock.Lock() + total_tombstones := len(h.tombstones) + h.tombstoneLock.Unlock() + + h.logger.WithFields(logrus.Fields{ + "action": "tombstone_cleanup_begin", + "class": h.className, + "shard": h.shardName, + 
"tombstones_in_cycle": deleteList.Len(), + "tombstones_total": total_tombstones, + }).Infof("class %s: shard %s: starting tombstone cleanup", h.className, h.shardName) + + h.metrics.StartTombstoneCycle() + + executed = true + if ok, err := h.reassignNeighborsOf(h.shutdownCtx, deleteList, breakCleanUpTombstonedNodes); err != nil { + return executed, err + } else if !ok { + return executed, nil + } + h.reassignNeighbor(h.shutdownCtx, h.getEntrypoint(), deleteList, breakCleanUpTombstonedNodes, nil) + + if ok, err := h.replaceDeletedEntrypoint(deleteList, breakCleanUpTombstonedNodes); err != nil { + return executed, err + } else if !ok { + return executed, nil + } + + if ok, err := h.removeTombstonesAndNodes(deleteList, breakCleanUpTombstonedNodes); err != nil { + return executed, err + } else if !ok { + return executed, nil + } + + if _, err := h.resetIfEmpty(); err != nil { + return executed, err + } + + if executed { + took := time.Since(start) + h.logger.WithFields(logrus.Fields{ + "action": "tombstone_cleanup_complete", + "class": h.className, + "shard": h.shardName, + "tombstones_in_cycle": deleteList.Len(), + "tombstones_total": total_tombstones, + "duration": took, + }).Infof("class %s: shard %s: completed tombstone cleanup in %s", h.className, h.shardName, took) + } + + h.metrics.EndTombstoneCycle() + + return executed, nil +} + +func (h *hnsw) replaceDeletedEntrypoint(deleteList helpers.AllowList, breakCleanUpTombstonedNodes breakCleanUpTombstonedNodesFunc) (ok bool, err error) { + h.resetLock.Lock() + defer h.resetLock.Unlock() + + if breakCleanUpTombstonedNodes() { + return false, nil + } + + it := deleteList.Iterator() + for id, ok := it.Next(); ok; id, ok = it.Next() { + if h.getEntrypoint() == id { + // this a special case because: + // + // 1. we need to find a new entrypoint, if this is the last point on this + // level, we need to find an entrypoint on a lower level + // 2. there is a risk that this is the only node in the entire graph. 
In + // this case we must reset the graph + h.shardedNodeLocks.RLock(id) + node := h.nodes[id] + h.shardedNodeLocks.RUnlock(id) + + if err := h.deleteEntrypoint(node, deleteList); err != nil { + return false, errors.Wrap(err, "delete entrypoint") + } + } + } + + return true, nil +} + +func maxTombstonesPerCycle() int64 { + if v := os.Getenv("TOMBSTONE_DELETION_MAX_PER_CYCLE"); v != "" { + asInt, err := strconv.Atoi(v) + if err == nil && asInt > 0 { + return int64(asInt) + } + } + return math.MaxInt64 +} + +func minTombstonesPerCycle() int64 { + if v := os.Getenv("TOMBSTONE_DELETION_MIN_PER_CYCLE"); v != "" { + asInt, err := strconv.Atoi(v) + if err == nil && asInt > 0 { + return int64(asInt) + } + } + return 0 +} + +func tombstoneDeletionConcurrency() int { + if v := os.Getenv("TOMBSTONE_DELETION_CONCURRENCY"); v != "" { + asInt, err := strconv.Atoi(v) + if err == nil && asInt > 0 { + return asInt + } + } + concurrency := runtime.GOMAXPROCS(0) / 2 + if concurrency == 0 { + return 1 + } + return concurrency +} + +func (h *hnsw) reassignNeighborsOf(ctx context.Context, deleteList helpers.AllowList, + breakCleanUpTombstonedNodes breakCleanUpTombstonedNodesFunc, +) (ok bool, err error) { + h.RLock() + size := len(h.nodes) + h.RUnlock() + + g, ctx := enterrors.NewErrorGroupWithContextWrapper(h.logger, ctx) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + ch := make(chan uint64) + var cancelled atomic.Bool + + processedIDs := &sync.Map{} + + for i := 0; i < tombstoneDeletionConcurrency(); i++ { + g.Go(func() error { + for { + if breakCleanUpTombstonedNodes() { + cancelled.Store(true) + cancel() + return nil + } + select { + case <-ctx.Done(): + return nil + case deletedID, ok := <-ch: + if !ok { + return nil + } + // Check if already COMPLETED processing + if _, alreadyProcessed := processedIDs.Load(deletedID); alreadyProcessed { + continue + } + + h.shardedNodeLocks.RLock(deletedID) + if deletedID >= uint64(size) || deletedID >= uint64(len(h.nodes)) || 
h.nodes[deletedID] == nil { + h.shardedNodeLocks.RUnlock(deletedID) + continue + } + h.shardedNodeLocks.RUnlock(deletedID) + h.resetLock.RLock() + if h.getEntrypoint() != deletedID { + if _, err := h.reassignNeighbor(ctx, deletedID, deleteList, breakCleanUpTombstonedNodes, processedIDs); err != nil { + h.logger.WithError(err).WithField("action", "hnsw_tombstone_cleanup_error"). + Errorf("class %s: shard %s: reassign neighbor", h.className, h.shardName) + } + } + h.resetLock.RUnlock() + } + } + }) + } + +LOOP: + for i := 0; i < size; i++ { + if breakCleanUpTombstonedNodes() { + cancelled.Store(true) + cancel() + } + select { + case ch <- uint64(i): + if i%1000 == 0 { + // updating the metric has virtually no cost, so we can do it every 1k + h.metrics.TombstoneCycleProgress(float64(i) / float64(size)) + } + if i%1_000_000 == 0 { + // the interval of 1M is rather arbitrary, but if we have less than 1M + // nodes in the graph tombstones cleanup should be so fast, we don't + // need to log the progress. + h.logger.WithFields(logrus.Fields{ + "action": "tombstone_cleanup_progress", + "class": h.className, + "shard": h.shardName, + "total_nodes": size, + "processed_nodes": i, + }). + Debugf("class %s: shard %s: %d/%d nodes processed", h.className, h.shardName, i, size) + } + case <-ctx.Done(): + // before https://github.com/weaviate/weaviate/issues/4615 the context + // would not be canceled if a routine panicked. However, with the fix, it is + // now valid to wait for a cancelation – even if a panic occurs. 
+ break LOOP + } + } + + close(ch) + + err = g.Wait() + if errors.Is(err, context.Canceled) { + h.logger.Errorf("class %s: tombstone cleanup canceled", h.className) + return false, nil + } + return !cancelled.Load(), err +} + +func (h *hnsw) reassignNeighbor( + ctx context.Context, + neighbor uint64, + deleteList helpers.AllowList, + breakCleanUpTombstonedNodes breakCleanUpTombstonedNodesFunc, + processedIDs *sync.Map, +) (ok bool, err error) { + if breakCleanUpTombstonedNodes() { + return false, nil + } + + h.metrics.TombstoneReassignNeighbor() + + h.RLock() + h.shardedNodeLocks.RLock(neighbor) + if neighbor >= uint64(len(h.nodes)) { + h.shardedNodeLocks.RUnlock(neighbor) + h.RUnlock() + return true, nil + } + neighborNode := h.nodes[neighbor] + h.shardedNodeLocks.RUnlock(neighbor) + currentMaximumLayer := h.currentMaximumLayer + h.RUnlock() + + if neighborNode == nil || deleteList.Contains(neighborNode.id) { + return true, nil + } + + var neighborVec []float32 + var compressorDistancer compressionhelpers.CompressorDistancer + if h.compressed.Load() { + compressorDistancer, err = h.compressor.NewDistancerFromID(neighbor) + } else { + neighborVec, err = h.cache.Get(context.Background(), neighbor) + } + + if err != nil { + var e storobj.ErrNotFound + if errors.As(err, &e) { + h.handleDeletedNode(e.DocID, "reassignNeighbor") + return true, nil + } else { + // not a typed error, we can recover from, return with err + return false, errors.Wrap(err, "get neighbor vec") + } + } + neighborNode.Lock() + neighborLevel := neighborNode.level + if !connectionsPointTo(neighborNode.connections, deleteList) { + // nothing needs to be changed, skip + neighborNode.Unlock() + return true, nil + } + neighborNode.Unlock() + + neighborNode.markAsMaintenance() + + // the new recursive implementation no longer needs an entrypoint, so we can + // just pass this dummy value to make the neighborFinderConnector happy + dummyEntrypoint := uint64(0) + if err := h.reconnectNeighboursOf(ctx, 
neighborNode, dummyEntrypoint, neighborVec, compressorDistancer, + neighborLevel, currentMaximumLayer, deleteList, processedIDs); err != nil { + return false, errors.Wrap(err, "find and connect neighbors") + } + neighborNode.unmarkAsMaintenance() + + h.metrics.CleanedUp() + return true, nil +} + +func connectionsPointTo(connections *packedconn.Connections, needles helpers.AllowList) bool { + iter := connections.Iterator() + for iter.Next() { + _, atLevel := iter.Current() + for _, pointer := range atLevel { + if needles.Contains(pointer) { + return true + } + } + } + + return false +} + +// deleteEntrypoint deletes the current entrypoint and replaces it with a new +// one. It respects the attached denyList, so that it doesn't assign another +// node which also has a tombstone and is also in the process of being cleaned +// up +func (h *hnsw) deleteEntrypoint(node *vertex, denyList helpers.AllowList) error { + if h.isOnlyNode(node, denyList) { + // no point in finding another entrypoint if this is the only node + return nil + } + + node.Lock() + level := node.level + id := node.id + node.Unlock() + + newEntrypoint, level, ok := h.findNewGlobalEntrypoint(denyList, level, id) + if !ok { + return nil + } + + h.Lock() + h.entryPointID = newEntrypoint + h.currentMaximumLayer = level + h.Unlock() + if err := h.commitLog.SetEntryPointWithMaxLayer(newEntrypoint, level); err != nil { + return err + } + + return nil +} + +// returns entryPointID, level and whether a change occurred +func (h *hnsw) findNewGlobalEntrypoint(denyList helpers.AllowList, targetLevel int, + oldEntrypoint uint64, +) (uint64, int, bool) { + if h.getEntrypoint() != oldEntrypoint { + // entrypoint has already been changed (this could be due to a new import + // for example, nothing to do for us + return 0, 0, false + } + + h.metrics.TombstoneFindGlobalEntrypoint() + + for l := targetLevel; l >= 0; l-- { + // ideally we can find a new entrypoint at the same level of the + // to-be-deleted node. 
However, there is a chance it was the only node on + // that level, in that case we need to look at the next lower level for a + // better candidate + + h.RLock() + maxNodes := len(h.nodes) + h.RUnlock() + + for i := 0; i < maxNodes; i++ { + if h.getEntrypoint() != oldEntrypoint { + // entrypoint has already been changed (this could be due to a new import + // for example, nothing to do for us + return 0, 0, false + } + + if denyList.Contains(uint64(i)) { + continue + } + + h.shardedNodeLocks.RLock(uint64(i)) + candidate := h.nodes[i] + h.shardedNodeLocks.RUnlock(uint64(i)) + + if candidate == nil { + continue + } + + candidate.Lock() + candidateLevel := candidate.level + candidate.Unlock() + + if candidateLevel != l { + // not reaching up to the current level, skip in hope of finding another candidate + continue + } + + // we have a node that matches + return uint64(i), l, true + } + } + + if h.isEmpty() { + return 0, 0, false + } + + if h.isOnlyNode(&vertex{id: oldEntrypoint}, denyList) { + return 0, 0, false + } + + // we made it through the entire graph and didn't find a new entrypoint all + // the way down to level 0. This can only mean the graph is empty, which is + // unexpected. This situation should have been prevented by the deleteLock. + panic(fmt.Sprintf( + "class %s: shard %s: findNewEntrypoint called on an empty hnsw graph", + h.className, h.shardName)) +} + +// returns entryPointID, level and whether a change occurred +func (h *hnsw) findNewLocalEntrypoint(denyList helpers.AllowList, oldEntrypoint uint64) (uint64, error) { + if entryPointID := h.getEntrypoint(); entryPointID != oldEntrypoint { + // the current global entrypoint is different from our local entrypoint, so + // we can just use the global one, as the global one is guaranteed to be + // present on every level, i.e. 
it is always chosen from the highest + // currently available level + return entryPointID, nil + } + + h.metrics.TombstoneFindLocalEntrypoint() + + h.RLock() + maxNodes := len(h.nodes) + targetLevel := h.currentMaximumLayer + h.RUnlock() + + for l := targetLevel; l >= 0; l-- { + // ideally we can find a new entrypoint at the same level of the + // to-be-deleted node. However, there is a chance it was the only node on + // that level, in that case we need to look at the next lower level for a + // better candidate + for i := 0; i < maxNodes; i++ { + if denyList.Contains(uint64(i)) { + continue + } + + h.shardedNodeLocks.RLock(uint64(i)) + candidate := h.nodes[i] + h.shardedNodeLocks.RUnlock(uint64(i)) + + if candidate == nil { + continue + } + + candidate.Lock() + candidateLevel := candidate.level + candidate.Unlock() + + if candidateLevel != l { + // not reaching up to the current level, skip in hope of finding another candidate + continue + } + + // we have a node that matches + return uint64(i), nil + } + } + + if h.isEmpty() { + return 0, nil + } + + if h.isOnlyNode(&vertex{id: oldEntrypoint}, denyList) { + return 0, nil + } + + return 0, fmt.Errorf("class %s: shard %s: findNewLocalEntrypoint called on an empty hnsw graph", h.className, h.shardName) +} + +func (h *hnsw) isOnlyNode(needle *vertex, denyList helpers.AllowList) bool { + h.RLock() + h.shardedNodeLocks.RLockAll() + defer h.RUnlock() + defer h.shardedNodeLocks.RUnlockAll() + + return h.isOnlyNodeUnlocked(needle, denyList) +} + +func (h *hnsw) isOnlyNodeUnlocked(needle *vertex, denyList helpers.AllowList) bool { + for _, node := range h.nodes { + if node == nil || node.id == needle.id || denyList.Contains(node.id) || node.connections.Layers() == 0 { + continue + } + return false + } + return true +} + +func (h *hnsw) hasTombstone(id uint64) bool { + h.tombstoneLock.RLock() + defer h.tombstoneLock.RUnlock() + _, ok := h.tombstones[id] + return ok +} + +// hasTombstones checks whether at least one node of 
the provided ids has a tombstone attached. +func (h *hnsw) hasTombstones(ids []uint64) bool { + h.tombstoneLock.RLock() + defer h.tombstoneLock.RUnlock() + + var has bool + for _, id := range ids { + _, ok := h.tombstones[id] + has = has || ok + } + return has +} + +func (h *hnsw) addTombstone(ids ...uint64) error { + h.tombstoneLock.Lock() + defer h.tombstoneLock.Unlock() + + if h.tombstones == nil { + h.tombstones = map[uint64]struct{}{} + } + + for _, id := range ids { + h.metrics.AddTombstone() + h.tombstones[id] = struct{}{} + if err := h.commitLog.AddTombstone(id); err != nil { + return err + } + } + return nil +} + +func (h *hnsw) removeTombstonesAndNodes(deleteList helpers.AllowList, breakCleanUpTombstonedNodes breakCleanUpTombstonedNodesFunc) (ok bool, err error) { + it := deleteList.Iterator() + for id, ok := it.Next(); ok; id, ok = it.Next() { + if breakCleanUpTombstonedNodes() { + return false, nil + } + h.metrics.RemoveTombstone() + h.tombstoneLock.Lock() + delete(h.tombstones, id) + h.tombstoneLock.Unlock() + + h.resetLock.Lock() + h.shardedNodeLocks.Lock(id) + if uint64(len(h.nodes)) > id { + h.nodes[id] = nil + } + h.shardedNodeLocks.Unlock(id) + if h.compressed.Load() { + h.compressor.Delete(context.TODO(), id) + } else { + h.cache.Delete(context.TODO(), id) + } + if h.muvera.Load() { + idBytes := make([]byte, 8) + binary.BigEndian.PutUint64(idBytes, id) + if err := h.store.Bucket(h.id + "_muvera_vectors").Delete(idBytes); err != nil { + h.logger.WithFields(logrus.Fields{ + "action": "muvera_delete", + "id": id, + }).WithError(err). 
+ Warnf("cannot delete vector from muvera bucket") + } + } + if err := h.commitLog.DeleteNode(id); err != nil { + h.resetLock.Unlock() + return false, err + } + h.resetLock.Unlock() + + if err := h.commitLog.RemoveTombstone(id); err != nil { + return false, err + } + } + + return true, nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/delete_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/delete_test.go new file mode 100644 index 0000000000000000000000000000000000000000..abca7def0c86c640ce3da969dc1cc80e7b1136d0 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/delete_test.go @@ -0,0 +1,2084 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package hnsw + +import ( + "context" + "fmt" + "math/rand" + "os" + "sort" + "sync" + "testing" + "time" + + "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv" + "github.com/weaviate/weaviate/adapters/repos/db/vector/common" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/packedconn" + "github.com/weaviate/weaviate/adapters/repos/db/vector/testinghelpers" + "github.com/weaviate/weaviate/entities/cyclemanager" + "github.com/weaviate/weaviate/entities/storobj" + ent "github.com/weaviate/weaviate/entities/vectorindex/hnsw" + "github.com/weaviate/weaviate/usecases/monitoring" +) + +func TempVectorForIDThunk(vectors [][]float32) func(context.Context, uint64, *common.VectorSlice) 
([]float32, error) { + return func(ctx context.Context, id uint64, container *common.VectorSlice) ([]float32, error) { + copy(container.Slice, vectors[int(id)]) + return vectors[int(id)], nil + } +} + +func TestDelete_WithoutCleaningUpTombstones(t *testing.T) { + vectors := vectorsForDeleteTest() + ctx := context.Background() + var vectorIndex *hnsw + + store := testinghelpers.NewDummyStore(t) + defer store.Shutdown(context.Background()) + t.Run("import the test vectors", func(t *testing.T) { + index, err := New(Config{ + RootPath: "doesnt-matter-as-committlogger-is-mocked-out", + ID: "delete-test", + MakeCommitLoggerThunk: MakeNoopCommitLogger, + DistanceProvider: distancer.NewCosineDistanceProvider(), + VectorForIDThunk: func(ctx context.Context, id uint64) ([]float32, error) { + return vectors[int(id)], nil + }, + TempVectorForIDThunk: TempVectorForIDThunk(vectors), + }, ent.UserConfig{ + MaxConnections: 30, + EFConstruction: 128, + + // The actual size does not matter for this test, but if it defaults to + // zero it will constantly think it's full and needs to be deleted - even + // after just being deleted, so make sure to use a positive number here. 
+ VectorCacheMaxObjects: 100000, + }, cyclemanager.NewCallbackGroupNoop(), store) + require.Nil(t, err) + vectorIndex = index + + for i, vec := range vectors { + err := vectorIndex.Add(ctx, uint64(i), vec) + require.Nil(t, err) + } + }) + + var control []uint64 + + t.Run("vectors are cached correctly", func(t *testing.T) { + assert.Equal(t, len(vectors), int(vectorIndex.cache.CountVectors())) + }) + + t.Run("doing a control search before delete with the respective allow list", func(t *testing.T) { + allowList := helpers.NewAllowList() + for i := range vectors { + if i%2 == 0 { + continue + } + + allowList.Insert(uint64(i)) + } + + res, _, err := vectorIndex.SearchByVector(ctx, []float32{0.1, 0.1, 0.1}, 20, allowList) + require.Nil(t, err) + require.True(t, len(res) > 0) + control = res + }) + + t.Run("deleting every even element", func(t *testing.T) { + for i := range vectors { + if i%2 != 0 { + continue + } + + err := vectorIndex.Delete(uint64(i)) + require.Nil(t, err) + } + }) + + t.Run("start a search that should only contain the remaining elements", func(t *testing.T) { + res, _, err := vectorIndex.SearchByVector(ctx, []float32{0.1, 0.1, 0.1}, 20, nil) + require.Nil(t, err) + require.True(t, len(res) > 0) + + for _, elem := range res { + if elem%2 == 0 { + t.Errorf("search result contained an even element: %d", elem) + } + } + + assert.Equal(t, control, res) + }) + + t.Run("destroy the index", func(t *testing.T) { + require.Nil(t, vectorIndex.Drop(context.Background())) + }) + + t.Run("vector cache holds no vectors", func(t *testing.T) { + assert.Equal(t, 0, int(vectorIndex.cache.CountVectors())) + }) +} + +func TestDelete_WithCleaningUpTombstonesOnce(t *testing.T) { + // there is a single bulk clean event after all the deletes + vectors := vectorsForDeleteTest() + ctx := context.Background() + var vectorIndex *hnsw + + store := testinghelpers.NewDummyStore(t) + defer store.Shutdown(context.Background()) + + t.Run("import the test vectors", func(t *testing.T) { 
+ index, err := New(Config{ + RootPath: "doesnt-matter-as-committlogger-is-mocked-out", + ID: "delete-test", + MakeCommitLoggerThunk: MakeNoopCommitLogger, + DistanceProvider: distancer.NewCosineDistanceProvider(), + VectorForIDThunk: func(ctx context.Context, id uint64) ([]float32, error) { + return vectors[int(id)], nil + }, + TempVectorForIDThunk: TempVectorForIDThunk(vectors), + }, ent.UserConfig{ + MaxConnections: 30, + EFConstruction: 128, + + // The actual size does not matter for this test, but if it defaults to + // zero it will constantly think it's full and needs to be deleted - even + // after just being deleted, so make sure to use a positive number here. + VectorCacheMaxObjects: 100000, + }, cyclemanager.NewCallbackGroupNoop(), store) + require.Nil(t, err) + vectorIndex = index + + for i, vec := range vectors { + err := vectorIndex.Add(ctx, uint64(i), vec) + require.Nil(t, err) + } + }) + + var control []uint64 + var bfControl []uint64 + + t.Run("doing a control search before delete with the respective allow list", func(t *testing.T) { + allowList := helpers.NewAllowList() + for i := range vectors { + if i%2 == 0 { + continue + } + + allowList.Insert(uint64(i)) + } + + res, _, err := vectorIndex.SearchByVector(ctx, []float32{0.1, 0.1, 0.1}, 20, allowList) + require.Nil(t, err) + require.True(t, len(res) > 0) + require.Len(t, res, 20) + control = res + }) + + t.Run("brute force control", func(t *testing.T) { + bf := bruteForceCosine(vectors, []float32{0.1, 0.1, 0.1}, 100) + bfControl = make([]uint64, len(bf)) + i := 0 + for _, elem := range bf { + if elem%2 == 0 { + continue + } + + bfControl[i] = elem + i++ + } + + if i > 20 { + i = 20 + } + + bfControl = bfControl[:i] + assert.Equal(t, bfControl, control, "control should match bf control") + }) + + fmt.Printf("entrypoint before %d\n", vectorIndex.entryPointID) + t.Run("deleting every even element", func(t *testing.T) { + for i := range vectors { + if i%2 != 0 { + continue + } + + err := 
vectorIndex.Delete(uint64(i)) + require.Nil(t, err) + } + }) + + t.Run("running the cleanup", func(t *testing.T) { + err := vectorIndex.CleanUpTombstonedNodes(neverStop) + require.Nil(t, err) + }) + + t.Run("start a search that should only contain the remaining elements", func(t *testing.T) { + res, _, err := vectorIndex.SearchByVector(ctx, []float32{0.1, 0.1, 0.1}, 20, nil) + require.Nil(t, err) + require.True(t, len(res) > 0) + + for _, elem := range res { + if elem%2 == 0 { + t.Errorf("search result contained an even element: %d", elem) + } + } + + assert.Equal(t, control, res) + }) + + t.Run("verify the graph no longer has any tombstones", func(t *testing.T) { + assert.Len(t, vectorIndex.tombstones, 0) + }) + + t.Run("destroy the index", func(t *testing.T) { + require.Nil(t, vectorIndex.Drop(context.Background())) + }) +} + +func TestDelete_WithCleaningUpTombstonesTwiceConcurrently(t *testing.T) { + // there is a single bulk clean event after all the deletes + vectors := vectorsForDeleteTest() + ctx := context.Background() + var vectorIndex *hnsw + + store := testinghelpers.NewDummyStore(t) + defer store.Shutdown(context.Background()) + + t.Run("import the test vectors", func(t *testing.T) { + index, err := New(Config{ + RootPath: "doesnt-matter-as-committlogger-is-mocked-out", + ID: "delete-test", + MakeCommitLoggerThunk: MakeNoopCommitLogger, + DistanceProvider: distancer.NewCosineDistanceProvider(), + VectorForIDThunk: func(ctx context.Context, id uint64) ([]float32, error) { + return vectors[int(id)], nil + }, + TempVectorForIDThunk: TempVectorForIDThunk(vectors), + }, ent.UserConfig{ + MaxConnections: 30, + EFConstruction: 128, + + // The actual size does not matter for this test, but if it defaults to + // zero it will constantly think it's full and needs to be deleted - even + // after just being deleted, so make sure to use a positive number here. 
+ VectorCacheMaxObjects: 100000, + }, cyclemanager.NewCallbackGroupNoop(), store) + require.Nil(t, err) + vectorIndex = index + + for i, vec := range vectors { + err := vectorIndex.Add(ctx, uint64(i), vec) + require.Nil(t, err) + } + }) + + t.Run("deleting every even element", func(t *testing.T) { + for i := range vectors { + if i%2 != 0 { + continue + } + + err := vectorIndex.Delete(uint64(i)) + require.Nil(t, err) + } + }) + + t.Run("running two cleanups concurrently", func(t *testing.T) { + var wg sync.WaitGroup + results := make(chan error, 2) + + for i := 0; i < 2; i++ { + wg.Add(1) + go func() { + defer wg.Done() + err := vectorIndex.CleanUpTombstonedNodes(neverStop) + results <- err + }() + } + + wg.Wait() + close(results) + + var errors []error + for err := range results { + errors = append(errors, err) + } + + require.Len(t, errors, 2, "Expected exactly two results") + + successCount := 0 + alreadyRunningCount := 0 + for _, err := range errors { + if err == nil { + successCount++ + } else if err.Error() == "tombstone cleanup already running" { + alreadyRunningCount++ + } else { + t.Errorf("Unexpected error: %v", err) + } + } + + // There is a possibility the first cleanup completes before the second one starts + assert.GreaterOrEqual(t, successCount, 1, "Expected at least one successful cleanup") + assert.LessOrEqual(t, alreadyRunningCount, 1, "Expected at most one 'already running' error") + stats, err := vectorIndex.Stats() + require.Nil(t, err) + assert.Equal(t, 0, stats.NumTombstones, "Expected no tombstones after cleanup") + }) + + t.Run("destroy the index", func(t *testing.T) { + require.Nil(t, vectorIndex.Drop(context.Background())) + }) +} + +func TestDelete_WithConcurrentEntrypointDeletionAndTombstoneCleanup(t *testing.T) { + var vectors [][]float32 + for i := 0; i < 1000; i++ { + vectors = append(vectors, []float32{rand.Float32(), rand.Float32(), rand.Float32()}) + } + var vectorIndex *hnsw + store := testinghelpers.NewDummyStore(t) + ctx := 
context.Background() + defer store.Shutdown(context.Background()) + + t.Run("import the test vectors", func(t *testing.T) { + index, err := New(Config{ + RootPath: "doesnt-matter-as-committlogger-is-mocked-out", + ID: "delete-test", + MakeCommitLoggerThunk: MakeNoopCommitLogger, + DistanceProvider: distancer.NewCosineDistanceProvider(), + VectorForIDThunk: func(ctx context.Context, id uint64) ([]float32, error) { + return vectors[int(id)], nil + }, + TempVectorForIDThunk: TempVectorForIDThunk(vectors), + }, ent.UserConfig{ + MaxConnections: 30, + EFConstruction: 128, + VectorCacheMaxObjects: 100000, + }, cyclemanager.NewCallbackGroupNoop(), store) + require.Nil(t, err) + vectorIndex = index + vectorIndex.logger = logrus.New() + + for i, vec := range vectors { + err := vectorIndex.Add(ctx, uint64(i), vec) + require.Nil(t, err) + } + }) + + t.Run("concurrent entrypoint deletion and tombstone cleanup", func(t *testing.T) { + var wg sync.WaitGroup + wg.Add(2) + + go func() { + defer wg.Done() + err := vectorIndex.CleanUpTombstonedNodes(neverStop) + require.Nil(t, err) + }() + + go func() { + defer wg.Done() + for i := range vectors { + err := vectorIndex.Delete(uint64(i)) + require.Nil(t, err) + } + }() + + wg.Wait() + }) + + t.Run("inserting more vectors (with largers ids)", func(t *testing.T) { + for i, vec := range vectors { + err := vectorIndex.Add(ctx, uint64(len(vectors)+i), vec) + require.Nil(t, err) + } + }) + + t.Run("concurrent entrypoint deletion and tombstone cleanup", func(t *testing.T) { + var wg sync.WaitGroup + wg.Add(2) + + go func() { + defer wg.Done() + err := vectorIndex.CleanUpTombstonedNodes(neverStop) + require.Nil(t, err) + }() + + go func() { + defer wg.Done() + for i := range vectors { + err := vectorIndex.Delete(uint64(len(vectors) + i)) + require.Nil(t, err) + } + }() + + wg.Wait() + }) + + t.Run("final tombstone cleanup", func(t *testing.T) { + err := vectorIndex.CleanUpTombstonedNodes(neverStop) + require.Nil(t, err) + }) + + t.Run("verify 
the graph no longer has any tombstones", func(t *testing.T) { + vectorIndex.tombstoneLock.Lock() + defer vectorIndex.tombstoneLock.Unlock() + + assert.Len(t, vectorIndex.tombstones, 0) + }) + + t.Run("destroy the index", func(t *testing.T) { + require.Nil(t, vectorIndex.Drop(context.Background())) + }) +} + +func TestDelete_WithCleaningUpTombstonesInBetween(t *testing.T) { + // there is a single bulk clean event after all the deletes + vectors := vectorsForDeleteTest() + var vectorIndex *hnsw + store := testinghelpers.NewDummyStore(t) + defer store.Shutdown(context.Background()) + ctx := context.Background() + + t.Run("import the test vectors", func(t *testing.T) { + index, err := New(Config{ + RootPath: "doesnt-matter-as-committlogger-is-mocked-out", + ID: "delete-test", + MakeCommitLoggerThunk: MakeNoopCommitLogger, + DistanceProvider: distancer.NewCosineDistanceProvider(), + VectorForIDThunk: func(ctx context.Context, id uint64) ([]float32, error) { + return vectors[int(id)], nil + }, + TempVectorForIDThunk: TempVectorForIDThunk(vectors), + }, ent.UserConfig{ + MaxConnections: 30, + EFConstruction: 128, + + // The actual size does not matter for this test, but if it defaults to + // zero it will constantly think it's full and needs to be deleted - even + // after just being deleted, so make sure to use a positive number here. + VectorCacheMaxObjects: 100000, + }, cyclemanager.NewCallbackGroupNoop(), store) + // makes sure index is build only with level 0. 
To be removed after fixing WEAVIATE-179 + index.randFunc = func() float64 { return 0.1 } + + require.Nil(t, err) + vectorIndex = index + + for i, vec := range vectors { + err := vectorIndex.Add(ctx, uint64(i), vec) + require.Nil(t, err) + } + }) + + var control []uint64 + + t.Run("doing a control search before delete with the respective allow list", func(t *testing.T) { + allowList := helpers.NewAllowList() + for i := range vectors { + if i%2 == 0 { + continue + } + + allowList.Insert(uint64(i)) + } + + res, _, err := vectorIndex.SearchByVector(ctx, []float32{0.1, 0.1, 0.1}, 20, allowList) + require.Nil(t, err) + require.True(t, len(res) > 0) + + control = res + }) + + t.Run("deleting every even element", func(t *testing.T) { + for i := range vectors { + if i%10 == 0 { + // occasionally run clean up + err := vectorIndex.CleanUpTombstonedNodes(neverStop) + require.Nil(t, err) + } + + if i%2 != 0 { + continue + } + + err := vectorIndex.Delete(uint64(i)) + require.Nil(t, err) + } + + // finally run one final cleanup + err := vectorIndex.CleanUpTombstonedNodes(neverStop) + require.Nil(t, err) + }) + + t.Run("start a search that should only contain the remaining elements", func(t *testing.T) { + res, _, err := vectorIndex.SearchByVector(ctx, []float32{0.1, 0.1, 0.1}, 20, nil) + require.Nil(t, err) + require.True(t, len(res) > 0) + + for _, elem := range res { + if elem%2 == 0 { + t.Errorf("search result contained an even element: %d", elem) + } + } + + assert.Equal(t, control, res) + }) + + t.Run("verify the graph no longer has any tombstones", func(t *testing.T) { + assert.Len(t, vectorIndex.tombstones, 0) + }) + + t.Run("delete the remaining elements", func(t *testing.T) { + for i := range vectors { + if i%2 == 0 { + continue + } + + err := vectorIndex.Delete(uint64(i)) + require.Nil(t, err) + } + + err := vectorIndex.CleanUpTombstonedNodes(neverStop) + require.Nil(t, err) + }) + + t.Run("try to insert again and search", func(t *testing.T) { + for i := 0; i < 5; i++ { 
+ err := vectorIndex.Add(ctx, uint64(i), vectors[i]) + require.Nil(t, err) + } + + res, _, err := vectorIndex.SearchByVector(ctx, []float32{0.1, 0.1, 0.1}, 20, nil) + require.Nil(t, err) + assert.ElementsMatch(t, []uint64{0, 1, 2, 3, 4}, res) + }) + + t.Run("destroy the index", func(t *testing.T) { + require.Nil(t, vectorIndex.Drop(context.Background())) + }) + + store.Shutdown(context.Background()) +} + +func createIndexImportAllVectorsAndDeleteEven(t *testing.T, vectors [][]float32, store *lsmkv.Store) (index *hnsw, remainingResult []uint64) { + ctx := context.Background() + index, err := New(Config{ + RootPath: "doesnt-matter-as-committlogger-is-mocked-out", + ID: "delete-test", + MakeCommitLoggerThunk: MakeNoopCommitLogger, + DistanceProvider: distancer.NewCosineDistanceProvider(), + VectorForIDThunk: func(ctx context.Context, id uint64) ([]float32, error) { + return vectors[int(id)], nil + }, + TempVectorForIDThunk: TempVectorForIDThunk(vectors), + }, ent.UserConfig{ + MaxConnections: 30, + EFConstruction: 128, + + // The actual size does not matter for this test, but if it defaults to + // zero it will constantly think it's full and needs to be deleted - even + // after just being deleted, so make sure to use a positive number here. + VectorCacheMaxObjects: 100000, + }, cyclemanager.NewCallbackGroupNoop(), store) + require.Nil(t, err) + + // makes sure index is build only with level 0. 
To be removed after fixing WEAVIATE-179 + index.randFunc = func() float64 { return 0.1 } + + // to speed up test execution, size of nodes array is decreased + // from default 25k to little over number of vectors + index.nodes = make([]*vertex, int(1.2*float64(len(vectors)))) + + for i, vec := range vectors { + err := index.Add(ctx, uint64(i), vec) + require.Nil(t, err) + } + + for i := range vectors { + if i%2 != 0 { + continue + } + err := index.Delete(uint64(i)) + require.Nil(t, err) + } + + res, _, err := index.SearchByVector(ctx, []float32{0.1, 0.1, 0.1}, len(vectors), nil) + require.Nil(t, err) + require.True(t, len(res) > 0) + + for _, elem := range res { + if elem%2 == 0 { + t.Errorf("search result contained an even element: %d", elem) + } + } + + return index, res +} + +func genStopAtFunc(i int) func() bool { + counter := 0 + mutex := &sync.Mutex{} + return func() bool { + mutex.Lock() + defer mutex.Unlock() + if counter < i { + counter++ + return false + } + + return true + } +} + +func TestDelete_WithCleaningUpTombstonesStopped(t *testing.T) { + ctx := context.Background() + vectors := vectorsForDeleteTest()[:50] + var index *hnsw + var possibleStopsCount int + // due to not yet resolved bug (https://semi-technology.atlassian.net/browse/WEAVIATE-179) + // db can return less vectors than are actually stored after tombstones cleanup + // controlRemainingResult contains all odd vectors (before cleanup was performed) + // controlRemainingResultAfterCleanup contains most of odd vectors (after cleanup was performed) + // + // this test verifies if partial cleanup will not change search output, therefore depending on + // where cleanup method was stopped, subset of controlRemainingResult is expected, though all + // vectors from controlRemainingResultAfterCleanup should be returned + // TODO to be simplified after fixing WEAVIATE-179, all results should be the same + var controlRemainingResult []uint64 + var controlRemainingResultAfterCleanup []uint64 + store := 
testinghelpers.NewDummyStore(t) + defer store.Shutdown(context.Background()) + + t.Run("create control index", func(t *testing.T) { + index, controlRemainingResult = createIndexImportAllVectorsAndDeleteEven(t, vectors, store) + }) + + t.Run("count all cleanup tombstones stops", func(t *testing.T) { + counter := 0 + mutex := &sync.Mutex{} + countingStopFunc := func() bool { + mutex.Lock() + counter++ + mutex.Unlock() + return false + } + + err := index.CleanUpTombstonedNodes(countingStopFunc) + require.Nil(t, err) + + possibleStopsCount = counter + }) + + t.Run("search remaining elements after cleanup", func(t *testing.T) { + res, _, err := index.SearchByVector(ctx, []float32{0.1, 0.1, 0.1}, len(vectors), nil) + require.Nil(t, err) + require.True(t, len(res) > 0) + + for _, elem := range res { + if elem%2 == 0 { + t.Errorf("search result contained an even element: %d", elem) + } + } + controlRemainingResultAfterCleanup = res + }) + + t.Run("destroy the control index", func(t *testing.T) { + require.Nil(t, index.Drop(context.Background())) + }) + + for i := 0; i < possibleStopsCount; i++ { + index, _ = createIndexImportAllVectorsAndDeleteEven(t, vectors, store) + + t.Run("stop cleanup at place", func(t *testing.T) { + require.Nil(t, index.CleanUpTombstonedNodes(genStopAtFunc(i))) + }) + + t.Run("search remaining elements after partial cleanup", func(t *testing.T) { + res, _, err := index.SearchByVector(ctx, []float32{0.1, 0.1, 0.1}, len(vectors), nil) + require.Nil(t, err) + require.Subset(t, controlRemainingResult, res) + require.Subset(t, res, controlRemainingResultAfterCleanup) + }) + + t.Run("run complete cleanup", func(t *testing.T) { + require.Nil(t, index.CleanUpTombstonedNodes(neverStop)) + }) + + t.Run("search remaining elements after complete cleanup", func(t *testing.T) { + res, _, err := index.SearchByVector(ctx, []float32{0.1, 0.1, 0.1}, len(vectors), nil) + require.Nil(t, err) + require.Subset(t, controlRemainingResult, res) + require.Subset(t, res, 
controlRemainingResultAfterCleanup) + }) + + t.Run("destroy the index", func(t *testing.T) { + require.Nil(t, index.Drop(context.Background())) + }) + } +} + +func TestDelete_WithCleaningUpTombstonesStoppedShouldNotRemoveTombstoneMarks(t *testing.T) { + ctx := context.Background() + vectors := vectorsForDeleteTest() + var index *hnsw + store := testinghelpers.NewDummyStore(t) + defer store.Shutdown(context.Background()) + + t.Run("create control index", func(t *testing.T) { + index, _ = createIndexImportAllVectorsAndDeleteEven(t, vectors, store) + }) + + t.Run("count all cleanup tombstones stops", func(t *testing.T) { + counter := 0 + mutex := &sync.Mutex{} + countingStopFunc := func() bool { + mutex.Lock() + counter++ + counterCpy := counter + mutex.Unlock() + return counterCpy > 30 && counterCpy < 40 + } + + err := index.CleanUpTombstonedNodes(countingStopFunc) + require.Nil(t, err) + }) + + time.Sleep(1000 * time.Millisecond) + + t.Run("even ids are not coming back and tombstones are not removed completely", func(t *testing.T) { + ids, _, _ := index.SearchByVector(ctx, vectors[0], len(vectors), nil) + for _, id := range ids { + assert.Equal(t, 1, int(id%2)) + } + assert.True(t, len(index.tombstones) > 0) + }) + + t.Run("destroy the index", func(t *testing.T) { + require.Nil(t, index.Drop(context.Background())) + }) +} + +func TestDelete_InCompressedIndex_WithCleaningUpTombstonesOnce(t *testing.T) { + ctx := context.Background() + defaultUC := ent.NewDefaultUserConfig() + var ( + vectorIndex *hnsw + // there is a single bulk clean event after all the deletes + vectors = vectorsForDeleteTest() + rootPath = t.TempDir() + userConfig = ent.UserConfig{ + MaxConnections: 30, + EFConstruction: 128, + + // The actual size does not matter for this test, but if it defaults to + // zero it will constantly think it's full and needs to be deleted - even + // after just being deleted, so make sure to use a positive number here. 
+ VectorCacheMaxObjects: 100000, + PQ: ent.PQConfig{ + Enabled: true, + Encoder: ent.PQEncoder{ + Type: ent.PQEncoderTypeTile, + Distribution: ent.PQEncoderDistributionNormal, + }, + }, + BQ: defaultUC.BQ, + SQ: defaultUC.SQ, + } + ) + store := testinghelpers.NewDummyStore(t) + defer store.Shutdown(context.Background()) + + t.Run("import the test vectors", func(t *testing.T) { + index, err := New(Config{ + RootPath: rootPath, + ID: "delete-test", + MakeCommitLoggerThunk: MakeNoopCommitLogger, + DistanceProvider: distancer.NewCosineDistanceProvider(), + VectorForIDThunk: func(ctx context.Context, id uint64) ([]float32, error) { + if int(id) >= len(vectors) { + return nil, storobj.NewErrNotFoundf(id, "out of range") + } + return vectors[int(id)], nil + }, + TempVectorForIDThunk: TempVectorForIDThunk(vectors), + }, userConfig, cyclemanager.NewCallbackGroupNoop(), store) + require.Nil(t, err) + vectorIndex = index + + for i, vec := range vectors { + err := vectorIndex.Add(ctx, uint64(i), vec) + require.Nil(t, err) + } + cfg := ent.PQConfig{ + Enabled: true, + Encoder: ent.PQEncoder{ + Type: ent.PQEncoderTypeTile, + Distribution: ent.PQEncoderDistributionLogNormal, + }, + BitCompression: false, + Segments: 3, + Centroids: 256, + TrainingLimit: 100000, + } + userConfig.PQ = cfg + index.compress(userConfig) + }) + + var control []uint64 + var bfControl []uint64 + + t.Run("doing a control search before delete with the respective allow list", func(t *testing.T) { + allowList := helpers.NewAllowList() + for i := range vectors { + if i%2 == 0 { + continue + } + + allowList.Insert(uint64(i)) + } + + res, _, err := vectorIndex.SearchByVector(ctx, []float32{0.1, 0.1, 0.1}, 20, allowList) + require.Nil(t, err) + require.True(t, len(res) > 0) + require.Len(t, res, 20) + control = res + }) + + t.Run("brute force control", func(t *testing.T) { + bf := bruteForceCosine(vectors, []float32{0.1, 0.1, 0.1}, 100) + bfControl = make([]uint64, len(bf)) + i := 0 + for _, elem := range bf { + 
if elem%2 == 0 { + continue + } + + bfControl[i] = elem + i++ + } + + if i > 20 { + i = 20 + } + + bfControl = bfControl[:i] + recall := float32(testinghelpers.MatchesInLists(bfControl, control)) / float32(len(bfControl)) + fmt.Println(recall) + assert.True(t, recall > 0.6, "control should match bf control") + }) + + fmt.Printf("entrypoint before %d\n", vectorIndex.entryPointID) + t.Run("deleting every even element", func(t *testing.T) { + for i := range vectors { + if i%2 != 0 { + continue + } + + err := vectorIndex.Delete(uint64(i)) + require.Nil(t, err) + } + }) + + t.Run("running the cleanup", func(t *testing.T) { + err := vectorIndex.CleanUpTombstonedNodes(neverStop) + require.Nil(t, err) + }) + + t.Run("start a search that should only contain the remaining elements", func(t *testing.T) { + res, _, err := vectorIndex.SearchByVector(ctx, []float32{0.1, 0.1, 0.1}, 20, nil) + require.Nil(t, err) + require.True(t, len(res) > 0) + + for _, elem := range res { + if elem%2 == 0 { + t.Errorf("search result contained an even element: %d", elem) + } + } + + recall := float32(testinghelpers.MatchesInLists(res, control)) / float32(len(control)) + assert.True(t, recall > 0.6) + }) + + t.Run("verify the graph no longer has any tombstones", func(t *testing.T) { + assert.Len(t, vectorIndex.tombstones, 0) + }) + + t.Run("destroy the index", func(t *testing.T) { + require.Nil(t, vectorIndex.Drop(context.Background())) + }) +} + +func TestDelete_ResetLockDoesNotLockForever(t *testing.T) { + ctx := context.Background() + var ( + vectorIndex *hnsw + // there is a single bulk clean event after all the deletes + vectors = vectorsForDeleteTest() + rootPath = t.TempDir() + userConfig = ent.UserConfig{ + MaxConnections: 30, + EFConstruction: 128, + + // The actual size does not matter for this test, but if it defaults to + // zero it will constantly think it's full and needs to be deleted - even + // after just being deleted, so make sure to use a positive number here. 
+ VectorCacheMaxObjects: 100000, + PQ: ent.PQConfig{ + Enabled: false, + Encoder: ent.PQEncoder{ + Type: ent.PQEncoderTypeTile, + Distribution: ent.PQEncoderDistributionNormal, + }, + }, + } + ) + store := testinghelpers.NewDummyStore(t) + defer store.Shutdown(context.Background()) + + t.Run("import the test vectors", func(t *testing.T) { + index, err := New(Config{ + RootPath: rootPath, + ID: "delete-test", + MakeCommitLoggerThunk: MakeNoopCommitLogger, + DistanceProvider: distancer.NewCosineDistanceProvider(), + VectorForIDThunk: func(ctx context.Context, id uint64) ([]float32, error) { + if int(id) >= len(vectors) { + return nil, storobj.NewErrNotFoundf(id, "out of range") + } + return vectors[int(id)], nil + }, + TempVectorForIDThunk: TempVectorForIDThunk(vectors), + }, userConfig, cyclemanager.NewCallbackGroupNoop(), store) + require.Nil(t, err) + vectorIndex = index + + for i, vec := range vectors { + err := vectorIndex.Add(ctx, uint64(i), vec) + require.Nil(t, err) + } + }) + + fmt.Printf("entrypoint before %d\n", vectorIndex.entryPointID) + deletedIds := make([]uint64, 0, len(vectors)/2) + t.Run("deleting every even element", func(t *testing.T) { + for i := range vectors { + if i%2 != 0 { + continue + } + + deletedIds = append(deletedIds, uint64(i)) + err := vectorIndex.Delete(uint64(i)) + require.Nil(t, err) + } + }) + + t.Run("running the cleanup and an delete, the delete should not take forever", func(t *testing.T) { + var wg sync.WaitGroup + wg.Add(2) + + go func() { + defer wg.Done() + allowList := helpers.NewAllowList(deletedIds...) 
+ ok, err := vectorIndex.reassignNeighborsOf(ctx, allowList, slowNeverStop) + require.Nil(t, err) + require.True(t, ok) + }() + elapsed := time.Duration(0) + go func() { + defer wg.Done() + time.Sleep(time.Millisecond * 100) + starting := time.Now() + err := vectorIndex.Delete(vectorIndex.entryPointID) + elapsed = time.Since(starting) + require.Nil(t, err) + }() + wg.Wait() + fmt.Printf("ms elapsed: %d\n", elapsed.Milliseconds()) + assert.LessOrEqual(t, elapsed.Milliseconds(), int64(180)) + }) + + t.Run("destroy the index", func(t *testing.T) { + require.Nil(t, vectorIndex.Drop(context.Background())) + }) +} + +func TestDelete_InCompressedIndex_WithCleaningUpTombstonesOnce_DoesNotCrash(t *testing.T) { + ctx := context.Background() + defaultUC := ent.NewDefaultUserConfig() + var ( + vectorIndex *hnsw + // there is a single bulk clean event after all the deletes + vectors = vectorsForDeleteTest() + rootPath = t.TempDir() + userConfig = ent.UserConfig{ + MaxConnections: 16, + EFConstruction: 32, + + // The actual size does not matter for this test, but if it defaults to + // zero it will constantly think it's full and needs to be deleted - even + // after just being deleted, so make sure to use a positive number here. 
+ VectorCacheMaxObjects: 100000, + PQ: ent.PQConfig{ + Enabled: true, + Encoder: ent.PQEncoder{ + Type: ent.PQEncoderTypeTile, + Distribution: ent.PQEncoderDistributionNormal, + }, + }, + BQ: defaultUC.BQ, + SQ: defaultUC.SQ, + } + ) + + store := testinghelpers.NewDummyStore(t) + defer store.Shutdown(context.Background()) + + t.Run("import the test vectors", func(t *testing.T) { + index, err := New(Config{ + RootPath: rootPath, + ID: "delete-test", + MakeCommitLoggerThunk: MakeNoopCommitLogger, + DistanceProvider: distancer.NewCosineDistanceProvider(), + VectorForIDThunk: func(ctx context.Context, id uint64) ([]float32, error) { + return vectors[int(id%uint64(len(vectors)))], nil + }, + TempVectorForIDThunk: TempVectorForIDThunk(vectors), + }, userConfig, cyclemanager.NewCallbackGroupNoop(), store) + require.Nil(t, err) + vectorIndex = index + + for i, vec := range vectors { + err := vectorIndex.Add(ctx, uint64(i), vec) + require.Nil(t, err) + } + cfg := ent.PQConfig{ + Enabled: true, + Encoder: ent.PQEncoder{ + Type: ent.PQEncoderTypeKMeans, + Distribution: ent.PQEncoderDistributionNormal, + }, + BitCompression: false, + Segments: 3, + Centroids: 256, + } + userConfig.PQ = cfg + index.compress(userConfig) + for i := len(vectors); i < 1000; i++ { + err := vectorIndex.Add(ctx, uint64(i), vectors[i%len(vectors)]) + require.Nil(t, err) + } + }) + + t.Run("deleting every even element", func(t *testing.T) { + for i := range vectors { + if i%2 != 0 { + continue + } + + err := vectorIndex.Delete(uint64(i)) + require.Nil(t, err) + } + }) + + t.Run("running the cleanup", func(t *testing.T) { + err := vectorIndex.CleanUpTombstonedNodes(neverStop) + require.Nil(t, err) + }) + + t.Run("verify the graph no longer has any tombstones", func(t *testing.T) { + assert.Len(t, vectorIndex.tombstones, 0) + }) + + t.Run("destroy the index", func(t *testing.T) { + require.Nil(t, vectorIndex.Drop(context.Background())) + }) +} + +// we need a certain number of elements so that we can make 
sure that nodes +// from all layers will eventually be deleted, otherwise our test only tests +// edge cases which aren't very common in real life, but ignore the most common +// deletes +func vectorsForDeleteTest() [][]float32 { + return [][]float32{ + {0.27335858, 0.42670676, 0.12599982}, + {0.34369454, 0.78510034, 0.78000546}, + {0.2342731, 0.076864816, 0.6405078}, + {0.07597838, 0.7752282, 0.87022865}, + {0.78632426, 0.06902865, 0.7423889}, + {0.3055758, 0.3901508, 0.9399572}, + {0.48687622, 0.26338226, 0.06495104}, + {0.5384028, 0.35410047, 0.8821815}, + {0.25123185, 0.62722564, 0.86443096}, + {0.58484185, 0.13103616, 0.4034975}, + {0.0019696166, 0.46822622, 0.42492124}, + {0.42401955, 0.8278863, 0.5952888}, + {0.15367928, 0.70778894, 0.0070928824}, + {0.95760256, 0.45898128, 0.1541115}, + {0.9125976, 0.9021616, 0.21607016}, + {0.9876307, 0.5243228, 0.37294936}, + {0.8194746, 0.56142205, 0.5130103}, + {0.805065, 0.62250346, 0.63715476}, + {0.9969276, 0.5115748, 0.18916714}, + {0.16419733, 0.15029702, 0.36020836}, + {0.9660323, 0.35887036, 0.6072966}, + {0.72765416, 0.27891788, 0.9094314}, + {0.8626208, 0.3540126, 0.3100354}, + {0.7153876, 0.17094712, 0.7801294}, + {0.23180388, 0.107446484, 0.69542855}, + {0.54731685, 0.8949827, 0.68316746}, + {0.15049729, 0.1293767, 0.0574729}, + {0.89379513, 0.67022973, 0.57360715}, + {0.725353, 0.25326362, 0.44264215}, + {0.2568602, 0.4986094, 0.9759933}, + {0.7300015, 0.70019704, 0.49546525}, + {0.54314494, 0.2004176, 0.63803226}, + {0.6180191, 0.5260845, 0.9373999}, + {0.63356537, 0.81430644, 0.78373694}, + {0.69995105, 0.84198904, 0.17851257}, + {0.5197941, 0.11502675, 0.95129955}, + {0.15791401, 0.07516741, 0.113447875}, + {0.06811827, 0.4450082, 0.98595786}, + {0.7153448, 0.41833848, 0.06332495}, + {0.6704102, 0.28931814, 0.031580303}, + {0.47773632, 0.73334247, 0.6925025}, + {0.7976896, 0.9499536, 0.6394833}, + {0.3074854, 0.14025249, 0.35961738}, + {0.49956197, 0.093575336, 0.790093}, + {0.4641653, 0.21276893, 
0.528895},
		{0.1021849, 0.9416305, 0.46738508},
		{0.3790398, 0.50099677, 0.98233247},
		{0.39650732, 0.020929832, 0.53968865},
		{0.77604437, 0.8554197, 0.24056046},
		{0.07174444, 0.28758526, 0.67587185},
		{0.22292718, 0.66624546, 0.6077909},
		{0.22090498, 0.36197436, 0.40415043},
		{0.04838009, 0.120789215, 0.17928012},
		{0.55166364, 0.3400502, 0.43698996},
		{0.7638108, 0.47014108, 0.23208627},
		{0.9239513, 0.8418566, 0.23518613},
		{0.289589, 0.85010827, 0.055741556},
		{0.32436147, 0.18756394, 0.4217864},
		{0.041671168, 0.37824047, 0.66486764},
		{0.5052222, 0.07982704, 0.64345413},
		{0.62675995, 0.20138603, 0.8231867},
		{0.86306876, 0.9698708, 0.11398846},
		{0.68566775, 0.22026269, 0.13525572},
		{0.57706076, 0.32325208, 0.6122228},
		{0.80035216, 0.18560356, 0.6328281},
		{0.87145543, 0.19380389, 0.8863942},
		{0.33777508, 0.6056442, 0.9110077},
		{0.3961719, 0.49714503, 0.14191929},
		{0.5344362, 0.8166916, 0.75880384},
		{0.015749464, 0.63223976, 0.5470922},
		{0.10512444, 0.2212036, 0.24995685},
		{0.10831311, 0.27044898, 0.8668174},
		{0.3272971, 0.6659298, 0.87119603},
		{0.42913893, 0.14528985, 0.69957525},
		{0.33012474, 0.81964344, 0.092787445},
		{0.093618214, 0.90637344, 0.94406706},
		{0.12161567, 0.75131124, 0.40563175},
		{0.9154454, 0.75925833, 0.8406739},
		{0.81649286, 0.9025715, 0.3105051},
		{0.2927649, 0.22649862, 0.9708593},
		{0.30813727, 0.0079439245, 0.39662006},
		{0.94943213, 0.36778906, 0.217876},
		{0.716794, 0.3811725, 0.18448676},
		{0.66879725, 0.29722908, 0.0031202603},
		{0.11104216, 0.13094379, 0.0787222},
		{0.8508966, 0.86416596, 0.15885831},
		{0.2303136, 0.56660503, 0.17114973},
		{0.8632685, 0.4229249, 0.1936724},
		{0.03060897, 0.35226125, 0.8115969},
	}
}

func TestDelete_EntrypointIssues(t *testing.T) {
	ctx := context.Background()
	// This test is motivated by flakyness of other tests. We seemed to have
	// experienced a failure with the following structure
	//
	// Entrypoint: 6
	// Max Level: 1
	// Tombstones map[]

	// Nodes and Connections:
	// Node 0
	// Level 0: Connections: [1 2 3 4 5 6 7 8]
	// Node 1
	// Level 0: Connections: [0 2 3 4 5 6 7 8]
	// Node 2
	// Level 0: Connections: [1 0 3 4 5 6 7 8]
	// Node 3
	// Level 0: Connections: [2 1 0 4 5 6 7 8]
	// Node 4
	// Level 0: Connections: [3 2 1 0 5 6 7 8]
	// Node 5
	// Level 0: Connections: [3 4 2 1 0 6 7 8]
	// Node 6
	// Level 0: Connections: [4 2 1 3 5 0 7 8]
	// Level 1: Connections: [7]
	// Node 7
	// Level 1: Connections: [6]
	// Level 0: Connections: [6 4 3 5 2 1 0 8]
	// Node 8
	// Level 0: Connections: [7 6 4 3 5 2 1 0]
	//
	// This test aims to rebuild this tree exactly (manually) and verifies that
	// deletion of the old entrypoint (element 6), works without issue
	//
	// The underlying test set can be found in vectors_for_test.go

	index, err := New(Config{
		RootPath:              "doesnt-matter-as-committlogger-is-mocked-out",
		ID:                    "delete-entrypoint-test",
		MakeCommitLoggerThunk: MakeNoopCommitLogger,
		DistanceProvider:      distancer.NewCosineDistanceProvider(),
		VectorForIDThunk:      testVectorForID,
	}, ent.UserConfig{
		MaxConnections: 30,
		EFConstruction: 128,
		EF:             36,

		// The actual size does not matter for this test, but if it defaults to
		// zero it will constantly think it's full and needs to be deleted - even
		// after just being deleted, so make sure to use a positive number here.
		VectorCacheMaxObjects: 100000,
	}, cyclemanager.NewCallbackGroupNoop(), testinghelpers.NewDummyStore(t))
	require.Nil(t, err)

	// manually build the index so it matches the documented topology above
	index.entryPointID = 6
	index.currentMaximumLayer = 1
	index.nodes = make([]*vertex, 50)
	conns, _ := packedconn.NewWithElements([][]uint64{
		{1, 2, 3, 4, 5, 6, 7, 8},
	})
	index.nodes[0] = &vertex{
		id:          0,
		connections: conns,
	}
	conns, _ = packedconn.NewWithElements([][]uint64{
		{0, 2, 3, 4, 5, 6, 7, 8},
	})
	index.nodes[1] = &vertex{
		id:          1,
		connections: conns,
	}
	conns, _ = packedconn.NewWithElements([][]uint64{
		{1, 0, 3, 4, 5, 6, 7, 8},
	})
	index.nodes[2] = &vertex{
		id:          2,
		connections: conns,
	}
	conns, _ = packedconn.NewWithElements([][]uint64{
		{2, 1, 0, 4, 5, 6, 7, 8},
	})
	index.nodes[3] = &vertex{
		id:          3,
		connections: conns,
	}
	conns, _ = packedconn.NewWithElements([][]uint64{
		{3, 2, 1, 0, 5, 6, 7, 8},
	})
	index.nodes[4] = &vertex{
		id:          4,
		connections: conns,
	}
	conns, _ = packedconn.NewWithElements([][]uint64{
		{3, 4, 2, 1, 0, 6, 7, 8},
	})
	index.nodes[5] = &vertex{
		id:          5,
		connections: conns,
	}
	// FIX: the level-0 list previously read {4, 3, 1, 3, 5, 0, 7, 8} — a
	// duplicated 3 and a missing 2 — which contradicted the documented
	// topology [4 2 1 3 5 0 7 8] this test claims to rebuild exactly.
	conns, _ = packedconn.NewWithElements([][]uint64{
		{4, 2, 1, 3, 5, 0, 7, 8},
		{7},
	})
	index.nodes[6] = &vertex{
		id:          6,
		connections: conns,
		level:       1,
	}
	conns, _ = packedconn.NewWithElements([][]uint64{
		{6, 4, 3, 5, 2, 1, 0, 8},
		{6},
	})
	index.nodes[7] = &vertex{
		id:          7,
		connections: conns,
		level:       1,
	}
	conns, _ = packedconn.NewWithElements([][]uint64{
		{7, 6, 4, 3, 5, 2, 1, 0},
	})
	index.nodes[8] = &vertex{
		id:          8,
		connections: conns,
	}

	dumpIndex(index, "before delete")

	t.Run("delete some elements and permanently delete tombstoned elements",
		func(t *testing.T) {
			err := index.Delete(6)
			require.Nil(t, err)
			err = index.Delete(8)
			require.Nil(t, err)

			err = index.CleanUpTombstonedNodes(neverStop)
			require.Nil(t, err)
		})

	dumpIndex(index, "after delete")

	expectedResults := []uint64{
		3, 5, 4, // cluster 2
		7,       // cluster 3 with element 6 and 8 deleted
		2, 1, 0, // cluster 1
	}

	t.Run("verify that the results are correct", func(t *testing.T) {
		position := 3
		res, _, err := index.SearchByVector(ctx, testVectors[position], 50, nil)
		require.Nil(t, err)
		assert.Equal(t, expectedResults, res)
	})

	// t.Fail()
	t.Run("destroy the index", func(t *testing.T) {
		require.Nil(t, index.Drop(context.Background()))
	})
}

func TestDelete_MoreEntrypointIssues(t *testing.T) {
	ctx := context.Background()
	vectors := [][]float32{
		{7, 1},
		{8, 2},
		{23, 14},
		{6.5, -1},
	}

	vecForID := func(ctx context.Context, id uint64) ([]float32, error) {
		return vectors[int(id)], nil
	}
	// This test is motivated by flakyness of other tests. We seemed to have
	// experienced a failure with the following structure
	//
	// ID: thing_geoupdatetestclass_single_location
	// Entrypoint: 2
	// Max Level: 1
	// Tombstones map[0:{} 1:{}]
	//
	// Nodes and Connections:
	// Node 0
	// Level 0: Connections: [1]
	// Node 1
	// Level 0: Connections: [0 2]
	// Level 1: Connections: [2]
	// Node 2
	// Level 1: Connections: [1]
	// Level 0: Connections: [1]

	index, err := New(Config{
		RootPath:              "doesnt-matter-as-committlogger-is-mocked-out",
		ID:                    "more-delete-entrypoint-flakyness-test",
		MakeCommitLoggerThunk: MakeNoopCommitLogger,
		DistanceProvider:      distancer.NewGeoProvider(),
		VectorForIDThunk:      vecForID,
		TempVectorForIDThunk:  TempVectorForIDThunk(vectors),
	}, ent.UserConfig{
		MaxConnections: 30,
		EFConstruction: 128,

		// The actual size does not matter for this test, but if it defaults to
		// zero it will constantly think it's full and needs to be deleted - even
		// after just being deleted, so make sure to use a positive number here.
+ VectorCacheMaxObjects: 100000, + }, cyclemanager.NewCallbackGroupNoop(), testinghelpers.NewDummyStore(t)) + require.Nil(t, err) + + // manually build the index + index.entryPointID = 2 + index.currentMaximumLayer = 1 + index.tombstones = map[uint64]struct{}{ + 0: {}, + 1: {}, + } + index.nodes = make([]*vertex, 50) + conns, _ := packedconn.NewWithElements([][]uint64{ + {1}, + }) + index.nodes[0] = &vertex{ + id: 0, + connections: conns, + } + conns, _ = packedconn.NewWithElements([][]uint64{ + 0: {0, 2}, + 1: {2}, + }) + index.nodes[1] = &vertex{ + id: 1, + connections: conns, + } + conns, _ = packedconn.NewWithElements([][]uint64{ + 0: {1}, + 1: {1}, + }) + index.nodes[2] = &vertex{ + id: 2, + connections: conns, + } + + dumpIndex(index, "before adding another element") + t.Run("adding a third element", func(t *testing.T) { + vec, _ := testVectorForID(context.TODO(), 3) + index.Add(ctx, 3, vec) + }) + + expectedResults := []uint64{ + 3, 2, + } + + t.Run("verify that the results are correct", func(t *testing.T) { + position := 3 + res, _, err := index.SearchByVector(ctx, testVectors[position], 50, nil) + require.Nil(t, err) + assert.Equal(t, expectedResults, res) + }) + + t.Run("destroy the index", func(t *testing.T) { + require.Nil(t, index.Drop(context.Background())) + }) +} + +func TestDelete_TombstonedEntrypoint(t *testing.T) { + ctx := context.Background() + vecForID := func(ctx context.Context, id uint64) ([]float32, error) { + // always return same vec for all elements + return []float32{0.1, 0.2}, nil + } + index, err := New(Config{ + RootPath: "doesnt-matter-as-committlogger-is-mocked-out", + ID: "tombstoned-entrypoint-test", + MakeCommitLoggerThunk: MakeNoopCommitLogger, + DistanceProvider: distancer.NewCosineDistanceProvider(), + VectorForIDThunk: vecForID, + TempVectorForIDThunk: TempVectorForIDThunk([][]float32{{0.1, 0.2}}), + }, ent.UserConfig{ + MaxConnections: 30, + EFConstruction: 128, + // explicitly turn off, so we only focus on the tombstoned 
periods + CleanupIntervalSeconds: 0, + + // The actual size does not matter for this test, but if it defaults to + // zero it will constantly think it's full and needs to be deleted - even + // after just being deleted, so make sure to use a positive number here. + VectorCacheMaxObjects: 100000, + }, cyclemanager.NewCallbackGroupNoop(), testinghelpers.NewDummyStore(t)) + require.Nil(t, err) + + objVec := []float32{0.1, 0.2} + searchVec := []float32{0.05, 0.05} + + require.Nil(t, index.Add(ctx, 0, objVec)) + require.Nil(t, index.Delete(0)) + require.Nil(t, index.Add(ctx, 1, objVec)) + + res, _, err := index.SearchByVector(ctx, searchVec, 100, nil) + require.Nil(t, err) + assert.Equal(t, []uint64{1}, res, "should contain the only result") + + t.Run("destroy the index", func(t *testing.T) { + require.Nil(t, index.Drop(context.Background())) + }) +} + +func TestDelete_Flakyness_gh_1369(t *testing.T) { + ctx := context.Background() + // parse a snapshot form a flaky test + snapshotBefore := []byte(`{"labels":["ran a cleanup 
cycle"],"id":"delete-test","entrypoint":3,"currentMaximumLayer":3,"tombstones":{},"nodes":[{"id":1,"level":0,"connections":{"0":[11,25,33,3,29,32,5,19,30,7,17,27,21,31,36,34,35,23,15,9,13]}},{"id":3,"level":3,"connections":{"0":[1,29,11,5,25,33,19,32,7,17,30,21,35,31,27,36,23,34,9,15,13],"1":[29,36,13],"2":[29,36],"3":[36]}},{"id":5,"level":0,"connections":{"0":[29,19,7,32,35,21,1,31,3,33,23,25,11,17,36,27,30,9,15,34,13]}},{"id":7,"level":0,"connections":{"0":[32,19,21,31,5,35,23,29,33,36,17,1,9,27,25,30,11,3,15,13,34]}},{"id":9,"level":0,"connections":{"0":[36,23,31,21,15,17,27,7,32,35,30,13,19,33,5,25,29,11,1,34,3]}},{"id":11,"level":0,"connections":{"0":[25,33,1,30,17,3,27,32,34,29,19,7,5,36,15,21,31,23,9,13,35]}},{"id":13,"level":1,"connections":{"0":[15,27,34,36,30,17,9,33,25,31,23,21,11,32,7,1,19,35,5,29,3],"1":[36,29,3]}},{"id":15,"level":0,"connections":{"0":[13,27,36,17,30,9,34,33,31,23,25,21,32,11,7,1,19,35,5,29,3]}},{"id":17,"level":0,"connections":{"0":[27,30,36,33,15,32,25,31,9,11,21,7,23,1,34,13,19,5,29,35,3]}},{"id":19,"level":0,"connections":{"0":[5,7,32,29,35,21,31,23,1,33,17,3,25,36,11,27,9,30,15,34,13]}},{"id":21,"level":0,"connections":{"0":[31,23,7,35,32,19,9,36,5,17,27,33,29,30,15,1,25,11,3,13,34]}},{"id":23,"level":0,"connections":{"0":[31,21,9,35,7,36,32,19,17,5,27,33,15,29,30,25,1,13,11,3,34]}},{"id":25,"level":0,"connections":{"0":[11,33,1,30,17,27,32,3,34,29,7,19,36,5,15,21,31,23,9,13,35]}},{"id":27,"level":0,"connections":{"0":[17,30,36,15,33,25,13,9,34,32,11,31,21,7,23,1,19,5,29,35,3]}},{"id":29,"level":2,"connections":{"0":[5,19,32,7,3,1,33,35,21,25,31,11,23,17,30,36,27,9,15,34,13],"1":[3,36,13],"2":[3,36]}},{"id":30,"level":0,"connections":{"0":[27,17,33,25,15,36,11,34,32,1,13,9,31,7,21,23,19,29,5,3,35]}},{"id":31,"level":0,"connections":{"0":[21,23,7,32,35,9,36,19,17,5,27,33,29,30,15,25,1,11,13,3,34]}},{"id":32,"level":0,"connections":{"0":[7,19,21,31,5,33,29,17,23,1,35,36,25,27,30,11,9,3,15,34,13]}},{"id":33,"level":0,"connections":{
"0":[25,11,1,17,30,32,27,7,19,36,29,5,21,31,3,34,15,23,9,35,13]}},{"id":34,"level":0,"connections":{"0":[30,27,15,13,25,17,11,33,36,1,32,9,31,7,21,3,23,19,29,5,35]}},{"id":35,"level":0,"connections":{"0":[21,7,31,23,19,5,32,29,9,36,17,33,1,27,25,30,3,11,15,13,34]}},{"id":36,"level":3,"connections":{"0":[17,9,27,15,31,23,21,30,32,7,33,13,25,19,35,11,34,1,5,29,3],"1":[13,29,3],"2":[29,3],"3":[3]}}]} +`) + + vectors := vectorsForDeleteTest() + vecForID := func(ctx context.Context, id uint64) ([]float32, error) { + return vectors[int(id)], nil + } + + index, err := NewFromJSONDumpMap(snapshotBefore, vecForID) + require.Nil(t, err) + index.forbidFlat = true + + var control []uint64 + t.Run("control search before delete with the respective allow list", func(t *testing.T) { + allowList := helpers.NewAllowList() + for i := range vectors { + if i%2 == 0 { + continue + } + + allowList.Insert(uint64(i)) + } + + res, _, err := index.SearchByVector(ctx, []float32{0.1, 0.1, 0.1}, 20, allowList) + require.Nil(t, err) + require.True(t, len(res) > 0) + + control = res + }) + + t.Run("delete the remaining even entries", func(t *testing.T) { + require.Nil(t, index.Delete(30)) + require.Nil(t, index.Delete(32)) + require.Nil(t, index.Delete(34)) + require.Nil(t, index.Delete(36)) + }) + + t.Run("verify against control BEFORE Tombstone Cleanup", func(t *testing.T) { + res, _, err := index.SearchByVector(ctx, []float32{0.1, 0.1, 0.1}, 20, nil) + require.Nil(t, err) + require.True(t, len(res) > 0) + assert.Equal(t, control, res) + }) + + t.Run("clean up tombstoned nodes", func(t *testing.T) { + require.Nil(t, index.CleanUpTombstonedNodes(neverStop)) + }) + + t.Run("verify against control AFTER Tombstone Cleanup", func(t *testing.T) { + res, _, err := index.SearchByVector(ctx, []float32{0.1, 0.1, 0.1}, 20, nil) + require.Nil(t, err) + require.True(t, len(res) > 0) + assert.Equal(t, control, res) + }) + + t.Run("now delete the entrypoint", func(t *testing.T) { + require.Nil(t, 
index.Delete(index.entryPointID)) + }) + + t.Run("clean up tombstoned nodes", func(t *testing.T) { + require.Nil(t, index.CleanUpTombstonedNodes(neverStop)) + }) + + t.Run("now delete the entrypoint", func(t *testing.T) { + // this verifies that our findNewLocalEntrypoint also works when the global + // entrypoint is affected + require.Nil(t, index.Delete(index.entryPointID)) + }) + + t.Run("clean up tombstoned nodes", func(t *testing.T) { + require.Nil(t, index.CleanUpTombstonedNodes(neverStop)) + }) + + t.Run("destroy the index", func(t *testing.T) { + require.Nil(t, index.Drop(context.Background())) + }) +} + +func bruteForceCosine(vectors [][]float32, query []float32, k int) []uint64 { + type distanceAndIndex struct { + distance float32 + index uint64 + } + + distances := make([]distanceAndIndex, len(vectors)) + + d := distancer.NewCosineDistanceProvider().New(distancer.Normalize(query)) + for i, vec := range vectors { + dist, _ := d.Distance(distancer.Normalize(vec)) + distances[i] = distanceAndIndex{ + index: uint64(i), + distance: dist, + } + } + + sort.Slice(distances, func(a, b int) bool { + return distances[a].distance < distances[b].distance + }) + + if len(distances) < k { + k = len(distances) + } + + out := make([]uint64, k) + for i := 0; i < k; i++ { + out[i] = distances[i].index + } + + return out +} + +func neverStop() bool { + return false +} + +func slowNeverStop() bool { + time.Sleep(time.Millisecond * 3) + return false +} + +// This test simulates what happens when the EP is removed from the +// VectorForID-serving store +func Test_DeleteEPVecInUnderlyingObjectStore(t *testing.T) { + ctx := context.Background() + var vectorIndex *hnsw + + vectors := [][]float32{ + {1, 1}, + {2, 2}, + {3, 3}, + } + + vectorErrors := []error{ + nil, + nil, + nil, + } + store := testinghelpers.NewDummyStore(t) + defer store.Shutdown(context.Background()) + + t.Run("import the test vectors", func(t *testing.T) { + index, err := New(Config{ + RootPath: 
"doesnt-matter-as-committlogger-is-mocked-out", + ID: "delete-ep-in-underlying-store-test", + MakeCommitLoggerThunk: MakeNoopCommitLogger, + DistanceProvider: distancer.NewL2SquaredProvider(), + VectorForIDThunk: func(ctx context.Context, id uint64) ([]float32, error) { + fmt.Printf("vec for pos=%d is %v\n", id, vectors[int(id)]) + return vectors[int(id)], vectorErrors[int(id)] + }, + TempVectorForIDThunk: TempVectorForIDThunk(vectors), + }, ent.UserConfig{ + MaxConnections: 30, + EFConstruction: 128, + + // The actual size does not matter for this test, but if it defaults to + // zero it will constantly think it's full and needs to be deleted - even + // after just being deleted, so make sure to use a positive number here. + VectorCacheMaxObjects: 100000, + }, cyclemanager.NewCallbackGroupNoop(), store) + require.Nil(t, err) + vectorIndex = index + + for i, vec := range vectors { + err := vectorIndex.Add(ctx, uint64(i), vec) + require.Nil(t, err) + } + + fmt.Printf("ep is %d\n", vectorIndex.entryPointID) + }) + + t.Run("simulate ep vec deletion in object store", func(t *testing.T) { + vectors[0] = nil + vectorErrors[0] = storobj.NewErrNotFoundf(0, "deleted") + vectorIndex.cache.Delete(context.Background(), 0) + }) + + t.Run("try to insert a fourth vector", func(t *testing.T) { + vectors = append(vectors, []float32{4, 4}) + vectorErrors = append(vectorErrors, nil) + + pos := len(vectors) - 1 + err := vectorIndex.Add(ctx, uint64(pos), vectors[pos]) + require.Nil(t, err) + }) +} + +func TestDelete_WithCleaningUpTombstonesOncePreservesMaxConnections(t *testing.T) { + // there is a single bulk clean event after all the deletes + vectors := vectorsForDeleteTest() + ctx := context.Background() + var vectorIndex *hnsw + + store := testinghelpers.NewDummyStore(t) + defer store.Shutdown(context.Background()) + + index, err := New(Config{ + RootPath: "doesnt-matter-as-committlogger-is-mocked-out", + ID: "delete-test", + MakeCommitLoggerThunk: MakeNoopCommitLogger, + 
DistanceProvider: distancer.NewCosineDistanceProvider(), + VectorForIDThunk: func(ctx context.Context, id uint64) ([]float32, error) { + return vectors[int(id)], nil + }, + TempVectorForIDThunk: TempVectorForIDThunk(vectors), + }, ent.UserConfig{ + MaxConnections: 30, + EFConstruction: 128, + + // The actual size does not matter for this test, but if it defaults to + // zero it will constantly think it's full and needs to be deleted - even + // after just being deleted, so make sure to use a positive number here. + VectorCacheMaxObjects: 100000, + }, cyclemanager.NewCallbackGroupNoop(), store) + require.Nil(t, err) + vectorIndex = index + + for i, vec := range vectors { + err := vectorIndex.Add(ctx, uint64(i), vec) + require.Nil(t, err) + } + + require.Equal(t, 60, index.maximumConnectionsLayerZero) + some := false + for _, node := range index.nodes { + if node == nil { + continue + } + require.LessOrEqual(t, len(node.connections.GetLayer(0)), index.maximumConnectionsLayerZero) + some = some || len(node.connections.GetLayer(0)) > index.maximumConnections + } + require.True(t, some) + + for i := range vectors { + if i%2 != 0 { + continue + } + + err := vectorIndex.Delete(uint64(i)) + require.Nil(t, err) + } + + err = vectorIndex.CleanUpTombstonedNodes(neverStop) + require.Nil(t, err) + require.Equal(t, 60, index.maximumConnectionsLayerZero) + some = false + for _, node := range index.nodes { + if node == nil { + continue + } + require.LessOrEqual(t, len(node.connections.GetLayer(0)), index.maximumConnectionsLayerZero) + some = some || len(node.connections.GetLayer(0)) > index.maximumConnections + } + require.True(t, some) + + t.Run("destroy the index", func(t *testing.T) { + require.Nil(t, vectorIndex.Drop(context.Background())) + }) +} + +func TestDelete_WithCleaningUpTombstonesOnceRemovesAllRelatedConnections(t *testing.T) { + // there is a single bulk clean event after all the deletes + vectors := vectorsForDeleteTest() + ctx := context.Background() + var 
vectorIndex *hnsw + store := testinghelpers.NewDummyStore(t) + + index, err := New(Config{ + RootPath: "doesnt-matter-as-committlogger-is-mocked-out", + ID: "delete-test", + MakeCommitLoggerThunk: MakeNoopCommitLogger, + DistanceProvider: distancer.NewCosineDistanceProvider(), + VectorForIDThunk: func(ctx context.Context, id uint64) ([]float32, error) { + return vectors[int(id)], nil + }, + TempVectorForIDThunk: TempVectorForIDThunk(vectors), + }, ent.UserConfig{ + MaxConnections: 30, + EFConstruction: 128, + + // The actual size does not matter for this test, but if it defaults to + // zero it will constantly think it's full and needs to be deleted - even + // after just being deleted, so make sure to use a positive number here. + VectorCacheMaxObjects: 100000, + }, cyclemanager.NewCallbackGroupNoop(), store) + require.Nil(t, err) + vectorIndex = index + + for i, vec := range vectors { + err := vectorIndex.Add(ctx, uint64(i), vec) + require.Nil(t, err) + } + + for i := range vectors { + if i%2 != 0 { + continue + } + + err := vectorIndex.Delete(uint64(i)) + require.Nil(t, err) + } + + err = vectorIndex.CleanUpTombstonedNodes(neverStop) + require.Nil(t, err) + + for i, node := range vectorIndex.nodes { + if node == nil { + continue + } + assert.NotEqual(t, 0, i%2) + iter := node.connections.Iterator() + for iter.Next() { + level, connections := iter.Current() + for _, id := range connections { + assert.NotEqual(t, uint64(0), id%2) + if id%2 == 0 { + fmt.Println("at: ", vectorIndex.entryPointID, i, level, id) + } + } + } + } + + require.Nil(t, vectorIndex.Drop(context.Background())) + store.Shutdown(context.Background()) +} + +func TestDelete_WithCleaningUpTombstonesWithHighConcurrency(t *testing.T) { + ctx := context.Background() + os.Setenv("TOMBSTONE_DELETION_CONCURRENCY", "100") + defer os.Unsetenv("TOMBSTONE_DELETION_CONCURRENCY") + // there is a single bulk clean event after all the deletes + vectors, _ := testinghelpers.RandomVecs(1_000, 1, 64) + var 
vectorIndex *hnsw + + store := testinghelpers.NewDummyStore(t) + defer store.Shutdown(context.Background()) + + t.Run("import the test vectors", func(t *testing.T) { + index, err := New(Config{ + RootPath: "doesnt-matter-as-committlogger-is-mocked-out", + ID: "delete-test", + MakeCommitLoggerThunk: MakeNoopCommitLogger, + DistanceProvider: distancer.NewCosineDistanceProvider(), + VectorForIDThunk: func(ctx context.Context, id uint64) ([]float32, error) { + return vectors[int(id)], nil + }, + TempVectorForIDThunk: TempVectorForIDThunk(vectors), + }, ent.UserConfig{ + MaxConnections: 30, + EFConstruction: 128, + + // The actual size does not matter for this test, but if it defaults to + // zero it will constantly think it's full and needs to be deleted - even + // after just being deleted, so make sure to use a positive number here. + VectorCacheMaxObjects: 100000, + }, cyclemanager.NewCallbackGroupNoop(), store) + require.Nil(t, err) + vectorIndex = index + + for i, vec := range vectors { + err := vectorIndex.Add(ctx, uint64(i), vec) + require.Nil(t, err) + } + }) + + fmt.Printf("entrypoint before %d\n", vectorIndex.entryPointID) + t.Run("deleting elements", func(t *testing.T) { + for i := range vectors { + if i < 10 { + continue + } + + err := vectorIndex.Delete(uint64(i)) + require.Nil(t, err) + } + }) + + fmt.Printf("entrypoint after %d\n", vectorIndex.entryPointID) + + t.Run("running the cleanup", func(t *testing.T) { + err := vectorIndex.CleanUpTombstonedNodes(neverStop) + require.Nil(t, err) + }) + + t.Run("verify the graph no longer has any tombstones", func(t *testing.T) { + assert.Len(t, vectorIndex.tombstones, 0) + }) + + t.Run("destroy the index", func(t *testing.T) { + require.Nil(t, vectorIndex.Drop(context.Background())) + }) +} + +func Test_ResetNodesDuringTombstoneCleanup(t *testing.T) { + ctx := context.Background() + vectors := vectorsForDeleteTest() // Use your existing test vectors + store := testinghelpers.NewDummyStore(t) + defer 
store.Shutdown(context.Background()) + + // Initialize the HNSW index + index, err := New(Config{ + RootPath: "doesnt-matter-as-committlogger-is-mocked-out", + ID: "concurrent-growth-test", + MakeCommitLoggerThunk: MakeNoopCommitLogger, + DistanceProvider: distancer.NewCosineDistanceProvider(), + VectorForIDThunk: func(ctx context.Context, id uint64) ([]float32, error) { + return vectors[int(id)%len(vectors)], nil // Wrap around to reuse vectors + }, + TempVectorForIDThunk: TempVectorForIDThunk(vectors), + }, ent.UserConfig{ + MaxConnections: 30, + EFConstruction: 128, + VectorCacheMaxObjects: 100000, + }, cyclemanager.NewCallbackGroupNoop(), store) + require.Nil(t, err) + index.logger = logrus.New() // Ensure logging is set up for debugging + + // Step 1: Import initial vectors + initialSize := 50 + offset := 20000 + for i := 0; i < initialSize; i++ { + err := index.Add(ctx, uint64(offset+i), vectors[i%len(vectors)]) + require.Nil(t, err) + } + + // Step 2: Delete some nodes to create tombstones + for i := 0; i < initialSize; i += 2 { // Delete even-numbered nodes + err := index.Delete(uint64(offset + i)) + require.Nil(t, err) + } + + // Step 3: Run concurrent cleanup and growth + var wg sync.WaitGroup + wg.Add(2) + + // Goroutine 1: Run tombstone cleanup + go func() { + defer wg.Done() + err := index.CleanUpTombstonedNodes(neverStop) + if err != nil { + t.Logf("Cleanup error: %v", err) + } + }() + + // Goroutine 2: Insert new nodes to trigger growth + go func() { + defer wg.Done() + index.Lock() + index.shardedNodeLocks.LockAll() + index.nodes = make([]*vertex, 10) + index.shardedNodeLocks.UnlockAll() + index.Unlock() + if err != nil { + t.Logf("Drop error: %v", err) + } + }() + + // Wait for both operations to complete + wg.Wait() +} + +func Test_DeleteTombstoneMetrics(t *testing.T) { + vectors := vectorsForDeleteTest() + ctx := context.Background() + var vectorIndex *hnsw + + store := testinghelpers.NewDummyStore(t) + defer store.Shutdown(context.Background()) 
+ + metrics := monitoring.GetMetrics() + + t.Run("import the test vectors", func(t *testing.T) { + index, err := New(Config{ + RootPath: "doesnt-matter-as-committlogger-is-mocked-out", + ID: "delete-test", + MakeCommitLoggerThunk: MakeNoopCommitLogger, + DistanceProvider: distancer.NewCosineDistanceProvider(), + VectorForIDThunk: func(ctx context.Context, id uint64) ([]float32, error) { + return vectors[int(id)], nil + }, + TempVectorForIDThunk: TempVectorForIDThunk(vectors), + PrometheusMetrics: metrics, + }, ent.UserConfig{ + MaxConnections: 30, + EFConstruction: 64, + VectorCacheMaxObjects: 100000, + }, cyclemanager.NewCallbackGroupNoop(), store) + require.Nil(t, err) + vectorIndex = index + + for i, vec := range vectors { + err := vectorIndex.Add(ctx, uint64(i), vec) + require.Nil(t, err) + } + }) + + t.Run("deleting every even element", func(t *testing.T) { + for i := range vectors { + if i%2 != 0 { + continue + } + + err := vectorIndex.Delete(uint64(i)) + require.Nil(t, err) + } + }) + + t.Run("verify tombstones metric is updated correctly", func(t *testing.T) { + metric, err := metrics.VectorIndexTombstones.GetMetricWithLabelValues("", "") + require.Nil(t, err) + metricValue := testutil.ToFloat64(metric) + require.Equal(t, float64(len(vectors)/2+1), metricValue, "dimensions should match expected value") + }) + + t.Run("restart index without tombstone cleanup", func(t *testing.T) { + err := vectorIndex.Flush() + require.Nil(t, err) + err = vectorIndex.Shutdown(context.TODO()) + require.Nil(t, err) + index, err := New(Config{ + RootPath: "doesnt-matter-as-committlogger-is-mocked-out", + ID: "delete-test", + MakeCommitLoggerThunk: MakeNoopCommitLogger, + DistanceProvider: distancer.NewCosineDistanceProvider(), + VectorForIDThunk: func(ctx context.Context, id uint64) ([]float32, error) { + return vectors[int(id)], nil + }, + TempVectorForIDThunk: TempVectorForIDThunk(vectors), + }, ent.UserConfig{ + MaxConnections: 30, + EFConstruction: 64, + 
VectorCacheMaxObjects: 100000, + }, cyclemanager.NewCallbackGroupNoop(), store) + require.Nil(t, err) + vectorIndex = index + }) + + t.Run("verify tombstone metric is correct after restart", func(t *testing.T) { + metric, err := metrics.VectorIndexTombstones.GetMetricWithLabelValues("", "") + require.Nil(t, err) + metricValue := testutil.ToFloat64(metric) + require.Equal(t, float64(len(vectors)/2+1), metricValue, "dimensions should match expected value") + }) + + t.Run("running the cleanup", func(t *testing.T) { + err := vectorIndex.CleanUpTombstonedNodes(neverStop) + require.Nil(t, err) + }) + + t.Run("verify the graph no longer has any tombstones", func(t *testing.T) { + assert.Len(t, vectorIndex.tombstones, 0) + }) + + t.Run("verify tombstone metric is zero", func(t *testing.T) { + metric, err := metrics.VectorIndexTombstones.GetMetricWithLabelValues("n/a", "n/a") + require.Nil(t, err) + metricValue := testutil.ToFloat64(metric) + require.Equal(t, float64(0), metricValue, "dimensions should match expected value") + }) + + t.Run("destroy the index", func(t *testing.T) { + require.Nil(t, vectorIndex.Drop(context.Background())) + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/deserializer.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/deserializer.go new file mode 100644 index 0000000000000000000000000000000000000000..d4c42edf3f810d9df3921a8eddaf3bdfcb2b7971 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/deserializer.go @@ -0,0 +1,991 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package hnsw + +import ( + "bufio" + "encoding/binary" + "io" + "math" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/weaviate/weaviate/adapters/repos/db/vector/cache" + "github.com/weaviate/weaviate/adapters/repos/db/vector/compressionhelpers" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/packedconn" + "github.com/weaviate/weaviate/adapters/repos/db/vector/multivector" +) + +const ( + maxConnectionsPerNode = 4096 // max number of connections per node, used to truncate links +) + +type Deserializer struct { + logger logrus.FieldLogger + reusableBuffer []byte + reusableConnectionsSlice []uint64 +} + +type DeserializationResult struct { + Nodes []*vertex + NodesDeleted map[uint64]struct{} + Entrypoint uint64 + Level uint16 + Tombstones map[uint64]struct{} + TombstonesDeleted map[uint64]struct{} + EntrypointChanged bool + CompressionPQData *compressionhelpers.PQData + CompressionSQData *compressionhelpers.SQData + CompressionRQData *compressionhelpers.RQData + CompressionBRQData *compressionhelpers.BRQData + MuveraEnabled bool + EncoderMuvera *multivector.MuveraData + Compressed bool + + // If there is no entry for the links at a level to be replaced, we must + // assume that all links were appended and prior state must exist + // Similarly if we run into a "Clear" we need to explicitly set the replace + // flag, so that future appends aren't always appended and we run into a + // situation where reading multiple condensed logs in succession leads to too + // many connections as discovered in + // https://github.com/weaviate/weaviate/issues/1868 + LinksReplaced map[uint64]map[uint16]struct{} +} + +func (dr DeserializationResult) ReplaceLinks(node uint64, level uint16) bool { + levels, ok := dr.LinksReplaced[node] + if !ok { + return false + } + + _, ok = levels[level] + return ok +} + +func NewDeserializer(logger logrus.FieldLogger) *Deserializer { + return &Deserializer{logger: 
logger} +} + +func (d *Deserializer) resetResusableBuffer(size int) { + if size <= cap(d.reusableBuffer) { + d.reusableBuffer = d.reusableBuffer[:size] + } else { + d.reusableBuffer = make([]byte, size, size*2) + } +} + +func (d *Deserializer) resetReusableConnectionsSlice(size int) { + if size <= cap(d.reusableConnectionsSlice) { + d.reusableConnectionsSlice = d.reusableConnectionsSlice[:size] + } else { + d.reusableConnectionsSlice = make([]uint64, size, size*2) + } +} + +func (d *Deserializer) Do(fd *bufio.Reader, initialState *DeserializationResult, keepLinkReplaceInformation bool) (*DeserializationResult, int, error) { + validLength := 0 + out := initialState + commitTypeMetrics := make(map[HnswCommitType]int) + if out == nil { + out = &DeserializationResult{ + Nodes: make([]*vertex, cache.InitialSize), + NodesDeleted: make(map[uint64]struct{}), + Tombstones: make(map[uint64]struct{}), + TombstonesDeleted: make(map[uint64]struct{}), + LinksReplaced: make(map[uint64]map[uint16]struct{}), + } + } + + for { + ct, err := ReadCommitType(fd) + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return nil, validLength, err + } + commitTypeMetrics[ct]++ + var readThisRound int + switch ct { + case AddNode: + err = d.ReadNode(fd, out) + readThisRound = 10 + case SetEntryPointMaxLevel: + var entrypoint uint64 + var level uint16 + entrypoint, level, err = d.ReadEP(fd) + out.Entrypoint = entrypoint + out.Level = level + out.EntrypointChanged = true + readThisRound = 10 + case AddLinkAtLevel: + err = d.ReadLink(fd, out) + readThisRound = 18 + case AddLinksAtLevel: + readThisRound, err = d.ReadAddLinks(fd, out) + case ReplaceLinksAtLevel: + readThisRound, err = d.ReadLinks(fd, out, keepLinkReplaceInformation) + case AddTombstone: + err = d.ReadAddTombstone(fd, out.Tombstones) + readThisRound = 8 + case RemoveTombstone: + err = d.ReadRemoveTombstone(fd, out.Tombstones, out.TombstonesDeleted) + readThisRound = 8 + case ClearLinks: + err = d.ReadClearLinks(fd, out, 
keepLinkReplaceInformation) + readThisRound = 8 + case ClearLinksAtLevel: + err = d.ReadClearLinksAtLevel(fd, out, keepLinkReplaceInformation) + readThisRound = 10 + case DeleteNode: + err = d.ReadDeleteNode(fd, out, out.NodesDeleted) + readThisRound = 8 + case ResetIndex: + out.Entrypoint = 0 + out.Level = 0 + out.Nodes = make([]*vertex, cache.InitialSize) + out.Tombstones = make(map[uint64]struct{}) + case AddPQ: + var totalRead int + totalRead, err = d.ReadPQ(fd, out) + readThisRound = 9 + totalRead + case AddSQ: + err = d.ReadSQ(fd, out) + readThisRound = 10 + case AddMuvera: + var totalRead int + totalRead, err = d.ReadMuvera(fd, out) + readThisRound = 20 + totalRead + case AddRQ: + var totalRead int + totalRead, err = d.ReadRQ(fd, out) + readThisRound = 16 + totalRead + case AddBRQ: + var totalRead int + totalRead, err = d.ReadBRQ(fd, out) + readThisRound = 16 + totalRead + default: + err = errors.Errorf("unrecognized commit type %d", ct) + } + if err != nil { + // do not return nil, err, because the err could be a recoverable one + return out, validLength, err + } else { + validLength += 1 + readThisRound // 1 byte for commit type + } + } + for commitType, count := range commitTypeMetrics { + d.logger.WithFields(logrus.Fields{"action": "hnsw_deserialization", "ops": count}).Debugf("hnsw commit logger %s", commitType) + } + return out, validLength, nil +} + +func (d *Deserializer) ReadNode(r io.Reader, res *DeserializationResult) error { + id, err := d.readUint64(r) + if err != nil { + return err + } + + level, err := readUint16(r) + if err != nil { + return err + } + + newNodes, changed, err := growIndexToAccomodateNode(res.Nodes, id, d.logger) + if err != nil { + return err + } + + if changed { + res.Nodes = newNodes + } + + if res.Nodes[id] == nil { + conns, err := packedconn.NewWithMaxLayer(uint8(level)) + if err != nil { + return err + } + res.Nodes[id] = &vertex{level: int(level), id: id, connections: conns} + } else { + if res.Nodes[id].connections == 
nil { + res.Nodes[id].connections, err = packedconn.NewWithMaxLayer(uint8(level)) + if err != nil { + return err + } + } else { + res.Nodes[id].connections.GrowLayersTo(uint8(level)) + } + res.Nodes[id].level = int(level) + } + return nil +} + +func (d *Deserializer) ReadEP(r io.Reader) (uint64, uint16, error) { + id, err := d.readUint64(r) + if err != nil { + return 0, 0, err + } + + level, err := readUint16(r) + if err != nil { + return 0, 0, err + } + + return id, level, nil +} + +func (d *Deserializer) ReadLink(r io.Reader, res *DeserializationResult) error { + source, err := d.readUint64(r) + if err != nil { + return err + } + + level, err := readUint16(r) + if err != nil { + return err + } + + target, err := d.readUint64(r) + if err != nil { + return err + } + + newNodes, changed, err := growIndexToAccomodateNode(res.Nodes, source, d.logger) + if err != nil { + return err + } + + if changed { + res.Nodes = newNodes + } + + if res.Nodes[int(source)] == nil { + conns, err := packedconn.NewWithMaxLayer(uint8(level)) + if err != nil { + return err + } + res.Nodes[int(source)] = &vertex{id: source, connections: conns} + } + + if res.Nodes[source].connections == nil { + conns, err := packedconn.NewWithMaxLayer(uint8(level)) + if err != nil { + return err + } + res.Nodes[source].connections = conns + } else { + res.Nodes[source].connections.GrowLayersTo(uint8(level)) + } + res.Nodes[source].connections.InsertAtLayer(target, uint8(level)) + return nil +} + +func (d *Deserializer) ReadLinks(r io.Reader, res *DeserializationResult, + keepReplaceInfo bool, +) (int, error) { + d.resetResusableBuffer(12) + _, err := io.ReadFull(r, d.reusableBuffer) + if err != nil { + return 0, err + } + + source := binary.LittleEndian.Uint64(d.reusableBuffer[0:8]) + level := binary.LittleEndian.Uint16(d.reusableBuffer[8:10]) + length := binary.LittleEndian.Uint16(d.reusableBuffer[10:12]) + + targets, err := d.readUint64Slice(r, int(length)) + if err != nil { + return 0, err + } + + if 
len(targets) >= maxConnectionsPerNode { + d.logger.Warnf("read ReplaceLinksAtLevel with %v (>= %d) connections for node %d at level %d, truncating to %d", len(targets), maxConnectionsPerNode, source, level, maxConnectionsPerNode) + targets = targets[:maxConnectionsPerNode] + length = uint16(len(targets)) + } + + newNodes, changed, err := growIndexToAccomodateNode(res.Nodes, source, d.logger) + if err != nil { + return 0, err + } + + if changed { + res.Nodes = newNodes + } + + if res.Nodes[int(source)] == nil { + res.Nodes[int(source)] = &vertex{id: source} + } + + if res.Nodes[source].connections == nil { + res.Nodes[source].connections = &packedconn.Connections{} + } else { + res.Nodes[source].connections.GrowLayersTo(uint8(level)) + } + res.Nodes[source].connections.ReplaceLayer(uint8(level), targets) + + if keepReplaceInfo { + // mark the replace flag for this node and level, so that new commit logs + // generated on this result (condensing) do not lose information + + if _, ok := res.LinksReplaced[source]; !ok { + res.LinksReplaced[source] = map[uint16]struct{}{} + } + + res.LinksReplaced[source][level] = struct{}{} + } + + return 12 + int(length)*8, nil +} + +func (d *Deserializer) ReadAddLinks(r io.Reader, + res *DeserializationResult, +) (int, error) { + d.resetResusableBuffer(12) + _, err := io.ReadFull(r, d.reusableBuffer) + if err != nil { + return 0, err + } + + source := binary.LittleEndian.Uint64(d.reusableBuffer[0:8]) + level := binary.LittleEndian.Uint16(d.reusableBuffer[8:10]) + length := binary.LittleEndian.Uint16(d.reusableBuffer[10:12]) + + targets, err := d.readUint64Slice(r, int(length)) + if err != nil { + return 0, err + } + + if len(targets) >= maxConnectionsPerNode { + d.logger.Warnf("read AddLinksAtLevel with %v (>= %d) connections for node %d at level %d, truncating to %d", len(targets), maxConnectionsPerNode, source, level, maxConnectionsPerNode) + targets = targets[:maxConnectionsPerNode] + length = uint16(len(targets)) + } + + 
newNodes, changed, err := growIndexToAccomodateNode(res.Nodes, source, d.logger) + if err != nil { + return 0, err + } + + if changed { + res.Nodes = newNodes + } + + if res.Nodes[int(source)] == nil { + res.Nodes[int(source)] = &vertex{id: source} + } + if res.Nodes[source].connections == nil { + res.Nodes[source].connections = &packedconn.Connections{} + } else { + res.Nodes[source].connections.GrowLayersTo(uint8(level)) + } + + res.Nodes[source].connections.BulkInsertAtLayer(targets, uint8(level)) + + return 12 + int(length)*8, nil +} + +func (d *Deserializer) ReadAddTombstone(r io.Reader, tombstones map[uint64]struct{}) error { + id, err := d.readUint64(r) + if err != nil { + return err + } + + tombstones[id] = struct{}{} + + return nil +} + +func (d *Deserializer) ReadRemoveTombstone(r io.Reader, tombstones map[uint64]struct{}, tombstonesDeleted map[uint64]struct{}) error { + id, err := d.readUint64(r) + if err != nil { + return err + } + + _, ok := tombstones[id] + if !ok { + // Tombstone is not present but may exist in older commit log + // wWe need to keep track of it so we can delete it later + tombstonesDeleted[id] = struct{}{} + } else { + // Tombstone is present, we can delete it + delete(tombstones, id) + } + + return nil +} + +func (d *Deserializer) ReadClearLinks(r io.Reader, res *DeserializationResult, + keepReplaceInfo bool, +) error { + id, err := d.readUint64(r) + if err != nil { + return err + } + + newNodes, changed, err := growIndexToAccomodateNode(res.Nodes, id, d.logger) + if err != nil { + return err + } + + if changed { + res.Nodes = newNodes + } + + if res.Nodes[id] == nil { + // node has been deleted or never existed, nothing to do + return nil + } + + res.Nodes[id].connections, err = packedconn.NewWithMaxLayer(uint8(res.Nodes[id].level)) + return err +} + +func (d *Deserializer) ReadClearLinksAtLevel(r io.Reader, res *DeserializationResult, + keepReplaceInfo bool, +) error { + id, err := d.readUint64(r) + if err != nil { + return err + 
} + + level, err := readUint16(r) + if err != nil { + return err + } + + newNodes, changed, err := growIndexToAccomodateNode(res.Nodes, id, d.logger) + if err != nil { + return err + } + + if changed { + res.Nodes = newNodes + } + + if keepReplaceInfo { + // mark the replace flag for this node and level, so that new commit logs + // generated on this result (condensing) do not lose information + + if _, ok := res.LinksReplaced[id]; !ok { + res.LinksReplaced[id] = map[uint16]struct{}{} + } + + res.LinksReplaced[id][level] = struct{}{} + } + + if res.Nodes[id] == nil { + if !keepReplaceInfo { + // node has been deleted or never existed and we are not looking at a + // single log in isolation, nothing to do + return nil + } + + // we need to keep the replace info, meaning we have to explicitly create + // this node in order to be able to store the "clear links" information for + // it + conns, err := packedconn.NewWithMaxLayer(uint8(level)) + if err != nil { + return err + } + res.Nodes[id] = &vertex{ + id: id, + connections: conns, + } + } + + if res.Nodes[id].connections == nil { + conns, err := packedconn.NewWithMaxLayer(uint8(level)) + if err != nil { + return err + } + res.Nodes[id].connections = conns + } else { + res.Nodes[id].connections.GrowLayersTo(uint8(level)) + // Only clear if the layer is not already empty + if res.Nodes[id].connections.LenAtLayer(uint8(level)) > 0 { + res.Nodes[id].connections.ClearLayer(uint8(level)) + } + } + + if keepReplaceInfo { + // mark the replace flag for this node and level, so that new commit logs + // generated on this result (condensing) do not lose information + + if _, ok := res.LinksReplaced[id]; !ok { + res.LinksReplaced[id] = map[uint16]struct{}{} + } + + res.LinksReplaced[id][level] = struct{}{} + } + + return nil +} + +func (d *Deserializer) ReadDeleteNode(r io.Reader, res *DeserializationResult, nodesDeleted map[uint64]struct{}) error { + id, err := d.readUint64(r) + if err != nil { + return err + } + + newNodes, 
changed, err := growIndexToAccomodateNode(res.Nodes, id, d.logger) + if err != nil { + return err + } + + if changed { + res.Nodes = newNodes + } + + res.Nodes[id] = nil + nodesDeleted[id] = struct{}{} + return nil +} + +func ReadTileEncoder(r io.Reader, res *compressionhelpers.PQData, i uint16) (compressionhelpers.PQEncoder, error) { + bins, err := readFloat64(r) + if err != nil { + return nil, err + } + mean, err := readFloat64(r) + if err != nil { + return nil, err + } + stdDev, err := readFloat64(r) + if err != nil { + return nil, err + } + size, err := readFloat64(r) + if err != nil { + return nil, err + } + s1, err := readFloat64(r) + if err != nil { + return nil, err + } + s2, err := readFloat64(r) + if err != nil { + return nil, err + } + segment, err := readUint16(r) + if err != nil { + return nil, err + } + encDistribution, err := readByte(r) + if err != nil { + return nil, err + } + return compressionhelpers.RestoreTileEncoder(bins, mean, stdDev, size, s1, s2, segment, encDistribution), nil +} + +func ReadKMeansEncoder(r io.Reader, data *compressionhelpers.PQData, i uint16) (compressionhelpers.PQEncoder, error) { + ds := int(data.Dimensions / data.M) + centers := make([][]float32, 0, data.Ks) + for k := uint16(0); k < data.Ks; k++ { + center := make([]float32, 0, ds) + for i := 0; i < ds; i++ { + c, err := readFloat32(r) + if err != nil { + return nil, err + } + center = append(center, c) + } + centers = append(centers, center) + } + kms := compressionhelpers.NewKMeansEncoderWithCenters( + int(data.Ks), + ds, + int(i), + centers, + ) + return kms, nil +} + +func (d *Deserializer) ReadPQ(r io.Reader, res *DeserializationResult) (int, error) { + dims, err := readUint16(r) + if err != nil { + return 0, err + } + enc, err := readByte(r) + if err != nil { + return 0, err + } + ks, err := readUint16(r) + if err != nil { + return 0, err + } + m, err := readUint16(r) + if err != nil { + return 0, err + } + dist, err := readByte(r) + if err != nil { + return 0, 
err + } + useBitsEncoding, err := readByte(r) + if err != nil { + return 0, err + } + encoder := compressionhelpers.Encoder(enc) + pqData := compressionhelpers.PQData{ + Dimensions: dims, + EncoderType: encoder, + Ks: ks, + M: m, + EncoderDistribution: byte(dist), + UseBitsEncoding: useBitsEncoding != 0, + } + var encoderReader func(io.Reader, *compressionhelpers.PQData, uint16) (compressionhelpers.PQEncoder, error) + var totalRead int + switch encoder { + case compressionhelpers.UseTileEncoder: + encoderReader = ReadTileEncoder + totalRead = 51 * int(pqData.M) + case compressionhelpers.UseKMeansEncoder: + encoderReader = ReadKMeansEncoder + totalRead = int(pqData.Dimensions) * int(pqData.Ks) * 4 + default: + return 0, errors.New("Unsuported encoder type") + } + + for i := uint16(0); i < m; i++ { + encoder, err := encoderReader(r, &pqData, i) + if err != nil { + return 0, err + } + pqData.Encoders = append(pqData.Encoders, encoder) + } + res.Compressed = true + + res.CompressionPQData = &pqData + + return totalRead, nil +} + +func (d *Deserializer) ReadSQ(r io.Reader, res *DeserializationResult) error { + a, err := readFloat32(r) + if err != nil { + return err + } + b, err := readFloat32(r) + if err != nil { + return err + } + dims, err := readUint16(r) + if err != nil { + return err + } + res.CompressionSQData = &compressionhelpers.SQData{ + A: a, + B: b, + Dimensions: dims, + } + res.Compressed = true + + return nil +} + +/* +buf.WriteByte(byte(AddRQ)) // 1 +binary.Write(&buf, binary.LittleEndian, data.Dimension) // 4 +binary.Write(&buf, binary.LittleEndian, data.DataBits) // 1 +binary.Write(&buf, binary.LittleEndian, data.QueryBits) // 1 +binary.Write(&buf, binary.LittleEndian, data.Rotation.OutputDim) // 4 +binary.Write(&buf, binary.LittleEndian, data.Rotation.Rounds) // 4 +*/ +func (d *Deserializer) ReadRQ(r io.Reader, res *DeserializationResult) (int, error) { + inputDim, err := readUint32(r) + if err != nil { + return 0, err + } + bits, err := readUint32(r) 
+ if err != nil { + return 0, err + } + outputDim, err := readUint32(r) + if err != nil { + return 0, err + } + rounds, err := readUint32(r) + if err != nil { + return 0, err + } + + swapSize := 2 * rounds * (outputDim / 2) * 2 + signSize := 4 * rounds * outputDim + totalRead := int(swapSize) + int(signSize) + + swaps := make([][]compressionhelpers.Swap, rounds) + for i := uint32(0); i < rounds; i++ { + swaps[i] = make([]compressionhelpers.Swap, outputDim/2) + for j := uint32(0); j < outputDim/2; j++ { + swaps[i][j].I, err = readUint16(r) + if err != nil { + return 0, err + } + swaps[i][j].J, err = readUint16(r) + if err != nil { + return 0, err + } + } + } + + signs := make([][]float32, rounds) + for i := uint32(0); i < rounds; i++ { + signs[i] = make([]float32, outputDim) + for j := uint32(0); j < outputDim; j++ { + sign, err := readFloat32(r) + if err != nil { + return 0, err + } + signs[i][j] = sign + } + } + + res.CompressionRQData = &compressionhelpers.RQData{ + InputDim: inputDim, + Bits: bits, + Rotation: compressionhelpers.FastRotation{ + OutputDim: outputDim, + Rounds: rounds, + Swaps: swaps, + Signs: signs, + }, + } + res.Compressed = true + + return totalRead, nil +} + +func (d *Deserializer) ReadMuvera(r io.Reader, res *DeserializationResult) (int, error) { + kSim, err := readUint32(r) + if err != nil { + return 0, err + } + numClusters, err := readUint32(r) + if err != nil { + return 0, err + } + dimensions, err := readUint32(r) + if err != nil { + return 0, err + } + dProjections, err := readUint32(r) + if err != nil { + return 0, err + } + repetitions, err := readUint32(r) + if err != nil { + return 0, err + } + + totalRead := int(repetitions)*int(kSim)*int(dimensions)*4 + + int(repetitions)*int(dProjections)*int(dimensions)*4 + + gaussians := make([][][]float32, repetitions) + for i := uint32(0); i < repetitions; i++ { + gaussians[i] = make([][]float32, kSim) + for j := uint32(0); j < kSim; j++ { + gaussians[i][j] = make([]float32, dimensions) + 
for k := uint32(0); k < dimensions; k++ { + gaussians[i][j][k], err = readFloat32(r) + if err != nil { + return 0, err + } + } + } + } + + s := make([][][]float32, repetitions) + for i := uint32(0); i < repetitions; i++ { + s[i] = make([][]float32, dProjections) + for j := uint32(0); j < dProjections; j++ { + s[i][j] = make([]float32, dimensions) + for k := uint32(0); k < dimensions; k++ { + s[i][j][k], err = readFloat32(r) + if err != nil { + return 0, err + } + } + } + } + muveraData := multivector.MuveraData{ + KSim: kSim, + NumClusters: numClusters, + Dimensions: dimensions, + DProjections: dProjections, + Repetitions: repetitions, + Gaussians: gaussians, + S: s, + } + res.EncoderMuvera = &muveraData + res.MuveraEnabled = true + return totalRead, nil +} + +func (d *Deserializer) ReadBRQ(r io.Reader, res *DeserializationResult) (int, error) { + inputDim, err := readUint32(r) + if err != nil { + return 0, err + } + outputDim, err := readUint32(r) + if err != nil { + return 0, err + } + rounds, err := readUint32(r) + if err != nil { + return 0, err + } + + swapSize := 2 * rounds * (outputDim / 2) * 2 + signSize := 4 * rounds * outputDim + roundingSize := 4 * outputDim + totalRead := int(swapSize) + int(signSize) + int(roundingSize) + + swaps := make([][]compressionhelpers.Swap, rounds) + for i := uint32(0); i < rounds; i++ { + swaps[i] = make([]compressionhelpers.Swap, outputDim/2) + for j := uint32(0); j < outputDim/2; j++ { + swaps[i][j].I, err = readUint16(r) + if err != nil { + return 0, err + } + swaps[i][j].J, err = readUint16(r) + if err != nil { + return 0, err + } + } + } + + signs := make([][]float32, rounds) + for i := uint32(0); i < rounds; i++ { + signs[i] = make([]float32, outputDim) + for j := uint32(0); j < outputDim; j++ { + sign, err := readFloat32(r) + if err != nil { + return 0, err + } + signs[i][j] = sign + } + } + + rounding := make([]float32, outputDim) + for i := uint32(0); i < outputDim; i++ { + rounding[i], err = readFloat32(r) + if err 
!= nil { + return 0, err + } + } + + res.CompressionBRQData = &compressionhelpers.BRQData{ + InputDim: inputDim, + Rotation: compressionhelpers.FastRotation{ + OutputDim: outputDim, + Rounds: rounds, + Swaps: swaps, + Signs: signs, + }, + Rounding: rounding, + } + res.Compressed = true + + return totalRead, nil +} + +func (d *Deserializer) readUint64(r io.Reader) (uint64, error) { + var value uint64 + d.resetResusableBuffer(8) + _, err := io.ReadFull(r, d.reusableBuffer) + if err != nil { + return 0, errors.Wrap(err, "failed to read uint64") + } + + value = binary.LittleEndian.Uint64(d.reusableBuffer) + + return value, nil +} + +func readFloat64(r io.Reader) (float64, error) { + var b [8]byte + _, err := io.ReadFull(r, b[:]) + if err != nil { + return 0, errors.Wrap(err, "failed to read float64") + } + + bits := binary.LittleEndian.Uint64(b[:]) + return math.Float64frombits(bits), nil +} + +func readFloat32(r io.Reader) (float32, error) { + var b [4]byte + _, err := io.ReadFull(r, b[:]) + if err != nil { + return 0, errors.Wrap(err, "failed to read float32") + } + + bits := binary.LittleEndian.Uint32(b[:]) + return math.Float32frombits(bits), nil +} + +func readUint16(r io.Reader) (uint16, error) { + var b [2]byte + _, err := io.ReadFull(r, b[:]) + if err != nil { + return 0, errors.Wrap(err, "failed to read uint16") + } + + return binary.LittleEndian.Uint16(b[:]), nil +} + +func readUint32(r io.Reader) (uint32, error) { + var b [4]byte + _, err := io.ReadFull(r, b[:]) + if err != nil { + return 0, errors.Wrap(err, "failed to read uint32") + } + + return binary.LittleEndian.Uint32(b[:]), nil +} + +func readByte(r io.Reader) (byte, error) { + var b [1]byte + _, err := io.ReadFull(r, b[:]) + if err != nil { + return 0, errors.Wrap(err, "failed to read byte") + } + + return b[0], nil +} + +func ReadCommitType(r io.Reader) (HnswCommitType, error) { + var b [1]byte + if _, err := io.ReadFull(r, b[:]); err != nil { + return 0, errors.Wrap(err, "failed to read commit 
type") + } + + return HnswCommitType(b[0]), nil +} + +func (d *Deserializer) readUint64Slice(r io.Reader, length int) ([]uint64, error) { + d.resetResusableBuffer(length * 8) + d.resetReusableConnectionsSlice(length) + _, err := io.ReadFull(r, d.reusableBuffer) + if err != nil { + return nil, errors.Wrap(err, "failed to read uint64 slice") + } + + for i := range d.reusableConnectionsSlice { + d.reusableConnectionsSlice[i] = binary.LittleEndian.Uint64(d.reusableBuffer[i*8 : (i+1)*8]) + } + + return d.reusableConnectionsSlice, nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/deserializer_benchmark_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/deserializer_benchmark_test.go new file mode 100644 index 0000000000000000000000000000000000000000..ddb5a47f477611d21f6ad90523657a93231acf53 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/deserializer_benchmark_test.go @@ -0,0 +1,223 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package hnsw + +import ( + "bufio" + "bytes" + "encoding/binary" + "math/rand" + "testing" + "time" + + "github.com/sirupsen/logrus/hooks/test" +) + +func sampleCommitType(r *rand.Rand) HnswCommitType { + sample := r.Float64() + if sample < 0.75 { + return AddLinkAtLevel + } else if sample < 0.95 { + return ReplaceLinksAtLevel + } else if sample < 0.97 { + return AddNode + } else { + return ClearLinksAtLevel + } +} + +func BenchmarkDeserializerPerf(b *testing.B) { + buf := new(bytes.Buffer) + writer := bufio.NewWriter(buf) + + maxNodeID := uint64(1000000) + r := rand.New(rand.NewSource(42)) + const M = 32 + commitLogs := 5000000 + connections := make([]int, maxNodeID) + skipped := 0 + + // Generate realistic level using HNSW probability (most at level 0) + generateLevel := func() uint16 { + level := 0 + for r.Float64() < 0.5 && level < 6 { // Cap at level 6 (realistic max) + level++ + } + return uint16(level) + } + + // Generate realistic connection count based on level and HNSW limits + generateConnectionCount := func(level uint16) uint16 { + var maxConn int + if level == 0 { + maxConn = M * 2 // Allow some overflow during construction + } else { + maxConn = M + } + + // Most connections are near the limit (realistic HNSW behavior) + minConn := maxConn / 2 + return uint16(r.Intn(maxConn-minConn+1) + minConn) + } + + for i := 0; i < commitLogs; i++ { + commit := sampleCommitType(r) + + switch commit { + case ReplaceLinksAtLevel: + sourceID := uint64(r.Int63n(int64(maxNodeID))) + level := generateLevel() + connCount := generateConnectionCount(level) + + if connections[sourceID] > 2*M { + skipped += 1 + continue + } else { + connections[sourceID] += int(connCount) + } + + writer.WriteByte(byte(commit)) + binary.Write(writer, binary.LittleEndian, sourceID) + binary.Write(writer, binary.LittleEndian, level) + binary.Write(writer, binary.LittleEndian, connCount) + for j := 0; j < int(connCount); j++ { + binary.Write(writer, 
binary.LittleEndian, uint64(r.Int63n(int64(maxNodeID)))) + } + + case AddNode: + nodeID := uint64(r.Int63n(int64(maxNodeID))) + level := generateLevel() + writer.WriteByte(byte(commit)) + binary.Write(writer, binary.LittleEndian, nodeID) + binary.Write(writer, binary.LittleEndian, level) + + case AddLinkAtLevel: + sourceID := uint64(r.Int63n(int64(maxNodeID))) + + if connections[sourceID] > 2*M { + skipped += 1 + continue + } else { + connections[sourceID] += 1 + } + + level := generateLevel() + target := uint64(r.Int63n(int64(maxNodeID))) + + writer.WriteByte(byte(commit)) + binary.Write(writer, binary.LittleEndian, sourceID) + binary.Write(writer, binary.LittleEndian, level) + binary.Write(writer, binary.LittleEndian, target) + + case ClearLinksAtLevel: + nodeID := uint64(r.Int63n(int64(maxNodeID))) + connections[nodeID] = 0 + level := generateLevel() + writer.WriteByte(byte(commit)) + binary.Write(writer, binary.LittleEndian, nodeID) + binary.Write(writer, binary.LittleEndian, level) + default: + continue + } + } + writer.Flush() + + // Create deserializer + logger, _ := test.NewNullLogger() + deserializer := NewDeserializer(logger) + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + reader := bufio.NewReader(bytes.NewReader(buf.Bytes())) + _, _, err := deserializer.Do(reader, nil, true) + if err != nil { + b.Fatal(err) + } + } + + b.ReportMetric(float64(commitLogs), "commits/op") + b.ReportMetric(float64(skipped), "skipped") + nsPerOp := float64(b.Elapsed().Nanoseconds()) / float64(b.N) + commitsPerSecond := float64(commitLogs) * float64(time.Second.Nanoseconds()) / nsPerOp + b.ReportMetric(commitsPerSecond, "commits/sec") +} + +func BenchmarkAddLinksAtLevelPerf(b *testing.B) { + buf := new(bytes.Buffer) + writer := bufio.NewWriter(buf) + + maxNodeID := uint64(1000000) + r := rand.New(rand.NewSource(42)) + const linksPerOperation = 64 + commitLogs := 1000000 // 1M operations + level := uint16(0) // Always use level 0 for simiplicity + + // Track which nodes 
have links at level 0 + nodesWithLinks := make(map[uint64]bool) + addLinksCount := 0 + clearLinksCount := 0 + + for i := 0; i < commitLogs; i++ { + sourceID := uint64(r.Int63n(int64(maxNodeID))) + + // Check if this node already has links at level 0 + if nodesWithLinks[sourceID] { + // Clear existing links first + writer.WriteByte(byte(ClearLinksAtLevel)) + binary.Write(writer, binary.LittleEndian, sourceID) + binary.Write(writer, binary.LittleEndian, level) + clearLinksCount++ + nodesWithLinks[sourceID] = false + } + + // Add 64 random links + targets := make([]uint64, linksPerOperation) + for j := 0; j < linksPerOperation; j++ { + targets[j] = uint64(r.Int63n(int64(maxNodeID))) + } + + writer.WriteByte(byte(AddLinksAtLevel)) + binary.Write(writer, binary.LittleEndian, sourceID) + binary.Write(writer, binary.LittleEndian, level) + binary.Write(writer, binary.LittleEndian, uint16(linksPerOperation)) + for _, target := range targets { + binary.Write(writer, binary.LittleEndian, target) + } + + nodesWithLinks[sourceID] = true + addLinksCount++ + } + writer.Flush() + + // Create deserializer + logger, _ := test.NewNullLogger() + deserializer := NewDeserializer(logger) + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + reader := bufio.NewReader(bytes.NewReader(buf.Bytes())) + _, _, err := deserializer.Do(reader, nil, true) + if err != nil { + b.Fatal(err) + } + } + + b.ReportMetric(float64(commitLogs), "operations/op") + b.ReportMetric(float64(addLinksCount), "add_links_operations") + b.ReportMetric(float64(clearLinksCount), "clear_links_operations") + b.ReportMetric(float64(linksPerOperation), "links_per_operation") + nsPerOp := float64(b.Elapsed().Nanoseconds()) / float64(b.N) + operationsPerSecond := float64(commitLogs) * float64(time.Second.Nanoseconds()) / nsPerOp + b.ReportMetric(operationsPerSecond, "operations/sec") +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/deserializer_test.go 
b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/deserializer_test.go new file mode 100644 index 0000000000000000000000000000000000000000..966ab6420dc8e071f5a3e3025d13da17e0fa0a33 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/deserializer_test.go @@ -0,0 +1,741 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package hnsw + +import ( + "bufio" + "bytes" + "context" + "encoding/binary" + "math" + "math/rand" + "os" + "testing" + + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/repos/db/vector/compressionhelpers" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/packedconn" + "github.com/weaviate/weaviate/adapters/repos/db/vector/multivector" + "github.com/weaviate/weaviate/adapters/repos/db/vector/testinghelpers" + "github.com/weaviate/weaviate/entities/cyclemanager" +) + +func BenchmarkDeserializer2ReadUint64(b *testing.B) { + b.StopTimer() + + randUint64 := rand.Uint64() + + val := make([]byte, 8) + binary.LittleEndian.PutUint64(val, uint64(randUint64)) + data := bytes.NewReader(val) + logger, _ := test.NewNullLogger() + d := NewDeserializer(logger) + reader := bufio.NewReader(data) + b.StartTimer() + + for i := 0; i < b.N; i++ { + d.readUint64(reader) + } +} + +func BenchmarkDeserializer2ReadUint16(b *testing.B) { + b.StopTimer() + + randUint16 := uint16(rand.Uint32()) + + val := make([]byte, 2) + binary.LittleEndian.PutUint16(val, randUint16) + data := bytes.NewReader(val) + reader := bufio.NewReader(data) + b.StartTimer() + + for i := 0; i < b.N; i++ { + readUint16(reader) + } +} + +func BenchmarkDeserializer2ReadCommitType(b *testing.B) { 
+ b.StopTimer() + + commitType := SetEntryPointMaxLevel + + val := make([]byte, 1) + val[0] = byte(commitType) + data := bytes.NewReader(val) + reader := bufio.NewReader(data) + b.StartTimer() + + for i := 0; i < b.N; i++ { + ReadCommitType(reader) + } +} + +func BenchmarkDeserializer2ReadUint64Slice(b *testing.B) { + b.StopTimer() + + uint64Slice := []uint64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10} + + val := make([]byte, len(uint64Slice)*8) + for i, v := range uint64Slice { + binary.LittleEndian.PutUint64(val[i*8:], uint64(v)) + } + + data := bytes.NewReader(val) + logger, _ := test.NewNullLogger() + d := NewDeserializer(logger) + reader := bufio.NewReader(data) + b.StartTimer() + + for i := 0; i < b.N; i++ { + d.readUint64Slice(reader, len(uint64Slice)) + } +} + +func TestDeserializer2ReadCommitType(t *testing.T) { + commitTypes := []HnswCommitType{ + AddNode, + SetEntryPointMaxLevel, + AddLinkAtLevel, + ReplaceLinksAtLevel, + AddTombstone, + RemoveTombstone, + ClearLinks, + DeleteNode, + ResetIndex, + AddPQ, + } + for _, commitType := range commitTypes { + b := make([]byte, 1) + b[0] = byte(commitType) + data := bytes.NewReader(b) + reader := bufio.NewReader(data) + res, err := ReadCommitType(reader) + if err != nil { + t.Errorf("Error reading commit type: %v", err) + } + if res != commitType { + t.Errorf("Commit type is not equal") + } + + } +} + +func TestDeserializerReadDeleteNode(t *testing.T) { + nodes := generateDummyVertices(4) + res := &DeserializationResult{ + Nodes: nodes, + NodesDeleted: map[uint64]struct{}{}, + } + ids := []uint64{2, 3, 4, 5, 6} + + for _, id := range ids { + val := make([]byte, 8) + binary.LittleEndian.PutUint64(val, id) + data := bytes.NewReader(val) + logger, _ := test.NewNullLogger() + d := NewDeserializer(logger) + reader := bufio.NewReader(data) + + err := d.ReadDeleteNode(reader, res, res.NodesDeleted) + if err != nil { + t.Errorf("Error reading commit type: %v", err) + } + } + + for _, id := range ids { + if _, ok := 
res.NodesDeleted[id]; !ok { + t.Errorf("Node %d not marked deleted", id) + } + } +} + +func TestDeserializerReadClearLinks(t *testing.T) { + nodes := generateDummyVertices(4) + res := &DeserializationResult{ + Nodes: nodes, + } + ids := []uint64{2, 3, 4, 5, 6} + + for _, id := range ids { + val := make([]byte, 8) + binary.LittleEndian.PutUint64(val, id) + data := bytes.NewReader(val) + logger, _ := test.NewNullLogger() + d := NewDeserializer(logger) + + reader := bufio.NewReader(data) + + err := d.ReadClearLinks(reader, res, true) + if err != nil { + t.Errorf("Error reading links: %v", err) + } + } +} + +func dummyInitialDeserializerState() *DeserializationResult { + conns, _ := packedconn.NewWithMaxLayer(15) + return &DeserializationResult{ + LinksReplaced: make(map[uint64]map[uint16]struct{}), + Nodes: []*vertex{ + nil, + nil, + { + // This is a lower level than we will read, so this node will require + // growing + level: 1, + }, + { + // This is a lower level than we will read, so this node will require + // growing + level: 8, + connections: conns, + }, + }, + } +} + +func TestDeserializerReadNode(t *testing.T) { + res := dummyInitialDeserializerState() + ids := []uint64{2, 3, 4, 5, 6} + + for _, id := range ids { + val := make([]byte, 10) + level := uint16(id * 2) + binary.LittleEndian.PutUint64(val[:8], id) + binary.LittleEndian.PutUint16(val[8:10], level) + data := bytes.NewReader(val) + logger, _ := test.NewNullLogger() + d := NewDeserializer(logger) + + reader := bufio.NewReader(data) + + err := d.ReadNode(reader, res) + require.Nil(t, err) + require.NotNil(t, res.Nodes[id]) + assert.Equal(t, int(level), res.Nodes[id].level) + } +} + +func TestDeserializerReadEP(t *testing.T) { + ids := []uint64{2, 3, 4, 5, 6} + + for _, id := range ids { + val := make([]byte, 10) + level := uint16(id * 2) + binary.LittleEndian.PutUint64(val[:8], id) + binary.LittleEndian.PutUint16(val[8:10], level) + data := bytes.NewReader(val) + logger, _ := test.NewNullLogger() + d := 
NewDeserializer(logger) + + reader := bufio.NewReader(data) + + ep, l, err := d.ReadEP(reader) + require.Nil(t, err) + assert.Equal(t, id, ep) + assert.Equal(t, level, l) + } +} + +func TestDeserializerReadLink(t *testing.T) { + res := dummyInitialDeserializerState() + ids := []uint64{2, 3, 4, 5, 6} + + for _, id := range ids { + level := uint16(id * 2) + target := id * 3 + val := make([]byte, 18) + binary.LittleEndian.PutUint64(val[:8], id) + binary.LittleEndian.PutUint16(val[8:10], level) + binary.LittleEndian.PutUint64(val[10:18], target) + data := bytes.NewReader(val) + logger, _ := test.NewNullLogger() + d := NewDeserializer(logger) + + reader := bufio.NewReader(data) + + err := d.ReadLink(reader, res) + require.Nil(t, err) + require.NotNil(t, res.Nodes[id]) + conns := res.Nodes[id].connections.GetLayer(uint8(level)) + lastAddedConnection := conns[len(conns)-1] + assert.Equal(t, target, lastAddedConnection) + } +} + +func TestDeserializerReadLinks(t *testing.T) { + res := dummyInitialDeserializerState() + ids := []uint64{2, 3, 4, 5, 6} + + for _, id := range ids { + level := uint16(id * 2) + connLen := uint16(id * 4) + val := make([]byte, 12+connLen*8) + binary.LittleEndian.PutUint64(val[:8], id) + binary.LittleEndian.PutUint16(val[8:10], level) + binary.LittleEndian.PutUint16(val[10:12], connLen) + for i := 0; i < int(connLen); i++ { + target := id + uint64(i) + binary.LittleEndian.PutUint64(val[12+(i*8):12+(i*8+8)], target) + } + data := bytes.NewReader(val) + logger, _ := test.NewNullLogger() + d := NewDeserializer(logger) + + reader := bufio.NewReader(data) + + _, err := d.ReadLinks(reader, res, true) + require.Nil(t, err) + require.NotNil(t, res.Nodes[id]) + conns := res.Nodes[id].connections.GetLayer(uint8(level)) + lastAddedConnection := conns[len(conns)-1] + assert.Equal(t, id+uint64(connLen)-1, lastAddedConnection) + } +} + +func TestDeserializerTruncateReadLinks(t *testing.T) { + res := dummyInitialDeserializerState() + ids := []uint64{1, 2, 3, 4} + 
+ for _, id := range ids { + level := uint16(id * 2) + var connLen uint16 + if id%2 == 0 { + connLen = uint16(5000) + } else { + connLen = uint16(2000) + } + val := make([]byte, 12+connLen*8) + binary.LittleEndian.PutUint64(val[:8], id) + binary.LittleEndian.PutUint16(val[8:10], level) + binary.LittleEndian.PutUint16(val[10:12], connLen) + for i := 0; i < int(connLen); i++ { + target := id + uint64(i) + binary.LittleEndian.PutUint64(val[12+(i*8):12+(i*8+8)], target) + } + data := bytes.NewReader(val) + logger, _ := test.NewNullLogger() + d := NewDeserializer(logger) + + reader := bufio.NewReader(data) + + _, err := d.ReadLinks(reader, res, true) + require.Nil(t, err) + require.NotNil(t, res.Nodes[id]) + conns := res.Nodes[id].connections.GetLayer(uint8(level)) + if id%2 == 0 { + require.Equal(t, 4095, len(conns), "Expected connections to be truncated to 4095") + } else { + require.Equal(t, 2000, len(conns), "Expected connections to be read as is") + } + } +} + +func TestDeserializerReadAddLinks(t *testing.T) { + res := dummyInitialDeserializerState() + ids := []uint64{2, 3, 4, 5, 6} + + for _, id := range ids { + level := uint16(id * 2) + connLen := uint16(id * 4) + val := make([]byte, 12+connLen*8) + binary.LittleEndian.PutUint64(val[:8], id) + binary.LittleEndian.PutUint16(val[8:10], level) + binary.LittleEndian.PutUint16(val[10:12], connLen) + for i := 0; i < int(connLen); i++ { + target := id + uint64(i) + binary.LittleEndian.PutUint64(val[12+(i*8):12+(i*8+8)], target) + } + data := bytes.NewReader(val) + logger, _ := test.NewNullLogger() + d := NewDeserializer(logger) + + reader := bufio.NewReader(data) + + _, err := d.ReadAddLinks(reader, res) + require.Nil(t, err) + require.NotNil(t, res.Nodes[id]) + conns := res.Nodes[id].connections.GetLayer(uint8(level)) + lastAddedConnection := conns[len(conns)-1] + assert.Equal(t, id+uint64(connLen)-1, lastAddedConnection) + } +} + +func TestDeserializerTruncateReadAddLinks(t *testing.T) { + res := 
dummyInitialDeserializerState() + ids := []uint64{1, 2, 3, 4} + + for _, id := range ids { + level := uint16(id * 2) + var connLen uint16 + if id%2 == 0 { + connLen = uint16(5000) + } else { + connLen = uint16(2000) + } + val := make([]byte, 12+connLen*8) + binary.LittleEndian.PutUint64(val[:8], id) + binary.LittleEndian.PutUint16(val[8:10], level) + binary.LittleEndian.PutUint16(val[10:12], connLen) + for i := 0; i < int(connLen); i++ { + target := id + uint64(i) + binary.LittleEndian.PutUint64(val[12+(i*8):12+(i*8+8)], target) + } + data := bytes.NewReader(val) + logger, _ := test.NewNullLogger() + d := NewDeserializer(logger) + + reader := bufio.NewReader(data) + + _, err := d.ReadAddLinks(reader, res) + require.Nil(t, err) + require.NotNil(t, res.Nodes[id]) + conns := res.Nodes[id].connections.GetLayer(uint8(level)) + if id%2 == 0 { + require.Equal(t, 4095, len(conns), "Expected connections to be truncated to 4095") + } else { + require.Equal(t, 2000, len(conns), "Expected connections to be read as is") + } + } +} + +func TestDeserializerAddTombstone(t *testing.T) { + tombstones := map[uint64]struct{}{} + ids := []uint64{2, 3, 4, 5, 6} + + for _, id := range ids { + val := make([]byte, 8) + binary.LittleEndian.PutUint64(val[:8], id) + data := bytes.NewReader(val) + logger, _ := test.NewNullLogger() + d := NewDeserializer(logger) + + reader := bufio.NewReader(data) + + err := d.ReadAddTombstone(reader, tombstones) + require.Nil(t, err) + } + + expected := map[uint64]struct{}{ + 2: {}, + 3: {}, + 4: {}, + 5: {}, + 6: {}, + } + + assert.Equal(t, expected, tombstones) +} + +func TestDeserializerRemoveTombstone(t *testing.T) { + tombstones := map[uint64]struct{}{ + 1: {}, + 2: {}, + 3: {}, + 4: {}, + 5: {}, + } + ids := []uint64{2, 3, 4, 5, 7} + deletedTombstones := map[uint64]struct{}{ + 6: {}, + } + + for _, id := range ids { + val := make([]byte, 8) + binary.LittleEndian.PutUint64(val[:8], id) + data := bytes.NewReader(val) + logger, _ := test.NewNullLogger() + d 
:= NewDeserializer(logger) + + reader := bufio.NewReader(data) + + err := d.ReadRemoveTombstone(reader, tombstones, deletedTombstones) + require.Nil(t, err) + } + + expectedTombstones := map[uint64]struct{}{ + 1: {}, + } + + expectedDeletedTombstones := map[uint64]struct{}{ + 6: {}, + 7: {}, + } + + assert.Equal(t, expectedTombstones, tombstones) + assert.Equal(t, expectedDeletedTombstones, deletedTombstones) +} + +func TestDeserializerClearLinksAtLevel(t *testing.T) { + conns, _ := packedconn.NewWithMaxLayer(3) + res := &DeserializationResult{ + LinksReplaced: make(map[uint64]map[uint16]struct{}), + Nodes: []*vertex{ + nil, + nil, + { + // This is a lower level than we will read, so this node will require + // growing + level: 1, + }, + { + // This is a lower level than we will read, so this node will require + // growing + level: 4, + connections: conns, + }, + nil, + nil, + }, + } + ids := []uint64{2, 3, 4, 5, 6} + + for _, id := range ids { + level := uint16(id * 2) + val := make([]byte, 10) + binary.LittleEndian.PutUint64(val[:8], id) + binary.LittleEndian.PutUint16(val[8:10], level) + data := bytes.NewReader(val) + logger, _ := test.NewNullLogger() + d := NewDeserializer(logger) + + reader := bufio.NewReader(data) + + err := d.ReadClearLinksAtLevel(reader, res, true) + require.Nil(t, err) + } +} + +func TestDeserializerTotalReadPQ(t *testing.T) { + rootPath := t.TempDir() + ctx := context.Background() + + logger, _ := test.NewNullLogger() + commitLogger, err := NewCommitLogger(rootPath, "tmpLogger", logger, + cyclemanager.NewCallbackGroupNoop()) + require.Nil(t, err) + dimensions := 16 + centroids := 16 + + t.Run("add pq data to the first log", func(t *testing.T) { + data, _ := testinghelpers.RandomVecs(20, 0, dimensions) + kms := make([]compressionhelpers.PQEncoder, 4) + for i := 0; i < 4; i++ { + kms[i] = compressionhelpers.NewKMeansEncoder( + dimensions, + 4, + int(i), + ) + err := kms[i].Fit(data) + require.Nil(t, err) + } + pqData := 
compressionhelpers.PQData{ + Ks: uint16(centroids), + M: 4, + Dimensions: uint16(dimensions), + EncoderType: compressionhelpers.UseKMeansEncoder, + EncoderDistribution: byte(compressionhelpers.NormalEncoderDistribution), + UseBitsEncoding: false, + TrainingLimit: 100_000, + Encoders: kms, + } + + commitLogger.AddPQCompression(pqData) + require.Nil(t, commitLogger.Flush()) + require.Nil(t, commitLogger.Shutdown(ctx)) + }) + + t.Run("deserialize the first log", func(t *testing.T) { + nullLogger, _ := test.NewNullLogger() + commitLoggerPath := rootPath + "/tmpLogger.hnsw.commitlog.d" + + fileName, found, err := getCurrentCommitLogFileName(commitLoggerPath) + require.Nil(t, err) + require.True(t, found) + + t.Logf("name: %v\n", fileName) + + fd, err := os.Open(commitLoggerPath + "/" + fileName) + require.Nil(t, err) + + defer fd.Close() + fdBuf := bufio.NewReaderSize(fd, 256*1024) + + _, deserializeSize, err := NewDeserializer(nullLogger).Do(fdBuf, nil, true) + require.Nil(t, err) + + require.Equal(t, 4*centroids*dimensions+10, deserializeSize) + t.Logf("deserializeSize: %v\n", deserializeSize) + }) +} + +func TestDeserializerTotalReadMUVERA(t *testing.T) { + rootPath := t.TempDir() + ctx := context.Background() + + logger, _ := test.NewNullLogger() + commitLogger, err := NewCommitLogger(rootPath, "tmpLogger", logger, + cyclemanager.NewCallbackGroupNoop()) + require.Nil(t, err) + + repetitions := 2 + ksim := 2 + dimensions := 5 + dprojections := 2 + t.Run("add muvera data to the first log", func(t *testing.T) { + gaussians := [][][]float32{ + { + {1, 2, 3, 4, 5}, // cluster 1 + {1, 2, 3, 4, 5}, // cluster 2 + }, // rep 1 + { + {5, 6, 7, 8, 9}, // cluster 1 + {5, 6, 7, 8, 9}, // cluster 2 + }, // rep 2 + } // (repetitions, kSim, dimensions) + + s := [][][]float32{ + { + {-1, 1, 1, -1, 1}, // dprojection 1 + {1, -1, 1, 1, -1}, // dprojection 2 + }, // rep 1 + { + {-1, 1, 1, -1, 1}, // dprojection 1 + {1, -1, 1, 1, -1}, // dprojection 2 + }, // rep 2 + } // (repetitions, 
dProjections, dimensions) + + muveraData := multivector.MuveraData{ + KSim: uint32(ksim), + NumClusters: uint32(math.Pow(2, float64(ksim))), + Dimensions: uint32(dimensions), + DProjections: uint32(dprojections), + Repetitions: uint32(repetitions), + Gaussians: gaussians, + S: s, + } + + commitLogger.AddMuvera(muveraData) + require.Nil(t, commitLogger.Flush()) + require.Nil(t, commitLogger.Shutdown(ctx)) + }) + + t.Run("deserialize the first log", func(t *testing.T) { + nullLogger, _ := test.NewNullLogger() + commitLoggerPath := rootPath + "/tmpLogger.hnsw.commitlog.d" + + fileName, found, err := getCurrentCommitLogFileName(commitLoggerPath) + require.Nil(t, err) + require.True(t, found) + + t.Logf("name: %v\n", fileName) + + fd, err := os.Open(commitLoggerPath + "/" + fileName) + require.Nil(t, err) + + defer fd.Close() + fdBuf := bufio.NewReaderSize(fd, 256*1024) + + _, deserializeSize, err := NewDeserializer(nullLogger).Do(fdBuf, nil, true) + require.Nil(t, err) + + gaussianSize := 4 * repetitions * ksim * dimensions + randomSize := 4 * repetitions * dprojections * dimensions + require.Equal(t, gaussianSize+randomSize+21, deserializeSize) + t.Logf("deserializeSize: %v\n", deserializeSize) + }) +} + +func TestDeserializerTotalReadRQ(t *testing.T) { + rootPath := t.TempDir() + ctx := context.Background() + + logger, _ := test.NewNullLogger() + commitLogger, err := NewCommitLogger(rootPath, "tmpLogger", logger, + cyclemanager.NewCallbackGroupNoop()) + require.Nil(t, err) + + dimension := uint32(10) + bits := uint32(8) + rotation := compressionhelpers.FastRotation{ + OutputDim: 4, + Rounds: 5, + Swaps: [][]compressionhelpers.Swap{ + { + {I: 0, J: 2}, + {I: 1, J: 3}, + }, + { + {I: 4, J: 6}, + {I: 5, J: 7}, + }, + { + {I: 8, J: 10}, + {I: 9, J: 11}, + }, + { + {I: 12, J: 14}, + {I: 13, J: 15}, + }, + { + {I: 16, J: 18}, + {I: 17, J: 19}, + }, + }, + Signs: [][]float32{ + {1, -1, 1, -1}, + {1, -1, 1, -1}, + {1, -1, 1, -1}, + {1, -1, 1, -1}, + {1, -1, 1, -1}, + }, + } 
+ t.Run("add rotational quantization data to the first log", func(t *testing.T) { + rqData := compressionhelpers.RQData{ + InputDim: dimension, + Bits: bits, + Rotation: rotation, + } + + err = commitLogger.AddRQCompression(rqData) + require.Nil(t, err) + require.Nil(t, commitLogger.Flush()) + require.Nil(t, commitLogger.Shutdown(ctx)) + }) + + t.Run("deserialize the first log", func(t *testing.T) { + nullLogger, _ := test.NewNullLogger() + commitLoggerPath := rootPath + "/tmpLogger.hnsw.commitlog.d" + + fileName, found, err := getCurrentCommitLogFileName(commitLoggerPath) + require.Nil(t, err) + require.True(t, found) + + t.Logf("name: %v\n", fileName) + + fd, err := os.Open(commitLoggerPath + "/" + fileName) + require.Nil(t, err) + + defer fd.Close() + fdBuf := bufio.NewReaderSize(fd, 256*1024) + + _, deserializeSize, err := NewDeserializer(nullLogger).Do(fdBuf, nil, true) + require.Nil(t, err) + + swapSize := 2 * rotation.Rounds * (rotation.OutputDim / 2) * 2 + signSize := 4 * rotation.Rounds * rotation.OutputDim + require.Equal(t, int(swapSize+signSize+17), deserializeSize) + t.Logf("deserializeSize: %v\n", deserializeSize) + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/doc.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..1fa436475ef121b89341b3648c8f50ade3788941 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/doc.go @@ -0,0 +1,13 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +// asm only has amd64 specific implementations at the moment +package asm diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot.go new file mode 100644 index 0000000000000000000000000000000000000000..a3dc1b1f7cc93206ceb3e383f5eae2142810d2ea --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot.go @@ -0,0 +1,112 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +//go:build ignore +// +build ignore + +package main + +import ( + . "github.com/mmcloughlin/avo/build" + . "github.com/mmcloughlin/avo/operand" + . "github.com/mmcloughlin/avo/reg" +) + +var unroll = 4 + +// inspired by the avo example which is itself inspired by the PeachPy example. +// Adjusted unrolling that fits our use cases better. +func main() { + TEXT("Dot", NOSPLIT, "func(x, y []float32) float32") + x := Mem{Base: Load(Param("x").Base(), GP64())} + y := Mem{Base: Load(Param("y").Base(), GP64())} + n := Load(Param("x").Len(), GP64()) + + acc := make([]VecVirtual, unroll) + for i := 0; i < unroll; i++ { + acc[i] = YMM() + } + + for i := 0; i < unroll; i++ { + VXORPS(acc[i], acc[i], acc[i]) + } + + blockitems := 8 * unroll + blocksize := 4 * blockitems + Label("blockloop") + CMPQ(n, U32(blockitems)) + JL(LabelRef("tail")) + + // Load x. + xs := make([]VecVirtual, unroll) + for i := 0; i < unroll; i++ { + xs[i] = YMM() + } + + for i := 0; i < unroll; i++ { + VMOVUPS(x.Offset(32*i), xs[i]) + } + + // The actual FMA. 
+ for i := 0; i < unroll; i++ { + VFMADD231PS(y.Offset(32*i), xs[i], acc[i]) + } + + ADDQ(U32(blocksize), x.Base) + ADDQ(U32(blocksize), y.Base) + SUBQ(U32(blockitems), n) + JMP(LabelRef("blockloop")) + + // Process any trailing entries. + Label("tail") + tail := XMM() + VXORPS(tail, tail, tail) + + Label("tailloop") + CMPQ(n, U32(0)) + JE(LabelRef("reduce")) + + xt := XMM() + VMOVSS(x, xt) + VFMADD231SS(y, xt, tail) + + ADDQ(U32(4), x.Base) + ADDQ(U32(4), y.Base) + DECQ(n) + JMP(LabelRef("tailloop")) + + // Reduce the lanes to one. + Label("reduce") + if unroll != 4 { + // we have hard-coded the reduction for this specific unrolling as it + // allows us to do 0+1 and 2+3 and only then have a multiplication which + // touches both. + panic("addition is hard-coded") + } + + // Manual reduction + VADDPS(acc[0], acc[1], acc[0]) + VADDPS(acc[2], acc[3], acc[2]) + VADDPS(acc[0], acc[2], acc[0]) + + result := acc[0].AsX() + top := XMM() + VEXTRACTF128(U8(1), acc[0], top) + VADDPS(result, top, result) + VADDPS(result, tail, result) + VHADDPS(result, result, result) + VHADDPS(result, result, result) + Store(result, ReturnIndex(0)) + + RET() + + Generate() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_amd64.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_amd64.go new file mode 100644 index 0000000000000000000000000000000000000000..1050698d9a1e1c3b403cbccbbbaf25d4ff724abf --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_amd64.go @@ -0,0 +1,98 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package asm + +//go:generate goat ../c/dot_avx256_amd64.c -O3 -mavx2 -mfma -mavx512f -mavx512dq -e="-mfloat-abi=hard" -e="-Rpass-analysis=loop-vectorize" -e="-Rpass=loop-vectorize" -e="-Rpass-missed=loop-vectorize" +//go:generate goat ../c/dot_avx512_amd64.c -O3 -mavx2 -mfma -mavx512f -mavx512dq -e="-mfloat-abi=hard" -e="-Rpass-analysis=loop-vectorize" -e="-Rpass=loop-vectorize" -e="-Rpass-missed=loop-vectorize" + +import ( + "unsafe" +) + +func DotAVX256(x []float32, y []float32) float32 { + var res float32 + + l := len(x) + dot_256( + unsafe.Pointer(unsafe.SliceData(x)), + unsafe.Pointer(unsafe.SliceData(y)), + unsafe.Pointer(&res), + unsafe.Pointer(&l)) + + return res +} + +func DotAVX512(x []float32, y []float32) float32 { + var res float32 + + l := len(x) + dot_512( + unsafe.Pointer(unsafe.SliceData(x)), + unsafe.Pointer(unsafe.SliceData(y)), + unsafe.Pointer(&res), + unsafe.Pointer(&l)) + + return res +} + +func DotByteAVX256(x []uint8, y []uint8) uint32 { + switch len(x) { + case 2: + return dot2[uint8, uint32](x, y) + case 3: + return dot3[uint8, uint32](x, y) + case 4: + return dot4[uint8, uint32](x, y) + case 5: + return dot5[uint8, uint32](x, y) + case 6: + return dot6[uint8, uint32](x, y) + case 7: + return dot7[uint8, uint32](x, y) + case 8: + // manually inlined dot8(x, y) + sum := uint32(x[7])*uint32(y[7]) + uint32(x[6])*uint32(y[6]) + return dot6[uint8, uint32](x, y) + uint32(sum) + case 10: + // manually inlined dot10(x, y) + sum := uint32(x[9])*uint32(y[9]) + uint32(x[8])*uint32(y[8]) + uint32(x[7])*uint32(y[7]) + uint32(x[6])*uint32(y[6]) + return dot6[uint8, uint32](x, y) + uint32(sum) + case 12: + // manually inlined dot12(x, y) + sum := uint32(x[11])*uint32(y[11]) + uint32(x[10])*uint32(y[10]) + uint32(x[9])*uint32(y[9]) + uint32(x[8])*uint32(y[8]) + uint32(x[7])*uint32(y[7]) + uint32(x[6])*uint32(y[6]) + return dot6[uint8, uint32](x, y) + uint32(sum) + } + + var res uint32 + + l := len(x) + 
dot_byte_256( + unsafe.Pointer(unsafe.SliceData(x)), + unsafe.Pointer(unsafe.SliceData(y)), + unsafe.Pointer(&res), + unsafe.Pointer(&l)) + + return res +} + +func DotFloatByteAVX256(x []float32, y []uint8) float32 { + var res float32 + + l := len(x) + dot_float_byte_256( + unsafe.Pointer(unsafe.SliceData(x)), + unsafe.Pointer(unsafe.SliceData(y)), + unsafe.Pointer(&res), + unsafe.Pointer(&l)) + + return res +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_amd64.s b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_amd64.s new file mode 100644 index 0000000000000000000000000000000000000000..5c070d1a2c7f2de209f4c1ea646d31ab536fc4c1 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_amd64.s @@ -0,0 +1,55 @@ +// Code generated by command: go run dot.go -out dot.s -stubs dot_stub.go. DO NOT EDIT. + +#include "textflag.h" + +// func Dot(x []float32, y []float32) float32 +// Requires: AVX, FMA3, SSE +TEXT ·Dot(SB), NOSPLIT, $0-52 + MOVQ x_base+0(FP), AX + MOVQ y_base+24(FP), CX + MOVQ x_len+8(FP), DX + VXORPS Y0, Y0, Y0 + VXORPS Y1, Y1, Y1 + VXORPS Y2, Y2, Y2 + VXORPS Y3, Y3, Y3 + +blockloop: + CMPQ DX, $0x00000020 + JL tail + VMOVUPS (AX), Y4 + VMOVUPS 32(AX), Y5 + VMOVUPS 64(AX), Y6 + VMOVUPS 96(AX), Y7 + VFMADD231PS (CX), Y4, Y0 + VFMADD231PS 32(CX), Y5, Y1 + VFMADD231PS 64(CX), Y6, Y2 + VFMADD231PS 96(CX), Y7, Y3 + ADDQ $0x00000080, AX + ADDQ $0x00000080, CX + SUBQ $0x00000020, DX + JMP blockloop + +tail: + VXORPS X4, X4, X4 + +tailloop: + CMPQ DX, $0x00000000 + JE reduce + VMOVSS (AX), X5 + VFMADD231SS (CX), X5, X4 + ADDQ $0x00000004, AX + ADDQ $0x00000004, CX + DECQ DX + JMP tailloop + +reduce: + VADDPS Y0, Y1, Y0 + VADDPS Y2, Y3, Y2 + VADDPS Y0, Y2, Y0 + VEXTRACTF128 $0x01, Y0, X1 + VADDPS X0, X1, X0 + VADDPS X0, X4, X0 + VHADDPS X0, X0, X0 + VHADDPS X0, X0, X0 + MOVSS X0, ret+48(FP) + RET diff --git 
a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_arm64.s b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_arm64.s new file mode 100644 index 0000000000000000000000000000000000000000..7b61b52f23a5dc780a0beac4e5840b85057e4096 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_arm64.s @@ -0,0 +1,138 @@ +//go:build !noasm && arm64 +// AUTO-GENERATED BY GOAT -- DO NOT EDIT + +TEXT ·dot(SB), $0-32 + MOVD a+0(FP), R0 + MOVD b+8(FP), R1 + MOVD res+16(FP), R2 + MOVD len+24(FP), R3 + WORD $0xa9bf7bfd // stp x29, x30, [sp, + WORD $0xf9400068 // ldr x8, [x3] + WORD $0x910003fd // mov x29, sp + WORD $0x6b0803e9 // negs w9, w8 + WORD $0x1200050a // and w10, w8, + WORD $0x12000529 // and w9, w9, + WORD $0x5a89454a // csneg w10, w10, w9, mi + WORD $0x4b0a0109 // sub w9, w8, w10 + WORD $0x7100413f // cmp w9, + WORD $0x540000ea // b.ge .LBB0_2 + WORD $0x6f00e400 // movi v0.2d, + WORD $0x2a1f03eb // mov w11, wzr + WORD $0x6f00e401 // movi v1.2d, + WORD $0x6f00e403 // movi v3.2d, + WORD $0x6f00e402 // movi v2.2d, + WORD $0x14000016 // b .LBB0_4 + +LBB0_2: + WORD $0x6f00e402 // movi v2.2d, + WORD $0xaa1f03eb // mov x11, xzr + WORD $0x6f00e403 // movi v3.2d, + WORD $0xaa0003ec // mov x12, x0 + WORD $0x6f00e401 // movi v1.2d, + WORD $0xaa0103ed // mov x13, x1 + WORD $0x6f00e400 // movi v0.2d, + +LBB0_3: + WORD $0x4cdf2984 // ld1 { v4.4s, v5.4s, v6.4s, v7.4s }, [x12], + WORD $0x9100816e // add x14, x11, + WORD $0x9100416b // add x11, x11, + WORD $0xeb0901df // cmp x14, x9 + WORD $0x4cdf29b0 // ld1 { v16.4s, v17.4s, v18.4s, v19.4s }, [x13], + WORD $0x6e30dc94 // fmul v20.4s, v4.4s, v16.4s + WORD $0x6e31dcb5 // fmul v21.4s, v5.4s, v17.4s + WORD $0x6e32dcd6 // fmul v22.4s, v6.4s, v18.4s + WORD $0x6e33dce4 // fmul v4.4s, v7.4s, v19.4s + WORD $0x4e34d442 // fadd v2.4s, v2.4s, v20.4s + WORD $0x4e35d463 // fadd v3.4s, v3.4s, v21.4s + WORD $0x4e36d421 // fadd v1.4s, 
v1.4s, v22.4s + WORD $0x4e24d400 // fadd v0.4s, v0.4s, v4.4s + WORD $0x54fffe69 // b.ls .LBB0_3 + +LBB0_4: + WORD $0x6b09017f // cmp w11, w9 + WORD $0x540001ca // b.ge .LBB0_7 + WORD $0x2a0b03eb // mov w11, w11 + WORD $0x2a0903ec // mov w12, w9 + WORD $0xd37ef56e // lsl x14, x11, + WORD $0x93407d8c // sxtw x12, w12 + WORD $0x8b0e002d // add x13, x1, x14 + WORD $0x8b0e000e // add x14, x0, x14 + +LBB0_6: + WORD $0x3cc105c4 // ldr q4, [x14], + WORD $0x3cc105a5 // ldr q5, [x13], + WORD $0x9100116b // add x11, x11, + WORD $0xeb0c017f // cmp x11, x12 + WORD $0x6e25dc84 // fmul v4.4s, v4.4s, v5.4s + WORD $0x4e24d442 // fadd v2.4s, v2.4s, v4.4s + WORD $0x54ffff4b // b.lt .LBB0_6 + +LBB0_7: + WORD $0x6e22d442 // faddp v2.4s, v2.4s, v2.4s + WORD $0x7100055f // cmp w10, + WORD $0x6e23d463 // faddp v3.4s, v3.4s, v3.4s + WORD $0x6e21d421 // faddp v1.4s, v1.4s, v1.4s + WORD $0x6e20d400 // faddp v0.4s, v0.4s, v0.4s + WORD $0x7e30d842 // faddp s2, v2.2s + WORD $0x7e30d863 // faddp s3, v3.2s + WORD $0x7e30d821 // faddp s1, v1.2s + WORD $0x7e30d800 // faddp s0, v0.2s + WORD $0x1e232842 // fadd s2, s2, s3 + WORD $0x1e212841 // fadd s1, s2, s1 + WORD $0x1e202820 // fadd s0, s1, s0 + WORD $0x540005eb // b.lt .LBB0_13 + WORD $0x93407d08 // sxtw x8, w8 + WORD $0x93407d29 // sxtw x9, w9 + WORD $0x9100052a // add x10, x9, + WORD $0xeb08015f // cmp x10, x8 + WORD $0x9a89d50a // csinc x10, x8, x9, le + WORD $0xcb09014a // sub x10, x10, x9 + WORD $0xf100215f // cmp x10, + WORD $0x54000403 // b.lo .LBB0_12 + WORD $0xd37ef52c // lsl x12, x9, + WORD $0x927df14b // and x11, x10, + WORD $0x9100418d // add x13, x12, + WORD $0x8b090169 // add x9, x11, x9 + WORD $0x8b0d000c // add x12, x0, x13 + WORD $0x8b0d002d // add x13, x1, x13 + WORD $0xaa0b03ee // mov x14, x11 + +LBB0_10: + WORD $0x3cdf0181 // ldur q1, [x12, + WORD $0xf10021ce // subs x14, x14, + WORD $0x3cdf01a2 // ldur q2, [x13, + WORD $0x6e22dc21 // fmul v1.4s, v1.4s, v2.4s + WORD $0x5e0c0422 // mov s2, v1.s[1] + WORD $0x1e212800 // fadd s0, 
s0, s1 + WORD $0x5e140423 // mov s3, v1.s[2] + WORD $0x5e1c0421 // mov s1, v1.s[3] + WORD $0x1e222800 // fadd s0, s0, s2 + WORD $0x3cc20582 // ldr q2, [x12], + WORD $0x1e232800 // fadd s0, s0, s3 + WORD $0x3cc205a3 // ldr q3, [x13], + WORD $0x6e23dc42 // fmul v2.4s, v2.4s, v3.4s + WORD $0x1e212800 // fadd s0, s0, s1 + WORD $0x5e0c0441 // mov s1, v2.s[1] + WORD $0x1e222800 // fadd s0, s0, s2 + WORD $0x5e140443 // mov s3, v2.s[2] + WORD $0x1e212800 // fadd s0, s0, s1 + WORD $0x5e1c0441 // mov s1, v2.s[3] + WORD $0x1e232800 // fadd s0, s0, s3 + WORD $0x1e212800 // fadd s0, s0, s1 + WORD $0x54fffd61 // b.ne .LBB0_10 + WORD $0xeb0b015f // cmp x10, x11 + WORD $0x54000100 // b.eq .LBB0_13 + +LBB0_12: + WORD $0xd37ef52a // lsl x10, x9, + WORD $0x91000529 // add x9, x9, + WORD $0xeb08013f // cmp x9, x8 + WORD $0xbc6a6801 // ldr s1, [x0, x10] + WORD $0xbc6a6822 // ldr s2, [x1, x10] + WORD $0x1f020020 // fmadd s0, s1, s2, s0 + WORD $0x54ffff4b // b.lt .LBB0_12 + +LBB0_13: + WORD $0xbd000040 // str s0, [x2] + WORD $0xa8c17bfd // ldp x29, x30, [sp], + WORD $0xd65f03c0 // ret diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_avx256_amd64.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_avx256_amd64.go new file mode 100644 index 0000000000000000000000000000000000000000..b14971fc2fb4b11e8eb80ded33b18f8f54005d44 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_avx256_amd64.go @@ -0,0 +1,21 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +//go:build !noasm && amd64 + +// AUTO-GENERATED BY GOAT -- DO NOT EDIT + +package asm + +import "unsafe" + +//go:noescape +func dot_256(a, b, res, len unsafe.Pointer) diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_avx256_amd64.s b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_avx256_amd64.s new file mode 100644 index 0000000000000000000000000000000000000000..82876fb373286229ccc7a93a958feb539ae92ef0 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_avx256_amd64.s @@ -0,0 +1,205 @@ +//go:build !noasm && amd64 +// AUTO-GENERATED BY GOAT -- DO NOT EDIT + +TEXT ·dot_256(SB), $0-32 + MOVQ a+0(FP), DI + MOVQ b+8(FP), SI + MOVQ res+16(FP), DX + MOVQ len+24(FP), CX + BYTE $0x55 // pushq %rbp + WORD $0x8948; BYTE $0xe5 // movq %rsp, %rbp + LONG $0xf8e48348 // andq $-8, %rsp + WORD $0x8b4c; BYTE $0x09 // movq (%rcx), %r9 + LONG $0x07f98341 // cmpl $7, %r9d + JG LBB0_9 + LONG $0xff418d45 // leal -1(%r9), %r8d + LONG $0x03c1f641 // testb $3, %r9b + JE LBB0_2 + WORD $0x8944; BYTE $0xc8 // movl %r9d, %eax + WORD $0xe083; BYTE $0x03 // andl $3, %eax + LONG $0xc057f8c5 // vxorps %xmm0, %xmm0, %xmm0 + WORD $0xc931 // xorl %ecx, %ecx + +LBB0_4: + LONG $0x0f10fac5 // vmovss (%rdi), %xmm1 + LONG $0xb971e2c4; BYTE $0x06 // vfmadd231ss (%rsi), %xmm1, %xmm0 + LONG $0x04c78348 // addq $4, %rdi + LONG $0x04c68348 // addq $4, %rsi + LONG $0x01c18348 // addq $1, %rcx + WORD $0xc839 // cmpl %ecx, %eax + JNE LBB0_4 + WORD $0x2941; BYTE $0xc9 // subl %ecx, %r9d + LONG $0x03f88341 // cmpl $3, %r8d + JAE LBB0_7 + +LBB0_31: + LONG $0x0211fac5 // vmovss %xmm0, (%rdx) + WORD $0x8948; BYTE $0xec // movq %rbp, %rsp + BYTE $0x5d // popq %rbp + BYTE $0xc3 // retq + +LBB0_9: + LONG $0xc057f8c5 // vxorps %xmm0, %xmm0, %xmm0 + LONG $0x20f98341 // cmpl $32, %r9d + JB LBB0_10 + LONG $0xe0498d41 // leal -32(%r9), %ecx + WORD 
$0xc1f6; BYTE $0x20 // testb $32, %cl + JNE LBB0_12 + LONG $0x1f10fcc5 // vmovups (%rdi), %ymm3 + LONG $0x5710fcc5; BYTE $0x20 // vmovups 32(%rdi), %ymm2 + LONG $0x4f10fcc5; BYTE $0x40 // vmovups 64(%rdi), %ymm1 + LONG $0x4710fcc5; BYTE $0x60 // vmovups 96(%rdi), %ymm0 + LONG $0xe457d8c5 // vxorps %xmm4, %xmm4, %xmm4 + LONG $0x985de2c4; BYTE $0x1e // vfmadd132ps (%rsi), %ymm4, %ymm3 + LONG $0x985de2c4; WORD $0x2056 // vfmadd132ps 32(%rsi), %ymm4, %ymm2 + LONG $0x985de2c4; WORD $0x404e // vfmadd132ps 64(%rsi), %ymm4, %ymm1 + LONG $0x985de2c4; WORD $0x6046 // vfmadd132ps 96(%rsi), %ymm4, %ymm0 + LONG $0x80ef8348 // subq $-128, %rdi + LONG $0x80ee8348 // subq $-128, %rsi + WORD $0x8941; BYTE $0xc9 // movl %ecx, %r9d + WORD $0xf983; BYTE $0x20 // cmpl $32, %ecx + JAE LBB0_20 + JMP LBB0_15 + +LBB0_10: + LONG $0xc957f0c5 // vxorps %xmm1, %xmm1, %xmm1 + LONG $0xd257e8c5 // vxorps %xmm2, %xmm2, %xmm2 + LONG $0xdb57e0c5 // vxorps %xmm3, %xmm3, %xmm3 + JMP LBB0_16 + +LBB0_2: + LONG $0xc057f8c5 // vxorps %xmm0, %xmm0, %xmm0 + LONG $0x03f88341 // cmpl $3, %r8d + JB LBB0_31 + +LBB0_7: + WORD $0x8944; BYTE $0xc8 // movl %r9d, %eax + WORD $0xc931 // xorl %ecx, %ecx + +LBB0_8: + LONG $0x0c10fac5; BYTE $0x8f // vmovss (%rdi,%rcx,4), %xmm1 + LONG $0x5410fac5; WORD $0x048f // vmovss 4(%rdi,%rcx,4), %xmm2 + LONG $0x9979e2c4; WORD $0x8e0c // vfmadd132ss (%rsi,%rcx,4), %xmm0, %xmm1 + LONG $0xb969e2c4; WORD $0x8e4c; BYTE $0x04 // vfmadd231ss 4(%rsi,%rcx,4), %xmm2, %xmm1 + LONG $0x5410fac5; WORD $0x088f // vmovss 8(%rdi,%rcx,4), %xmm2 + LONG $0x9971e2c4; WORD $0x8e54; BYTE $0x08 // vfmadd132ss 8(%rsi,%rcx,4), %xmm1, %xmm2 + LONG $0x4410fac5; WORD $0x0c8f // vmovss 12(%rdi,%rcx,4), %xmm0 + LONG $0x9969e2c4; WORD $0x8e44; BYTE $0x0c // vfmadd132ss 12(%rsi,%rcx,4), %xmm2, %xmm0 + LONG $0x04c18348 // addq $4, %rcx + WORD $0xc839 // cmpl %ecx, %eax + JNE LBB0_8 + JMP LBB0_31 + +LBB0_12: + LONG $0xc057f8c5 // vxorps %xmm0, %xmm0, %xmm0 + LONG $0xc957f0c5 // vxorps %xmm1, %xmm1, %xmm1 + LONG 
$0xd257e8c5 // vxorps %xmm2, %xmm2, %xmm2 + LONG $0xdb57e0c5 // vxorps %xmm3, %xmm3, %xmm3 + WORD $0xf983; BYTE $0x20 // cmpl $32, %ecx + JB LBB0_15 + +LBB0_20: + LONG $0x2710fcc5 // vmovups (%rdi), %ymm4 + LONG $0x6f10fcc5; BYTE $0x20 // vmovups 32(%rdi), %ymm5 + LONG $0x7710fcc5; BYTE $0x40 // vmovups 64(%rdi), %ymm6 + LONG $0x7f10fcc5; BYTE $0x60 // vmovups 96(%rdi), %ymm7 + LONG $0x9865e2c4; BYTE $0x26 // vfmadd132ps (%rsi), %ymm3, %ymm4 + LONG $0x986de2c4; WORD $0x206e // vfmadd132ps 32(%rsi), %ymm2, %ymm5 + LONG $0x9875e2c4; WORD $0x4076 // vfmadd132ps 64(%rsi), %ymm1, %ymm6 + LONG $0x987de2c4; WORD $0x607e // vfmadd132ps 96(%rsi), %ymm0, %ymm7 + QUAD $0x000000809f10fcc5 // vmovups 128(%rdi), %ymm3 + QUAD $0x000000a09710fcc5 // vmovups 160(%rdi), %ymm2 + QUAD $0x000000c08f10fcc5 // vmovups 192(%rdi), %ymm1 + QUAD $0x000000e08710fcc5 // vmovups 224(%rdi), %ymm0 + QUAD $0x0000809e985de2c4; BYTE $0x00 // vfmadd132ps 128(%rsi), %ymm4, %ymm3 + QUAD $0x0000a0969855e2c4; BYTE $0x00 // vfmadd132ps 160(%rsi), %ymm5, %ymm2 + QUAD $0x0000c08e984de2c4; BYTE $0x00 // vfmadd132ps 192(%rsi), %ymm6, %ymm1 + QUAD $0x0000e0869845e2c4; BYTE $0x00 // vfmadd132ps 224(%rsi), %ymm7, %ymm0 + LONG $0xc0c18341 // addl $-64, %r9d + LONG $0x00c78148; WORD $0x0001; BYTE $0x00 // addq $256, %rdi + LONG $0x00c68148; WORD $0x0001; BYTE $0x00 // addq $256, %rsi + LONG $0x1ff98341 // cmpl $31, %r9d + JA LBB0_20 + WORD $0x8944; BYTE $0xc9 // movl %r9d, %ecx + +LBB0_15: + WORD $0x8941; BYTE $0xc9 // movl %ecx, %r9d + WORD $0xf983; BYTE $0x08 // cmpl $8, %ecx + JB LBB0_18 + +LBB0_16: + WORD $0x8944; BYTE $0xc9 // movl %r9d, %ecx + +LBB0_17: + LONG $0x2710fcc5 // vmovups (%rdi), %ymm4 + LONG $0xb85de2c4; BYTE $0x1e // vfmadd231ps (%rsi), %ymm4, %ymm3 + WORD $0xc183; BYTE $0xf8 // addl $-8, %ecx + LONG $0x20c78348 // addq $32, %rdi + LONG $0x20c68348 // addq $32, %rsi + WORD $0xf983; BYTE $0x07 // cmpl $7, %ecx + JA LBB0_17 + +LBB0_18: + WORD $0xc985 // testl %ecx, %ecx + JE LBB0_19 + LONG 
$0xff418d44 // leal -1(%rcx), %r8d + WORD $0xc1f6; BYTE $0x03 // testb $3, %cl + JE LBB0_23 + WORD $0x8941; BYTE $0xc9 // movl %ecx, %r9d + LONG $0x03e18341 // andl $3, %r9d + LONG $0xe457d8c5 // vxorps %xmm4, %xmm4, %xmm4 + WORD $0xc031 // xorl %eax, %eax + +LBB0_25: + LONG $0x2f10fac5 // vmovss (%rdi), %xmm5 + LONG $0xb951e2c4; BYTE $0x26 // vfmadd231ss (%rsi), %xmm5, %xmm4 + LONG $0x04c78348 // addq $4, %rdi + LONG $0x04c68348 // addq $4, %rsi + LONG $0x01c08348 // addq $1, %rax + WORD $0x3941; BYTE $0xc1 // cmpl %eax, %r9d + JNE LBB0_25 + WORD $0xc129 // subl %eax, %ecx + LONG $0x03f88341 // cmpl $3, %r8d + JAE LBB0_28 + JMP LBB0_30 + +LBB0_19: + LONG $0xe457d8c5 // vxorps %xmm4, %xmm4, %xmm4 + JMP LBB0_30 + +LBB0_23: + LONG $0xe457d8c5 // vxorps %xmm4, %xmm4, %xmm4 + LONG $0x03f88341 // cmpl $3, %r8d + JB LBB0_30 + +LBB0_28: + WORD $0xc889 // movl %ecx, %eax + WORD $0xc931 // xorl %ecx, %ecx + +LBB0_29: + LONG $0x2c10fac5; BYTE $0x8f // vmovss (%rdi,%rcx,4), %xmm5 + LONG $0x7410fac5; WORD $0x048f // vmovss 4(%rdi,%rcx,4), %xmm6 + LONG $0x9959e2c4; WORD $0x8e2c // vfmadd132ss (%rsi,%rcx,4), %xmm4, %xmm5 + LONG $0xb949e2c4; WORD $0x8e6c; BYTE $0x04 // vfmadd231ss 4(%rsi,%rcx,4), %xmm6, %xmm5 + LONG $0x7410fac5; WORD $0x088f // vmovss 8(%rdi,%rcx,4), %xmm6 + LONG $0x9951e2c4; WORD $0x8e74; BYTE $0x08 // vfmadd132ss 8(%rsi,%rcx,4), %xmm5, %xmm6 + LONG $0x6410fac5; WORD $0x0c8f // vmovss 12(%rdi,%rcx,4), %xmm4 + LONG $0x9949e2c4; WORD $0x8e64; BYTE $0x0c // vfmadd132ss 12(%rsi,%rcx,4), %xmm6, %xmm4 + LONG $0x04c18348 // addq $4, %rcx + WORD $0xc839 // cmpl %ecx, %eax + JNE LBB0_29 + +LBB0_30: + LONG $0xd358ecc5 // vaddps %ymm3, %ymm2, %ymm2 + LONG $0xc058f4c5 // vaddps %ymm0, %ymm1, %ymm0 + LONG $0xc258fcc5 // vaddps %ymm2, %ymm0, %ymm0 + LONG $0xc07cffc5 // vhaddps %ymm0, %ymm0, %ymm0 + LONG $0xc07cffc5 // vhaddps %ymm0, %ymm0, %ymm0 + LONG $0x197de3c4; WORD $0x01c1 // vextractf128 $1, %ymm0, %xmm1 + LONG $0xc158fac5 // vaddss %xmm1, %xmm0, %xmm0 + LONG 
$0xc058dac5 // vaddss %xmm0, %xmm4, %xmm0 + LONG $0x0211fac5 // vmovss %xmm0, (%rdx) + WORD $0x8948; BYTE $0xec // movq %rbp, %rsp + BYTE $0x5d // popq %rbp + WORD $0xf8c5; BYTE $0x77 // vzeroupper + BYTE $0xc3 // retq diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_avx512_amd64.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_avx512_amd64.go new file mode 100644 index 0000000000000000000000000000000000000000..5ab28786fbaecc5280ef70c6dc03ff3edaef81b6 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_avx512_amd64.go @@ -0,0 +1,21 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +//go:build !noasm && amd64 + +// AUTO-GENERATED BY GOAT -- DO NOT EDIT + +package asm + +import "unsafe" + +//go:noescape +func dot_512(a, b, res, len unsafe.Pointer) diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_avx512_amd64.s b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_avx512_amd64.s new file mode 100644 index 0000000000000000000000000000000000000000..30c79cf61133b991360ec69d1a214f85fa029bbd --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_avx512_amd64.s @@ -0,0 +1,301 @@ +//go:build !noasm && amd64 +// AUTO-GENERATED BY GOAT -- DO NOT EDIT + +TEXT ·dot_512(SB), $0-32 + MOVQ a+0(FP), DI + MOVQ b+8(FP), SI + MOVQ res+16(FP), DX + MOVQ len+24(FP), CX + BYTE $0x55 // pushq %rbp + WORD $0x8948; BYTE $0xe5 // movq %rsp, %rbp + LONG $0xf8e48348 // andq $-8, %rsp + WORD $0x8b48; BYTE $0x01 // movq (%rcx), %rax + WORD $0xf883; BYTE $0x07 // cmpl $7, %eax + JG LBB0_6 + 
LONG $0xff408d44 // leal -1(%rax), %r8d + WORD $0x03a8 // testb $3, %al + JE LBB0_15 + WORD $0x8941; BYTE $0xc1 // movl %eax, %r9d + LONG $0x03e18341 // andl $3, %r9d + LONG $0xc057f8c5 // vxorps %xmm0, %xmm0, %xmm0 + WORD $0xc931 // xorl %ecx, %ecx + +LBB0_3: + LONG $0x0f10fac5 // vmovss (%rdi), %xmm1 + LONG $0xb971e2c4; BYTE $0x06 // vfmadd231ss (%rsi), %xmm1, %xmm0 + LONG $0x04c78348 // addq $4, %rdi + LONG $0x04c68348 // addq $4, %rsi + LONG $0x01c18348 // addq $1, %rcx + WORD $0x3941; BYTE $0xc9 // cmpl %ecx, %r9d + JNE LBB0_3 + WORD $0xc829 // subl %ecx, %eax + LONG $0x03f88341 // cmpl $3, %r8d + JAE LBB0_16 + JMP LBB0_5 + +LBB0_6: + LONG $0xc057f8c5 // vxorps %xmm0, %xmm0, %xmm0 + LONG $0x0000803d; BYTE $0x00 // cmpl $128, %eax + JB LBB0_13 + LONG $0xc957f0c5 // vxorps %xmm1, %xmm1, %xmm1 + LONG $0xd257e8c5 // vxorps %xmm2, %xmm2, %xmm2 + LONG $0xdb57e0c5 // vxorps %xmm3, %xmm3, %xmm3 + LONG $0xed57d0c5 // vxorps %xmm5, %xmm5, %xmm5 + LONG $0xe457d8c5 // vxorps %xmm4, %xmm4, %xmm4 + LONG $0xf657c8c5 // vxorps %xmm6, %xmm6, %xmm6 + LONG $0xff57c0c5 // vxorps %xmm7, %xmm7, %xmm7 + LONG $0x573841c4; BYTE $0xc0 // vxorps %xmm8, %xmm8, %xmm8 + +LBB0_8: + LONG $0x487c7162; WORD $0x0f10 // vmovups (%rdi), %zmm9 + LONG $0x487c7162; WORD $0x5710; BYTE $0x01 // vmovups 64(%rdi), %zmm10 + LONG $0x487c7162; WORD $0x5f10; BYTE $0x02 // vmovups 128(%rdi), %zmm11 + LONG $0x487c7162; WORD $0x6710; BYTE $0x03 // vmovups 192(%rdi), %zmm12 + LONG $0x487c7162; WORD $0x6f10; BYTE $0x04 // vmovups 256(%rdi), %zmm13 + LONG $0x487c7162; WORD $0x7710; BYTE $0x05 // vmovups 320(%rdi), %zmm14 + LONG $0x487c7162; WORD $0x7f10; BYTE $0x06 // vmovups 384(%rdi), %zmm15 + LONG $0x4835f262; WORD $0x0eb8 // vfmadd231ps (%rsi), %zmm9, %zmm1 + LONG $0x482df262; WORD $0x56b8; BYTE $0x01 // vfmadd231ps 64(%rsi), %zmm10, %zmm2 + LONG $0x4825f262; WORD $0x5eb8; BYTE $0x02 // vfmadd231ps 128(%rsi), %zmm11, %zmm3 + LONG $0x481df262; WORD $0x6eb8; BYTE $0x03 // vfmadd231ps 192(%rsi), %zmm12, %zmm5 + 
LONG $0x4815f262; WORD $0x66b8; BYTE $0x04 // vfmadd231ps 256(%rsi), %zmm13, %zmm4 + LONG $0x480df262; WORD $0x76b8; BYTE $0x05 // vfmadd231ps 320(%rsi), %zmm14, %zmm6 + LONG $0x4805f262; WORD $0x7eb8; BYTE $0x06 // vfmadd231ps 384(%rsi), %zmm15, %zmm7 + LONG $0x487c7162; WORD $0x4f10; BYTE $0x07 // vmovups 448(%rdi), %zmm9 + LONG $0x48357262; WORD $0x46b8; BYTE $0x07 // vfmadd231ps 448(%rsi), %zmm9, %zmm8 + WORD $0xc083; BYTE $0x80 // addl $-128, %eax + LONG $0x00c78148; WORD $0x0002; BYTE $0x00 // addq $512, %rdi + LONG $0x00c68148; WORD $0x0002; BYTE $0x00 // addq $512, %rsi + WORD $0xbe0f; BYTE $0xc8 // movsbl %al, %ecx + WORD $0xc139 // cmpl %eax, %ecx + JNE LBB0_8 + LONG $0x4874f162; WORD $0xca58 // vaddps %zmm2, %zmm1, %zmm1 + LONG $0x4864f162; WORD $0xd558 // vaddps %zmm5, %zmm3, %zmm2 + LONG $0x4874f162; WORD $0xca58 // vaddps %zmm2, %zmm1, %zmm1 + LONG $0x485cf162; WORD $0xd658 // vaddps %zmm6, %zmm4, %zmm2 + LONG $0x4844d162; WORD $0xd858 // vaddps %zmm8, %zmm7, %zmm3 + LONG $0x486cf162; WORD $0xd358 // vaddps %zmm3, %zmm2, %zmm2 + LONG $0x4874f162; WORD $0xca58 // vaddps %zmm2, %zmm1, %zmm1 + LONG $0x48fdf362; WORD $0xcb1b; BYTE $0x01 // vextractf64x4 $1, %zmm1, %ymm3 + LONG $0xd257e8c5 // vxorps %xmm2, %xmm2, %xmm2 + LONG $0xca58f4c5 // vaddps %ymm2, %ymm1, %ymm1 + LONG $0xcb58f4c5 // vaddps %ymm3, %ymm1, %ymm1 + WORD $0xc085 // testl %eax, %eax + JE LBB0_18 + WORD $0xf883; BYTE $0x20 // cmpl $32, %eax + JB LBB0_14 + +LBB0_11: + LONG $0xe0488d44 // leal -32(%rax), %r9d + LONG $0x20c1f641 // testb $32, %r9b + JNE LBB0_19 + LONG $0x2710fcc5 // vmovups (%rdi), %ymm4 + LONG $0x4710fcc5; BYTE $0x20 // vmovups 32(%rdi), %ymm0 + LONG $0x5f10fcc5; BYTE $0x40 // vmovups 64(%rdi), %ymm3 + LONG $0x5710fcc5; BYTE $0x60 // vmovups 96(%rdi), %ymm2 + LONG $0xb85de2c4; BYTE $0x0e // vfmadd231ps (%rsi), %ymm4, %ymm1 + LONG $0xe457d8c5 // vxorps %xmm4, %xmm4, %xmm4 + LONG $0x985de2c4; WORD $0x2046 // vfmadd132ps 32(%rsi), %ymm4, %ymm0 + LONG $0x985de2c4; WORD $0x405e // 
vfmadd132ps 64(%rsi), %ymm4, %ymm3 + LONG $0x985de2c4; WORD $0x6056 // vfmadd132ps 96(%rsi), %ymm4, %ymm2 + LONG $0x80ef8348 // subq $-128, %rdi + LONG $0x80ee8348 // subq $-128, %rsi + WORD $0x8944; BYTE $0xc8 // movl %r9d, %eax + LONG $0x20f98341 // cmpl $32, %r9d + JAE LBB0_20 + JMP LBB0_22 + +LBB0_13: + LONG $0xc957f0c5 // vxorps %xmm1, %xmm1, %xmm1 + WORD $0xf883; BYTE $0x20 // cmpl $32, %eax + JAE LBB0_11 + +LBB0_14: + LONG $0xdb57e0c5 // vxorps %xmm3, %xmm3, %xmm3 + LONG $0xd257e8c5 // vxorps %xmm2, %xmm2, %xmm2 + JMP LBB0_21 + +LBB0_15: + LONG $0xc057f8c5 // vxorps %xmm0, %xmm0, %xmm0 + LONG $0x03f88341 // cmpl $3, %r8d + JB LBB0_5 + +LBB0_16: + WORD $0xc089 // movl %eax, %eax + WORD $0xc931 // xorl %ecx, %ecx + +LBB0_17: + LONG $0x0c10fac5; BYTE $0x8f // vmovss (%rdi,%rcx,4), %xmm1 + LONG $0x5410fac5; WORD $0x048f // vmovss 4(%rdi,%rcx,4), %xmm2 + LONG $0x9979e2c4; WORD $0x8e0c // vfmadd132ss (%rsi,%rcx,4), %xmm0, %xmm1 + LONG $0xb969e2c4; WORD $0x8e4c; BYTE $0x04 // vfmadd231ss 4(%rsi,%rcx,4), %xmm2, %xmm1 + LONG $0x5410fac5; WORD $0x088f // vmovss 8(%rdi,%rcx,4), %xmm2 + LONG $0x9971e2c4; WORD $0x8e54; BYTE $0x08 // vfmadd132ss 8(%rsi,%rcx,4), %xmm1, %xmm2 + LONG $0x4410fac5; WORD $0x0c8f // vmovss 12(%rdi,%rcx,4), %xmm0 + LONG $0x9969e2c4; WORD $0x8e44; BYTE $0x0c // vfmadd132ss 12(%rsi,%rcx,4), %xmm2, %xmm0 + LONG $0x04c18348 // addq $4, %rcx + WORD $0xc839 // cmpl %ecx, %eax + JNE LBB0_17 + +LBB0_5: + LONG $0x0211fac5 // vmovss %xmm0, (%rdx) + WORD $0x8948; BYTE $0xec // movq %rbp, %rsp + BYTE $0x5d // popq %rbp + BYTE $0xc3 // retq + +LBB0_18: + LONG $0xc258f4c5 // vaddps %ymm2, %ymm1, %ymm0 + LONG $0xc07cffc5 // vhaddps %ymm0, %ymm0, %ymm0 + LONG $0xc07cffc5 // vhaddps %ymm0, %ymm0, %ymm0 + LONG $0x197de3c4; WORD $0x01c1 // vextractf128 $1, %ymm0, %xmm1 + LONG $0xc158fac5 // vaddss %xmm1, %xmm0, %xmm0 + LONG $0xc957f0c5 // vxorps %xmm1, %xmm1, %xmm1 + LONG $0xc158fac5 // vaddss %xmm1, %xmm0, %xmm0 + LONG $0x0211fac5 // vmovss %xmm0, (%rdx) + WORD 
$0x8948; BYTE $0xec // movq %rbp, %rsp + BYTE $0x5d // popq %rbp + WORD $0xf8c5; BYTE $0x77 // vzeroupper + BYTE $0xc3 // retq + +LBB0_19: + LONG $0xd257e8c5 // vxorps %xmm2, %xmm2, %xmm2 + LONG $0xdb57e0c5 // vxorps %xmm3, %xmm3, %xmm3 + LONG $0xc057f8c5 // vxorps %xmm0, %xmm0, %xmm0 + LONG $0x20f98341 // cmpl $32, %r9d + JB LBB0_22 + +LBB0_20: + LONG $0x2710fcc5 // vmovups (%rdi), %ymm4 + LONG $0x6f10fcc5; BYTE $0x20 // vmovups 32(%rdi), %ymm5 + LONG $0x7710fcc5; BYTE $0x40 // vmovups 64(%rdi), %ymm6 + LONG $0x7f10fcc5; BYTE $0x60 // vmovups 96(%rdi), %ymm7 + LONG $0x9875e2c4; BYTE $0x26 // vfmadd132ps (%rsi), %ymm1, %ymm4 + LONG $0x987de2c4; WORD $0x206e // vfmadd132ps 32(%rsi), %ymm0, %ymm5 + LONG $0x9865e2c4; WORD $0x4076 // vfmadd132ps 64(%rsi), %ymm3, %ymm6 + LONG $0x986de2c4; WORD $0x607e // vfmadd132ps 96(%rsi), %ymm2, %ymm7 + QUAD $0x000000808f10fcc5 // vmovups 128(%rdi), %ymm1 + QUAD $0x000000a08710fcc5 // vmovups 160(%rdi), %ymm0 + QUAD $0x000000c09f10fcc5 // vmovups 192(%rdi), %ymm3 + QUAD $0x000000e09710fcc5 // vmovups 224(%rdi), %ymm2 + QUAD $0x0000808e985de2c4; BYTE $0x00 // vfmadd132ps 128(%rsi), %ymm4, %ymm1 + QUAD $0x0000a0869855e2c4; BYTE $0x00 // vfmadd132ps 160(%rsi), %ymm5, %ymm0 + QUAD $0x0000c09e984de2c4; BYTE $0x00 // vfmadd132ps 192(%rsi), %ymm6, %ymm3 + QUAD $0x0000e0969845e2c4; BYTE $0x00 // vfmadd132ps 224(%rsi), %ymm7, %ymm2 + WORD $0xc083; BYTE $0xc0 // addl $-64, %eax + LONG $0x00c78148; WORD $0x0001; BYTE $0x00 // addq $256, %rdi + LONG $0x00c68148; WORD $0x0001; BYTE $0x00 // addq $256, %rsi + WORD $0xf883; BYTE $0x1f // cmpl $31, %eax + JA LBB0_20 + +LBB0_21: + WORD $0x8941; BYTE $0xc1 // movl %eax, %r9d + +LBB0_22: + LONG $0x08f98341 // cmpl $8, %r9d + JB LBB0_29 + LONG $0xf8418d45 // leal -8(%r9), %r8d + WORD $0x8944; BYTE $0xc0 // movl %r8d, %eax + WORD $0xe8c1; BYTE $0x03 // shrl $3, %eax + WORD $0x488d; BYTE $0x01 // leal 1(%rax), %ecx + WORD $0xc1f6; BYTE $0x03 // testb $3, %cl + JE LBB0_27 + WORD $0x0104 // addb $1, %al + 
WORD $0xb60f; BYTE $0xc0 // movzbl %al, %eax + WORD $0xe083; BYTE $0x03 // andl $3, %eax + LONG $0x03e0c148 // shlq $3, %rax + WORD $0xc931 // xorl %ecx, %ecx + +LBB0_25: + LONG $0x2710fcc5 // vmovups (%rdi), %ymm4 + LONG $0xb85de2c4; BYTE $0x0e // vfmadd231ps (%rsi), %ymm4, %ymm1 + LONG $0x20c78348 // addq $32, %rdi + LONG $0x20c68348 // addq $32, %rsi + LONG $0x08c18348 // addq $8, %rcx + WORD $0xc839 // cmpl %ecx, %eax + JNE LBB0_25 + WORD $0x2941; BYTE $0xc9 // subl %ecx, %r9d + +LBB0_27: + LONG $0x18f88341 // cmpl $24, %r8d + JB LBB0_29 + +LBB0_28: + LONG $0x2710fcc5 // vmovups (%rdi), %ymm4 + LONG $0x6f10fcc5; BYTE $0x20 // vmovups 32(%rdi), %ymm5 + LONG $0x7710fcc5; BYTE $0x40 // vmovups 64(%rdi), %ymm6 + LONG $0x7f10fcc5; BYTE $0x60 // vmovups 96(%rdi), %ymm7 + LONG $0x9875e2c4; BYTE $0x26 // vfmadd132ps (%rsi), %ymm1, %ymm4 + LONG $0xb855e2c4; WORD $0x2066 // vfmadd231ps 32(%rsi), %ymm5, %ymm4 + LONG $0xb84de2c4; WORD $0x4066 // vfmadd231ps 64(%rsi), %ymm6, %ymm4 + LONG $0xcc28fcc5 // vmovaps %ymm4, %ymm1 + LONG $0xb845e2c4; WORD $0x604e // vfmadd231ps 96(%rsi), %ymm7, %ymm1 + LONG $0xe0c18341 // addl $-32, %r9d + LONG $0x80ef8348 // subq $-128, %rdi + LONG $0x80ee8348 // subq $-128, %rsi + LONG $0x07f98341 // cmpl $7, %r9d + JA LBB0_28 + +LBB0_29: + WORD $0x8545; BYTE $0xc9 // testl %r9d, %r9d + JE LBB0_34 + LONG $0xff418d45 // leal -1(%r9), %r8d + LONG $0x03c1f641 // testb $3, %r9b + JE LBB0_35 + WORD $0x8944; BYTE $0xc9 // movl %r9d, %ecx + WORD $0xe183; BYTE $0x03 // andl $3, %ecx + LONG $0xe457d8c5 // vxorps %xmm4, %xmm4, %xmm4 + WORD $0xc031 // xorl %eax, %eax + +LBB0_32: + LONG $0x2f10fac5 // vmovss (%rdi), %xmm5 + LONG $0xb951e2c4; BYTE $0x26 // vfmadd231ss (%rsi), %xmm5, %xmm4 + LONG $0x04c78348 // addq $4, %rdi + LONG $0x04c68348 // addq $4, %rsi + LONG $0x01c08348 // addq $1, %rax + WORD $0xc139 // cmpl %eax, %ecx + JNE LBB0_32 + WORD $0x2941; BYTE $0xc1 // subl %eax, %r9d + LONG $0x03f88341 // cmpl $3, %r8d + JAE LBB0_36 + JMP LBB0_38 + 
+LBB0_34: + LONG $0xe457d8c5 // vxorps %xmm4, %xmm4, %xmm4 + JMP LBB0_38 + +LBB0_35: + LONG $0xe457d8c5 // vxorps %xmm4, %xmm4, %xmm4 + LONG $0x03f88341 // cmpl $3, %r8d + JB LBB0_38 + +LBB0_36: + WORD $0x8944; BYTE $0xc8 // movl %r9d, %eax + WORD $0xc931 // xorl %ecx, %ecx + +LBB0_37: + LONG $0x2c10fac5; BYTE $0x8f // vmovss (%rdi,%rcx,4), %xmm5 + LONG $0x7410fac5; WORD $0x048f // vmovss 4(%rdi,%rcx,4), %xmm6 + LONG $0x9959e2c4; WORD $0x8e2c // vfmadd132ss (%rsi,%rcx,4), %xmm4, %xmm5 + LONG $0xb949e2c4; WORD $0x8e6c; BYTE $0x04 // vfmadd231ss 4(%rsi,%rcx,4), %xmm6, %xmm5 + LONG $0x7410fac5; WORD $0x088f // vmovss 8(%rdi,%rcx,4), %xmm6 + LONG $0x9951e2c4; WORD $0x8e74; BYTE $0x08 // vfmadd132ss 8(%rsi,%rcx,4), %xmm5, %xmm6 + LONG $0x6410fac5; WORD $0x0c8f // vmovss 12(%rdi,%rcx,4), %xmm4 + LONG $0x9949e2c4; WORD $0x8e64; BYTE $0x0c // vfmadd132ss 12(%rsi,%rcx,4), %xmm6, %xmm4 + LONG $0x04c18348 // addq $4, %rcx + WORD $0xc839 // cmpl %ecx, %eax + JNE LBB0_37 + +LBB0_38: + LONG $0xc158fcc5 // vaddps %ymm1, %ymm0, %ymm0 + LONG $0xca58e4c5 // vaddps %ymm2, %ymm3, %ymm1 + LONG $0xc058f4c5 // vaddps %ymm0, %ymm1, %ymm0 + LONG $0xc07cffc5 // vhaddps %ymm0, %ymm0, %ymm0 + LONG $0xc07cffc5 // vhaddps %ymm0, %ymm0, %ymm0 + LONG $0x197de3c4; WORD $0x01c1 // vextractf128 $1, %ymm0, %xmm1 + LONG $0xc158fac5 // vaddss %xmm1, %xmm0, %xmm0 + LONG $0xc058dac5 // vaddss %xmm0, %xmm4, %xmm0 + LONG $0x0211fac5 // vmovss %xmm0, (%rdx) + WORD $0x8948; BYTE $0xec // movq %rbp, %rsp + BYTE $0x5d // popq %rbp + WORD $0xf8c5; BYTE $0x77 // vzeroupper + BYTE $0xc3 // retq diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_byte_arm64.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_byte_arm64.go new file mode 100644 index 0000000000000000000000000000000000000000..7c8bbfa6c625fb167fb50d943026df6b9d54bd4f --- /dev/null +++ 
b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_byte_arm64.go @@ -0,0 +1,21 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +//go:build !noasm && arm64 + +// AUTO-GENERATED BY GOAT -- DO NOT EDIT + +package asm + +import "unsafe" + +//go:noescape +func dot_byte_256(a, b, res, len unsafe.Pointer) diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_byte_arm64.s b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_byte_arm64.s new file mode 100644 index 0000000000000000000000000000000000000000..3c55ba1afb6eb7709103822762ef828d9cdaef76 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_byte_arm64.s @@ -0,0 +1,100 @@ +//go:build !noasm && arm64 +// AUTO-GENERATED BY GOAT -- DO NOT EDIT + +TEXT ·dot_byte_256(SB), $0-32 + MOVD a+0(FP), R0 + MOVD b+8(FP), R1 + MOVD res+16(FP), R2 + MOVD len+24(FP), R3 + WORD $0xa9bf7bfd // stp x29, x30, [sp, #-16]! 
+ WORD $0xf9400068 // ldr x8, [x3] + WORD $0x910003fd // mov x29, sp + WORD $0x6b0803e9 // negs w9, w8 + WORD $0x12000d0a // and w10, w8, #0xf + WORD $0x12000d29 // and w9, w9, #0xf + WORD $0x5a894549 // csneg w9, w10, w9, mi + WORD $0x4b09010a // sub w10, w8, w9 + WORD $0x7101015f // cmp w10, #64 + WORD $0x540000ea // b.ge .LBB0_2 + WORD $0x6f00e400 // movi v0.2d, #0000000000000000 + WORD $0x2a1f03ec // mov w12, wzr + WORD $0x6f00e402 // movi v2.2d, #0000000000000000 + WORD $0x6f00e403 // movi v3.2d, #0000000000000000 + WORD $0x6f00e401 // movi v1.2d, #0000000000000000 + WORD $0x1400001f // b .LBB0_4 + +LBB0_2: + WORD $0x6f00e401 // movi v1.2d, #0000000000000000 + WORD $0xaa1f03ec // mov x12, xzr + WORD $0x6f00e403 // movi v3.2d, #0000000000000000 + WORD $0x93407d4b // sxtw x11, w10 + WORD $0x6f00e402 // movi v2.2d, #0000000000000000 + WORD $0x6f00e400 // movi v0.2d, #0000000000000000 + +LBB0_3: + WORD $0x8b0c000d // add x13, x0, x12 + WORD $0x4c4021a4 // ld1 { v4.16b, v5.16b, v6.16b, v7.16b }, [x13] + WORD $0x8b0c002d // add x13, x1, x12 + WORD $0x4c4021b0 // ld1 { v16.16b, v17.16b, v18.16b, v19.16b }, [x13] + WORD $0x9102018d // add x13, x12, #128 + WORD $0x9101018c // add x12, x12, #64 + WORD $0xeb0b01bf // cmp x13, x11 + WORD $0x2e24c214 // umull v20.8h, v16.8b, v4.8b + WORD $0x2e25c236 // umull v22.8h, v17.8b, v5.8b + WORD $0x2e26c258 // umull v24.8h, v18.8b, v6.8b + WORD $0x2e27c27a // umull v26.8h, v19.8b, v7.8b + WORD $0x6e24c215 // umull2 v21.8h, v16.16b, v4.16b + WORD $0x6e25c237 // umull2 v23.8h, v17.16b, v5.16b + WORD $0x6e26c259 // umull2 v25.8h, v18.16b, v6.16b + WORD $0x6e27c264 // umull2 v4.8h, v19.16b, v7.16b + WORD $0x6e606a81 // uadalp v1.4s, v20.8h + WORD $0x6e606ac3 // uadalp v3.4s, v22.8h + WORD $0x6e606b02 // uadalp v2.4s, v24.8h + WORD $0x6e606b40 // uadalp v0.4s, v26.8h + WORD $0x6e606aa1 // uadalp v1.4s, v21.8h + WORD $0x6e606ae3 // uadalp v3.4s, v23.8h + WORD $0x6e606b22 // uadalp v2.4s, v25.8h + WORD $0x6e606880 // uadalp v0.4s, v4.8h + 
WORD $0x54fffd2d // b.le .LBB0_3 + +LBB0_4: + WORD $0x6b0a019f // cmp w12, w10 + WORD $0x5400018a // b.ge .LBB0_7 + WORD $0x2a0c03eb // mov w11, w12 + WORD $0x93407d4a // sxtw x10, w10 + +LBB0_6: + WORD $0x3ceb6804 // ldr q4, [x0, x11] + WORD $0x3ceb6825 // ldr q5, [x1, x11] + WORD $0x9100416b // add x11, x11, #16 + WORD $0xeb0a017f // cmp x11, x10 + WORD $0x2e24c0a6 // umull v6.8h, v5.8b, v4.8b + WORD $0x6e24c0a4 // umull2 v4.8h, v5.16b, v4.16b + WORD $0x6e6068c1 // uadalp v1.4s, v6.8h + WORD $0x6e606881 // uadalp v1.4s, v4.8h + WORD $0x54ffff0b // b.lt .LBB0_6 + +LBB0_7: + WORD $0x4ea18461 // add v1.4s, v3.4s, v1.4s + WORD $0x7100053f // cmp w9, #1 + WORD $0x4ea28421 // add v1.4s, v1.4s, v2.4s + WORD $0x4ea08420 // add v0.4s, v1.4s, v0.4s + WORD $0x4eb1b800 // addv s0, v0.4s + WORD $0x1e26000a // fmov w10, s0 + WORD $0x5400014b // b.lt .LBB0_10 + WORD $0x4b0903e9 // neg w9, w9 + WORD $0x93407d08 // sxtw x8, w8 + WORD $0x8b29c109 // add x9, x8, w9, sxtw + +LBB0_9: + WORD $0x3869680b // ldrb w11, [x0, x9] + WORD $0x3869682c // ldrb w12, [x1, x9] + WORD $0x91000529 // add x9, x9, #1 + WORD $0xeb08013f // cmp x9, x8 + WORD $0x1b0b298a // madd w10, w12, w11, w10 + WORD $0x54ffff6b // b.lt .LBB0_9 + +LBB0_10: + WORD $0xb900004a // str w10, [x2] + WORD $0xa8c17bfd // ldp x29, x30, [sp], #16 + WORD $0xd65f03c0 // ret diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_byte_avx256.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_byte_avx256.go new file mode 100644 index 0000000000000000000000000000000000000000..864fdb62d54a428cfc81325d498afb722f6858ad --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_byte_avx256.go @@ -0,0 +1,21 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 
- 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +//go:build !noasm && amd64 + +// AUTO-GENERATED BY GOAT -- DO NOT EDIT + +package asm + +import "unsafe" + +//go:noescape +func dot_byte_256(a, b, res, len unsafe.Pointer) diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_byte_avx256.s b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_byte_avx256.s new file mode 100644 index 0000000000000000000000000000000000000000..f6caf29e0ac22cc6efc39fc6fb1e0401653b85c3 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_byte_avx256.s @@ -0,0 +1,242 @@ +//go:build !noasm && amd64 +// Code generated by GoAT. DO NOT EDIT. + +TEXT ·dot_byte_256(SB), $0-32 + MOVQ a+0(FP), DI + MOVQ b+8(FP), SI + MOVQ res+16(FP), DX + MOVQ len+24(FP), CX + BYTE $0x55 // pushq %rbp + WORD $0x8948; BYTE $0xe5 // movq %rsp, %rbp + WORD $0x5741 // pushq %r15 + WORD $0x5641 // pushq %r14 + BYTE $0x53 // pushq %rbx + LONG $0xf8e48348 // andq $-8, %rsp + WORD $0x8b4c; BYTE $0x19 // movq (%rcx), %r11 + WORD $0x8945; BYTE $0xd9 // movl %r11d, %r9d + LONG $0x20fb8341 // cmpl $32, %r11d + JGE LBB0_1 + WORD $0x8545; BYTE $0xdb // testl %r11d, %r11d + JLE LBB0_7 + LONG $0x20f98341 // cmpl $32, %r9d + JAE LBB0_10 + WORD $0xc931 // xorl %ecx, %ecx + WORD $0x3145; BYTE $0xff // xorl %r15d, %r15d + JMP LBB0_13 + +LBB0_1: + WORD $0x6349; BYTE $0xc1 // movslq %r9d, %rax + LONG $0xc0eff9c5 // vpxor %xmm0, %xmm0, %xmm0 + LONG $0x00001fb9; BYTE $0x00 // movl $31, %ecx + +LBB0_2: + LONG $0x4c6ffec5; WORD $0xe10f // vmovdqu -31(%rdi,%rcx), %ymm1 + LONG $0xd171edc5; BYTE $0x08 // vpsrlw $8, %ymm1, %ymm2 + LONG $0xf171f5c5; BYTE $0x08 // vpsllw $8, %ymm1, %ymm1 + LONG $0xd171f5c5; BYTE $0x08 // vpsrlw $8, %ymm1, %ymm1 + LONG $0x5c6ffec5; WORD $0xe10e // vmovdqu -31(%rsi,%rcx), %ymm3 + LONG $0xd371ddc5; BYTE $0x08 // vpsrlw $8, %ymm3, %ymm4 + LONG $0xd4f5edc5 
// vpmaddwd %ymm4, %ymm2, %ymm2 + LONG $0xc0feedc5 // vpaddd %ymm0, %ymm2, %ymm0 + LONG $0xf371edc5; BYTE $0x08 // vpsllw $8, %ymm3, %ymm2 + LONG $0xd271edc5; BYTE $0x08 // vpsrlw $8, %ymm2, %ymm2 + LONG $0xcaf5f5c5 // vpmaddwd %ymm2, %ymm1, %ymm1 + LONG $0xc1fefdc5 // vpaddd %ymm1, %ymm0, %ymm0 + LONG $0x20c18348 // addq $32, %rcx + WORD $0x3948; BYTE $0xc1 // cmpq %rax, %rcx + JL LBB0_2 + WORD $0x8945; BYTE $0xc8 // movl %r9d, %r8d + LONG $0xe0e08341 // andl $-32, %r8d + LONG $0x397de3c4; WORD $0x01c1 // vextracti128 $1, %ymm0, %xmm1 + LONG $0xc0fef1c5 // vpaddd %xmm0, %xmm1, %xmm0 + LONG $0xc870f9c5; BYTE $0x1b // vpshufd $27, %xmm0, %xmm1 # xmm1 = xmm0[3,2,1,0] + LONG $0xc0fef1c5 // vpaddd %xmm0, %xmm1, %xmm0 + LONG $0xc870f9c5; BYTE $0x55 // vpshufd $85, %xmm0, %xmm1 # xmm1 = xmm0[1,1,1,1] + LONG $0xc0fef1c5 // vpaddd %xmm0, %xmm1, %xmm0 + LONG $0x7e79c1c4; BYTE $0xc7 // vmovd %xmm0, %r15d + WORD $0x3945; BYTE $0xd8 // cmpl %r11d, %r8d + JGE LBB0_25 + WORD $0x8945; BYTE $0xce // movl %r9d, %r14d + LONG $0xe0e68341 // andl $-32, %r14d + LONG $0x01468d41 // leal 1(%r14), %eax + WORD $0x3944; BYTE $0xd8 // cmpl %r11d, %eax + LONG $0xc34e0f41 // cmovlel %r11d, %eax + WORD $0xf741; BYTE $0xd6 // notl %r14d + WORD $0x0141; BYTE $0xc6 // addl %eax, %r14d + LONG $0x07fe8341 // cmpl $7, %r14d + JAE LBB0_14 + WORD $0x894c; BYTE $0xc3 // movq %r8, %rbx + JMP LBB0_24 + +LBB0_7: + WORD $0x3145; BYTE $0xff // xorl %r15d, %r15d + JMP LBB0_25 + +LBB0_10: + WORD $0x8944; BYTE $0xcb // movl %r9d, %ebx + WORD $0xe383; BYTE $0x1f // andl $31, %ebx + WORD $0x8944; BYTE $0xc9 // movl %r9d, %ecx + WORD $0xe183; BYTE $0xe0 // andl $-32, %ecx + LONG $0xc0eff9c5 // vpxor %xmm0, %xmm0, %xmm0 + WORD $0xc031 // xorl %eax, %eax + LONG $0xc9eff1c5 // vpxor %xmm1, %xmm1, %xmm1 + LONG $0xd2efe9c5 // vpxor %xmm2, %xmm2, %xmm2 + LONG $0xdbefe1c5 // vpxor %xmm3, %xmm3, %xmm3 + +LBB0_11: + LONG $0x487df262; WORD $0x2432; BYTE $0x07 // vpmovzxbq (%rdi,%rax), %zmm4 # zmm4 = 
mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero,mem[4],zero,zero,zero,zero,zero,zero,zero,mem[5],zero,zero,zero,zero,zero,zero,zero,mem[6],zero,zero,zero,zero,zero,zero,zero,mem[7],zero,zero,zero,zero,zero,zero,zero + QUAD $0x01076c32487df262 // vpmovzxbq 8(%rdi,%rax), %zmm5 # zmm5 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero,mem[4],zero,zero,zero,zero,zero,zero,zero,mem[5],zero,zero,zero,zero,zero,zero,zero,mem[6],zero,zero,zero,zero,zero,zero,zero,mem[7],zero,zero,zero,zero,zero,zero,zero + QUAD $0x02077432487df262 // vpmovzxbq 16(%rdi,%rax), %zmm6 # zmm6 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero,mem[4],zero,zero,zero,zero,zero,zero,zero,mem[5],zero,zero,zero,zero,zero,zero,zero,mem[6],zero,zero,zero,zero,zero,zero,zero,mem[7],zero,zero,zero,zero,zero,zero,zero + QUAD $0x03077c32487df262 // vpmovzxbq 24(%rdi,%rax), %zmm7 # zmm7 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero,mem[4],zero,zero,zero,zero,zero,zero,zero,mem[5],zero,zero,zero,zero,zero,zero,zero,mem[6],zero,zero,zero,zero,zero,zero,zero,mem[7],zero,zero,zero,zero,zero,zero,zero + LONG $0x487d7262; WORD $0x0432; BYTE $0x06 // vpmovzxbq (%rsi,%rax), %zmm8 # zmm8 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero,mem[4],zero,zero,zero,zero,zero,zero,zero,mem[5],zero,zero,zero,zero,zero,zero,zero,mem[6],zero,zero,zero,zero,zero,zero,zero,mem[7],zero,zero,zero,zero,zero,zero,zero + LONG $0x48bdf262; WORD 
$0xe428 // vpmuldq %zmm4, %zmm8, %zmm4 + LONG $0x48ddf162; WORD $0xc0d4 // vpaddq %zmm0, %zmm4, %zmm0 + QUAD $0x01066432487df262 // vpmovzxbq 8(%rsi,%rax), %zmm4 # zmm4 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero,mem[4],zero,zero,zero,zero,zero,zero,zero,mem[5],zero,zero,zero,zero,zero,zero,zero,mem[6],zero,zero,zero,zero,zero,zero,zero,mem[7],zero,zero,zero,zero,zero,zero,zero + LONG $0x48ddf262; WORD $0xe528 // vpmuldq %zmm5, %zmm4, %zmm4 + LONG $0x48ddf162; WORD $0xc9d4 // vpaddq %zmm1, %zmm4, %zmm1 + QUAD $0x02066432487df262 // vpmovzxbq 16(%rsi,%rax), %zmm4 # zmm4 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero,mem[4],zero,zero,zero,zero,zero,zero,zero,mem[5],zero,zero,zero,zero,zero,zero,zero,mem[6],zero,zero,zero,zero,zero,zero,zero,mem[7],zero,zero,zero,zero,zero,zero,zero + LONG $0x48ddf262; WORD $0xe628 // vpmuldq %zmm6, %zmm4, %zmm4 + LONG $0x48ddf162; WORD $0xd2d4 // vpaddq %zmm2, %zmm4, %zmm2 + QUAD $0x03066432487df262 // vpmovzxbq 24(%rsi,%rax), %zmm4 # zmm4 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero,mem[4],zero,zero,zero,zero,zero,zero,zero,mem[5],zero,zero,zero,zero,zero,zero,zero,mem[6],zero,zero,zero,zero,zero,zero,zero,mem[7],zero,zero,zero,zero,zero,zero,zero + LONG $0x48ddf262; WORD $0xe728 // vpmuldq %zmm7, %zmm4, %zmm4 + LONG $0x48ddf162; WORD $0xdbd4 // vpaddq %zmm3, %zmm4, %zmm3 + LONG $0x20c08348 // addq $32, %rax + WORD $0x3948; BYTE $0xc1 // cmpq %rax, %rcx + JNE LBB0_11 + LONG $0x48f5f162; WORD $0xc0d4 // vpaddq %zmm0, %zmm1, %zmm0 + LONG $0x48edf162; WORD $0xc0d4 // vpaddq %zmm0, %zmm2, %zmm0 + LONG $0x48e5f162; WORD $0xc0d4 // vpaddq %zmm0, %zmm3, %zmm0 + LONG 
$0x48fdf362; WORD $0xc13b; BYTE $0x01 // vextracti64x4 $1, %zmm0, %ymm1 + LONG $0x48fdf162; WORD $0xc1d4 // vpaddq %zmm1, %zmm0, %zmm0 + LONG $0x397de3c4; WORD $0x01c1 // vextracti128 $1, %ymm0, %xmm1 + LONG $0xc1d4f9c5 // vpaddq %xmm1, %xmm0, %xmm0 + LONG $0xc870f9c5; BYTE $0xee // vpshufd $238, %xmm0, %xmm1 # xmm1 = xmm0[2,3,2,3] + LONG $0xc1d4f9c5 // vpaddq %xmm1, %xmm0, %xmm0 + LONG $0x7ef9c1c4; BYTE $0xc7 // vmovq %xmm0, %r15 + WORD $0x8548; BYTE $0xdb // testq %rbx, %rbx + JE LBB0_25 + +LBB0_13: + LONG $0x0f1cb60f // movzbl (%rdi,%rcx), %ebx + LONG $0x0e04b60f // movzbl (%rsi,%rcx), %eax + LONG $0xc3af0f48 // imulq %rbx, %rax + WORD $0x0149; BYTE $0xc7 // addq %rax, %r15 + LONG $0x01c18348 // addq $1, %rcx + WORD $0x3949; BYTE $0xc9 // cmpq %rcx, %r9 + JNE LBB0_13 + JMP LBB0_25 + +LBB0_14: + QUAD $0x0001ffffffc0ba49; WORD $0x0000 // movabsq $8589934528, %r10 # imm = 0x1FFFFFFC0 + LONG $0x3ffe8341 // cmpl $63, %r14d + JAE LBB0_19 + WORD $0xc931 // xorl %ecx, %ecx + JMP LBB0_16 + +LBB0_19: + LONG $0x01c68349 // addq $1, %r14 + WORD $0x894c; BYTE $0xf1 // movq %r14, %rcx + WORD $0x214c; BYTE $0xd1 // andq %r10, %rcx + LONG $0x6e79c1c4; BYTE $0xc7 // vmovd %r15d, %xmm0 + LONG $0x073c8d4e // leaq (%rdi,%r8), %r15 + LONG $0x30c78349 // addq $48, %r15 + LONG $0x061c8d4a // leaq (%rsi,%r8), %rbx + LONG $0x30c38348 // addq $48, %rbx + LONG $0xc9eff1c5 // vpxor %xmm1, %xmm1, %xmm1 + WORD $0xc031 // xorl %eax, %eax + LONG $0xd2efe9c5 // vpxor %xmm2, %xmm2, %xmm2 + LONG $0xdbefe1c5 // vpxor %xmm3, %xmm3, %xmm3 + +LBB0_20: + QUAD $0xfd076431487dd262 // vpmovzxbd -48(%r15,%rax), %zmm4 # zmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero + QUAD $0xfe076c31487dd262 
// vpmovzxbd -32(%r15,%rax), %zmm5 # zmm5 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero + QUAD $0xff077431487dd262 // vpmovzxbd -16(%r15,%rax), %zmm6 # zmm6 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero + LONG $0x487dd262; WORD $0x3c31; BYTE $0x07 // vpmovzxbd (%r15,%rax), %zmm7 # zmm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero + QUAD $0xfd034431487d7262 // vpmovzxbd -48(%rbx,%rax), %zmm8 # zmm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero + LONG $0x483df262; WORD $0xe440 // vpmulld %zmm4, %zmm8, %zmm4 + LONG $0x485df162; WORD $0xc0fe // vpaddd %zmm0, %zmm4, %zmm0 + QUAD $0xfe036431487df262 // vpmovzxbd -32(%rbx,%rax), %zmm4 # zmm4 = 
mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero + LONG $0x485df262; WORD $0xe540 // vpmulld %zmm5, %zmm4, %zmm4 + LONG $0x485df162; WORD $0xc9fe // vpaddd %zmm1, %zmm4, %zmm1 + QUAD $0xff036431487df262 // vpmovzxbd -16(%rbx,%rax), %zmm4 # zmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero + LONG $0x485df262; WORD $0xe640 // vpmulld %zmm6, %zmm4, %zmm4 + LONG $0x485df162; WORD $0xd2fe // vpaddd %zmm2, %zmm4, %zmm2 + LONG $0x487df262; WORD $0x2431; BYTE $0x03 // vpmovzxbd (%rbx,%rax), %zmm4 # zmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero + LONG $0x485df262; WORD $0xe740 // vpmulld %zmm7, %zmm4, %zmm4 + LONG $0x485df162; WORD $0xdbfe // vpaddd %zmm3, %zmm4, %zmm3 + LONG $0x40c08348 // addq $64, %rax + WORD $0x3948; BYTE $0xc1 // cmpq %rax, %rcx + JNE LBB0_20 + LONG $0x4875f162; WORD $0xc0fe // vpaddd %zmm0, %zmm1, %zmm0 + LONG $0x486df162; WORD $0xc0fe // vpaddd %zmm0, %zmm2, %zmm0 + LONG $0x4865f162; WORD $0xc0fe // vpaddd %zmm0, %zmm3, %zmm0 + LONG $0x48fdf362; WORD $0xc13b; BYTE $0x01 // vextracti64x4 $1, %zmm0, %ymm1 + LONG $0x487df162; 
WORD $0xc1fe // vpaddd %zmm1, %zmm0, %zmm0 + LONG $0x397de3c4; WORD $0x01c1 // vextracti128 $1, %ymm0, %xmm1 + LONG $0xc1fef9c5 // vpaddd %xmm1, %xmm0, %xmm0 + LONG $0xc870f9c5; BYTE $0xee // vpshufd $238, %xmm0, %xmm1 # xmm1 = xmm0[2,3,2,3] + LONG $0xc1fef9c5 // vpaddd %xmm1, %xmm0, %xmm0 + LONG $0xc870f9c5; BYTE $0x55 // vpshufd $85, %xmm0, %xmm1 # xmm1 = xmm0[1,1,1,1] + LONG $0xc1fef9c5 // vpaddd %xmm1, %xmm0, %xmm0 + LONG $0x7e79c1c4; BYTE $0xc7 // vmovd %xmm0, %r15d + WORD $0x3949; BYTE $0xce // cmpq %rcx, %r14 + JE LBB0_25 + LONG $0x38c6f641 // testb $56, %r14b + JE LBB0_23 + +LBB0_16: + LONG $0xe0e18341 // andl $-32, %r9d + LONG $0x01418d41 // leal 1(%r9), %eax + WORD $0x3944; BYTE $0xd8 // cmpl %r11d, %eax + LONG $0xc34e0f41 // cmovlel %r11d, %eax + WORD $0xf741; BYTE $0xd1 // notl %r9d + WORD $0x0141; BYTE $0xc1 // addl %eax, %r9d + LONG $0x01c18349 // addq $1, %r9 + LONG $0x38c28349 // addq $56, %r10 + WORD $0x214d; BYTE $0xca // andq %r9, %r10 + LONG $0x021c8d4b // leaq (%r10,%r8), %rbx + LONG $0x6e79c1c4; BYTE $0xc7 // vmovd %r15d, %xmm0 + LONG $0x07048d4a // leaq (%rdi,%r8), %rax + WORD $0x0149; BYTE $0xf0 // addq %rsi, %r8 + +LBB0_17: + LONG $0x3079e2c4; WORD $0x080c // vpmovzxbw (%rax,%rcx), %xmm1 # xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero + LONG $0x3079c2c4; WORD $0x0814 // vpmovzxbw (%r8,%rcx), %xmm2 # xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero + LONG $0xc9f5e9c5 // vpmaddwd %xmm1, %xmm2, %xmm1 + LONG $0xc0fef5c5 // vpaddd %ymm0, %ymm1, %ymm0 + LONG $0x08c18348 // addq $8, %rcx + WORD $0x3949; BYTE $0xca // cmpq %rcx, %r10 + JNE LBB0_17 + LONG $0x397de3c4; WORD $0x01c1 // vextracti128 $1, %ymm0, %xmm1 + LONG $0xc1fef9c5 // vpaddd %xmm1, %xmm0, %xmm0 + LONG $0xc870f9c5; BYTE $0xee // vpshufd $238, %xmm0, %xmm1 # xmm1 = xmm0[2,3,2,3] + LONG $0xc1fef9c5 // vpaddd %xmm1, %xmm0, %xmm0 + LONG $0xc870f9c5; BYTE $0x55 // vpshufd $85, 
%xmm0, %xmm1 # xmm1 = xmm0[1,1,1,1] + LONG $0xc1fef9c5 // vpaddd %xmm1, %xmm0, %xmm0 + LONG $0x7e79c1c4; BYTE $0xc7 // vmovd %xmm0, %r15d + WORD $0x394d; BYTE $0xd1 // cmpq %r10, %r9 + JNE LBB0_24 + JMP LBB0_25 + +LBB0_23: + WORD $0x014c; BYTE $0xc1 // addq %r8, %rcx + WORD $0x8948; BYTE $0xcb // movq %rcx, %rbx + +LBB0_24: + LONG $0x1f04b60f // movzbl (%rdi,%rbx), %eax + LONG $0x1e0cb60f // movzbl (%rsi,%rbx), %ecx + WORD $0xaf0f; BYTE $0xc8 // imull %eax, %ecx + WORD $0x0141; BYTE $0xcf // addl %ecx, %r15d + LONG $0x01c38348 // addq $1, %rbx + WORD $0x3944; BYTE $0xdb // cmpl %r11d, %ebx + JL LBB0_24 + +LBB0_25: + WORD $0x8944; BYTE $0x3a // movl %r15d, (%rdx) + LONG $0xe8658d48 // leaq -24(%rbp), %rsp + BYTE $0x5b // popq %rbx + WORD $0x5e41 // popq %r14 + WORD $0x5f41 // popq %r15 + BYTE $0x5d // popq %rbp + WORD $0xf8c5; BYTE $0x77 // vzeroupper + BYTE $0xc3 // retq diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_float_byte_arm64.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_float_byte_arm64.go new file mode 100644 index 0000000000000000000000000000000000000000..c4a6d1208428f7b4de95dd005d0ea5fd4790e945 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_float_byte_arm64.go @@ -0,0 +1,21 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +//go:build !noasm && arm64 + +// AUTO-GENERATED BY GOAT -- DO NOT EDIT + +package asm + +import "unsafe" + +//go:noescape +func dot_float_byte_neon(a, b, res, len unsafe.Pointer) diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_float_byte_arm64.s b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_float_byte_arm64.s new file mode 100644 index 0000000000000000000000000000000000000000..4d554e4c79f3b762e7798cd193353d1d63bce4d2 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_float_byte_arm64.s @@ -0,0 +1,228 @@ +//go:build !noasm && arm64 +// AUTO-GENERATED BY GOAT -- DO NOT EDIT + +TEXT ·dot_float_byte_neon(SB), $0-32 + MOVD a+0(FP), R0 + MOVD b+8(FP), R1 + MOVD res+16(FP), R2 + MOVD len+24(FP), R3 + WORD $0xa9bf7bfd // stp x29, x30, [sp, #-16]! + WORD $0xf9400068 // ldr x8, [x3] + WORD $0x910003fd // mov x29, sp + WORD $0x7100411f // cmp w8, #16 + WORD $0x540001ca // b.ge .LBB0_5 + WORD $0x2f00e400 // movi d0, #0000000000000000 + WORD $0x34001828 // cbz w8, .LBB0_17 + WORD $0x5100050c // sub w12, w8, #1 + WORD $0x71001d9f // cmp w12, #7 + WORD $0x54001703 // b.lo .LBB0_16 + WORD $0x91000589 // add x9, x12, #1 + WORD $0x9000000a // adrp x10, .LCPI0_2 + WORD $0x9000000b // adrp x11, .LCPI0_3 + WORD $0xf1007d9f // cmp x12, #31 + WORD $0x54000482 // b.hs .LBB0_8 + WORD $0x2f00e400 // movi d0, #0000000000000000 + WORD $0xaa1f03ec // mov x12, xzr + WORD $0x14000089 // b .LBB0_12 + +LBB0_5: + WORD $0x6f00e400 // movi v0.2d, #0000000000000000 + WORD $0x11004108 // add w8, w8, #16 + +LBB0_6: + WORD $0x3cc10421 // ldr q1, [x1], #16 + WORD $0xad401404 // ldp q4, q5, [x0] + WORD $0x51004108 // sub w8, w8, #16 + WORD $0x2f08a422 // ushll v2.8h, v1.8b, #0 + WORD $0x71007d1f // cmp w8, #31 + WORD $0x6f08a421 // ushll2 v1.8h, v1.16b, #0 + WORD $0x2f10a443 // ushll v3.4s, v2.4h, #0 + WORD $0x6f10a442 // 
ushll2 v2.4s, v2.8h, #0 + WORD $0x6e21d863 // ucvtf v3.4s, v3.4s + WORD $0x6e21d842 // ucvtf v2.4s, v2.4s + WORD $0x4e24cc60 // fmla v0.4s, v3.4s, v4.4s + WORD $0x2f10a423 // ushll v3.4s, v1.4h, #0 + WORD $0x6f10a421 // ushll2 v1.4s, v1.8h, #0 + WORD $0x6e21d863 // ucvtf v3.4s, v3.4s + WORD $0x4e25cc40 // fmla v0.4s, v2.4s, v5.4s + WORD $0x6e21d821 // ucvtf v1.4s, v1.4s + WORD $0xad411002 // ldp q2, q4, [x0, #32] + WORD $0x91010000 // add x0, x0, #64 + WORD $0x4e22cc60 // fmla v0.4s, v3.4s, v2.4s + WORD $0x4e24cc20 // fmla v0.4s, v1.4s, v4.4s + WORD $0x54fffd88 // b.hi .LBB0_6 + WORD $0x4e0c0401 // dup v1.4s, v0.s[1] + WORD $0x4e140402 // dup v2.4s, v0.s[2] + WORD $0x4e21d401 // fadd v1.4s, v0.4s, v1.4s + WORD $0x4e1c0400 // dup v0.4s, v0.s[3] + WORD $0x4e21d441 // fadd v1.4s, v2.4s, v1.4s + WORD $0x4e21d400 // fadd v0.4s, v0.4s, v1.4s + WORD $0x2f00e401 // movi d1, #0000000000000000 + WORD $0x1e212800 // fadd s0, s0, s1 + WORD $0x14000096 // b .LBB0_17 + +LBB0_8: + WORD $0x9000000e // adrp x14, .LCPI0_0 + WORD $0x9000000f // adrp x15, .LCPI0_1 + WORD $0x927b6d2c // and x12, x9, #0x1ffffffe0 + WORD $0x2f00e400 // movi d0, #0000000000000000 + WORD $0x9100402d // add x13, x1, #16 + WORD $0x3dc00143 // ldr q3, [x10, :lo12:.LCPI0_2] + WORD $0x3dc001c1 // ldr q1, [x14, :lo12:.LCPI0_0] + WORD $0x9101000e // add x14, x0, #64 + WORD $0x3dc001e2 // ldr q2, [x15, :lo12:.LCPI0_1] + WORD $0xaa0c03ef // mov x15, x12 + WORD $0x3dc00164 // ldr q4, [x11, :lo12:.LCPI0_3] + +LBB0_9: + WORD $0x3cdf01a5 // ldur q5, [x13, #-16] + WORD $0xf10081ef // subs x15, x15, #32 + WORD $0xad7e41c7 // ldp q7, q16, [x14, #-64] + WORD $0x4e0400a6 // tbl v6.16b, { v5.16b }, v4.16b + WORD $0x4e0300b1 // tbl v17.16b, { v5.16b }, v3.16b + WORD $0x6e21d8c6 // ucvtf v6.4s, v6.4s + WORD $0x6e21da31 // ucvtf v17.4s, v17.4s + WORD $0x6e26dce6 // fmul v6.4s, v7.4s, v6.4s + WORD $0x5e0c04c7 // mov s7, v6.s[1] + WORD $0x1e262800 // fadd s0, s0, s6 + WORD $0x1e272800 // fadd s0, s0, s7 + WORD $0x5e1404c7 // mov 
s7, v6.s[2] + WORD $0x5e1c04c6 // mov s6, v6.s[3] + WORD $0x1e272800 // fadd s0, s0, s7 + WORD $0x6e31de07 // fmul v7.4s, v16.4s, v17.4s + WORD $0x4e0200b0 // tbl v16.16b, { v5.16b }, v2.16b + WORD $0x4e0100a5 // tbl v5.16b, { v5.16b }, v1.16b + WORD $0x1e262800 // fadd s0, s0, s6 + WORD $0x5e0c04e6 // mov s6, v7.s[1] + WORD $0x5e1404f1 // mov s17, v7.s[2] + WORD $0x6e21d8a5 // ucvtf v5.4s, v5.4s + WORD $0x1e272800 // fadd s0, s0, s7 + WORD $0x5e1c04e7 // mov s7, v7.s[3] + WORD $0x1e262800 // fadd s0, s0, s6 + WORD $0x6e21da06 // ucvtf v6.4s, v16.4s + WORD $0x1e312800 // fadd s0, s0, s17 + WORD $0xad7f45d0 // ldp q16, q17, [x14, #-32] + WORD $0x1e272800 // fadd s0, s0, s7 + WORD $0x6e26de06 // fmul v6.4s, v16.4s, v6.4s + WORD $0x6e25de25 // fmul v5.4s, v17.4s, v5.4s + WORD $0x5e0c04c7 // mov s7, v6.s[1] + WORD $0x1e262800 // fadd s0, s0, s6 + WORD $0x5e1404d0 // mov s16, v6.s[2] + WORD $0x5e1c04c6 // mov s6, v6.s[3] + WORD $0x5e1404b1 // mov s17, v5.s[2] + WORD $0x1e272800 // fadd s0, s0, s7 + WORD $0x5e0c04a7 // mov s7, v5.s[1] + WORD $0x1e302800 // fadd s0, s0, s16 + WORD $0x1e262800 // fadd s0, s0, s6 + WORD $0x3cc205a6 // ldr q6, [x13], #32 + WORD $0x1e252800 // fadd s0, s0, s5 + WORD $0x5e1c04a5 // mov s5, v5.s[3] + WORD $0x4e0400d0 // tbl v16.16b, { v6.16b }, v4.16b + WORD $0x1e272800 // fadd s0, s0, s7 + WORD $0x6e21da07 // ucvtf v7.4s, v16.4s + WORD $0x1e312800 // fadd s0, s0, s17 + WORD $0xad4045d0 // ldp q16, q17, [x14] + WORD $0x1e252800 // fadd s0, s0, s5 + WORD $0x6e27de07 // fmul v7.4s, v16.4s, v7.4s + WORD $0x4e0300d0 // tbl v16.16b, { v6.16b }, v3.16b + WORD $0x5e0c04e5 // mov s5, v7.s[1] + WORD $0x1e272800 // fadd s0, s0, s7 + WORD $0x6e21da10 // ucvtf v16.4s, v16.4s + WORD $0x1e252800 // fadd s0, s0, s5 + WORD $0x5e1404e5 // mov s5, v7.s[2] + WORD $0x5e1c04e7 // mov s7, v7.s[3] + WORD $0x1e252800 // fadd s0, s0, s5 + WORD $0x6e30de25 // fmul v5.4s, v17.4s, v16.4s + WORD $0x4e0200d0 // tbl v16.16b, { v6.16b }, v2.16b + WORD $0x4e0100c6 // tbl 
v6.16b, { v6.16b }, v1.16b + WORD $0x1e272800 // fadd s0, s0, s7 + WORD $0x5e0c04a7 // mov s7, v5.s[1] + WORD $0x5e1404b1 // mov s17, v5.s[2] + WORD $0x1e252800 // fadd s0, s0, s5 + WORD $0x5e1c04a5 // mov s5, v5.s[3] + WORD $0x1e272800 // fadd s0, s0, s7 + WORD $0x6e21da07 // ucvtf v7.4s, v16.4s + WORD $0x1e312800 // fadd s0, s0, s17 + WORD $0xad4145d0 // ldp q16, q17, [x14, #32] + WORD $0x910201ce // add x14, x14, #128 + WORD $0x1e252800 // fadd s0, s0, s5 + WORD $0x6e27de07 // fmul v7.4s, v16.4s, v7.4s + WORD $0x5e0c04e5 // mov s5, v7.s[1] + WORD $0x1e272800 // fadd s0, s0, s7 + WORD $0x1e252800 // fadd s0, s0, s5 + WORD $0x5e1404e5 // mov s5, v7.s[2] + WORD $0x1e252800 // fadd s0, s0, s5 + WORD $0x6e21d8c5 // ucvtf v5.4s, v6.4s + WORD $0x5e1c04e6 // mov s6, v7.s[3] + WORD $0x6e25de25 // fmul v5.4s, v17.4s, v5.4s + WORD $0x1e262800 // fadd s0, s0, s6 + WORD $0x5e0c04a6 // mov s6, v5.s[1] + WORD $0x1e252800 // fadd s0, s0, s5 + WORD $0x1e262800 // fadd s0, s0, s6 + WORD $0x5e1404a6 // mov s6, v5.s[2] + WORD $0x5e1c04a5 // mov s5, v5.s[3] + WORD $0x1e262800 // fadd s0, s0, s6 + WORD $0x1e252800 // fadd s0, s0, s5 + WORD $0x54fff501 // b.ne .LBB0_9 + WORD $0xeb0c013f // cmp x9, x12 + WORD $0x54000600 // b.eq .LBB0_17 + WORD $0xf27d053f // tst x9, #0x18 + WORD $0x540004a0 // b.eq .LBB0_15 + +LBB0_12: + WORD $0x927d752d // and x13, x9, #0x1fffffff8 + WORD $0x8b0c002e // add x14, x1, x12 + WORD $0x8b0d0021 // add x1, x1, x13 + WORD $0x4b0d0108 // sub w8, w8, w13 + WORD $0x8b0c080f // add x15, x0, x12, lsl #2 + WORD $0x8b0d0800 // add x0, x0, x13, lsl #2 + WORD $0x3dc00141 // ldr q1, [x10, :lo12:.LCPI0_2] + WORD $0xcb0d018a // sub x10, x12, x13 + WORD $0x3dc00162 // ldr q2, [x11, :lo12:.LCPI0_3] + +LBB0_13: + WORD $0xfc4085c3 // ldr d3, [x14], #8 + WORD $0xacc119e5 // ldp q5, q6, [x15], #32 + WORD $0xb100214a // adds x10, x10, #8 + WORD $0x4e020064 // tbl v4.16b, { v3.16b }, v2.16b + WORD $0x4e010063 // tbl v3.16b, { v3.16b }, v1.16b + WORD $0x6e21d884 // ucvtf v4.4s, 
v4.4s + WORD $0x6e21d863 // ucvtf v3.4s, v3.4s + WORD $0x6e24dca4 // fmul v4.4s, v5.4s, v4.4s + WORD $0x6e23dcc3 // fmul v3.4s, v6.4s, v3.4s + WORD $0x5e0c0485 // mov s5, v4.s[1] + WORD $0x1e242800 // fadd s0, s0, s4 + WORD $0x5e140487 // mov s7, v4.s[2] + WORD $0x5e1c0484 // mov s4, v4.s[3] + WORD $0x1e252800 // fadd s0, s0, s5 + WORD $0x5e140465 // mov s5, v3.s[2] + WORD $0x1e272800 // fadd s0, s0, s7 + WORD $0x1e242800 // fadd s0, s0, s4 + WORD $0x5e0c0464 // mov s4, v3.s[1] + WORD $0x1e232800 // fadd s0, s0, s3 + WORD $0x5e1c0463 // mov s3, v3.s[3] + WORD $0x1e242800 // fadd s0, s0, s4 + WORD $0x1e252800 // fadd s0, s0, s5 + WORD $0x1e232800 // fadd s0, s0, s3 + WORD $0x54fffd21 // b.ne .LBB0_13 + WORD $0xeb0d013f // cmp x9, x13 + WORD $0x540000a1 // b.ne .LBB0_16 + WORD $0x1400000a // b .LBB0_17 + +LBB0_15: + WORD $0x4b0c0108 // sub w8, w8, w12 + WORD $0x8b0c0021 // add x1, x1, x12 + WORD $0x8b0c0800 // add x0, x0, x12, lsl #2 + +LBB0_16: + WORD $0x38401429 // ldrb w9, [x1], #1 + WORD $0xbc404402 // ldr s2, [x0], #4 + WORD $0x71000508 // subs w8, w8, #1 + WORD $0x1e230121 // ucvtf s1, w9 + WORD $0x1f010040 // fmadd s0, s2, s1, s0 + WORD $0x54ffff61 // b.ne .LBB0_16 + +LBB0_17: + WORD $0xbd000040 // str s0, [x2] + WORD $0xa8c17bfd // ldp x29, x30, [sp], #16 + WORD $0xd65f03c0 // ret diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_float_byte_avx256.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_float_byte_avx256.go new file mode 100644 index 0000000000000000000000000000000000000000..da4aea29bcebcc763394f36dd8f5ba31ddb6ba41 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_float_byte_avx256.go @@ -0,0 +1,21 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 
Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +//go:build !noasm && amd64 + +// Code generated by GoAT. DO NOT EDIT. + +package asm + +import "unsafe" + +//go:noescape +func dot_float_byte_256(a, b, res, len unsafe.Pointer) diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_float_byte_avx256.s b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_float_byte_avx256.s new file mode 100644 index 0000000000000000000000000000000000000000..9fe3b84a34596dfab26706c29b6c0817d4f2e37d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_float_byte_avx256.s @@ -0,0 +1,183 @@ +//go:build !noasm && amd64 +// Code generated by GoAT. DO NOT EDIT. + +TEXT ·dot_float_byte_256(SB), $0-32 + MOVQ a+0(FP), DI + MOVQ b+8(FP), SI + MOVQ res+16(FP), DX + MOVQ len+24(FP), CX + BYTE $0x55 // pushq %rbp + WORD $0x8948; BYTE $0xe5 // movq %rsp, %rbp + LONG $0xf8e48348 // andq $-8, %rsp + WORD $0x8b4c; BYTE $0x11 // movq (%rcx), %r10 + LONG $0x07fa8341 // cmpl $7, %r10d + JG LBB0_9 + LONG $0xff428d45 // leal -1(%r10), %r8d + LONG $0x03c2f641 // testb $3, %r10b + JE LBB0_2 + WORD $0x8945; BYTE $0xd1 // movl %r10d, %r9d + LONG $0x03e18341 // andl $3, %r9d + LONG $0xc057f8c5 // vxorps %xmm0, %xmm0, %xmm0 + WORD $0xc931 // xorl %ecx, %ecx + +LBB0_4: + LONG $0x0e04b60f // movzbl (%rsi,%rcx), %eax + LONG $0xc82aeac5 // vcvtsi2ss %eax, %xmm2, %xmm1 + LONG $0xb971e2c4; BYTE $0x07 // vfmadd231ss (%rdi), %xmm1, %xmm0 # xmm0 = (xmm1 * mem) + xmm0 + LONG $0x04c78348 // addq $4, %rdi + LONG $0x01c18348 // addq $1, %rcx + WORD $0x3941; BYTE $0xc9 // cmpl %ecx, %r9d + JNE LBB0_4 + WORD $0x2941; BYTE $0xca // subl %ecx, %r10d + WORD $0x0148; BYTE $0xce // addq %rcx, %rsi + LONG $0x03f88341 // cmpl $3, %r8d + JAE LBB0_7 + +LBB0_26: + LONG $0x0211fac5 // vmovss %xmm0, (%rdx) + WORD $0x8948; BYTE $0xec // movq %rbp, %rsp + BYTE $0x5d // popq %rbp + BYTE 
$0xc3 // retq + +LBB0_9: + LONG $0xc057f8c5 // vxorps %xmm0, %xmm0, %xmm0 + LONG $0x20fa8341 // cmpl $32, %r10d + JB LBB0_10 + LONG $0xc057f8c5 // vxorps %xmm0, %xmm0, %xmm0 + LONG $0xc957f0c5 // vxorps %xmm1, %xmm1, %xmm1 + LONG $0xd257e8c5 // vxorps %xmm2, %xmm2, %xmm2 + LONG $0xdb57e0c5 // vxorps %xmm3, %xmm3, %xmm3 + +LBB0_16: + LONG $0x317de2c4; BYTE $0x26 // vpmovzxbd (%rsi), %ymm4 # ymm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero + LONG $0x317de2c4; WORD $0x086e // vpmovzxbd 8(%rsi), %ymm5 # ymm5 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero + LONG $0xe45bfcc5 // vcvtdq2ps %ymm4, %ymm4 + LONG $0xed5bfcc5 // vcvtdq2ps %ymm5, %ymm5 + LONG $0x317de2c4; WORD $0x1076 // vpmovzxbd 16(%rsi), %ymm6 # ymm6 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero + LONG $0xf65bfcc5 // vcvtdq2ps %ymm6, %ymm6 + LONG $0x317de2c4; WORD $0x187e // vpmovzxbd 24(%rsi), %ymm7 # ymm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero + LONG $0xff5bfcc5 // vcvtdq2ps %ymm7, %ymm7 + LONG $0xb85de2c4; BYTE $0x1f // vfmadd231ps (%rdi), %ymm4, %ymm3 # ymm3 = (ymm4 * mem) + ymm3 + LONG $0xb855e2c4; WORD $0x2057 // vfmadd231ps 32(%rdi), %ymm5, %ymm2 # ymm2 = (ymm5 * mem) + ymm2 + LONG $0xb84de2c4; WORD $0x404f // vfmadd231ps 64(%rdi), %ymm6, %ymm1 # ymm1 = (ymm6 * mem) + ymm1 + LONG $0xb845e2c4; WORD $0x6047 // vfmadd231ps 96(%rdi), %ymm7, %ymm0 # ymm0 = (ymm7 * mem) + ymm0 + LONG $0xe0c28341 // addl $-32, %r10d + LONG $0x80ef8348 // subq $-128, %rdi + LONG $0x20c68348 // addq 
$32, %rsi + LONG $0x1ffa8341 // cmpl $31, %r10d + JA LBB0_16 + LONG $0x08fa8341 // cmpl $8, %r10d + JAE LBB0_11 + JMP LBB0_13 + +LBB0_10: + LONG $0xc957f0c5 // vxorps %xmm1, %xmm1, %xmm1 + LONG $0xd257e8c5 // vxorps %xmm2, %xmm2, %xmm2 + LONG $0xdb57e0c5 // vxorps %xmm3, %xmm3, %xmm3 + +LBB0_11: + LONG $0x317de2c4; BYTE $0x26 // vpmovzxbd (%rsi), %ymm4 # ymm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero + LONG $0xe45bfcc5 // vcvtdq2ps %ymm4, %ymm4 + LONG $0xb85de2c4; BYTE $0x1f // vfmadd231ps (%rdi), %ymm4, %ymm3 # ymm3 = (ymm4 * mem) + ymm3 + LONG $0xf8c28341 // addl $-8, %r10d + LONG $0x20c78348 // addq $32, %rdi + LONG $0x08c68348 // addq $8, %rsi + LONG $0x07fa8341 // cmpl $7, %r10d + JA LBB0_11 + +LBB0_13: + WORD $0x8545; BYTE $0xd2 // testl %r10d, %r10d + JE LBB0_14 + LONG $0xff428d45 // leal -1(%r10), %r8d + LONG $0x03c2f641 // testb $3, %r10b + JE LBB0_18 + WORD $0x8945; BYTE $0xd1 // movl %r10d, %r9d + LONG $0x03e18341 // andl $3, %r9d + LONG $0xe457d8c5 // vxorps %xmm4, %xmm4, %xmm4 + WORD $0xc931 // xorl %ecx, %ecx + +LBB0_20: + LONG $0x0e04b60f // movzbl (%rsi,%rcx), %eax + LONG $0xe82abac5 // vcvtsi2ss %eax, %xmm8, %xmm5 + LONG $0xb951e2c4; BYTE $0x27 // vfmadd231ss (%rdi), %xmm5, %xmm4 # xmm4 = (xmm5 * mem) + xmm4 + LONG $0x04c78348 // addq $4, %rdi + LONG $0x01c18348 // addq $1, %rcx + WORD $0x3941; BYTE $0xc9 // cmpl %ecx, %r9d + JNE LBB0_20 + WORD $0x0148; BYTE $0xce // addq %rcx, %rsi + WORD $0x2941; BYTE $0xca // subl %ecx, %r10d + LONG $0x03f88341 // cmpl $3, %r8d + JAE LBB0_23 + JMP LBB0_25 + +LBB0_2: + LONG $0xc057f8c5 // vxorps %xmm0, %xmm0, %xmm0 + LONG $0x03f88341 // cmpl $3, %r8d + JB LBB0_26 + +LBB0_7: + WORD $0x8945; BYTE $0xd0 // movl %r10d, %r8d + WORD $0xc931 // xorl %ecx, %ecx + +LBB0_8: + LONG $0x0e04b60f // movzbl (%rsi,%rcx), %eax + LONG $0xc82ae2c5 // vcvtsi2ss %eax, %xmm3, %xmm1 + LONG $0x9979e2c4; 
WORD $0x8f0c // vfmadd132ss (%rdi,%rcx,4), %xmm0, %xmm1 # xmm1 = (xmm1 * mem) + xmm0 + LONG $0x0e44b60f; BYTE $0x01 // movzbl 1(%rsi,%rcx), %eax + LONG $0xc02ae2c5 // vcvtsi2ss %eax, %xmm3, %xmm0 + LONG $0x0e44b60f; BYTE $0x02 // movzbl 2(%rsi,%rcx), %eax + LONG $0xd02ae2c5 // vcvtsi2ss %eax, %xmm3, %xmm2 + LONG $0x9971e2c4; WORD $0x8f44; BYTE $0x04 // vfmadd132ss 4(%rdi,%rcx,4), %xmm1, %xmm0 # xmm0 = (xmm0 * mem) + xmm1 + LONG $0x9979e2c4; WORD $0x8f54; BYTE $0x08 // vfmadd132ss 8(%rdi,%rcx,4), %xmm0, %xmm2 # xmm2 = (xmm2 * mem) + xmm0 + LONG $0x0e44b60f; BYTE $0x03 // movzbl 3(%rsi,%rcx), %eax + LONG $0xc02ae2c5 // vcvtsi2ss %eax, %xmm3, %xmm0 + LONG $0x9969e2c4; WORD $0x8f44; BYTE $0x0c // vfmadd132ss 12(%rdi,%rcx,4), %xmm2, %xmm0 # xmm0 = (xmm0 * mem) + xmm2 + LONG $0x04c18348 // addq $4, %rcx + WORD $0x3941; BYTE $0xc8 // cmpl %ecx, %r8d + JNE LBB0_8 + JMP LBB0_26 + +LBB0_14: + LONG $0xe457d8c5 // vxorps %xmm4, %xmm4, %xmm4 + JMP LBB0_25 + +LBB0_18: + LONG $0xe457d8c5 // vxorps %xmm4, %xmm4, %xmm4 + LONG $0x03f88341 // cmpl $3, %r8d + JB LBB0_25 + +LBB0_23: + WORD $0x8945; BYTE $0xd0 // movl %r10d, %r8d + WORD $0xc931 // xorl %ecx, %ecx + +LBB0_24: + LONG $0x0e04b60f // movzbl (%rsi,%rcx), %eax + LONG $0xe82abac5 // vcvtsi2ss %eax, %xmm8, %xmm5 + LONG $0x9959e2c4; WORD $0x8f2c // vfmadd132ss (%rdi,%rcx,4), %xmm4, %xmm5 # xmm5 = (xmm5 * mem) + xmm4 + LONG $0x0e44b60f; BYTE $0x01 // movzbl 1(%rsi,%rcx), %eax + LONG $0xe02abac5 // vcvtsi2ss %eax, %xmm8, %xmm4 + LONG $0x0e44b60f; BYTE $0x02 // movzbl 2(%rsi,%rcx), %eax + LONG $0xf02abac5 // vcvtsi2ss %eax, %xmm8, %xmm6 + LONG $0x9951e2c4; WORD $0x8f64; BYTE $0x04 // vfmadd132ss 4(%rdi,%rcx,4), %xmm5, %xmm4 # xmm4 = (xmm4 * mem) + xmm5 + LONG $0x9959e2c4; WORD $0x8f74; BYTE $0x08 // vfmadd132ss 8(%rdi,%rcx,4), %xmm4, %xmm6 # xmm6 = (xmm6 * mem) + xmm4 + LONG $0x0e44b60f; BYTE $0x03 // movzbl 3(%rsi,%rcx), %eax + LONG $0xe02abac5 // vcvtsi2ss %eax, %xmm8, %xmm4 + LONG $0x9949e2c4; WORD $0x8f64; BYTE $0x0c // 
vfmadd132ss 12(%rdi,%rcx,4), %xmm6, %xmm4 # xmm4 = (xmm4 * mem) + xmm6 + LONG $0x04c18348 // addq $4, %rcx + WORD $0x3941; BYTE $0xc8 // cmpl %ecx, %r8d + JNE LBB0_24 + +LBB0_25: + LONG $0xd358ecc5 // vaddps %ymm3, %ymm2, %ymm2 + LONG $0xc058f4c5 // vaddps %ymm0, %ymm1, %ymm0 + LONG $0xc258fcc5 // vaddps %ymm2, %ymm0, %ymm0 + LONG $0xc07cffc5 // vhaddps %ymm0, %ymm0, %ymm0 + LONG $0xc07cffc5 // vhaddps %ymm0, %ymm0, %ymm0 + LONG $0x197de3c4; WORD $0x01c1 // vextractf128 $1, %ymm0, %xmm1 + LONG $0xc158fac5 // vaddss %xmm1, %xmm0, %xmm0 + LONG $0xc058dac5 // vaddss %xmm0, %xmm4, %xmm0 + LONG $0x0211fac5 // vmovss %xmm0, (%rdx) + WORD $0x8948; BYTE $0xec // movq %rbp, %rsp + BYTE $0x5d // popq %rbp + WORD $0xf8c5; BYTE $0x77 // vzeroupper + BYTE $0xc3 // retq diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_inline.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_inline.go new file mode 100644 index 0000000000000000000000000000000000000000..c2fc4ec3c75df65392d5c643fc8db3fa7d1c39bb --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_inline.go @@ -0,0 +1,59 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package asm + +// Experiment with inlining and flattening the L2Squared distancer. 
+// Theoretically, this should be faster than the loop version for small vectors +// - it avoids the loop overhead +// - it eliminates the bounds check by reversing the iteration +// - it allows dot2, dot4 and dot6 to be inlined (the other ones are too large) +// See go tool compile -d=ssa/check_bce/debug=1 -m dot_inline.go + +type number interface { + ~uint8 | ~uint32 | ~float32 +} + +func dot2[T number, U number](x []T, y []T) U { + sum := U(x[1])*U(y[1]) + U(x[0])*U(y[0]) + + return U(sum) +} + +func dot3[T, U number](x []T, y []T) U { + sum := U(x[2]) * U(y[2]) + + return dot2[T, U](x, y) + U(sum) +} + +func dot4[T, U number](x []T, y []T) U { + sum := U(x[3])*U(y[3]) + U(x[2])*U(y[2]) + + return dot2[T, U](x, y) + U(sum) +} + +func dot5[T, U number](x []T, y []T) U { + sum := U(x[4]) * U(y[4]) + + return dot4[T, U](x, y) + U(sum) +} + +func dot6[T, U number](x []T, y []T) U { + sum := U(x[5])*U(y[5]) + U(x[4])*U(y[4]) + + return dot4[T, U](x, y) + U(sum) +} + +func dot7[T, U number](x []T, y []T) U { + sum := U(x[6]) * U(y[6]) + + return dot6[T, U](x, y) + U(sum) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_inline_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_inline_test.go new file mode 100644 index 0000000000000000000000000000000000000000..ec065d57828db7c9b67b7484bf2d466af17c9df6 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_inline_test.go @@ -0,0 +1,72 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package asm + +import ( + "fmt" + "math/rand" + "testing" +) + +func dotLoop(x, y []float32) float32 { + var sum float32 + for i := range x { + sum += x[i] * y[i] + } + + return sum +} + +func BenchmarkDotInlineVsLoop(b *testing.B) { + lengths := []int{2, 4, 6, 8, 10, 12} + for _, length := range lengths { + x := make([]float32, length) + y := make([]float32, length) + + for i := range x { + x[i] = rand.Float32() + y[i] = rand.Float32() + } + + b.Run(fmt.Sprintf("vector dim=%d", length), func(b *testing.B) { + b.Run("loop", func(b *testing.B) { + for i := 0; i < b.N; i++ { + dotLoop(x, y) + } + }) + + b.Run("flat", func(b *testing.B) { + // written to ensure that the compiler + // inlines the function when possible + switch length { + case 2: + b.ResetTimer() + for i := 0; i < b.N; i++ { + dot2[float32, float32](x, y) + } + case 4: + b.ResetTimer() + for i := 0; i < b.N; i++ { + dot4[float32, float32](x, y) + } + case 6: + b.ResetTimer() + for i := 0; i < b.N; i++ { + dot6[float32, float32](x, y) + } + default: + panic("unsupported length") + } + }) + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_neon_arm64.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_neon_arm64.go new file mode 100644 index 0000000000000000000000000000000000000000..a4520f1c9921494b6eb63263db8637d272af5ce5 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_neon_arm64.go @@ -0,0 +1,21 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +//go:build !noasm && arm64 + +// AUTO-GENERATED BY GOAT -- DO NOT EDIT + +package asm + +import "unsafe" + +//go:noescape +func dot_neon(a, b, res, len unsafe.Pointer) diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_neon_arm64.s b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_neon_arm64.s new file mode 100644 index 0000000000000000000000000000000000000000..4ef2c90265903fe61348420d7e2e8c1f3f15ad40 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_neon_arm64.s @@ -0,0 +1,142 @@ +//go:build !noasm && arm64 +// AUTO-GENERATED BY GOAT -- DO NOT EDIT + +TEXT ·dot_neon(SB), $0-32 + MOVD a+0(FP), R0 + MOVD b+8(FP), R1 + MOVD res+16(FP), R2 + MOVD len+24(FP), R3 + WORD $0xa9bf7bfd // stp x29, x30, [sp, #-16]! + WORD $0xf9400069 // ldr x9, [x3] + WORD $0x910003fd // mov x29, sp + WORD $0x6b0903e8 // negs w8, w9 + WORD $0x1200052a // and w10, w9, #0x3 + WORD $0x12000508 // and w8, w8, #0x3 + WORD $0x5a884548 // csneg w8, w10, w8, mi + WORD $0x4b08012a // sub w10, w9, w8 + WORD $0x7100415f // cmp w10, #16 + WORD $0x540000ea // b.ge .LBB0_2 + WORD $0x6f00e400 // movi v0.2d, #0000000000000000 + WORD $0x6f00e401 // movi v1.2d, #0000000000000000 + WORD $0x2a1f03eb // mov w11, wzr + WORD $0x6f00e403 // movi v3.2d, #0000000000000000 + WORD $0x6f00e402 // movi v2.2d, #0000000000000000 + WORD $0x14000016 // b .LBB0_4 + +LBB0_2: + WORD $0x6f00e402 // movi v2.2d, #0000000000000000 + WORD $0x6f00e403 // movi v3.2d, #0000000000000000 + WORD $0xaa1f03eb // mov x11, xzr + WORD $0x6f00e401 // movi v1.2d, #0000000000000000 + WORD $0x6f00e400 // movi v0.2d, #0000000000000000 + WORD $0xaa0003ec // mov x12, x0 + WORD $0xaa0103ed // mov x13, x1 + +LBB0_3: + WORD $0x4cdf2984 // ld1 { v4.4s, v5.4s, v6.4s, v7.4s }, [x12], #64 + WORD $0x9100816e // add x14, x11, #32 + WORD $0x4cdf29b0 // ld1 { v16.4s, v17.4s, v18.4s, 
v19.4s }, [x13], #64 + WORD $0xeb0a01df // cmp x14, x10 + WORD $0x9100416b // add x11, x11, #16 + WORD $0x6e30dc94 // fmul v20.4s, v4.4s, v16.4s + WORD $0x6e31dcb5 // fmul v21.4s, v5.4s, v17.4s + WORD $0x6e32dcd6 // fmul v22.4s, v6.4s, v18.4s + WORD $0x6e33dce4 // fmul v4.4s, v7.4s, v19.4s + WORD $0x4e34d442 // fadd v2.4s, v2.4s, v20.4s + WORD $0x4e35d463 // fadd v3.4s, v3.4s, v21.4s + WORD $0x4e36d421 // fadd v1.4s, v1.4s, v22.4s + WORD $0x4e24d400 // fadd v0.4s, v0.4s, v4.4s + WORD $0x54fffe69 // b.ls .LBB0_3 + +LBB0_4: + WORD $0x6b0a017f // cmp w11, w10 + WORD $0x540001ca // b.ge .LBB0_7 + WORD $0x2a0b03ec // mov w12, w11 + WORD $0x93407d4a // sxtw x10, w10 + WORD $0x2a0b03eb // mov w11, w11 + WORD $0xd37e7d8d // ubfiz x13, x12, #2, #32 + WORD $0x8b0d002c // add x12, x1, x13 + WORD $0x8b0d000d // add x13, x0, x13 + +LBB0_6: + WORD $0x3cc105a4 // ldr q4, [x13], #16 + WORD $0x9100116b // add x11, x11, #4 + WORD $0x3cc10585 // ldr q5, [x12], #16 + WORD $0xeb0a017f // cmp x11, x10 + WORD $0x6e25dc84 // fmul v4.4s, v4.4s, v5.4s + WORD $0x4e24d442 // fadd v2.4s, v2.4s, v4.4s + WORD $0x54ffff4b // b.lt .LBB0_6 + +LBB0_7: + WORD $0x6e22d442 // faddp v2.4s, v2.4s, v2.4s + WORD $0x6e23d463 // faddp v3.4s, v3.4s, v3.4s + WORD $0x7100051f // cmp w8, #1 + WORD $0x6e21d421 // faddp v1.4s, v1.4s, v1.4s + WORD $0x6e20d400 // faddp v0.4s, v0.4s, v0.4s + WORD $0x7e30d842 // faddp s2, v2.2s + WORD $0x7e30d863 // faddp s3, v3.2s + WORD $0x7e30d821 // faddp s1, v1.2s + WORD $0x7e30d800 // faddp s0, v0.2s + WORD $0x1e232842 // fadd s2, s2, s3 + WORD $0x1e212841 // fadd s1, s2, s1 + WORD $0x1e202820 // fadd s0, s1, s0 + WORD $0x5400066b // b.lt .LBB0_13 + WORD $0x93407d29 // sxtw x9, w9 + WORD $0x4b0803ec // neg w12, w8 + WORD $0xcb08012a // sub x10, x9, x8 + WORD $0x9100054b // add x11, x10, #1 + WORD $0xeb09017f // cmp x11, x9 + WORD $0x9a8ad52a // csinc x10, x9, x10, le + WORD $0x8b08014a // add x10, x10, x8 + WORD $0xcb09014b // sub x11, x10, x9 + WORD $0x8b2cc12a // add x10, x9, 
w12, sxtw + WORD $0xf100217f // cmp x11, #8 + WORD $0x54000423 // b.lo .LBB0_12 + WORD $0xd37ef52c // lsl x12, x9, #2 + WORD $0xcb28c98c // sub x12, x12, w8, sxtw #2 + WORD $0x927df168 // and x8, x11, #0xfffffffffffffff8 + WORD $0x8b08014a // add x10, x10, x8 + WORD $0xaa0803ee // mov x14, x8 + WORD $0x9100418d // add x13, x12, #16 + WORD $0x8b0d002c // add x12, x1, x13 + WORD $0x8b0d000d // add x13, x0, x13 + +LBB0_10: + WORD $0xad7f9181 // ldp q1, q4, [x12, #-16] + WORD $0xf10021ce // subs x14, x14, #8 + WORD $0xad7f8da2 // ldp q2, q3, [x13, #-16] + WORD $0x9100818c // add x12, x12, #32 + WORD $0x910081ad // add x13, x13, #32 + WORD $0x6e21dc41 // fmul v1.4s, v2.4s, v1.4s + WORD $0x5e0c0422 // mov s2, v1.s[1] + WORD $0x1e212800 // fadd s0, s0, s1 + WORD $0x5e140425 // mov s5, v1.s[2] + WORD $0x5e1c0421 // mov s1, v1.s[3] + WORD $0x1e222800 // fadd s0, s0, s2 + WORD $0x6e24dc62 // fmul v2.4s, v3.4s, v4.4s + WORD $0x1e252800 // fadd s0, s0, s5 + WORD $0x5e140443 // mov s3, v2.s[2] + WORD $0x1e212800 // fadd s0, s0, s1 + WORD $0x5e0c0441 // mov s1, v2.s[1] + WORD $0x1e222800 // fadd s0, s0, s2 + WORD $0x1e212800 // fadd s0, s0, s1 + WORD $0x5e1c0441 // mov s1, v2.s[3] + WORD $0x1e232800 // fadd s0, s0, s3 + WORD $0x1e212800 // fadd s0, s0, s1 + WORD $0x54fffd61 // b.ne .LBB0_10 + WORD $0xeb08017f // cmp x11, x8 + WORD $0x54000100 // b.eq .LBB0_13 + +LBB0_12: + WORD $0xd37ef548 // lsl x8, x10, #2 + WORD $0x9100054a // add x10, x10, #1 + WORD $0xeb09015f // cmp x10, x9 + WORD $0xbc686801 // ldr s1, [x0, x8] + WORD $0xbc686822 // ldr s2, [x1, x8] + WORD $0x1f020020 // fmadd s0, s1, s2, s0 + WORD $0x54ffff4b // b.lt .LBB0_12 + +LBB0_13: + WORD $0xbd000040 // str s0, [x2] + WORD $0xa8c17bfd // ldp x29, x30, [sp], #16 + WORD $0xd65f03c0 // ret diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_stub_amd64.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_stub_amd64.go new file mode 
100644 index 0000000000000000000000000000000000000000..f38cca66ebcde78a53dfafbc779266314f6f56c9 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_stub_amd64.go @@ -0,0 +1,16 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by command: go run dot.go -out dot.s -stubs dot_stub.go. DO NOT EDIT. + +package asm + +func Dot(x []float32, y []float32) float32 diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_stub_arm64.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_stub_arm64.go new file mode 100644 index 0000000000000000000000000000000000000000..d5c9e701e65b4884912be0ec4b07d4bd9193c089 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_stub_arm64.go @@ -0,0 +1,176 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package asm + +// To generate the asm code, run: +// go install github.com/gorse-io/goat@v0.1.0 +// go generate + +//// go:generate goat ../c/dot_arm64.c -O3 -e="-mfpu=neon-fp-armv8" -e="-mfloat-abi=hard" -e="--target=arm64" -e="-march=armv8-a+simd+fp" +//go:generate goat ../c/dot_neon_arm64.c -O3 -e="--target=arm64" -e="-march=armv8-a+simd+fp" +//go:generate goat ../c/dot_sve_arm64.c -O3 -e="-mcpu=neoverse-v1" -e="--target=arm64" -e="-march=armv8-a+sve" +//go:generate goat ../c/dot_byte_arm64.c -O3 -e="-mfpu=neon-fp-armv8" -e="-mfloat-abi=hard" -e="--target=arm64" -e="-march=armv8-a+simd+fp" + +import ( + "unsafe" +) + +// Dot calculates the dot product between two vectors +// using SIMD instructions. +func Dot_Neon(x []float32, y []float32) float32 { + switch len(x) { + case 2: + return dot2[float32, float32](x, y) + case 4: + return dot4[float32, float32](x, y) + case 6: + return dot6[float32, float32](x, y) + case 8: + // manually inlined dot8(x, y) + sum := x[7]*y[7] + x[6]*y[6] + return dot6[float32, float32](x, y) + sum + case 10: + // manually inlined dot10(x, y) + sum := x[9]*y[9] + x[8]*y[8] + x[7]*y[7] + x[6]*y[6] + return dot6[float32, float32](x, y) + sum + case 12: + // manually inlined dot12(x, y) + sum := x[11]*y[11] + x[10]*y[10] + x[9]*y[9] + x[8]*y[8] + x[7]*y[7] + x[6]*y[6] + return dot6[float32, float32](x, y) + sum + } + + var res float32 + + // The C function expects pointers to the underlying array, not slices. + hdrx := unsafe.SliceData(x) + hdry := unsafe.SliceData(y) + + l := len(x) + dot_neon( + // The slice header contains the address of the underlying array. + // We only need to cast it to a pointer. + unsafe.Pointer(hdrx), + unsafe.Pointer(hdry), + // The C function expects pointers to the result and the length of the arrays. 
+ unsafe.Pointer(&res), + unsafe.Pointer(&l)) + + return res +} + +func Dot_SVE(x []float32, y []float32) float32 { + switch len(x) { + case 2: + return dot2[float32, float32](x, y) + case 4: + return dot4[float32, float32](x, y) + case 6: + return dot6[float32, float32](x, y) + case 8: + // manually inlined dot8(x, y) + sum := x[7]*y[7] + x[6]*y[6] + return dot6[float32, float32](x, y) + sum + case 10: + // manually inlined dot10(x, y) + sum := x[9]*y[9] + x[8]*y[8] + x[7]*y[7] + x[6]*y[6] + return dot6[float32, float32](x, y) + sum + case 12: + // manually inlined dot12(x, y) + sum := x[11]*y[11] + x[10]*y[10] + x[9]*y[9] + x[8]*y[8] + x[7]*y[7] + x[6]*y[6] + return dot6[float32, float32](x, y) + sum + } + + var res float32 + + l := len(x) + dot_sve( + // The slice header contains the address of the underlying array. + // We only need to cast it to a pointer. + unsafe.Pointer(unsafe.SliceData(x)), + unsafe.Pointer(unsafe.SliceData(y)), + // The C function expects pointers to the result and the length of the arrays. 
+ unsafe.Pointer(&res), + unsafe.Pointer(&l)) + + return res +} + +func DotByteARM64(x []uint8, y []uint8) uint32 { + switch len(x) { + case 2: + return dot2[uint8, uint32](x, y) + case 3: + return dot3[uint8, uint32](x, y) + case 4: + return dot4[uint8, uint32](x, y) + case 5: + return dot5[uint8, uint32](x, y) + case 6: + return dot6[uint8, uint32](x, y) + case 7: + return dot7[uint8, uint32](x, y) + case 8: + // manually inlined dot8(x, y) + sum := uint32(x[7])*uint32(y[7]) + uint32(x[6])*uint32(y[6]) + return dot6[uint8, uint32](x, y) + uint32(sum) + case 10: + // manually inlined dot10(x, y) + sum := uint32(x[9])*uint32(y[9]) + uint32(x[8])*uint32(y[8]) + uint32(x[7])*uint32(y[7]) + uint32(x[6])*uint32(y[6]) + return dot6[uint8, uint32](x, y) + uint32(sum) + case 12: + // manually inlined dot12(x, y) + return dot6[uint8, uint32](x, y) + dot6[uint8, uint32](x[6:12], y[6:12]) + } + + var res uint32 + + l := len(x) + + dot_byte_256( + // The slice header contains the address of the underlying array. + // We only need to cast it to a pointer. + unsafe.Pointer(unsafe.SliceData(x)), + unsafe.Pointer(unsafe.SliceData(y)), + // The C function expects pointers to the result and the length of the arrays. 
+ unsafe.Pointer(&res), + unsafe.Pointer(&l)) + + return res +} + +func DotFloatByte_Neon(x []float32, y []uint8) float32 { + var res float32 + + l := len(x) + + if l < 16 { + for i := 0; i < l; i++ { + res += x[i] * float32(y[i]) + } + return res + } + + dot_float_byte_neon( + unsafe.Pointer(unsafe.SliceData(x)), + unsafe.Pointer(unsafe.SliceData(y)), + unsafe.Pointer(&res), + unsafe.Pointer(&l)) + + if l > 16 && l%16 != 0 { + start := l - l%16 + for i := start; i < l; i++ { + res += x[i] * float32(y[i]) + } + } + + return res +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_sve_arm64.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_sve_arm64.go new file mode 100644 index 0000000000000000000000000000000000000000..4696843e446a9f88146531585c3f2a7ada466429 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_sve_arm64.go @@ -0,0 +1,21 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +//go:build !noasm && arm64 + +// AUTO-GENERATED BY GOAT -- DO NOT EDIT + +package asm + +import "unsafe" + +//go:noescape +func dot_sve(a, b, res, len unsafe.Pointer) diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_sve_arm64.s b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_sve_arm64.s new file mode 100644 index 0000000000000000000000000000000000000000..9692867559346fdf228915cda8910e2d166edbac --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/dot_sve_arm64.s @@ -0,0 +1,129 @@ +//go:build !noasm && arm64 +// AUTO-GENERATED BY GOAT -- DO NOT EDIT + +TEXT ·dot_sve(SB), $0-32 + MOVD a+0(FP), R0 + MOVD b+8(FP), R1 + MOVD res+16(FP), R2 + MOVD len+24(FP), R3 + WORD $0xf9400068 // ldr x8, [x3] + WORD $0x04a0e3ea // cntw x10 + WORD $0xcb0a03e9 // neg x9, x10 + WORD $0x04bf502c // rdvl x12, #1 + WORD $0x2598e3e0 // ptrue p0.s + WORD $0x8a090109 // and x9, x8, x9 + WORD $0xeb09019f // cmp x12, x9 + WORD $0x540000e9 // b.ls .LBB0_2 + WORD $0x25b8c000 // mov z0.s, #0 + WORD $0xaa1f03eb // mov x11, xzr + WORD $0x04603001 // mov z1.d, z0.d + WORD $0x04603002 // mov z2.d, z0.d + WORD $0x04603003 // mov z3.d, z0.d + WORD $0x14000027 // b .LBB0_5 + +LBB0_2: + WORD $0x25b8c004 // mov z4.s, #0 + WORD $0x04bf5070 // rdvl x16, #3 + WORD $0xaa1f03eb // mov x11, xzr + WORD $0x8b0c000f // add x15, x0, x12 + WORD $0x8b0c0032 // add x18, x1, x12 + WORD $0x04643086 // mov z6.d, z4.d + WORD $0x04bf5051 // rdvl x17, #2 + WORD $0x04643087 // mov z7.d, z4.d + WORD $0x04643085 // mov z5.d, z4.d + WORD $0x8b10000d // add x13, x0, x16 + WORD $0x8b11000e // add x14, x0, x17 + WORD $0x8b100030 // add x16, x1, x16 + WORD $0x8b110031 // add x17, x1, x17 + +LBB0_3: + WORD $0xa54b4003 // ld1w { z3.s }, p0/z, [x0, x11, lsl #2] + WORD $0xa54b41e2 // ld1w { z2.s }, p0/z, [x15, x11, lsl #2] + WORD $0xa54b41c1 // ld1w { z1.s }, 
p0/z, [x14, x11, lsl #2] + WORD $0xa54b41a0 // ld1w { z0.s }, p0/z, [x13, x11, lsl #2] + WORD $0xa54b4030 // ld1w { z16.s }, p0/z, [x1, x11, lsl #2] + WORD $0x65a48203 // fmad z3.s, p0/m, z16.s, z4.s + WORD $0xa54b4251 // ld1w { z17.s }, p0/z, [x18, x11, lsl #2] + WORD $0xa54b4232 // ld1w { z18.s }, p0/z, [x17, x11, lsl #2] + WORD $0xa54b4213 // ld1w { z19.s }, p0/z, [x16, x11, lsl #2] + WORD $0x65a68222 // fmad z2.s, p0/m, z17.s, z6.s + WORD $0x65a78241 // fmad z1.s, p0/m, z18.s, z7.s + WORD $0x65a58260 // fmad z0.s, p0/m, z19.s, z5.s + WORD $0x8b0c016b // add x11, x11, x12 + WORD $0x8b0b0183 // add x3, x12, x11 + WORD $0xeb09007f // cmp x3, x9 + WORD $0x04633064 // mov z4.d, z3.d + WORD $0x04623046 // mov z6.d, z2.d + WORD $0x04613027 // mov z7.d, z1.d + WORD $0x04603005 // mov z5.d, z0.d + WORD $0x54fffda9 // b.ls .LBB0_3 + WORD $0x14000005 // b .LBB0_5 + +LBB0_4: + WORD $0xa54b4004 // ld1w { z4.s }, p0/z, [x0, x11, lsl #2] + WORD $0xa54b4025 // ld1w { z5.s }, p0/z, [x1, x11, lsl #2] + WORD $0x8b0a016b // add x11, x11, x10 + WORD $0x65a400a3 // fmla z3.s, p0/m, z5.s, z4.s + +LBB0_5: + WORD $0xeb09017f // cmp x11, x9 + WORD $0x54ffff63 // b.lo .LBB0_4 + WORD $0x65802063 // faddv s3, p0, z3.s + WORD $0xeb08013f // cmp x9, x8 + WORD $0x65802042 // faddv s2, p0, z2.s + WORD $0x1e222862 // fadd s2, s3, s2 + WORD $0x65802021 // faddv s1, p0, z1.s + WORD $0x65802000 // faddv s0, p0, z0.s + WORD $0x1e212841 // fadd s1, s2, s1 + WORD $0x1e202820 // fadd s0, s1, s0 + WORD $0x54000520 // b.eq .LBB0_13 + WORD $0xb240012a // orr x10, x9, #0x1 + WORD $0xeb0a011f // cmp x8, x10 + WORD $0x9a8a810a // csel x10, x8, x10, hi + WORD $0xcb09014b // sub x11, x10, x9 + WORD $0x0460e3ea // cnth x10 + WORD $0xeb0a017f // cmp x11, x10 + WORD $0x54000062 // b.hs .LBB0_9 + WORD $0xaa0903ea // mov x10, x9 + WORD $0x14000019 // b .LBB0_12 + +LBB0_9: + WORD $0xa9bf7bfd // stp x29, x30, [sp, #-16]! 
+ WORD $0xcb0a03ed // neg x13, x10 + WORD $0x04bf504f // rdvl x15, #2 + WORD $0x8b09080e // add x14, x0, x9, lsl #2 + WORD $0x910003fd // mov x29, sp + WORD $0x8a0d016c // and x12, x11, x13 + WORD $0x8b0c012a // add x10, x9, x12 + WORD $0x8b090829 // add x9, x1, x9, lsl #2 + WORD $0xaa0c03f0 // mov x16, x12 + +LBB0_10: + WORD $0xa540a1c1 // ld1w { z1.s }, p0/z, [x14] + WORD $0xa540a123 // ld1w { z3.s }, p0/z, [x9] + WORD $0xab0d0210 // adds x16, x16, x13 + WORD $0x65830821 // fmul z1.s, z1.s, z3.s + WORD $0xa541a1c2 // ld1w { z2.s }, p0/z, [x14, #1, mul vl] + WORD $0x8b0f01ce // add x14, x14, x15 + WORD $0x65982020 // fadda s0, p0, s0, z1.s + WORD $0xa541a121 // ld1w { z1.s }, p0/z, [x9, #1, mul vl] + WORD $0x8b0f0129 // add x9, x9, x15 + WORD $0x65810841 // fmul z1.s, z2.s, z1.s + WORD $0x65982020 // fadda s0, p0, s0, z1.s + WORD $0x54fffea1 // b.ne .LBB0_10 + WORD $0xeb0c017f // cmp x11, x12 + WORD $0xa8c17bfd // ldp x29, x30, [sp], #16 + WORD $0x54000100 // b.eq .LBB0_13 + +LBB0_12: + WORD $0xbc6a7801 // ldr s1, [x0, x10, lsl #2] + WORD $0xbc6a7822 // ldr s2, [x1, x10, lsl #2] + WORD $0x9100054a // add x10, x10, #1 + WORD $0xeb08015f // cmp x10, x8 + WORD $0x1e220821 // fmul s1, s1, s2 + WORD $0x1e212800 // fadd s0, s0, s1 + WORD $0x54ffff43 // b.lo .LBB0_12 + +LBB0_13: + WORD $0xbd000040 // str s0, [x2] + WORD $0xd65f03c0 // ret diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/hamming_amd64.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/hamming_amd64.go new file mode 100644 index 0000000000000000000000000000000000000000..877395ce05a97445e702ba62741ab722e72cb4c9 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/hamming_amd64.go @@ -0,0 +1,98 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// 
Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package asm + +//go:generate goat ../c/hamming_avx256_amd64.c -O3 -mavx2 -mno-avx512f -e="-mfloat-abi=hard" -e="-Rpass-analysis=loop-vectorize" -e="-Rpass=loop-vectorize" -e="-Rpass-missed=loop-vectorize" +//go:generate goat ../c/hamming_avx512_amd64.c -O3 -mavx2 -mfma -mavx512f -mavx512dq -mavx512vl -e="-mfloat-abi=hard" -e="-Rpass-analysis=loop-vectorize" -e="-Rpass=loop-vectorize" -e="-Rpass-missed=loop-vectorize" +//go:generate goat ../c/hamming_bitwise_avx256_amd64.c -O3 -mavx2 -mno-avx512f -e="-Rpass-analysis=loop-vectorize" -e="-Rpass=loop-vectorize" -e="-Rpass-missed=loop-vectorize" +//go:generate goat ../c/hamming_bitwise_avx512_amd64.c -O3 -mavx2 -mfma -mavx512f -mavx512dq -mavx512bw -mavx512vl -e="-mfloat-abi=hard" -e="-Rpass-analysis=loop-vectorize" -e="-Rpass=loop-vectorize" -e="-Rpass-missed=loop-vectorize" + +import "unsafe" + +func HammingAVX256(x []float32, y []float32) float32 { + var res float32 + + l := len(x) + hamming_256( + unsafe.Pointer(unsafe.SliceData(x)), + unsafe.Pointer(unsafe.SliceData(y)), + unsafe.Pointer(&res), + unsafe.Pointer(&l)) + + return res +} + +func HammingAVX512(x []float32, y []float32) float32 { + var res float32 + + l := len(x) + hamming_512( + unsafe.Pointer(unsafe.SliceData(x)), + unsafe.Pointer(unsafe.SliceData(y)), + unsafe.Pointer(&res), + unsafe.Pointer(&l)) + + return res +} + +// lookup_avx and popcnt_constants are being passed in through go +// to keep clang from creating a .rodata section for the constants +// (which goat cannot handle) +var lookup_avx = []uint8{ + /* 0 */ 0 /* 1 */, 1 /* 2 */, 1 /* 3 */, 2, + /* 4 */ 1 /* 5 */, 2 /* 6 */, 2 /* 7 */, 3, + /* 8 */ 1 /* 9 */, 2 /* a */, 2 /* b */, 3, + /* c */ 2 /* d */, 3 /* e */, 3 /* f */, 4, + /* 0 */ 0 /* 1 */, 1 /* 2 */, 1 /* 3 */, 2, + /* 4 */ 1 /* 5 */, 2 /* 6 */, 2 /* 7 */, 3, + /* 8 */ 1 /* 9 */, 2 /* a */, 2 /* b */, 3, + /* c */ 2 /* d */, 3 /* e */, 
3 /* f */, 4, +} + +var popcnt_constants = []uint64{ + 0x5555555555555555, // MASK_01010101 + 0x3333333333333333, // MASK_00110011 + 0x0F0F0F0F0F0F0F0F, // MASK_00001111 + 0x0101010101010101, // MULT_01010101 + 0x0f0f0f0f0f0f0f0f, // MASK_00001111 +} + +func HammingBitwiseAVX256(x []uint64, y []uint64) float32 { + var res uint64 + + l := len(x) + hamming_bitwise_256( + unsafe.Pointer(unsafe.SliceData(x)), + unsafe.Pointer(unsafe.SliceData(y)), + unsafe.Pointer(&res), + unsafe.Pointer(&l), + unsafe.Pointer(unsafe.SliceData(lookup_avx)), + unsafe.Pointer(unsafe.SliceData(popcnt_constants)), + ) + + return float32(res) +} + +func HammingBitwiseAVX512(x []uint64, y []uint64) float32 { + var res uint64 + + l := len(x) + hamming_bitwise_512( + unsafe.Pointer(unsafe.SliceData(x)), + unsafe.Pointer(unsafe.SliceData(y)), + unsafe.Pointer(&res), + unsafe.Pointer(&l), + + unsafe.Pointer(unsafe.SliceData(popcnt_constants))) + + return float32(res) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/hamming_arm64.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/hamming_arm64.go new file mode 100644 index 0000000000000000000000000000000000000000..7fb282505c85564cc9da7afd56ead4905b725bb6 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/hamming_arm64.go @@ -0,0 +1,21 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +//go:build !noasm && arm64 + +// AUTO-GENERATED BY GOAT -- DO NOT EDIT + +package asm + +import "unsafe" + +//go:noescape +func hamming(a, b, res, len unsafe.Pointer) diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/hamming_arm64.s b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/hamming_arm64.s new file mode 100644 index 0000000000000000000000000000000000000000..9ecf8cbf73d373a49e9c3673af622932de947479 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/hamming_arm64.s @@ -0,0 +1,133 @@ +//go:build !noasm && arm64 +// AUTO-GENERATED BY GOAT -- DO NOT EDIT + +TEXT ·hamming(SB), $0-32 + MOVD a+0(FP), R0 + MOVD b+8(FP), R1 + MOVD res+16(FP), R2 + MOVD len+24(FP), R3 + WORD $0xa9bf7bfd // stp x29, x30, [sp, + WORD $0xf9400069 // ldr x9, [x3] + WORD $0x910003fd // mov x29, sp + WORD $0x6b0903e8 // negs w8, w9 + WORD $0x1200052a // and w10, w9, + WORD $0x12000508 // and w8, w8, + WORD $0x5a884548 // csneg w8, w10, w8, mi + WORD $0x4b08012a // sub w10, w9, w8 + WORD $0x7100415f // cmp w10, + WORD $0x540000ea // b.ge .LBB0_2 + WORD $0x6f00e401 // movi v1.2d, + WORD $0x2a1f03eb // mov w11, wzr + WORD $0x6f00e402 // movi v2.2d, + WORD $0x6f00e403 // movi v3.2d, + WORD $0x6f00e400 // movi v0.2d, + WORD $0x14000016 // b .LBB0_4 + +LBB0_2: + WORD $0x6f00e400 // movi v0.2d, + WORD $0xaa1f03eb // mov x11, xzr + WORD $0x6f00e403 // movi v3.2d, + WORD $0xaa0003ec // mov x12, x0 + WORD $0x6f00e402 // movi v2.2d, + WORD $0xaa0103ed // mov x13, x1 + WORD $0x6f00e401 // movi v1.2d, + +LBB0_3: + WORD $0x4cdf2984 // ld1 { v4.4s, v5.4s, v6.4s, v7.4s }, [x12], + WORD $0x9100816e // add x14, x11, + WORD $0x9100416b // add x11, x11, + WORD $0xeb0a01df // cmp x14, x10 + WORD $0x4cdf29b0 // ld1 { v16.4s, v17.4s, v18.4s, v19.4s }, [x13], + WORD $0x4e30e494 // fcmeq v20.4s, v4.4s, v16.4s + WORD $0x4e31e4b5 // fcmeq v21.4s, 
v5.4s, v17.4s + WORD $0x4e32e4d6 // fcmeq v22.4s, v6.4s, v18.4s + WORD $0x4e33e4e4 // fcmeq v4.4s, v7.4s, v19.4s + WORD $0x6eb48400 // sub v0.4s, v0.4s, v20.4s + WORD $0x6eb58463 // sub v3.4s, v3.4s, v21.4s + WORD $0x6eb68442 // sub v2.4s, v2.4s, v22.4s + WORD $0x6ea48421 // sub v1.4s, v1.4s, v4.4s + WORD $0x54fffe69 // b.ls .LBB0_3 + +LBB0_4: + WORD $0x6b0a017f // cmp w11, w10 + WORD $0x540001aa // b.ge .LBB0_7 + WORD $0x2a0b03ec // mov w12, w11 + WORD $0xd37e7d6d // ubfiz x13, x11, + WORD $0x93407d4a // sxtw x10, w10 + WORD $0x8b0d002b // add x11, x1, x13 + WORD $0x8b0d000d // add x13, x0, x13 + +LBB0_6: + WORD $0x3cc105a4 // ldr q4, [x13], + WORD $0x3cc10565 // ldr q5, [x11], + WORD $0x9100118c // add x12, x12, + WORD $0xeb0a019f // cmp x12, x10 + WORD $0x4e25e484 // fcmeq v4.4s, v4.4s, v5.4s + WORD $0x6ea48400 // sub v0.4s, v0.4s, v4.4s + WORD $0x54ffff4b // b.lt .LBB0_6 + +LBB0_7: + WORD $0x4ea38400 // add v0.4s, v0.4s, v3.4s + WORD $0x7100051f // cmp w8, + WORD $0x4ea28400 // add v0.4s, v0.4s, v2.4s + WORD $0x4ea18400 // add v0.4s, v0.4s, v1.4s + WORD $0x4eb1b800 // addv s0, v0.4s + WORD $0x1e26000a // fmov w10, s0 + WORD $0x4b0a012b // sub w11, w9, w10 + WORD $0x540005eb // b.lt .LBB0_13 + WORD $0x93407d29 // sxtw x9, w9 + WORD $0xcb08012a // sub x10, x9, x8 + WORD $0x9100054c // add x12, x10, + WORD $0xeb09019f // cmp x12, x9 + WORD $0x4b0803ec // neg w12, w8 + WORD $0x9a8ad52a // csinc x10, x9, x10, le + WORD $0x8b08014d // add x13, x10, x8 + WORD $0x8b2cc12a // add x10, x9, w12, sxtw + WORD $0xcb0901ac // sub x12, x13, x9 + WORD $0xf100219f // cmp x12, + WORD $0x54000363 // b.lo .LBB0_12 + WORD $0x6f00e400 // movi v0.2d, + WORD $0xd37ef52e // lsl x14, x9, + WORD $0xcb28c9c8 // sub x8, x14, w8, sxtw + WORD $0x927df18d // and x13, x12, + WORD $0x6f00e401 // movi v1.2d, + WORD $0x9100410e // add x14, x8, + WORD $0x8b0d014a // add x10, x10, x13 + WORD $0x8b0e0028 // add x8, x1, x14 + WORD $0x4e041d60 // mov v0.s[0], w11 + WORD $0x8b0e000b // add x11, x0, x14 
+ WORD $0xaa0d03ee // mov x14, x13 + +LBB0_10: + WORD $0xad7f8d62 // ldp q2, q3, [x11, + WORD $0xf10021ce // subs x14, x14, + WORD $0x9100816b // add x11, x11, + WORD $0xad7f9504 // ldp q4, q5, [x8, + WORD $0x91008108 // add x8, x8, + WORD $0x4e24e442 // fcmeq v2.4s, v2.4s, v4.4s + WORD $0x4e25e463 // fcmeq v3.4s, v3.4s, v5.4s + WORD $0x4ea28400 // add v0.4s, v0.4s, v2.4s + WORD $0x4ea38421 // add v1.4s, v1.4s, v3.4s + WORD $0x54fffee1 // b.ne .LBB0_10 + WORD $0x4ea08420 // add v0.4s, v1.4s, v0.4s + WORD $0xeb0d019f // cmp x12, x13 + WORD $0x4eb1b800 // addv s0, v0.4s + WORD $0x1e26000b // fmov w11, s0 + WORD $0x54000140 // b.eq .LBB0_13 + +LBB0_12: + WORD $0xd37ef548 // lsl x8, x10, + WORD $0x9100054a // add x10, x10, + WORD $0xbc686800 // ldr s0, [x0, x8] + WORD $0xbc686821 // ldr s1, [x1, x8] + WORD $0x1e212000 // fcmp s0, s1 + WORD $0x1a9f17e8 // cset w8, eq + WORD $0xeb09015f // cmp x10, x9 + WORD $0x4b08016b // sub w11, w11, w8 + WORD $0x54ffff0b // b.lt .LBB0_12 + +LBB0_13: + WORD $0x1e220160 // scvtf s0, w11 + WORD $0xbd000040 // str s0, [x2] + WORD $0xa8c17bfd // ldp x29, x30, [sp], + WORD $0xd65f03c0 // ret diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/hamming_avx256_amd64.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/hamming_avx256_amd64.go new file mode 100644 index 0000000000000000000000000000000000000000..2409268c44447ee9e5631cf90f62cec42eea1348 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/hamming_avx256_amd64.go @@ -0,0 +1,21 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +//go:build !noasm && amd64 + +// AUTO-GENERATED BY GOAT -- DO NOT EDIT + +package asm + +import "unsafe" + +//go:noescape +func hamming_256(a, b, res, len unsafe.Pointer) diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/hamming_avx256_amd64.s b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/hamming_avx256_amd64.s new file mode 100644 index 0000000000000000000000000000000000000000..7840ad289abb4bad30789e0bcb921c8df4bb3a71 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/hamming_avx256_amd64.s @@ -0,0 +1,217 @@ +//go:build !noasm && amd64 +// AUTO-GENERATED BY GOAT -- DO NOT EDIT + +TEXT ·hamming_256(SB), $0-32 + MOVQ a+0(FP), DI + MOVQ b+8(FP), SI + MOVQ res+16(FP), DX + MOVQ len+24(FP), CX + BYTE $0x55 // pushq %rbp + WORD $0x8948; BYTE $0xe5 // movq %rsp, %rbp + BYTE $0x53 // pushq %rbx + LONG $0xf8e48348 // andq $-8, %rsp + WORD $0x8b48; BYTE $0x19 // movq (%rcx), %rbx + WORD $0xfb83; BYTE $0x07 // cmpl $7, %ebx + JG LBB0_8 + WORD $0x8941; BYTE $0xd9 // movl %ebx, %r9d + LONG $0xffc18341 // addl $-1, %r9d + WORD $0xc031 // xorl %eax, %eax + LONG $0x1ff98341 // cmpl $31, %r9d + JAE LBB0_3 + WORD $0x8949; BYTE $0xf0 // movq %rsi, %r8 + WORD $0x8949; BYTE $0xfb // movq %rdi, %r11 + JMP LBB0_6 + +LBB0_8: + LONG $0xc0eff9c5 // vpxor %xmm0, %xmm0, %xmm0 + WORD $0xfb83; BYTE $0x20 // cmpl $32, %ebx + JB LBB0_9 + LONG $0xc0eff9c5 // vpxor %xmm0, %xmm0, %xmm0 + LONG $0xc9eff1c5 // vpxor %xmm1, %xmm1, %xmm1 + LONG $0xd2efe9c5 // vpxor %xmm2, %xmm2, %xmm2 + LONG $0xdbefe1c5 // vpxor %xmm3, %xmm3, %xmm3 + +LBB0_15: + LONG $0x2610fcc5 // vmovups (%rsi), %ymm4 + LONG $0x6e10fcc5; BYTE $0x20 // vmovups 32(%rsi), %ymm5 + LONG $0x7610fcc5; BYTE $0x40 // vmovups 64(%rsi), %ymm6 + LONG $0x7e10fcc5; BYTE $0x60 // vmovups 96(%rsi), %ymm7 + LONG $0x27c2dcc5; BYTE $0x0c // vcmpneq_oqps (%rdi), %ymm4, %ymm4 + LONG 
$0x6fc2d4c5; WORD $0x0c20 // vcmpneq_oqps 32(%rdi), %ymm5, %ymm5 + LONG $0xdcfae5c5 // vpsubd %ymm4, %ymm3, %ymm3 + LONG $0xd5faedc5 // vpsubd %ymm5, %ymm2, %ymm2 + LONG $0x67c2ccc5; WORD $0x0c40 // vcmpneq_oqps 64(%rdi), %ymm6, %ymm4 + LONG $0xccfaf5c5 // vpsubd %ymm4, %ymm1, %ymm1 + LONG $0x67c2c4c5; WORD $0x0c60 // vcmpneq_oqps 96(%rdi), %ymm7, %ymm4 + LONG $0xc4fafdc5 // vpsubd %ymm4, %ymm0, %ymm0 + WORD $0xc383; BYTE $0xe0 // addl $-32, %ebx + LONG $0x80ef8348 // subq $-128, %rdi + LONG $0x80ee8348 // subq $-128, %rsi + WORD $0xfb83; BYTE $0x1f // cmpl $31, %ebx + JA LBB0_15 + WORD $0xfb83; BYTE $0x08 // cmpl $8, %ebx + JAE LBB0_10 + JMP LBB0_12 + +LBB0_3: + LONG $0x01c18349 // addq $1, %r9 + WORD $0x894d; BYTE $0xca // movq %r9, %r10 + LONG $0xe0e28349 // andq $-32, %r10 + WORD $0x2944; BYTE $0xd3 // subl %r10d, %ebx + LONG $0x96048d4e // leaq (%rsi,%r10,4), %r8 + LONG $0x971c8d4e // leaq (%rdi,%r10,4), %r11 + QUAD $0x000000008d048d4a // leaq (,%r9,4), %rax + LONG $0x80e08348 // andq $-128, %rax + LONG $0xc0eff9c5 // vpxor %xmm0, %xmm0, %xmm0 + WORD $0xc931 // xorl %ecx, %ecx + LONG $0xc9eff1c5 // vpxor %xmm1, %xmm1, %xmm1 + LONG $0xd2efe9c5 // vpxor %xmm2, %xmm2, %xmm2 + LONG $0xdbefe1c5 // vpxor %xmm3, %xmm3, %xmm3 + +LBB0_4: + LONG $0x2410fcc5; BYTE $0x0e // vmovups (%rsi,%rcx), %ymm4 + LONG $0x6c10fcc5; WORD $0x200e // vmovups 32(%rsi,%rcx), %ymm5 + LONG $0x7410fcc5; WORD $0x400e // vmovups 64(%rsi,%rcx), %ymm6 + LONG $0x7c10fcc5; WORD $0x600e // vmovups 96(%rsi,%rcx), %ymm7 + LONG $0x24c2dcc5; WORD $0x040f // vcmpneqps (%rdi,%rcx), %ymm4, %ymm4 + LONG $0xc4fafdc5 // vpsubd %ymm4, %ymm0, %ymm0 + LONG $0x64c2d4c5; WORD $0x200f; BYTE $0x04 // vcmpneqps 32(%rdi,%rcx), %ymm5, %ymm4 + LONG $0xccfaf5c5 // vpsubd %ymm4, %ymm1, %ymm1 + LONG $0x64c2ccc5; WORD $0x400f; BYTE $0x04 // vcmpneqps 64(%rdi,%rcx), %ymm6, %ymm4 + LONG $0x6cc2c4c5; WORD $0x600f; BYTE $0x04 // vcmpneqps 96(%rdi,%rcx), %ymm7, %ymm5 + LONG $0xd4faedc5 // vpsubd %ymm4, %ymm2, %ymm2 + LONG 
$0xddfae5c5 // vpsubd %ymm5, %ymm3, %ymm3 + LONG $0x80e98348 // subq $-128, %rcx + WORD $0x3948; BYTE $0xc8 // cmpq %rcx, %rax + JNE LBB0_4 + LONG $0xc0fef5c5 // vpaddd %ymm0, %ymm1, %ymm0 + LONG $0xc0feedc5 // vpaddd %ymm0, %ymm2, %ymm0 + LONG $0xc0fee5c5 // vpaddd %ymm0, %ymm3, %ymm0 + LONG $0x397de3c4; WORD $0x01c1 // vextracti128 $1, %ymm0, %xmm1 + LONG $0xc1fef9c5 // vpaddd %xmm1, %xmm0, %xmm0 + LONG $0xc870f9c5; BYTE $0xee // vpshufd $238, %xmm0, %xmm1 # xmm1 = xmm0[2,3,2,3] + LONG $0xc1fef9c5 // vpaddd %xmm1, %xmm0, %xmm0 + LONG $0xc870f9c5; BYTE $0x55 // vpshufd $85, %xmm0, %xmm1 # xmm1 = xmm0[1,1,1,1] + LONG $0xc1fef9c5 // vpaddd %xmm1, %xmm0, %xmm0 + LONG $0xc07ef9c5 // vmovd %xmm0, %eax + WORD $0x394d; BYTE $0xd1 // cmpq %r10, %r9 + JE LBB0_24 + +LBB0_6: + WORD $0xd989 // movl %ebx, %ecx + WORD $0xf631 // xorl %esi, %esi + +LBB0_7: + LONG $0x107ac1c4; WORD $0xb004 // vmovss (%r8,%rsi,4), %xmm0 # xmm0 = mem[0],zero,zero,zero + LONG $0xc27ac1c4; WORD $0xb304; BYTE $0x04 // vcmpneqss (%r11,%rsi,4), %xmm0, %xmm0 + LONG $0xc77ef9c5 // vmovd %xmm0, %edi + WORD $0xf829 // subl %edi, %eax + LONG $0x01c68348 // addq $1, %rsi + WORD $0xf139 // cmpl %esi, %ecx + JNE LBB0_7 + JMP LBB0_24 + +LBB0_9: + LONG $0xc9eff1c5 // vpxor %xmm1, %xmm1, %xmm1 + LONG $0xd2efe9c5 // vpxor %xmm2, %xmm2, %xmm2 + LONG $0xdbefe1c5 // vpxor %xmm3, %xmm3, %xmm3 + +LBB0_10: + LONG $0x2610fcc5 // vmovups (%rsi), %ymm4 + LONG $0x27c2dcc5; BYTE $0x0c // vcmpneq_oqps (%rdi), %ymm4, %ymm4 + LONG $0xdcfae5c5 // vpsubd %ymm4, %ymm3, %ymm3 + WORD $0xc383; BYTE $0xf8 // addl $-8, %ebx + LONG $0x20c78348 // addq $32, %rdi + LONG $0x20c68348 // addq $32, %rsi + WORD $0xfb83; BYTE $0x07 // cmpl $7, %ebx + JA LBB0_10 + +LBB0_12: + WORD $0xdb85 // testl %ebx, %ebx + JE LBB0_13 + LONG $0xff4b8d44 // leal -1(%rbx), %r9d + WORD $0xc931 // xorl %ecx, %ecx + LONG $0x1ff98341 // cmpl $31, %r9d + JAE LBB0_18 + WORD $0x8949; BYTE $0xf8 // movq %rdi, %r8 + WORD $0x8949; BYTE $0xf2 // movq %rsi, %r10 + JMP 
LBB0_21 + +LBB0_13: + WORD $0xc931 // xorl %ecx, %ecx + JMP LBB0_23 + +LBB0_18: + LONG $0x01c18349 // addq $1, %r9 + WORD $0x894d; BYTE $0xcb // movq %r9, %r11 + LONG $0xe0e38349 // andq $-32, %r11 + LONG $0x9f048d4e // leaq (%rdi,%r11,4), %r8 + LONG $0x9e148d4e // leaq (%rsi,%r11,4), %r10 + WORD $0x2944; BYTE $0xdb // subl %r11d, %ebx + LONG $0xe4efd9c5 // vpxor %xmm4, %xmm4, %xmm4 + WORD $0xc031 // xorl %eax, %eax + LONG $0xedefd1c5 // vpxor %xmm5, %xmm5, %xmm5 + LONG $0xf657c8c5 // vxorps %xmm6, %xmm6, %xmm6 + LONG $0xff57c0c5 // vxorps %xmm7, %xmm7, %xmm7 + +LBB0_19: + LONG $0x04107cc5; BYTE $0x86 // vmovups (%rsi,%rax,4), %ymm8 + LONG $0x4c107cc5; WORD $0x2086 // vmovups 32(%rsi,%rax,4), %ymm9 + LONG $0x54107cc5; WORD $0x4086 // vmovups 64(%rsi,%rax,4), %ymm10 + LONG $0x5c107cc5; WORD $0x6086 // vmovups 96(%rsi,%rax,4), %ymm11 + LONG $0x04c23cc5; WORD $0x0487 // vcmpneqps (%rdi,%rax,4), %ymm8, %ymm8 + LONG $0xfa5dc1c4; BYTE $0xe0 // vpsubd %ymm8, %ymm4, %ymm4 + LONG $0x44c234c5; WORD $0x2087; BYTE $0x04 // vcmpneqps 32(%rdi,%rax,4), %ymm9, %ymm8 + LONG $0xfa55c1c4; BYTE $0xe8 // vpsubd %ymm8, %ymm5, %ymm5 + LONG $0x44c22cc5; WORD $0x4087; BYTE $0x04 // vcmpneqps 64(%rdi,%rax,4), %ymm10, %ymm8 + LONG $0x4cc224c5; WORD $0x6087; BYTE $0x04 // vcmpneqps 96(%rdi,%rax,4), %ymm11, %ymm9 + LONG $0xfa4dc1c4; BYTE $0xf0 // vpsubd %ymm8, %ymm6, %ymm6 + LONG $0xfa45c1c4; BYTE $0xf9 // vpsubd %ymm9, %ymm7, %ymm7 + LONG $0x20c08348 // addq $32, %rax + WORD $0x3949; BYTE $0xc3 // cmpq %rax, %r11 + JNE LBB0_19 + LONG $0xe4fed5c5 // vpaddd %ymm4, %ymm5, %ymm4 + LONG $0xe4fecdc5 // vpaddd %ymm4, %ymm6, %ymm4 + LONG $0xe4fec5c5 // vpaddd %ymm4, %ymm7, %ymm4 + LONG $0x397de3c4; WORD $0x01e5 // vextracti128 $1, %ymm4, %xmm5 + LONG $0xe5fed9c5 // vpaddd %xmm5, %xmm4, %xmm4 + LONG $0xec70f9c5; BYTE $0xee // vpshufd $238, %xmm4, %xmm5 # xmm5 = xmm4[2,3,2,3] + LONG $0xe5fed9c5 // vpaddd %xmm5, %xmm4, %xmm4 + LONG $0xec70f9c5; BYTE $0x55 // vpshufd $85, %xmm4, %xmm5 # xmm5 = 
xmm4[1,1,1,1] + LONG $0xe5fed9c5 // vpaddd %xmm5, %xmm4, %xmm4 + LONG $0xe17ef9c5 // vmovd %xmm4, %ecx + WORD $0x394d; BYTE $0xd9 // cmpq %r11, %r9 + JE LBB0_23 + +LBB0_21: + WORD $0xd889 // movl %ebx, %eax + WORD $0xf631 // xorl %esi, %esi + +LBB0_22: + LONG $0x107ac1c4; WORD $0xb224 // vmovss (%r10,%rsi,4), %xmm4 # xmm4 = mem[0],zero,zero,zero + LONG $0xc25ac1c4; WORD $0xb024; BYTE $0x04 // vcmpneqss (%r8,%rsi,4), %xmm4, %xmm4 + LONG $0xe77ef9c5 // vmovd %xmm4, %edi + WORD $0xf929 // subl %edi, %ecx + LONG $0x01c68348 // addq $1, %rsi + WORD $0xf039 // cmpl %esi, %eax + JNE LBB0_22 + +LBB0_23: + LONG $0xcafef5c5 // vpaddd %ymm2, %ymm1, %ymm1 + LONG $0xc0fef5c5 // vpaddd %ymm0, %ymm1, %ymm0 + LONG $0xc3fefdc5 // vpaddd %ymm3, %ymm0, %ymm0 + LONG $0x027de2c4; BYTE $0xc0 // vphaddd %ymm0, %ymm0, %ymm0 + LONG $0x027de2c4; BYTE $0xc0 // vphaddd %ymm0, %ymm0, %ymm0 + LONG $0x397de3c4; WORD $0x01c1 // vextracti128 $1, %ymm0, %xmm1 + LONG $0xc0fef1c5 // vpaddd %xmm0, %xmm1, %xmm0 + LONG $0xc07ef9c5 // vmovd %xmm0, %eax + WORD $0xc801 // addl %ecx, %eax + +LBB0_24: + LONG $0xc02a9ac5 // vcvtsi2ss %eax, %xmm12, %xmm0 + LONG $0x0211fac5 // vmovss %xmm0, (%rdx) + LONG $0xf8658d48 // leaq -8(%rbp), %rsp + BYTE $0x5b // popq %rbx + BYTE $0x5d // popq %rbp + WORD $0xf8c5; BYTE $0x77 // vzeroupper + BYTE $0xc3 // retq diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/hamming_avx512_amd64.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/hamming_avx512_amd64.go new file mode 100644 index 0000000000000000000000000000000000000000..0e2d49a059964f3845456095b85f409fb4919edd --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/hamming_avx512_amd64.go @@ -0,0 +1,21 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// 
Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +//go:build !noasm && amd64 + +// AUTO-GENERATED BY GOAT -- DO NOT EDIT + +package asm + +import "unsafe" + +//go:noescape +func hamming_512(a, b, res, len unsafe.Pointer) diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/hamming_avx512_amd64.s b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/hamming_avx512_amd64.s new file mode 100644 index 0000000000000000000000000000000000000000..e5328b62c8947b83aff1751bd81ae6877579ea35 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/hamming_avx512_amd64.s @@ -0,0 +1,422 @@ +//go:build !noasm && amd64 +// AUTO-GENERATED BY GOAT -- DO NOT EDIT + +TEXT ·hamming_512(SB), $0-32 + MOVQ a+0(FP), DI + MOVQ b+8(FP), SI + MOVQ res+16(FP), DX + MOVQ len+24(FP), CX + BYTE $0x55 // pushq %rbp + WORD $0x8948; BYTE $0xe5 // movq %rsp, %rbp + WORD $0x5641 // pushq %r14 + BYTE $0x53 // pushq %rbx + LONG $0xf8e48348 // andq $-8, %rsp + WORD $0x8b4c; BYTE $0x19 // movq (%rcx), %r11 + LONG $0x07fb8341 // cmpl $7, %r11d + JG LBB0_15 + LONG $0xffffb941; WORD $0xffff // movl $4294967295, %r9d # imm = 0xFFFFFFFF + LONG $0x0b148d47 // leal (%r11,%r9), %r10d + WORD $0xc031 // xorl %eax, %eax + LONG $0x07fa8341 // cmpl $7, %r10d + JAE LBB0_3 + WORD $0x8949; BYTE $0xf1 // movq %rsi, %r9 + WORD $0x8948; BYTE $0xfb // movq %rdi, %rbx + JMP LBB0_13 + +LBB0_15: + LONG $0xc0eff9c5 // vpxor %xmm0, %xmm0, %xmm0 + LONG $0x80fb8141; WORD $0x0000; BYTE $0x00 // cmpl $128, %r11d + JB LBB0_16 + LONG $0xc9eff1c5 // vpxor %xmm1, %xmm1, %xmm1 + LONG $0xd2efe9c5 // vpxor %xmm2, %xmm2, %xmm2 + LONG $0xdbefe1c5 // vpxor %xmm3, %xmm3, %xmm3 + LONG $0xe4efd9c5 // vpxor %xmm4, %xmm4, %xmm4 + LONG $0xedefd1c5 // vpxor %xmm5, %xmm5, %xmm5 + LONG $0xf6efc9c5 // vpxor %xmm6, %xmm6, %xmm6 + LONG $0xffefc1c5 // vpxor %xmm7, %xmm7, %xmm7 + +LBB0_28: + LONG 
$0x487c7162; WORD $0x0710 // vmovups (%rdi), %zmm8 + LONG $0x487c7162; WORD $0x4f10; BYTE $0x01 // vmovups 64(%rdi), %zmm9 + LONG $0x487c7162; WORD $0x5710; BYTE $0x02 // vmovups 128(%rdi), %zmm10 + LONG $0x487c7162; WORD $0x5f10; BYTE $0x03 // vmovups 192(%rdi), %zmm11 + LONG $0x487c7162; WORD $0x6710; BYTE $0x04 // vmovups 256(%rdi), %zmm12 + LONG $0x487c7162; WORD $0x6f10; BYTE $0x05 // vmovups 320(%rdi), %zmm13 + LONG $0x487c7162; WORD $0x7710; BYTE $0x06 // vmovups 384(%rdi), %zmm14 + LONG $0x483cf162; WORD $0x06c2; BYTE $0x0c // vcmpneq_oqps (%rsi), %zmm8, %k0 + QUAD $0x0c014ec24834f162 // vcmpneq_oqps 64(%rsi), %zmm9, %k1 + QUAD $0x0c0256c2482cf162 // vcmpneq_oqps 128(%rsi), %zmm10, %k2 + QUAD $0x0c035ec24824f162 // vcmpneq_oqps 192(%rsi), %zmm11, %k3 + QUAD $0x0c0466c2481cf162 // vcmpneq_oqps 256(%rsi), %zmm12, %k4 + QUAD $0x0c056ec24814f162 // vcmpneq_oqps 320(%rsi), %zmm13, %k5 + QUAD $0x0c0676c2480cf162 // vcmpneq_oqps 384(%rsi), %zmm14, %k6 + LONG $0x487c7162; WORD $0x4710; BYTE $0x07 // vmovups 448(%rdi), %zmm8 + QUAD $0x0c077ec2483cf162 // vcmpneq_oqps 448(%rsi), %zmm8, %k7 + LONG $0x487e7262; WORD $0xc038 // vpmovm2d %k0, %zmm8 + LONG $0x4875d162; WORD $0xc8fa // vpsubd %zmm8, %zmm1, %zmm1 + LONG $0x487e7262; WORD $0xc738 // vpmovm2d %k7, %zmm8 + LONG $0x4875d162; WORD $0xc8fa // vpsubd %zmm8, %zmm1, %zmm1 + LONG $0x487e7262; WORD $0xc138 // vpmovm2d %k1, %zmm8 + LONG $0x486dd162; WORD $0xd0fa // vpsubd %zmm8, %zmm2, %zmm2 + LONG $0x487e7262; WORD $0xc238 // vpmovm2d %k2, %zmm8 + LONG $0x4865d162; WORD $0xd8fa // vpsubd %zmm8, %zmm3, %zmm3 + LONG $0x487e7262; WORD $0xc338 // vpmovm2d %k3, %zmm8 + LONG $0x485dd162; WORD $0xe0fa // vpsubd %zmm8, %zmm4, %zmm4 + LONG $0x487e7262; WORD $0xc438 // vpmovm2d %k4, %zmm8 + LONG $0x4855d162; WORD $0xe8fa // vpsubd %zmm8, %zmm5, %zmm5 + LONG $0x487e7262; WORD $0xc538 // vpmovm2d %k5, %zmm8 + LONG $0x484dd162; WORD $0xf0fa // vpsubd %zmm8, %zmm6, %zmm6 + LONG $0x487e7262; WORD $0xc638 // vpmovm2d %k6, %zmm8 + 
LONG $0x4845d162; WORD $0xf8fa // vpsubd %zmm8, %zmm7, %zmm7 + LONG $0x80c38341 // addl $-128, %r11d + LONG $0x00c78148; WORD $0x0002; BYTE $0x00 // addq $512, %rdi # imm = 0x200 + LONG $0x00c68148; WORD $0x0002; BYTE $0x00 // addq $512, %rsi # imm = 0x200 + LONG $0xc3be0f41 // movsbl %r11b, %eax + WORD $0x3944; BYTE $0xd8 // cmpl %r11d, %eax + JNE LBB0_28 + LONG $0x4865f162; WORD $0xd2fe // vpaddd %zmm2, %zmm3, %zmm2 + LONG $0x486df162; WORD $0xd4fe // vpaddd %zmm4, %zmm2, %zmm2 + LONG $0x486df162; WORD $0xd5fe // vpaddd %zmm5, %zmm2, %zmm2 + LONG $0x486df162; WORD $0xd6fe // vpaddd %zmm6, %zmm2, %zmm2 + LONG $0x486df162; WORD $0xd7fe // vpaddd %zmm7, %zmm2, %zmm2 + LONG $0x486df162; WORD $0xc9fe // vpaddd %zmm1, %zmm2, %zmm1 + LONG $0x48fdf362; WORD $0xca3b; BYTE $0x01 // vextracti64x4 $1, %zmm1, %ymm2 + LONG $0xc9feedc5 // vpaddd %ymm1, %ymm2, %ymm1 + WORD $0x8545; BYTE $0xdb // testl %r11d, %r11d + JE LBB0_30 + LONG $0x20fb8341 // cmpl $32, %r11d + JB LBB0_18 + +LBB0_31: + LONG $0xd2efe9c5 // vpxor %xmm2, %xmm2, %xmm2 + LONG $0xdbefe1c5 // vpxor %xmm3, %xmm3, %xmm3 + LONG $0xc0eff9c5 // vpxor %xmm0, %xmm0, %xmm0 + +LBB0_32: + LONG $0x2610fcc5 // vmovups (%rsi), %ymm4 + LONG $0x6e10fcc5; BYTE $0x20 // vmovups 32(%rsi), %ymm5 + LONG $0x7610fcc5; BYTE $0x40 // vmovups 64(%rsi), %ymm6 + LONG $0x7e10fcc5; BYTE $0x60 // vmovups 96(%rsi), %ymm7 + LONG $0x27c2dcc5; BYTE $0x0c // vcmpneq_oqps (%rdi), %ymm4, %ymm4 + LONG $0x6fc2d4c5; WORD $0x0c20 // vcmpneq_oqps 32(%rdi), %ymm5, %ymm5 + LONG $0xccfaf5c5 // vpsubd %ymm4, %ymm1, %ymm1 + LONG $0xc5fafdc5 // vpsubd %ymm5, %ymm0, %ymm0 + LONG $0x67c2ccc5; WORD $0x0c40 // vcmpneq_oqps 64(%rdi), %ymm6, %ymm4 + LONG $0xdcfae5c5 // vpsubd %ymm4, %ymm3, %ymm3 + LONG $0x67c2c4c5; WORD $0x0c60 // vcmpneq_oqps 96(%rdi), %ymm7, %ymm4 + LONG $0xd4faedc5 // vpsubd %ymm4, %ymm2, %ymm2 + LONG $0xe0c38341 // addl $-32, %r11d + LONG $0x80ef8348 // subq $-128, %rdi + LONG $0x80ee8348 // subq $-128, %rsi + LONG $0x1ffb8341 // cmpl $31, %r11d 
+ JA LBB0_32 + LONG $0x08fb8341 // cmpl $8, %r11d + JAE LBB0_20 + JMP LBB0_25 + +LBB0_3: + QUAD $0x0001ffffffc0b849; WORD $0x0000 // movabsq $8589934528, %r8 # imm = 0x1FFFFFFC0 + LONG $0x3ffa8341 // cmpl $63, %r10d + JAE LBB0_8 + WORD $0xc031 // xorl %eax, %eax + WORD $0xc931 // xorl %ecx, %ecx + JMP LBB0_5 + +LBB0_16: + LONG $0xc9eff1c5 // vpxor %xmm1, %xmm1, %xmm1 + LONG $0x20fb8341 // cmpl $32, %r11d + JAE LBB0_31 + +LBB0_18: + LONG $0xdbefe1c5 // vpxor %xmm3, %xmm3, %xmm3 + LONG $0xd2efe9c5 // vpxor %xmm2, %xmm2, %xmm2 + LONG $0x08fb8341 // cmpl $8, %r11d + JB LBB0_25 + +LBB0_20: + LONG $0xf8438d41 // leal -8(%r11), %eax + WORD $0xc189 // movl %eax, %ecx + WORD $0xe9c1; BYTE $0x03 // shrl $3, %ecx + WORD $0x598d; BYTE $0x01 // leal 1(%rcx), %ebx + WORD $0xc3f6; BYTE $0x03 // testb $3, %bl + JE LBB0_24 + WORD $0xc180; BYTE $0x01 // addb $1, %cl + WORD $0xb60f; BYTE $0xc9 // movzbl %cl, %ecx + WORD $0xe183; BYTE $0x03 // andl $3, %ecx + LONG $0x03e1c148 // shlq $3, %rcx + WORD $0xdb31 // xorl %ebx, %ebx + +LBB0_22: + LONG $0x2610fcc5 // vmovups (%rsi), %ymm4 + LONG $0x27c2dcc5; BYTE $0x0c // vcmpneq_oqps (%rdi), %ymm4, %ymm4 + LONG $0xccfaf5c5 // vpsubd %ymm4, %ymm1, %ymm1 + LONG $0x20c78348 // addq $32, %rdi + LONG $0x20c68348 // addq $32, %rsi + LONG $0x08c38348 // addq $8, %rbx + WORD $0xd939 // cmpl %ebx, %ecx + JNE LBB0_22 + WORD $0x2941; BYTE $0xdb // subl %ebx, %r11d + +LBB0_24: + WORD $0xf883; BYTE $0x18 // cmpl $24, %eax + JB LBB0_25 + +LBB0_49: + LONG $0x2610fcc5 // vmovups (%rsi), %ymm4 + LONG $0x6e10fcc5; BYTE $0x20 // vmovups 32(%rsi), %ymm5 + LONG $0x7610fcc5; BYTE $0x40 // vmovups 64(%rsi), %ymm6 + LONG $0x7e10fcc5; BYTE $0x60 // vmovups 96(%rsi), %ymm7 + LONG $0x27c2dcc5; BYTE $0x0c // vcmpneq_oqps (%rdi), %ymm4, %ymm4 + LONG $0x6fc2d4c5; WORD $0x0c20 // vcmpneq_oqps 32(%rdi), %ymm5, %ymm5 + LONG $0xccfaf5c5 // vpsubd %ymm4, %ymm1, %ymm1 + LONG $0xcdfaf5c5 // vpsubd %ymm5, %ymm1, %ymm1 + LONG $0x67c2ccc5; WORD $0x0c40 // vcmpneq_oqps 64(%rdi), 
%ymm6, %ymm4 + LONG $0xccfaf5c5 // vpsubd %ymm4, %ymm1, %ymm1 + LONG $0x67c2c4c5; WORD $0x0c60 // vcmpneq_oqps 96(%rdi), %ymm7, %ymm4 + LONG $0xccfaf5c5 // vpsubd %ymm4, %ymm1, %ymm1 + LONG $0xe0c38341 // addl $-32, %r11d + LONG $0x80ef8348 // subq $-128, %rdi + LONG $0x80ee8348 // subq $-128, %rsi + LONG $0x07fb8341 // cmpl $7, %r11d + JA LBB0_49 + +LBB0_25: + WORD $0x8545; BYTE $0xdb // testl %r11d, %r11d + JE LBB0_26 + LONG $0xff438d45 // leal -1(%r11), %r8d + WORD $0xc931 // xorl %ecx, %ecx + LONG $0x07f88341 // cmpl $7, %r8d + JAE LBB0_35 + WORD $0x8949; BYTE $0xfe // movq %rdi, %r14 + WORD $0x8948; BYTE $0xf3 // movq %rsi, %rbx + JMP LBB0_47 + +LBB0_26: + WORD $0xc931 // xorl %ecx, %ecx + JMP LBB0_40 + +LBB0_35: + QUAD $0x0001ffffffc0b949; WORD $0x0000 // movabsq $8589934528, %r9 # imm = 0x1FFFFFFC0 + LONG $0x3ff88341 // cmpl $63, %r8d + JAE LBB0_42 + WORD $0xc931 // xorl %ecx, %ecx + WORD $0x3145; BYTE $0xd2 // xorl %r10d, %r10d + JMP LBB0_37 + +LBB0_8: + LONG $0x01c28349 // addq $1, %r10 + WORD $0x894c; BYTE $0xd1 // movq %r10, %rcx + WORD $0x214c; BYTE $0xc1 // andq %r8, %rcx + QUAD $0x0000000095048d4a // leaq (,%r10,4), %rax + LONG $0xff002548; WORD $0xffff // andq $-256, %rax + LONG $0xc0eff9c5 // vpxor %xmm0, %xmm0, %xmm0 + WORD $0xdb31 // xorl %ebx, %ebx + LONG $0xc9eff1c5 // vpxor %xmm1, %xmm1, %xmm1 + LONG $0xd2efe9c5 // vpxor %xmm2, %xmm2, %xmm2 + LONG $0xdbefe1c5 // vpxor %xmm3, %xmm3, %xmm3 + +LBB0_9: + LONG $0x487cf162; WORD $0x2410; BYTE $0x1f // vmovups (%rdi,%rbx), %zmm4 + QUAD $0x011f6c10487cf162 // vmovups 64(%rdi,%rbx), %zmm5 + QUAD $0x021f7410487cf162 // vmovups 128(%rdi,%rbx), %zmm6 + QUAD $0x031f7c10487cf162 // vmovups 192(%rdi,%rbx), %zmm7 + QUAD $0x041e04c2485cf162 // vcmpneqps (%rsi,%rbx), %zmm4, %k0 + QUAD $0x011e4cc24854f162; BYTE $0x04 // vcmpneqps 64(%rsi,%rbx), %zmm5, %k1 + QUAD $0x021e54c2484cf162; BYTE $0x04 // vcmpneqps 128(%rsi,%rbx), %zmm6, %k2 + QUAD $0x031e5cc24844f162; BYTE $0x04 // vcmpneqps 192(%rsi,%rbx), %zmm7, %k3 + 
LONG $0x487ef262; WORD $0xe038 // vpmovm2d %k0, %zmm4 + LONG $0x487df162; WORD $0xc4fa // vpsubd %zmm4, %zmm0, %zmm0 + LONG $0x487ef262; WORD $0xe138 // vpmovm2d %k1, %zmm4 + LONG $0x4875f162; WORD $0xccfa // vpsubd %zmm4, %zmm1, %zmm1 + LONG $0x487ef262; WORD $0xe238 // vpmovm2d %k2, %zmm4 + LONG $0x486df162; WORD $0xd4fa // vpsubd %zmm4, %zmm2, %zmm2 + LONG $0x487ef262; WORD $0xe338 // vpmovm2d %k3, %zmm4 + LONG $0x4865f162; WORD $0xdcfa // vpsubd %zmm4, %zmm3, %zmm3 + LONG $0x00c38148; WORD $0x0001; BYTE $0x00 // addq $256, %rbx # imm = 0x100 + WORD $0x3948; BYTE $0xd8 // cmpq %rbx, %rax + JNE LBB0_9 + LONG $0x4875f162; WORD $0xc0fe // vpaddd %zmm0, %zmm1, %zmm0 + LONG $0x486df162; WORD $0xc0fe // vpaddd %zmm0, %zmm2, %zmm0 + LONG $0x4865f162; WORD $0xc0fe // vpaddd %zmm0, %zmm3, %zmm0 + LONG $0x48fdf362; WORD $0xc13b; BYTE $0x01 // vextracti64x4 $1, %zmm0, %ymm1 + LONG $0x487df162; WORD $0xc1fe // vpaddd %zmm1, %zmm0, %zmm0 + LONG $0x397de3c4; WORD $0x01c1 // vextracti128 $1, %ymm0, %xmm1 + LONG $0xc1fef9c5 // vpaddd %xmm1, %xmm0, %xmm0 + LONG $0xc870f9c5; BYTE $0xee // vpshufd $238, %xmm0, %xmm1 # xmm1 = xmm0[2,3,2,3] + LONG $0xc1fef9c5 // vpaddd %xmm1, %xmm0, %xmm0 + LONG $0xc870f9c5; BYTE $0x55 // vpshufd $85, %xmm0, %xmm1 # xmm1 = xmm0[1,1,1,1] + LONG $0xc1fef9c5 // vpaddd %xmm1, %xmm0, %xmm0 + LONG $0xc07ef9c5 // vmovd %xmm0, %eax + WORD $0x3949; BYTE $0xca // cmpq %rcx, %r10 + JE LBB0_41 + LONG $0x38c2f641 // testb $56, %r10b + JE LBB0_12 + +LBB0_5: + WORD $0x8945; BYTE $0xda // movl %r11d, %r10d + WORD $0x0145; BYTE $0xca // addl %r9d, %r10d + LONG $0x01c28349 // addq $1, %r10 + LONG $0x38c08349 // addq $56, %r8 + WORD $0x214d; BYTE $0xd0 // andq %r10, %r8 + WORD $0x2945; BYTE $0xc3 // subl %r8d, %r11d + LONG $0x860c8d4e // leaq (%rsi,%r8,4), %r9 + LONG $0x871c8d4a // leaq (%rdi,%r8,4), %rbx + LONG $0xc06ef9c5 // vmovd %eax, %xmm0 + +LBB0_6: + LONG $0x0c10fcc5; BYTE $0x8e // vmovups (%rsi,%rcx,4), %ymm1 + LONG $0x0cc2f4c5; WORD $0x048f // vcmpneqps 
(%rdi,%rcx,4), %ymm1, %ymm1 + LONG $0xc1fafdc5 // vpsubd %ymm1, %ymm0, %ymm0 + LONG $0x08c18348 // addq $8, %rcx + WORD $0x3949; BYTE $0xc8 // cmpq %rcx, %r8 + JNE LBB0_6 + LONG $0x397de3c4; WORD $0x01c1 // vextracti128 $1, %ymm0, %xmm1 + LONG $0xc1fef9c5 // vpaddd %xmm1, %xmm0, %xmm0 + LONG $0xc870f9c5; BYTE $0xee // vpshufd $238, %xmm0, %xmm1 # xmm1 = xmm0[2,3,2,3] + LONG $0xc1fef9c5 // vpaddd %xmm1, %xmm0, %xmm0 + LONG $0xc870f9c5; BYTE $0x55 // vpshufd $85, %xmm0, %xmm1 # xmm1 = xmm0[1,1,1,1] + LONG $0xc1fef9c5 // vpaddd %xmm1, %xmm0, %xmm0 + LONG $0xc07ef9c5 // vmovd %xmm0, %eax + WORD $0x394d; BYTE $0xc2 // cmpq %r8, %r10 + JNE LBB0_13 + JMP LBB0_41 + +LBB0_30: + LONG $0x0275e2c4; BYTE $0xc1 // vphaddd %ymm1, %ymm1, %ymm0 + LONG $0x027de2c4; BYTE $0xc0 // vphaddd %ymm0, %ymm0, %ymm0 + LONG $0x397de3c4; WORD $0x01c1 // vextracti128 $1, %ymm0, %xmm1 + LONG $0xc0fef1c5 // vpaddd %xmm0, %xmm1, %xmm0 + LONG $0xc07ef9c5 // vmovd %xmm0, %eax + JMP LBB0_41 + +LBB0_42: + LONG $0x01408d49 // leaq 1(%r8), %rax + WORD $0x8949; BYTE $0xc2 // movq %rax, %r10 + WORD $0x214d; BYTE $0xca // andq %r9, %r10 + LONG $0xe4efd9c5 // vpxor %xmm4, %xmm4, %xmm4 + WORD $0xc931 // xorl %ecx, %ecx + LONG $0xedefd1c5 // vpxor %xmm5, %xmm5, %xmm5 + LONG $0xf6efc9c5 // vpxor %xmm6, %xmm6, %xmm6 + LONG $0xffefc1c5 // vpxor %xmm7, %xmm7, %xmm7 + +LBB0_43: + LONG $0x487c7162; WORD $0x0410; BYTE $0x8f // vmovups (%rdi,%rcx,4), %zmm8 + QUAD $0x018f4c10487c7162 // vmovups 64(%rdi,%rcx,4), %zmm9 + QUAD $0x028f5410487c7162 // vmovups 128(%rdi,%rcx,4), %zmm10 + QUAD $0x038f5c10487c7162 // vmovups 192(%rdi,%rcx,4), %zmm11 + QUAD $0x048e04c2483cf162 // vcmpneqps (%rsi,%rcx,4), %zmm8, %k0 + QUAD $0x018e4cc24834f162; BYTE $0x04 // vcmpneqps 64(%rsi,%rcx,4), %zmm9, %k1 + QUAD $0x028e54c2482cf162; BYTE $0x04 // vcmpneqps 128(%rsi,%rcx,4), %zmm10, %k2 + QUAD $0x038e5cc24824f162; BYTE $0x04 // vcmpneqps 192(%rsi,%rcx,4), %zmm11, %k3 + LONG $0x487e7262; WORD $0xc038 // vpmovm2d %k0, %zmm8 + LONG $0x485dd162; 
WORD $0xe0fa // vpsubd %zmm8, %zmm4, %zmm4 + LONG $0x487e7262; WORD $0xc138 // vpmovm2d %k1, %zmm8 + LONG $0x4855d162; WORD $0xe8fa // vpsubd %zmm8, %zmm5, %zmm5 + LONG $0x487e7262; WORD $0xc238 // vpmovm2d %k2, %zmm8 + LONG $0x484dd162; WORD $0xf0fa // vpsubd %zmm8, %zmm6, %zmm6 + LONG $0x487e7262; WORD $0xc338 // vpmovm2d %k3, %zmm8 + LONG $0x4845d162; WORD $0xf8fa // vpsubd %zmm8, %zmm7, %zmm7 + LONG $0x40c18348 // addq $64, %rcx + WORD $0x3949; BYTE $0xca // cmpq %rcx, %r10 + JNE LBB0_43 + LONG $0x4855f162; WORD $0xe4fe // vpaddd %zmm4, %zmm5, %zmm4 + LONG $0x484df162; WORD $0xe4fe // vpaddd %zmm4, %zmm6, %zmm4 + LONG $0x4845f162; WORD $0xe4fe // vpaddd %zmm4, %zmm7, %zmm4 + LONG $0x48fdf362; WORD $0xe53b; BYTE $0x01 // vextracti64x4 $1, %zmm4, %ymm5 + LONG $0x485df162; WORD $0xe5fe // vpaddd %zmm5, %zmm4, %zmm4 + LONG $0x397de3c4; WORD $0x01e5 // vextracti128 $1, %ymm4, %xmm5 + LONG $0xe5fed9c5 // vpaddd %xmm5, %xmm4, %xmm4 + LONG $0xec70f9c5; BYTE $0xee // vpshufd $238, %xmm4, %xmm5 # xmm5 = xmm4[2,3,2,3] + LONG $0xe5fed9c5 // vpaddd %xmm5, %xmm4, %xmm4 + LONG $0xec70f9c5; BYTE $0x55 // vpshufd $85, %xmm4, %xmm5 # xmm5 = xmm4[1,1,1,1] + LONG $0xe5fed9c5 // vpaddd %xmm5, %xmm4, %xmm4 + LONG $0xe17ef9c5 // vmovd %xmm4, %ecx + WORD $0x394c; BYTE $0xd0 // cmpq %r10, %rax + JE LBB0_40 + WORD $0x38a8 // testb $56, %al + JE LBB0_46 + +LBB0_37: + LONG $0x01c08349 // addq $1, %r8 + LONG $0x38c18349 // addq $56, %r9 + WORD $0x214d; BYTE $0xc1 // andq %r8, %r9 + LONG $0x8f348d4e // leaq (%rdi,%r9,4), %r14 + LONG $0x8e1c8d4a // leaq (%rsi,%r9,4), %rbx + WORD $0x2945; BYTE $0xcb // subl %r9d, %r11d + LONG $0xe16ef9c5 // vmovd %ecx, %xmm4 + +LBB0_38: + LONG $0x107ca1c4; WORD $0x962c // vmovups (%rsi,%r10,4), %ymm5 + LONG $0xc254a1c4; WORD $0x972c; BYTE $0x04 // vcmpneqps (%rdi,%r10,4), %ymm5, %ymm5 + LONG $0xe5faddc5 // vpsubd %ymm5, %ymm4, %ymm4 + LONG $0x08c28349 // addq $8, %r10 + WORD $0x394d; BYTE $0xd1 // cmpq %r10, %r9 + JNE LBB0_38 + LONG $0x397de3c4; WORD $0x01e5 
// vextracti128 $1, %ymm4, %xmm5 + LONG $0xe5fed9c5 // vpaddd %xmm5, %xmm4, %xmm4 + LONG $0xec70f9c5; BYTE $0xee // vpshufd $238, %xmm4, %xmm5 # xmm5 = xmm4[2,3,2,3] + LONG $0xe5fed9c5 // vpaddd %xmm5, %xmm4, %xmm4 + LONG $0xec70f9c5; BYTE $0x55 // vpshufd $85, %xmm4, %xmm5 # xmm5 = xmm4[1,1,1,1] + LONG $0xe5fed9c5 // vpaddd %xmm5, %xmm4, %xmm4 + LONG $0xe17ef9c5 // vmovd %xmm4, %ecx + WORD $0x394d; BYTE $0xc8 // cmpq %r9, %r8 + JNE LBB0_47 + JMP LBB0_40 + +LBB0_12: + LONG $0x8f1c8d48 // leaq (%rdi,%rcx,4), %rbx + LONG $0x8e0c8d4c // leaq (%rsi,%rcx,4), %r9 + WORD $0x2941; BYTE $0xcb // subl %ecx, %r11d + +LBB0_13: + WORD $0x8944; BYTE $0xd9 // movl %r11d, %ecx + WORD $0xf631 // xorl %esi, %esi + +LBB0_14: + LONG $0x0410fac5; BYTE $0xb3 // vmovss (%rbx,%rsi,4), %xmm0 # xmm0 = mem[0],zero,zero,zero + QUAD $0x04b104c2087ed162 // vcmpneqss (%r9,%rsi,4), %xmm0, %k0 + LONG $0xf893f8c5 // kmovw %k0, %edi + WORD $0xf801 // addl %edi, %eax + LONG $0x01c68348 // addq $1, %rsi + WORD $0xf139 // cmpl %esi, %ecx + JNE LBB0_14 + JMP LBB0_41 + +LBB0_46: + WORD $0x2945; BYTE $0xd3 // subl %r10d, %r11d + LONG $0x961c8d4a // leaq (%rsi,%r10,4), %rbx + LONG $0x97348d4e // leaq (%rdi,%r10,4), %r14 + +LBB0_47: + WORD $0x8944; BYTE $0xde // movl %r11d, %esi + WORD $0xff31 // xorl %edi, %edi + +LBB0_48: + LONG $0x107ac1c4; WORD $0xbe24 // vmovss (%r14,%rdi,4), %xmm4 # xmm4 = mem[0],zero,zero,zero + QUAD $0x04bb04c2085ef162 // vcmpneqss (%rbx,%rdi,4), %xmm4, %k0 + LONG $0xc093f8c5 // kmovw %k0, %eax + WORD $0xc101 // addl %eax, %ecx + LONG $0x01c78348 // addq $1, %rdi + WORD $0xfe39 // cmpl %edi, %esi + JNE LBB0_48 + +LBB0_40: + LONG $0xc0fee5c5 // vpaddd %ymm0, %ymm3, %ymm0 + LONG $0xc2fefdc5 // vpaddd %ymm2, %ymm0, %ymm0 + LONG $0xc1fefdc5 // vpaddd %ymm1, %ymm0, %ymm0 + LONG $0x027de2c4; BYTE $0xc0 // vphaddd %ymm0, %ymm0, %ymm0 + LONG $0x027de2c4; BYTE $0xc0 // vphaddd %ymm0, %ymm0, %ymm0 + LONG $0x397de3c4; WORD $0x01c1 // vextracti128 $1, %ymm0, %xmm1 + LONG $0xc0fef1c5 // vpaddd 
%xmm0, %xmm1, %xmm0 + LONG $0xc07ef9c5 // vmovd %xmm0, %eax + WORD $0xc801 // addl %ecx, %eax + +LBB0_41: + LONG $0xc02a82c5 // vcvtsi2ss %eax, %xmm15, %xmm0 + LONG $0x0211fac5 // vmovss %xmm0, (%rdx) + LONG $0xf0658d48 // leaq -16(%rbp), %rsp + BYTE $0x5b // popq %rbx + WORD $0x5e41 // popq %r14 + BYTE $0x5d // popq %rbp + WORD $0xf8c5; BYTE $0x77 // vzeroupper + BYTE $0xc3 // retq diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/hamming_bitwise_arm64.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/hamming_bitwise_arm64.go new file mode 100644 index 0000000000000000000000000000000000000000..8972e77154aab129900560b8a35d4ba9ff75450b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/hamming_bitwise_arm64.go @@ -0,0 +1,21 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +//go:build !noasm && arm64 + +// AUTO-GENERATED BY GOAT -- DO NOT EDIT + +package asm + +import "unsafe" + +//go:noescape +func hamming_bitwise(a, b, res, len unsafe.Pointer) diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/hamming_bitwise_arm64.s b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/hamming_bitwise_arm64.s new file mode 100644 index 0000000000000000000000000000000000000000..a7976ab02c7549e5bbe582e17314237f5a68f3a5 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/hamming_bitwise_arm64.s @@ -0,0 +1,165 @@ +//go:build !noasm && arm64 +// AUTO-GENERATED BY GOAT -- DO NOT EDIT + +TEXT ·hamming_bitwise(SB), $0-32 + MOVD a+0(FP), R0 + MOVD b+8(FP), R1 + MOVD res+16(FP), R2 + MOVD len+24(FP), R3 + WORD $0xa9bf7bfd // stp x29, x30, [sp, #-16]! + WORD $0xf9400069 // ldr x9, [x3] + WORD $0x910003fd // mov x29, sp + WORD $0x6b0903e8 // negs w8, w9 + WORD $0x1200052a // and w10, w9, #0x3 + WORD $0x12000508 // and w8, w8, #0x3 + WORD $0x5a884548 // csneg w8, w10, w8, mi + WORD $0x4b08012a // sub w10, w9, w8 + WORD $0x7100215f // cmp w10, #8 + WORD $0x540000ea // b.ge .LBB0_2 + WORD $0x6f00e401 // movi v1.2d, #0000000000000000 + WORD $0x2a1f03eb // mov w11, wzr + WORD $0x6f00e402 // movi v2.2d, #0000000000000000 + WORD $0x6f00e403 // movi v3.2d, #0000000000000000 + WORD $0x6f00e400 // movi v0.2d, #0000000000000000 + WORD $0x1400001e // b .LBB0_4 + +LBB0_2: + WORD $0x6f00e400 // movi v0.2d, #0000000000000000 + WORD $0xaa1f03eb // mov x11, xzr + WORD $0x6f00e403 // movi v3.2d, #0000000000000000 + WORD $0xaa0003ec // mov x12, x0 + WORD $0x6f00e402 // movi v2.2d, #0000000000000000 + WORD $0xaa0103ed // mov x13, x1 + WORD $0x6f00e401 // movi v1.2d, #0000000000000000 + +LBB0_3: + WORD $0x4cdf2d84 // ld1 { v4.2d, v5.2d, v6.2d, v7.2d }, [x12], #64 + WORD $0x9100416e // add x14, x11, #16 + WORD $0x9100216b 
// add x11, x11, #8 + WORD $0xeb0a01df // cmp x14, x10 + WORD $0x4cdf2db0 // ld1 { v16.2d, v17.2d, v18.2d, v19.2d }, [x13], #64 + WORD $0x6e241e14 // eor v20.16b, v16.16b, v4.16b + WORD $0x4e205a94 // cnt v20.16b, v20.16b + WORD $0x6e251e35 // eor v21.16b, v17.16b, v5.16b + WORD $0x6e261e56 // eor v22.16b, v18.16b, v6.16b + WORD $0x6e271e64 // eor v4.16b, v19.16b, v7.16b + WORD $0x4e205aa6 // cnt v6.16b, v21.16b + WORD $0x4e205ac7 // cnt v7.16b, v22.16b + WORD $0x4e205884 // cnt v4.16b, v4.16b + WORD $0x6e202a85 // uaddlp v5.8h, v20.16b + WORD $0x6e6068a0 // uadalp v0.4s, v5.8h + WORD $0x6e2028c5 // uaddlp v5.8h, v6.16b + WORD $0x6e2028e6 // uaddlp v6.8h, v7.16b + WORD $0x6e202884 // uaddlp v4.8h, v4.16b + WORD $0x6e6068a3 // uadalp v3.4s, v5.8h + WORD $0x6e6068c2 // uadalp v2.4s, v6.8h + WORD $0x6e606881 // uadalp v1.4s, v4.8h + WORD $0x54fffd69 // b.ls .LBB0_3 + +LBB0_4: + WORD $0x6b0a017f // cmp w11, w10 + WORD $0x540001ea // b.ge .LBB0_7 + WORD $0x2a0b03ec // mov w12, w11 + WORD $0xd37d7d6d // ubfiz x13, x11, #3, #32 + WORD $0x93407d4a // sxtw x10, w10 + WORD $0x8b0d002b // add x11, x1, x13 + WORD $0x8b0d000d // add x13, x0, x13 + +LBB0_6: + WORD $0x3cc105a4 // ldr q4, [x13], #16 + WORD $0x3cc10565 // ldr q5, [x11], #16 + WORD $0x9100098c // add x12, x12, #2 + WORD $0xeb0a019f // cmp x12, x10 + WORD $0x6e241ca4 // eor v4.16b, v5.16b, v4.16b + WORD $0x4e205884 // cnt v4.16b, v4.16b + WORD $0x6e202884 // uaddlp v4.8h, v4.16b + WORD $0x6e606880 // uadalp v0.4s, v4.8h + WORD $0x54ffff0b // b.lt .LBB0_6 + +LBB0_7: + WORD $0x4ea08460 // add v0.4s, v3.4s, v0.4s + WORD $0x7100051f // cmp w8, #1 + WORD $0x4ea28400 // add v0.4s, v0.4s, v2.4s + WORD $0x4ea18400 // add v0.4s, v0.4s, v1.4s + WORD $0x4eb1b800 // addv s0, v0.4s + WORD $0x1e26000c // fmov w12, s0 + WORD $0x540008cb // b.lt .LBB0_13 + WORD $0x93407d29 // sxtw x9, w9 + WORD $0xcb08012a // sub x10, x9, x8 + WORD $0x9100054b // add x11, x10, #1 + WORD $0xeb09017f // cmp x11, x9 + WORD $0x4b0803eb // neg w11, w8 + 
WORD $0x9a8ad52a // csinc x10, x9, x10, le + WORD $0x8b08014d // add x13, x10, x8 + WORD $0x8b2bc12a // add x10, x9, w11, sxtw + WORD $0xcb0901ab // sub x11, x13, x9 + WORD $0xf100217f // cmp x11, #8 + WORD $0x540005e3 // b.lo .LBB0_12 + WORD $0x6f00e400 // movi v0.2d, #0000000000000000 + WORD $0xd37df12e // lsl x14, x9, #3 + WORD $0xcb28cdc8 // sub x8, x14, w8, sxtw #3 + WORD $0x927df16d // and x13, x11, #0xfffffffffffffff8 + WORD $0x6f00e401 // movi v1.2d, #0000000000000000 + WORD $0x9100810e // add x14, x8, #32 + WORD $0x8b0d014a // add x10, x10, x13 + WORD $0x8b0e0028 // add x8, x1, x14 + WORD $0x4e041d80 // mov v0.s[0], w12 + WORD $0x8b0e000c // add x12, x0, x14 + WORD $0xaa0d03ee // mov x14, x13 + +LBB0_10: + WORD $0xad7f0983 // ldp q3, q2, [x12, #-32] + WORD $0xf10021ce // subs x14, x14, #8 + WORD $0xacc21185 // ldp q5, q4, [x12], #64 + WORD $0xad7f1907 // ldp q7, q6, [x8, #-32] + WORD $0x6e231ce3 // eor v3.16b, v7.16b, v3.16b + WORD $0x4e205863 // cnt v3.16b, v3.16b + WORD $0xacc24111 // ldp q17, q16, [x8], #64 + WORD $0x6e221cc2 // eor v2.16b, v6.16b, v2.16b + WORD $0x4e205842 // cnt v2.16b, v2.16b + WORD $0x6e251e25 // eor v5.16b, v17.16b, v5.16b + WORD $0x4e2058a5 // cnt v5.16b, v5.16b + WORD $0x6e241e04 // eor v4.16b, v16.16b, v4.16b + WORD $0x4e205884 // cnt v4.16b, v4.16b + WORD $0x6e202842 // uaddlp v2.8h, v2.16b + WORD $0x6e202863 // uaddlp v3.8h, v3.16b + WORD $0x6e2028a5 // uaddlp v5.8h, v5.16b + WORD $0x6e202884 // uaddlp v4.8h, v4.16b + WORD $0x6e602842 // uaddlp v2.4s, v2.8h + WORD $0x6e602863 // uaddlp v3.4s, v3.8h + WORD $0x6e602884 // uaddlp v4.4s, v4.8h + WORD $0x6e6028a5 // uaddlp v5.4s, v5.8h + WORD $0x6ea02842 // uaddlp v2.2d, v2.4s + WORD $0x6ea02863 // uaddlp v3.2d, v3.4s + WORD $0x6ea02884 // uaddlp v4.2d, v4.4s + WORD $0x6ea028a5 // uaddlp v5.2d, v5.4s + WORD $0x4e821862 // uzp1 v2.4s, v3.4s, v2.4s + WORD $0x4e8418a3 // uzp1 v3.4s, v5.4s, v4.4s + WORD $0x4ea28400 // add v0.4s, v0.4s, v2.4s + WORD $0x4ea38421 // add v1.4s, v1.4s, 
v3.4s + WORD $0x54fffc61 // b.ne .LBB0_10 + WORD $0x4ea08420 // add v0.4s, v1.4s, v0.4s + WORD $0xeb0d017f // cmp x11, x13 + WORD $0x4eb1b800 // addv s0, v0.4s + WORD $0x1e26000c // fmov w12, s0 + WORD $0x540001a0 // b.eq .LBB0_13 + +LBB0_12: + WORD $0xd37df148 // lsl x8, x10, #3 + WORD $0x9100054a // add x10, x10, #1 + WORD $0xeb09015f // cmp x10, x9 + WORD $0xf868680b // ldr x11, [x0, x8] + WORD $0xf8686828 // ldr x8, [x1, x8] + WORD $0xca0b0108 // eor x8, x8, x11 + WORD $0x9e670100 // fmov d0, x8 + WORD $0x0e205800 // cnt v0.8b, v0.8b + WORD $0x2e303800 // uaddlv h0, v0.8b + WORD $0x1e260008 // fmov w8, s0 + WORD $0x0b08018c // add w12, w12, w8 + WORD $0x54fffeab // b.lt .LBB0_12 + +LBB0_13: + WORD $0x93407d88 // sxtw x8, w12 + WORD $0xf9000048 // str x8, [x2] + WORD $0xa8c17bfd // ldp x29, x30, [sp], #16 + WORD $0xd65f03c0 // ret diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/hamming_bitwise_avx256_amd64.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/hamming_bitwise_avx256_amd64.go new file mode 100644 index 0000000000000000000000000000000000000000..731244da75ca83e03a1233bad64da3153c95975f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/hamming_bitwise_avx256_amd64.go @@ -0,0 +1,27 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +//go:build !noasm && amd64 + +// AUTO-GENERATED BY GOAT -- DO NOT EDIT + +package asm + +import "unsafe" + +//go:noescape +func popcnt_AVX2_lookup(vec, low_mask_vec, lookup_vec unsafe.Pointer) + +//go:noescape +func popcnt_64bit(src, popcnt_constants unsafe.Pointer) + +//go:noescape +func hamming_bitwise_256(a, b, res, len, lookup_avx, popcnt_constants unsafe.Pointer) diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/hamming_bitwise_avx256_amd64.s b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/hamming_bitwise_avx256_amd64.s new file mode 100644 index 0000000000000000000000000000000000000000..4c348d010b83bfa759f21614262e9777fb2c6fcc --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/hamming_bitwise_avx256_amd64.s @@ -0,0 +1,457 @@ +//go:build !noasm && amd64 +// AUTO-GENERATED BY GOAT -- DO NOT EDIT + +TEXT ·popcnt_AVX2_lookup(SB), $0-32 + MOVQ vec+0(FP), DI + MOVQ low_mask_vec+8(FP), SI + MOVQ lookup_vec+16(FP), DX + +TEXT ·popcnt_64bit(SB), $0-32 + MOVQ src+0(FP), DI + MOVQ popcnt_constants+8(FP), SI + +TEXT ·hamming_bitwise_256(SB), $0-32 + MOVQ a+0(FP), DI + MOVQ b+8(FP), SI + MOVQ res+16(FP), DX + MOVQ len+24(FP), CX + MOVQ lookup_avx+32(FP), R8 + MOVQ popcnt_constants+40(FP), R9 + BYTE $0x55 // pushq %rbp + WORD $0x8948; BYTE $0xe5 // movq %rsp, %rbp + WORD $0x5741 // pushq %r15 + WORD $0x5641 // pushq %r14 + WORD $0x5541 // pushq %r13 + WORD $0x5441 // pushq %r12 + BYTE $0x53 // pushq %rbx + LONG $0xf8e48348 // andq $-8, %rsp + BYTE $0x50 // pushq %rax + WORD $0x8949; BYTE $0xd4 // movq %rdx, %r12 + WORD $0x8b48; BYTE $0x01 // movq (%rcx), %rax + WORD $0xf883; BYTE $0x08 // cmpl $8, %eax + JGE LBB0_1 + LONG $0x2424894c // movq %r12, (%rsp) # 8-byte Spill + WORD $0x8b4d; BYTE $0x01 // movq (%r9), %r8 + LONG $0x08518b4d // movq 8(%r9), %r10 + LONG $0x10598b4d // movq 16(%r9), %r11 + LONG 
$0x18498b4d // movq 24(%r9), %r9 + WORD $0x8941; BYTE $0xc4 // movl %eax, %r12d + LONG $0xffc48341 // addl $-1, %r12d + LONG $0x0ffc8341 // cmpl $15, %r12d + JAE LBB0_12 + WORD $0x3145; BYTE $0xff // xorl %r15d, %r15d + WORD $0x8949; BYTE $0xf6 // movq %rsi, %r14 + WORD $0x8949; BYTE $0xfd // movq %rdi, %r13 + LONG $0x24248b4c // movq (%rsp), %r12 # 8-byte Reload + JMP LBB0_15 + +LBB0_1: + LONG $0x6f7ec1c4; BYTE $0x00 // vmovdqu (%r8), %ymm0 + LONG $0x597dc2c4; WORD $0x2049 // vpbroadcastq 32(%r9), %ymm1 + WORD $0x3145; BYTE $0xff // xorl %r15d, %r15d + WORD $0xf883; BYTE $0x10 // cmpl $16, %eax + JB LBB0_5 + LONG $0xd2efe9c5 // vpxor %xmm2, %xmm2, %xmm2 + +LBB0_3: + LONG $0x1e6ffec5 // vmovdqu (%rsi), %ymm3 + LONG $0x666ffec5; BYTE $0x20 // vmovdqu 32(%rsi), %ymm4 + LONG $0x6e6ffec5; BYTE $0x40 // vmovdqu 64(%rsi), %ymm5 + LONG $0x766ffec5; BYTE $0x60 // vmovdqu 96(%rsi), %ymm6 + LONG $0x3fefe5c5 // vpxor (%rdi), %ymm3, %ymm7 + LONG $0x47ef5dc5; BYTE $0x20 // vpxor 32(%rdi), %ymm4, %ymm8 + LONG $0x67efd5c5; BYTE $0x40 // vpxor 64(%rdi), %ymm5, %ymm4 + LONG $0x5fefcdc5; BYTE $0x60 // vpxor 96(%rdi), %ymm6, %ymm3 + LONG $0xe9dbc5c5 // vpand %ymm1, %ymm7, %ymm5 + LONG $0xd771cdc5; BYTE $0x04 // vpsrlw $4, %ymm7, %ymm6 + LONG $0xf6dbf5c5 // vpand %ymm6, %ymm1, %ymm6 + LONG $0x007de2c4; BYTE $0xed // vpshufb %ymm5, %ymm0, %ymm5 + LONG $0x007de2c4; BYTE $0xf6 // vpshufb %ymm6, %ymm0, %ymm6 + LONG $0xf5fccdc5 // vpaddb %ymm5, %ymm6, %ymm6 + LONG $0xe9dbbdc5 // vpand %ymm1, %ymm8, %ymm5 + LONG $0x7145c1c4; WORD $0x04d0 // vpsrlw $4, %ymm8, %ymm7 + LONG $0xffdbf5c5 // vpand %ymm7, %ymm1, %ymm7 + LONG $0x007de2c4; BYTE $0xed // vpshufb %ymm5, %ymm0, %ymm5 + LONG $0x007de2c4; BYTE $0xff // vpshufb %ymm7, %ymm0, %ymm7 + LONG $0xedfcc5c5 // vpaddb %ymm5, %ymm7, %ymm5 + LONG $0xeaf6d5c5 // vpsadbw %ymm2, %ymm5, %ymm5 + LONG $0x16f9e3c4; WORD $0x01eb // vpextrq $1, %xmm5, %rbx + LONG $0xf2f6cdc5 // vpsadbw %ymm2, %ymm6, %ymm6 + LONG $0x397de3c4; WORD $0x01f7 // vextracti128 $1, 
%ymm6, %xmm7 + LONG $0xf7d4c9c5 // vpaddq %xmm7, %xmm6, %xmm6 + LONG $0xfe70f9c5; BYTE $0xee // vpshufd $238, %xmm6, %xmm7 # xmm7 = xmm6[2,3,2,3] + LONG $0xf7d4c9c5 // vpaddq %xmm7, %xmm6, %xmm6 + LONG $0x7ef9e1c4; BYTE $0xf1 // vmovq %xmm6, %rcx + WORD $0x0148; BYTE $0xd9 // addq %rbx, %rcx + LONG $0x7ef9e1c4; BYTE $0xea // vmovq %xmm5, %rdx + LONG $0x397de3c4; WORD $0x01ed // vextracti128 $1, %ymm5, %xmm5 + WORD $0x0148; BYTE $0xd1 // addq %rdx, %rcx + LONG $0x7ef9e1c4; BYTE $0xea // vmovq %xmm5, %rdx + WORD $0x0148; BYTE $0xd1 // addq %rdx, %rcx + LONG $0x16f9e3c4; WORD $0x01ea // vpextrq $1, %xmm5, %rdx + LONG $0xe9dbddc5 // vpand %ymm1, %ymm4, %ymm5 + LONG $0xd471ddc5; BYTE $0x04 // vpsrlw $4, %ymm4, %ymm4 + LONG $0xe4dbf5c5 // vpand %ymm4, %ymm1, %ymm4 + LONG $0x007de2c4; BYTE $0xed // vpshufb %ymm5, %ymm0, %ymm5 + LONG $0x007de2c4; BYTE $0xe4 // vpshufb %ymm4, %ymm0, %ymm4 + LONG $0xe5fcddc5 // vpaddb %ymm5, %ymm4, %ymm4 + LONG $0xe2f6ddc5 // vpsadbw %ymm2, %ymm4, %ymm4 + WORD $0x014c; BYTE $0xf9 // addq %r15, %rcx + LONG $0x7ef9e1c4; BYTE $0xe3 // vmovq %xmm4, %rbx + WORD $0x0148; BYTE $0xd1 // addq %rdx, %rcx + LONG $0x16f9e3c4; WORD $0x01e2 // vpextrq $1, %xmm4, %rdx + LONG $0x397de3c4; WORD $0x01e4 // vextracti128 $1, %ymm4, %xmm4 + WORD $0x0148; BYTE $0xd1 // addq %rdx, %rcx + LONG $0x7ef9e1c4; BYTE $0xe2 // vmovq %xmm4, %rdx + WORD $0x0148; BYTE $0xd9 // addq %rbx, %rcx + LONG $0x16f9e3c4; WORD $0x01e3 // vpextrq $1, %xmm4, %rbx + WORD $0x0148; BYTE $0xd1 // addq %rdx, %rcx + LONG $0xe1dbe5c5 // vpand %ymm1, %ymm3, %ymm4 + LONG $0xd371e5c5; BYTE $0x04 // vpsrlw $4, %ymm3, %ymm3 + LONG $0xdbdbf5c5 // vpand %ymm3, %ymm1, %ymm3 + LONG $0x007de2c4; BYTE $0xe4 // vpshufb %ymm4, %ymm0, %ymm4 + LONG $0x007de2c4; BYTE $0xdb // vpshufb %ymm3, %ymm0, %ymm3 + LONG $0xdcfce5c5 // vpaddb %ymm4, %ymm3, %ymm3 + LONG $0xdaf6e5c5 // vpsadbw %ymm2, %ymm3, %ymm3 + WORD $0x0148; BYTE $0xd9 // addq %rbx, %rcx + LONG $0x16f9e3c4; WORD $0x01da // vpextrq $1, %xmm3, %rdx + 
WORD $0x0148; BYTE $0xd1 // addq %rdx, %rcx + LONG $0x7ef9e1c4; BYTE $0xda // vmovq %xmm3, %rdx + LONG $0x397de3c4; WORD $0x01db // vextracti128 $1, %ymm3, %xmm3 + WORD $0x0148; BYTE $0xd1 // addq %rdx, %rcx + LONG $0x7ef9e1c4; BYTE $0xda // vmovq %xmm3, %rdx + WORD $0x0148; BYTE $0xd1 // addq %rdx, %rcx + LONG $0x16f9e3c4; WORD $0x01da // vpextrq $1, %xmm3, %rdx + WORD $0x8949; BYTE $0xcf // movq %rcx, %r15 + WORD $0x0149; BYTE $0xd7 // addq %rdx, %r15 + WORD $0xc083; BYTE $0xf0 // addl $-16, %eax + LONG $0x80ef8348 // subq $-128, %rdi + LONG $0x80ee8348 // subq $-128, %rsi + WORD $0xf883; BYTE $0x0f // cmpl $15, %eax + JA LBB0_3 + WORD $0xf883; BYTE $0x04 // cmpl $4, %eax + JB LBB0_7 + +LBB0_5: + LONG $0xd2efe9c5 // vpxor %xmm2, %xmm2, %xmm2 + +LBB0_6: + LONG $0x1e6ffec5 // vmovdqu (%rsi), %ymm3 + LONG $0x1fefe5c5 // vpxor (%rdi), %ymm3, %ymm3 + LONG $0xe1dbe5c5 // vpand %ymm1, %ymm3, %ymm4 + LONG $0xd371e5c5; BYTE $0x04 // vpsrlw $4, %ymm3, %ymm3 + LONG $0xdbdbf5c5 // vpand %ymm3, %ymm1, %ymm3 + LONG $0x007de2c4; BYTE $0xe4 // vpshufb %ymm4, %ymm0, %ymm4 + LONG $0x007de2c4; BYTE $0xdb // vpshufb %ymm3, %ymm0, %ymm3 + LONG $0xdcfce5c5 // vpaddb %ymm4, %ymm3, %ymm3 + LONG $0xdaf6e5c5 // vpsadbw %ymm2, %ymm3, %ymm3 + LONG $0x397de3c4; WORD $0x01dc // vextracti128 $1, %ymm3, %xmm4 + LONG $0xdcd4e1c5 // vpaddq %xmm4, %xmm3, %xmm3 + LONG $0xe370f9c5; BYTE $0xee // vpshufd $238, %xmm3, %xmm4 # xmm4 = xmm3[2,3,2,3] + LONG $0xdcd4e1c5 // vpaddq %xmm4, %xmm3, %xmm3 + LONG $0x7ef9e1c4; BYTE $0xd9 // vmovq %xmm3, %rcx + WORD $0x0149; BYTE $0xcf // addq %rcx, %r15 + WORD $0xc083; BYTE $0xfc // addl $-4, %eax + LONG $0x20c78348 // addq $32, %rdi + LONG $0x20c68348 // addq $32, %rsi + WORD $0xf883; BYTE $0x03 // cmpl $3, %eax + JA LBB0_6 + +LBB0_7: + WORD $0xc085 // testl %eax, %eax + JE LBB0_22 + WORD $0x8b4d; BYTE $0x11 // movq (%r9), %r10 + LONG $0x08418b4d // movq 8(%r9), %r8 + LONG $0x10598b49 // movq 16(%r9), %rbx + LONG $0x18498b4d // movq 24(%r9), %r9 + WORD $0x508d; 
BYTE $0xff // leal -1(%rax), %edx + WORD $0xfa83; BYTE $0x07 // cmpl $7, %edx + JAE LBB0_17 + WORD $0x8949; BYTE $0xfb // movq %rdi, %r11 + WORD $0x8949; BYTE $0xf6 // movq %rsi, %r14 + JMP LBB0_20 + +LBB0_12: + LONG $0x01c48349 // addq $1, %r12 + WORD $0x894c; BYTE $0xe2 // movq %r12, %rdx + LONG $0xf0e28348 // andq $-16, %rdx + WORD $0xd029 // subl %edx, %eax + LONG $0xd6348d4c // leaq (%rsi,%rdx,8), %r14 + LONG $0xd72c8d4c // leaq (%rdi,%rdx,8), %r13 + LONG $0x6ef9c1c4; BYTE $0xc0 // vmovq %r8, %xmm0 + LONG $0x597de2c4; BYTE $0xc0 // vpbroadcastq %xmm0, %ymm0 + LONG $0x6ef9c1c4; BYTE $0xca // vmovq %r10, %xmm1 + LONG $0x597de2c4; BYTE $0xc9 // vpbroadcastq %xmm1, %ymm1 + LONG $0x6ef9c1c4; BYTE $0xd3 // vmovq %r11, %xmm2 + LONG $0x597de2c4; BYTE $0xd2 // vpbroadcastq %xmm2, %ymm2 + LONG $0x6ef9c1c4; BYTE $0xd9 // vmovq %r9, %xmm3 + LONG $0x597de2c4; BYTE $0xdb // vpbroadcastq %xmm3, %ymm3 + QUAD $0x00000000e50c8d4a // leaq (,%r12,8), %rcx + LONG $0x80e18348 // andq $-128, %rcx + LONG $0xe4efd9c5 // vpxor %xmm4, %xmm4, %xmm4 + WORD $0xdb31 // xorl %ebx, %ebx + LONG $0xedefd1c5 // vpxor %xmm5, %xmm5, %xmm5 + LONG $0xf6efc9c5 // vpxor %xmm6, %xmm6, %xmm6 + LONG $0xffefc1c5 // vpxor %xmm7, %xmm7, %xmm7 + +LBB0_13: + LONG $0x046f7ec5; BYTE $0x1e // vmovdqu (%rsi,%rbx), %ymm8 + LONG $0x4c6f7ec5; WORD $0x201e // vmovdqu 32(%rsi,%rbx), %ymm9 + LONG $0x546f7ec5; WORD $0x401e // vmovdqu 64(%rsi,%rbx), %ymm10 + LONG $0x5c6f7ec5; WORD $0x601e // vmovdqu 96(%rsi,%rbx), %ymm11 + LONG $0x04ef3dc5; BYTE $0x1f // vpxor (%rdi,%rbx), %ymm8, %ymm8 + LONG $0x4cef35c5; WORD $0x201f // vpxor 32(%rdi,%rbx), %ymm9, %ymm9 + LONG $0x54ef2dc5; WORD $0x401f // vpxor 64(%rdi,%rbx), %ymm10, %ymm10 + LONG $0x5cef25c5; WORD $0x601f // vpxor 96(%rdi,%rbx), %ymm11, %ymm11 + LONG $0xe0db3dc5 // vpand %ymm0, %ymm8, %ymm12 + LONG $0xe8db35c5 // vpand %ymm0, %ymm9, %ymm13 + LONG $0xf0db2dc5 // vpand %ymm0, %ymm10, %ymm14 + LONG $0xf8db25c5 // vpand %ymm0, %ymm11, %ymm15 + LONG $0x733dc1c4; WORD 
$0x01d0 // vpsrlq $1, %ymm8, %ymm8 + LONG $0x7335c1c4; WORD $0x01d1 // vpsrlq $1, %ymm9, %ymm9 + LONG $0x732dc1c4; WORD $0x01d2 // vpsrlq $1, %ymm10, %ymm10 + LONG $0x7325c1c4; WORD $0x01d3 // vpsrlq $1, %ymm11, %ymm11 + LONG $0xc0db3dc5 // vpand %ymm0, %ymm8, %ymm8 + LONG $0xd43d41c4; BYTE $0xc4 // vpaddq %ymm12, %ymm8, %ymm8 + LONG $0xc8db35c5 // vpand %ymm0, %ymm9, %ymm9 + LONG $0xd43541c4; BYTE $0xcd // vpaddq %ymm13, %ymm9, %ymm9 + LONG $0xd0db2dc5 // vpand %ymm0, %ymm10, %ymm10 + LONG $0xd42d41c4; BYTE $0xd6 // vpaddq %ymm14, %ymm10, %ymm10 + LONG $0xd8db25c5 // vpand %ymm0, %ymm11, %ymm11 + LONG $0xd42541c4; BYTE $0xdf // vpaddq %ymm15, %ymm11, %ymm11 + LONG $0xe1db3dc5 // vpand %ymm1, %ymm8, %ymm12 + LONG $0xe9db35c5 // vpand %ymm1, %ymm9, %ymm13 + LONG $0xf1db2dc5 // vpand %ymm1, %ymm10, %ymm14 + LONG $0xf9db25c5 // vpand %ymm1, %ymm11, %ymm15 + LONG $0x733dc1c4; WORD $0x02d0 // vpsrlq $2, %ymm8, %ymm8 + LONG $0x7335c1c4; WORD $0x02d1 // vpsrlq $2, %ymm9, %ymm9 + LONG $0x732dc1c4; WORD $0x02d2 // vpsrlq $2, %ymm10, %ymm10 + LONG $0x7325c1c4; WORD $0x02d3 // vpsrlq $2, %ymm11, %ymm11 + LONG $0xc1db3dc5 // vpand %ymm1, %ymm8, %ymm8 + LONG $0xd43d41c4; BYTE $0xc4 // vpaddq %ymm12, %ymm8, %ymm8 + LONG $0xc9db35c5 // vpand %ymm1, %ymm9, %ymm9 + LONG $0xd43541c4; BYTE $0xcd // vpaddq %ymm13, %ymm9, %ymm9 + LONG $0xd1db2dc5 // vpand %ymm1, %ymm10, %ymm10 + LONG $0xd42d41c4; BYTE $0xd6 // vpaddq %ymm14, %ymm10, %ymm10 + LONG $0xd9db25c5 // vpand %ymm1, %ymm11, %ymm11 + LONG $0xd42541c4; BYTE $0xdf // vpaddq %ymm15, %ymm11, %ymm11 + LONG $0xe2db3dc5 // vpand %ymm2, %ymm8, %ymm12 + LONG $0xeadb35c5 // vpand %ymm2, %ymm9, %ymm13 + LONG $0xf2db2dc5 // vpand %ymm2, %ymm10, %ymm14 + LONG $0xfadb25c5 // vpand %ymm2, %ymm11, %ymm15 + LONG $0x733dc1c4; WORD $0x04d0 // vpsrlq $4, %ymm8, %ymm8 + LONG $0x7335c1c4; WORD $0x04d1 // vpsrlq $4, %ymm9, %ymm9 + LONG $0x732dc1c4; WORD $0x04d2 // vpsrlq $4, %ymm10, %ymm10 + LONG $0x7325c1c4; WORD $0x04d3 // vpsrlq $4, %ymm11, %ymm11 
+ LONG $0xc2db3dc5 // vpand %ymm2, %ymm8, %ymm8 + LONG $0xd43d41c4; BYTE $0xc4 // vpaddq %ymm12, %ymm8, %ymm8 + LONG $0xcadb35c5 // vpand %ymm2, %ymm9, %ymm9 + LONG $0xd43541c4; BYTE $0xcd // vpaddq %ymm13, %ymm9, %ymm9 + LONG $0xd2db2dc5 // vpand %ymm2, %ymm10, %ymm10 + LONG $0xd42d41c4; BYTE $0xd6 // vpaddq %ymm14, %ymm10, %ymm10 + LONG $0xdadb25c5 // vpand %ymm2, %ymm11, %ymm11 + LONG $0xd42541c4; BYTE $0xdf // vpaddq %ymm15, %ymm11, %ymm11 + LONG $0xd3739dc5; BYTE $0x20 // vpsrlq $32, %ymm3, %ymm12 + LONG $0xf43d41c4; BYTE $0xec // vpmuludq %ymm12, %ymm8, %ymm13 + LONG $0x730dc1c4; WORD $0x20d0 // vpsrlq $32, %ymm8, %ymm14 + LONG $0xf3f40dc5 // vpmuludq %ymm3, %ymm14, %ymm14 + LONG $0xd41541c4; BYTE $0xee // vpaddq %ymm14, %ymm13, %ymm13 + LONG $0x7315c1c4; WORD $0x20f5 // vpsllq $32, %ymm13, %ymm13 + LONG $0xc3f43dc5 // vpmuludq %ymm3, %ymm8, %ymm8 + LONG $0xd43d41c4; BYTE $0xc5 // vpaddq %ymm13, %ymm8, %ymm8 + LONG $0xf43541c4; BYTE $0xec // vpmuludq %ymm12, %ymm9, %ymm13 + LONG $0x730dc1c4; WORD $0x20d1 // vpsrlq $32, %ymm9, %ymm14 + LONG $0xf3f40dc5 // vpmuludq %ymm3, %ymm14, %ymm14 + LONG $0xd41541c4; BYTE $0xee // vpaddq %ymm14, %ymm13, %ymm13 + LONG $0x7315c1c4; WORD $0x20f5 // vpsllq $32, %ymm13, %ymm13 + LONG $0xcbf435c5 // vpmuludq %ymm3, %ymm9, %ymm9 + LONG $0xd43541c4; BYTE $0xcd // vpaddq %ymm13, %ymm9, %ymm9 + LONG $0xf42d41c4; BYTE $0xec // vpmuludq %ymm12, %ymm10, %ymm13 + LONG $0x730dc1c4; WORD $0x20d2 // vpsrlq $32, %ymm10, %ymm14 + LONG $0xf3f40dc5 // vpmuludq %ymm3, %ymm14, %ymm14 + LONG $0xd41541c4; BYTE $0xee // vpaddq %ymm14, %ymm13, %ymm13 + LONG $0x7315c1c4; WORD $0x20f5 // vpsllq $32, %ymm13, %ymm13 + LONG $0xd3f42dc5 // vpmuludq %ymm3, %ymm10, %ymm10 + LONG $0xd42d41c4; BYTE $0xd5 // vpaddq %ymm13, %ymm10, %ymm10 + LONG $0xf42541c4; BYTE $0xe4 // vpmuludq %ymm12, %ymm11, %ymm12 + LONG $0x7315c1c4; WORD $0x20d3 // vpsrlq $32, %ymm11, %ymm13 + LONG $0xebf415c5 // vpmuludq %ymm3, %ymm13, %ymm13 + LONG $0xd41d41c4; BYTE $0xe5 // vpaddq 
%ymm13, %ymm12, %ymm12 + LONG $0x731dc1c4; WORD $0x20f4 // vpsllq $32, %ymm12, %ymm12 + LONG $0xdbf425c5 // vpmuludq %ymm3, %ymm11, %ymm11 + LONG $0xd42541c4; BYTE $0xdc // vpaddq %ymm12, %ymm11, %ymm11 + LONG $0x733dc1c4; WORD $0x38d0 // vpsrlq $56, %ymm8, %ymm8 + LONG $0xe4d4bdc5 // vpaddq %ymm4, %ymm8, %ymm4 + LONG $0x733dc1c4; WORD $0x38d1 // vpsrlq $56, %ymm9, %ymm8 + LONG $0xedd4bdc5 // vpaddq %ymm5, %ymm8, %ymm5 + LONG $0x733dc1c4; WORD $0x38d2 // vpsrlq $56, %ymm10, %ymm8 + LONG $0xf6d4bdc5 // vpaddq %ymm6, %ymm8, %ymm6 + LONG $0x733dc1c4; WORD $0x38d3 // vpsrlq $56, %ymm11, %ymm8 + LONG $0xffd4bdc5 // vpaddq %ymm7, %ymm8, %ymm7 + LONG $0x80eb8348 // subq $-128, %rbx + WORD $0x3948; BYTE $0xd9 // cmpq %rbx, %rcx + JNE LBB0_13 + LONG $0xc4d4d5c5 // vpaddq %ymm4, %ymm5, %ymm0 + LONG $0xc0d4cdc5 // vpaddq %ymm0, %ymm6, %ymm0 + LONG $0xc0d4c5c5 // vpaddq %ymm0, %ymm7, %ymm0 + LONG $0x397de3c4; WORD $0x01c1 // vextracti128 $1, %ymm0, %xmm1 + LONG $0xc1d4f9c5 // vpaddq %xmm1, %xmm0, %xmm0 + LONG $0xc870f9c5; BYTE $0xee // vpshufd $238, %xmm0, %xmm1 # xmm1 = xmm0[2,3,2,3] + LONG $0xc1d4f9c5 // vpaddq %xmm1, %xmm0, %xmm0 + LONG $0x7ef9c1c4; BYTE $0xc7 // vmovq %xmm0, %r15 + WORD $0x3949; BYTE $0xd4 // cmpq %rdx, %r12 + LONG $0x24248b4c // movq (%rsp), %r12 # 8-byte Reload + JE LBB0_22 + +LBB0_15: + WORD $0xc089 // movl %eax, %eax + WORD $0xf631 // xorl %esi, %esi + +LBB0_16: + LONG $0xf63c8b49 // movq (%r14,%rsi,8), %rdi + LONG $0xf57c3349; BYTE $0x00 // xorq (%r13,%rsi,8), %rdi + WORD $0x894c; BYTE $0xc3 // movq %r8, %rbx + WORD $0x2148; BYTE $0xfb // andq %rdi, %rbx + WORD $0xd148; BYTE $0xef // shrq %rdi + WORD $0x214c; BYTE $0xc7 // andq %r8, %rdi + WORD $0x0148; BYTE $0xdf // addq %rbx, %rdi + WORD $0x8948; BYTE $0xfb // movq %rdi, %rbx + WORD $0x214c; BYTE $0xd3 // andq %r10, %rbx + LONG $0x02efc148 // shrq $2, %rdi + WORD $0x214c; BYTE $0xd7 // andq %r10, %rdi + WORD $0x0148; BYTE $0xdf // addq %rbx, %rdi + WORD $0x8948; BYTE $0xfb // movq %rdi, %rbx + WORD 
$0x214c; BYTE $0xdb // andq %r11, %rbx + LONG $0x04efc148 // shrq $4, %rdi + WORD $0x214c; BYTE $0xdf // andq %r11, %rdi + WORD $0x0148; BYTE $0xdf // addq %rbx, %rdi + LONG $0xf9af0f49 // imulq %r9, %rdi + LONG $0x38efc148 // shrq $56, %rdi + WORD $0x0149; BYTE $0xff // addq %rdi, %r15 + LONG $0x01c68348 // addq $1, %rsi + WORD $0xf039 // cmpl %esi, %eax + JNE LBB0_16 + JMP LBB0_22 + +LBB0_17: + WORD $0x894d; BYTE $0xe5 // movq %r12, %r13 + LONG $0x01c28348 // addq $1, %rdx + QUAD $0x0001fffffff0bc49; WORD $0x0000 // movabsq $8589934576, %r12 # imm = 0x1FFFFFFF0 + LONG $0x08cc8349 // orq $8, %r12 + WORD $0x2149; BYTE $0xd4 // andq %rdx, %r12 + LONG $0xe71c8d4e // leaq (%rdi,%r12,8), %r11 + LONG $0xe6348d4e // leaq (%rsi,%r12,8), %r14 + WORD $0x2944; BYTE $0xe0 // subl %r12d, %eax + LONG $0x6ef9c1c4; BYTE $0xc7 // vmovq %r15, %xmm0 + LONG $0x6ef9c1c4; BYTE $0xca // vmovq %r10, %xmm1 + LONG $0x597de2c4; BYTE $0xc9 // vpbroadcastq %xmm1, %ymm1 + LONG $0x6ef9c1c4; BYTE $0xd0 // vmovq %r8, %xmm2 + LONG $0x597de2c4; BYTE $0xd2 // vpbroadcastq %xmm2, %ymm2 + LONG $0x6ef9e1c4; BYTE $0xdb // vmovq %rbx, %xmm3 + LONG $0x597de2c4; BYTE $0xdb // vpbroadcastq %xmm3, %ymm3 + LONG $0x6ef9c1c4; BYTE $0xe1 // vmovq %r9, %xmm4 + LONG $0x597de2c4; BYTE $0xec // vpbroadcastq %xmm4, %ymm5 + LONG $0xe4efd9c5 // vpxor %xmm4, %xmm4, %xmm4 + WORD $0xc931 // xorl %ecx, %ecx + LONG $0xd573cdc5; BYTE $0x20 // vpsrlq $32, %ymm5, %ymm6 + +LBB0_18: + LONG $0x3c6ffec5; BYTE $0xce // vmovdqu (%rsi,%rcx,8), %ymm7 + LONG $0x446f7ec5; WORD $0x20ce // vmovdqu 32(%rsi,%rcx,8), %ymm8 + LONG $0x3cefc5c5; BYTE $0xcf // vpxor (%rdi,%rcx,8), %ymm7, %ymm7 + LONG $0x44ef3dc5; WORD $0x20cf // vpxor 32(%rdi,%rcx,8), %ymm8, %ymm8 + LONG $0xcfdb75c5 // vpand %ymm7, %ymm1, %ymm9 + LONG $0xd1db3dc5 // vpand %ymm1, %ymm8, %ymm10 + LONG $0xd773c5c5; BYTE $0x01 // vpsrlq $1, %ymm7, %ymm7 + LONG $0x733dc1c4; WORD $0x01d0 // vpsrlq $1, %ymm8, %ymm8 + LONG $0xf9dbc5c5 // vpand %ymm1, %ymm7, %ymm7 + LONG $0xffd4b5c5 // 
vpaddq %ymm7, %ymm9, %ymm7 + LONG $0xc1db3dc5 // vpand %ymm1, %ymm8, %ymm8 + LONG $0xd43d41c4; BYTE $0xc2 // vpaddq %ymm10, %ymm8, %ymm8 + LONG $0xcadb45c5 // vpand %ymm2, %ymm7, %ymm9 + LONG $0xd2db3dc5 // vpand %ymm2, %ymm8, %ymm10 + LONG $0xd773c5c5; BYTE $0x02 // vpsrlq $2, %ymm7, %ymm7 + LONG $0x733dc1c4; WORD $0x02d0 // vpsrlq $2, %ymm8, %ymm8 + LONG $0xfadbc5c5 // vpand %ymm2, %ymm7, %ymm7 + LONG $0xffd4b5c5 // vpaddq %ymm7, %ymm9, %ymm7 + LONG $0xc2db3dc5 // vpand %ymm2, %ymm8, %ymm8 + LONG $0xd43d41c4; BYTE $0xc2 // vpaddq %ymm10, %ymm8, %ymm8 + LONG $0xcbdb45c5 // vpand %ymm3, %ymm7, %ymm9 + LONG $0xd3db3dc5 // vpand %ymm3, %ymm8, %ymm10 + LONG $0xd773c5c5; BYTE $0x04 // vpsrlq $4, %ymm7, %ymm7 + LONG $0x733dc1c4; WORD $0x04d0 // vpsrlq $4, %ymm8, %ymm8 + LONG $0xfbdbc5c5 // vpand %ymm3, %ymm7, %ymm7 + LONG $0xffd4b5c5 // vpaddq %ymm7, %ymm9, %ymm7 + LONG $0xc3db3dc5 // vpand %ymm3, %ymm8, %ymm8 + LONG $0xd43d41c4; BYTE $0xc2 // vpaddq %ymm10, %ymm8, %ymm8 + LONG $0xcef445c5 // vpmuludq %ymm6, %ymm7, %ymm9 + LONG $0xd773adc5; BYTE $0x20 // vpsrlq $32, %ymm7, %ymm10 + LONG $0xd5f42dc5 // vpmuludq %ymm5, %ymm10, %ymm10 + LONG $0xd43541c4; BYTE $0xca // vpaddq %ymm10, %ymm9, %ymm9 + LONG $0x7335c1c4; WORD $0x20f1 // vpsllq $32, %ymm9, %ymm9 + LONG $0xfdf4c5c5 // vpmuludq %ymm5, %ymm7, %ymm7 + LONG $0xffd4b5c5 // vpaddq %ymm7, %ymm9, %ymm7 + LONG $0xcef43dc5 // vpmuludq %ymm6, %ymm8, %ymm9 + LONG $0x732dc1c4; WORD $0x20d0 // vpsrlq $32, %ymm8, %ymm10 + LONG $0xd5f42dc5 // vpmuludq %ymm5, %ymm10, %ymm10 + LONG $0xd43541c4; BYTE $0xca // vpaddq %ymm10, %ymm9, %ymm9 + LONG $0x7335c1c4; WORD $0x20f1 // vpsllq $32, %ymm9, %ymm9 + LONG $0xc5f43dc5 // vpmuludq %ymm5, %ymm8, %ymm8 + LONG $0xd43d41c4; BYTE $0xc1 // vpaddq %ymm9, %ymm8, %ymm8 + LONG $0xd773c5c5; BYTE $0x38 // vpsrlq $56, %ymm7, %ymm7 + LONG $0xc0d4c5c5 // vpaddq %ymm0, %ymm7, %ymm0 + LONG $0x7345c1c4; WORD $0x38d0 // vpsrlq $56, %ymm8, %ymm7 + LONG $0xe4d4c5c5 // vpaddq %ymm4, %ymm7, %ymm4 + LONG 
$0x08c18348 // addq $8, %rcx + WORD $0x3949; BYTE $0xcc // cmpq %rcx, %r12 + JNE LBB0_18 + LONG $0xc0d4ddc5 // vpaddq %ymm0, %ymm4, %ymm0 + LONG $0x397de3c4; WORD $0x01c1 // vextracti128 $1, %ymm0, %xmm1 + LONG $0xc1d4f9c5 // vpaddq %xmm1, %xmm0, %xmm0 + LONG $0xc870f9c5; BYTE $0xee // vpshufd $238, %xmm0, %xmm1 # xmm1 = xmm0[2,3,2,3] + LONG $0xc1d4f9c5 // vpaddq %xmm1, %xmm0, %xmm0 + LONG $0x7ef9c1c4; BYTE $0xc7 // vmovq %xmm0, %r15 + WORD $0x394c; BYTE $0xe2 // cmpq %r12, %rdx + WORD $0x894d; BYTE $0xec // movq %r13, %r12 + JE LBB0_22 + +LBB0_20: + WORD $0xc089 // movl %eax, %eax + WORD $0xf631 // xorl %esi, %esi + +LBB0_21: + LONG $0xf63c8b49 // movq (%r14,%rsi,8), %rdi + LONG $0xf33c3349 // xorq (%r11,%rsi,8), %rdi + WORD $0x894c; BYTE $0xd1 // movq %r10, %rcx + WORD $0x2148; BYTE $0xf9 // andq %rdi, %rcx + WORD $0xd148; BYTE $0xef // shrq %rdi + WORD $0x214c; BYTE $0xd7 // andq %r10, %rdi + WORD $0x0148; BYTE $0xcf // addq %rcx, %rdi + WORD $0x8948; BYTE $0xf9 // movq %rdi, %rcx + WORD $0x214c; BYTE $0xc1 // andq %r8, %rcx + LONG $0x02efc148 // shrq $2, %rdi + WORD $0x214c; BYTE $0xc7 // andq %r8, %rdi + WORD $0x0148; BYTE $0xcf // addq %rcx, %rdi + WORD $0x8948; BYTE $0xf9 // movq %rdi, %rcx + WORD $0x2148; BYTE $0xd9 // andq %rbx, %rcx + LONG $0x04efc148 // shrq $4, %rdi + WORD $0x2148; BYTE $0xdf // andq %rbx, %rdi + WORD $0x0148; BYTE $0xcf // addq %rcx, %rdi + LONG $0xf9af0f49 // imulq %r9, %rdi + LONG $0x38efc148 // shrq $56, %rdi + WORD $0x0149; BYTE $0xff // addq %rdi, %r15 + LONG $0x01c68348 // addq $1, %rsi + WORD $0xf039 // cmpl %esi, %eax + JNE LBB0_21 + +LBB0_22: + LONG $0x243c894d // movq %r15, (%r12) + LONG $0xd8658d48 // leaq -40(%rbp), %rsp + BYTE $0x5b // popq %rbx + WORD $0x5c41 // popq %r12 + WORD $0x5d41 // popq %r13 + WORD $0x5e41 // popq %r14 + WORD $0x5f41 // popq %r15 + BYTE $0x5d // popq %rbp + WORD $0xf8c5; BYTE $0x77 // vzeroupper + BYTE $0xc3 // retq diff --git 
a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/hamming_bitwise_avx512_amd64.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/hamming_bitwise_avx512_amd64.go new file mode 100644 index 0000000000000000000000000000000000000000..56566439e880a8e253ba345ec9ecff1c10293c24 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/hamming_bitwise_avx512_amd64.go @@ -0,0 +1,42 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +//go:build !noasm && amd64 + +// AUTO-GENERATED BY GOAT -- DO NOT EDIT + +package asm + +import "unsafe" + +//go:noescape +func popcnt_AVX2_lookup_512(vec, low_mask_vec, lookup_vec unsafe.Pointer) + +//go:noescape +func popcount(v_ptr, result, constants_avx512 unsafe.Pointer) + +//go:noescape +func CSA(h, l, a_ptr, b_ptr, c_ptr unsafe.Pointer) + +//go:noescape +func simd_sum_epu64_256(v_ptr unsafe.Pointer) + +//go:noescape +func simd_sum_epu64_512(v_ptr unsafe.Pointer) + +//go:noescape +func popcnt_AVX512_harleyseal(data, size_ptr, constants_avx512 unsafe.Pointer) + +//go:noescape +func popcnt_64bit_512(src, popcnt_constants unsafe.Pointer) + +//go:noescape +func hamming_bitwise_512(a, b, res, len, popcnt_constants unsafe.Pointer) diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/hamming_bitwise_avx512_amd64.s b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/hamming_bitwise_avx512_amd64.s new file mode 100644 index 0000000000000000000000000000000000000000..9724990d9f27ea0104c93b83eb5863d68b6ba2de --- /dev/null +++ 
b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/hamming_bitwise_avx512_amd64.s @@ -0,0 +1,788 @@ +//go:build !noasm && amd64 +// AUTO-GENERATED BY GOAT -- DO NOT EDIT + +TEXT ·popcnt_AVX2_lookup_512(SB), $0-32 + MOVQ vec+0(FP), DI + MOVQ low_mask_vec+8(FP), SI + MOVQ lookup_vec+16(FP), DX + +TEXT ·popcount(SB), $0-32 + MOVQ v_ptr+0(FP), DI + MOVQ result+8(FP), SI + MOVQ constants_avx512+16(FP), DX + BYTE $0x55 // pushq %rbp + WORD $0x8948; BYTE $0xe5 // movq %rsp, %rbp + LONG $0xf8e48348 // andq $-8, %rsp + LONG $0x48fdf262; WORD $0x4259; BYTE $0x01 // vpbroadcastq 8(%rdx), %zmm0 + LONG $0x48fdf262; WORD $0x4a59; BYTE $0x02 // vpbroadcastq 16(%rdx), %zmm1 + LONG $0x48fdf162; WORD $0x176f // vmovdqa64 (%rdi), %zmm2 + LONG $0x4865f162; WORD $0xd271; BYTE $0x01 // vpsrlw $1, %zmm2, %zmm3 + LONG $0x58e5f162; WORD $0x1adb // vpandq (%rdx){1to8}, %zmm3, %zmm3 + LONG $0x486df162; WORD $0xd3f8 // vpsubb %zmm3, %zmm2, %zmm2 + LONG $0x4865f162; WORD $0xd271; BYTE $0x02 // vpsrlw $2, %zmm2, %zmm3 + LONG $0x48fdf162; WORD $0xdbdb // vpandq %zmm3, %zmm0, %zmm3 + LONG $0x48edf162; WORD $0xc0db // vpandq %zmm0, %zmm2, %zmm0 + LONG $0x487df162; WORD $0xc3fc // vpaddb %zmm3, %zmm0, %zmm0 + LONG $0x486df162; WORD $0xd071; BYTE $0x04 // vpsrlw $4, %zmm0, %zmm2 + LONG $0x487df162; WORD $0xc2fc // vpaddb %zmm2, %zmm0, %zmm0 + LONG $0x48fdf162; WORD $0xc1db // vpandq %zmm1, %zmm0, %zmm0 + LONG $0xc9eff1c5 // vpxor %xmm1, %xmm1, %xmm1 + LONG $0x487df162; WORD $0xc1f6 // vpsadbw %zmm1, %zmm0, %zmm0 + LONG $0x48fdf162; WORD $0x067f // vmovdqa64 %zmm0, (%rsi) + WORD $0x8948; BYTE $0xec // movq %rbp, %rsp + BYTE $0x5d // popq %rbp + WORD $0xf8c5; BYTE $0x77 // vzeroupper + BYTE $0xc3 // retq + +TEXT ·CSA(SB), $0-32 + MOVQ h+0(FP), DI + MOVQ l+8(FP), SI + MOVQ a_ptr+16(FP), DX + MOVQ b_ptr+24(FP), CX + MOVQ c_ptr+32(FP), R8 + BYTE $0x55 // pushq %rbp + WORD $0x8948; BYTE $0xe5 // movq %rsp, %rbp + LONG $0xf8e48348 // andq $-8, %rsp + LONG $0x48fdf162; WORD 
$0x026f // vmovdqa64 (%rdx), %zmm0 + LONG $0x48fdf162; WORD $0x096f // vmovdqa64 (%rcx), %zmm1 + LONG $0x48fdd162; WORD $0x106f // vmovdqa64 (%r8), %zmm2 + LONG $0x48fdf162; WORD $0xda6f // vmovdqa64 %zmm2, %zmm3 + LONG $0x4875f362; WORD $0xd825; BYTE $0x96 // vpternlogd $150, %zmm0, %zmm1, %zmm3 + LONG $0x48fdf162; WORD $0x1e7f // vmovdqa64 %zmm3, (%rsi) + LONG $0x4875f362; WORD $0xd025; BYTE $0xe8 // vpternlogd $232, %zmm0, %zmm1, %zmm2 + LONG $0x48fdf162; WORD $0x177f // vmovdqa64 %zmm2, (%rdi) + WORD $0x8948; BYTE $0xec // movq %rbp, %rsp + BYTE $0x5d // popq %rbp + WORD $0xf8c5; BYTE $0x77 // vzeroupper + BYTE $0xc3 // retq + +TEXT ·simd_sum_epu64_256(SB), $0-32 + MOVQ v_ptr+0(FP), DI + BYTE $0x55 // pushq %rbp + WORD $0x8948; BYTE $0xe5 // movq %rsp, %rbp + LONG $0xf8e48348 // andq $-8, %rsp + LONG $0x076ff9c5 // vmovdqa (%rdi), %xmm0 + LONG $0x47d4f9c5; BYTE $0x10 // vpaddq 16(%rdi), %xmm0, %xmm0 + LONG $0xc870f9c5; BYTE $0xee // vpshufd $238, %xmm0, %xmm1 # xmm1 = xmm0[2,3,2,3] + LONG $0xc1d4f9c5 // vpaddq %xmm1, %xmm0, %xmm0 + LONG $0x7ef9e1c4; BYTE $0xc0 // vmovq %xmm0, %rax + WORD $0x8948; BYTE $0xec // movq %rbp, %rsp + BYTE $0x5d // popq %rbp + BYTE $0xc3 // retq + +TEXT ·simd_sum_epu64_512(SB), $0-32 + MOVQ v_ptr+0(FP), DI + BYTE $0x55 // pushq %rbp + WORD $0x8948; BYTE $0xe5 // movq %rsp, %rbp + LONG $0xf8e48348 // andq $-8, %rsp + LONG $0x476ffdc5; BYTE $0x20 // vmovdqa 32(%rdi), %ymm0 + LONG $0x48fdf162; WORD $0x07d4 // vpaddq (%rdi), %zmm0, %zmm0 + LONG $0x397de3c4; WORD $0x01c1 // vextracti128 $1, %ymm0, %xmm1 + LONG $0xc1d4f9c5 // vpaddq %xmm1, %xmm0, %xmm0 + LONG $0xc870f9c5; BYTE $0xee // vpshufd $238, %xmm0, %xmm1 # xmm1 = xmm0[2,3,2,3] + LONG $0xc1d4f9c5 // vpaddq %xmm1, %xmm0, %xmm0 + LONG $0x7ef9e1c4; BYTE $0xc0 // vmovq %xmm0, %rax + WORD $0x8948; BYTE $0xec // movq %rbp, %rsp + BYTE $0x5d // popq %rbp + WORD $0xf8c5; BYTE $0x77 // vzeroupper + BYTE $0xc3 // retq + +TEXT ·popcnt_AVX512_harleyseal(SB), $0-32 + MOVQ data+0(FP), DI + MOVQ 
size_ptr+8(FP), SI + MOVQ constants_avx512+16(FP), DX + BYTE $0x55 // pushq %rbp + WORD $0x8948; BYTE $0xe5 // movq %rsp, %rbp + LONG $0xf8e48348 // andq $-8, %rsp + WORD $0x8b48; BYTE $0x06 // movq (%rsi), %rax + WORD $0x8949; BYTE $0xc0 // movq %rax, %r8 + LONG $0xc0eff9c5 // vpxor %xmm0, %xmm0, %xmm0 + LONG $0xf0e08349 // andq $-16, %r8 + JE LBB4_1 + LONG $0x48fdf262; WORD $0x1259 // vpbroadcastq (%rdx), %zmm2 + LONG $0x48fdf262; WORD $0x5a59; BYTE $0x01 // vpbroadcastq 8(%rdx), %zmm3 + LONG $0x48fdf262; WORD $0x6259; BYTE $0x02 // vpbroadcastq 16(%rdx), %zmm4 + LONG $0xc0b78d48; WORD $0x0003; BYTE $0x00 // leaq 960(%rdi), %rsi + LONG $0xf6efc9c5 // vpxor %xmm6, %xmm6, %xmm6 + LONG $0xc9eff1c5 // vpxor %xmm1, %xmm1, %xmm1 + WORD $0xc931 // xorl %ecx, %ecx + LONG $0xedefd1c5 // vpxor %xmm5, %xmm5, %xmm5 + LONG $0xef3941c4; BYTE $0xc0 // vpxor %xmm8, %xmm8, %xmm8 + LONG $0xffefc1c5 // vpxor %xmm7, %xmm7, %xmm7 + LONG $0xef2941c4; BYTE $0xd2 // vpxor %xmm10, %xmm10, %xmm10 + +LBB4_3: + LONG $0x48fd5162; WORD $0xca6f // vmovdqa64 %zmm10, %zmm9 + LONG $0x48fd7162; WORD $0x566f; BYTE $0xf1 // vmovdqa64 -960(%rsi), %zmm10 + LONG $0x48fd7162; WORD $0x5e6f; BYTE $0xf2 // vmovdqa64 -896(%rsi), %zmm11 + LONG $0x48fd7162; WORD $0x666f; BYTE $0xf3 // vmovdqa64 -832(%rsi), %zmm12 + LONG $0x48fd7162; WORD $0x6e6f; BYTE $0xf4 // vmovdqa64 -768(%rsi), %zmm13 + LONG $0x48fd5162; WORD $0xf36f // vmovdqa64 %zmm11, %zmm14 + LONG $0x482d7362; WORD $0xf525; BYTE $0x96 // vpternlogd $150, %zmm5, %zmm10, %zmm14 + LONG $0x482d7362; WORD $0xdd25; BYTE $0xe8 // vpternlogd $232, %zmm5, %zmm10, %zmm11 + LONG $0x48fdd162; WORD $0xed6f // vmovdqa64 %zmm13, %zmm5 + LONG $0x481dd362; WORD $0xee25; BYTE $0x96 // vpternlogd $150, %zmm14, %zmm12, %zmm5 + LONG $0x481d5362; WORD $0xf525; BYTE $0xe8 // vpternlogd $232, %zmm13, %zmm12, %zmm14 + LONG $0x48fd5162; WORD $0xd66f // vmovdqa64 %zmm14, %zmm10 + LONG $0x48255362; WORD $0xd025; BYTE $0x96 // vpternlogd $150, %zmm8, %zmm11, %zmm10 + LONG 
$0x48255362; WORD $0xf025; BYTE $0xe8 // vpternlogd $232, %zmm8, %zmm11, %zmm14 + LONG $0x48fd7162; WORD $0x466f; BYTE $0xf5 // vmovdqa64 -704(%rsi), %zmm8 + LONG $0x48fd7162; WORD $0x5e6f; BYTE $0xf6 // vmovdqa64 -640(%rsi), %zmm11 + LONG $0x48fd5162; WORD $0xe36f // vmovdqa64 %zmm11, %zmm12 + LONG $0x483d7362; WORD $0xe525; BYTE $0x96 // vpternlogd $150, %zmm5, %zmm8, %zmm12 + LONG $0x483dd362; WORD $0xeb25; BYTE $0xe8 // vpternlogd $232, %zmm11, %zmm8, %zmm5 + LONG $0x48fd7162; WORD $0x466f; BYTE $0xf7 // vmovdqa64 -576(%rsi), %zmm8 + LONG $0x48fd7162; WORD $0x5e6f; BYTE $0xf8 // vmovdqa64 -512(%rsi), %zmm11 + LONG $0x48fd5162; WORD $0xeb6f // vmovdqa64 %zmm11, %zmm13 + LONG $0x483d5362; WORD $0xec25; BYTE $0x96 // vpternlogd $150, %zmm12, %zmm8, %zmm13 + LONG $0x483d5362; WORD $0xe325; BYTE $0xe8 // vpternlogd $232, %zmm11, %zmm8, %zmm12 + LONG $0x48fd5162; WORD $0xc46f // vmovdqa64 %zmm12, %zmm8 + LONG $0x48555362; WORD $0xc225; BYTE $0x96 // vpternlogd $150, %zmm10, %zmm5, %zmm8 + LONG $0x48555362; WORD $0xe225; BYTE $0xe8 // vpternlogd $232, %zmm10, %zmm5, %zmm12 + LONG $0x48fd5162; WORD $0xd46f // vmovdqa64 %zmm12, %zmm10 + LONG $0x480d7362; WORD $0xd725; BYTE $0x96 // vpternlogd $150, %zmm7, %zmm14, %zmm10 + LONG $0x480d7362; WORD $0xe725; BYTE $0xe8 // vpternlogd $232, %zmm7, %zmm14, %zmm12 + LONG $0x48fdf162; WORD $0x6e6f; BYTE $0xf9 // vmovdqa64 -448(%rsi), %zmm5 + LONG $0x48fdf162; WORD $0x7e6f; BYTE $0xfa // vmovdqa64 -384(%rsi), %zmm7 + LONG $0x48fd7162; WORD $0xdf6f // vmovdqa64 %zmm7, %zmm11 + LONG $0x48555362; WORD $0xdd25; BYTE $0x96 // vpternlogd $150, %zmm13, %zmm5, %zmm11 + LONG $0x48557362; WORD $0xef25; BYTE $0xe8 // vpternlogd $232, %zmm7, %zmm5, %zmm13 + LONG $0x48fdf162; WORD $0x6e6f; BYTE $0xfb // vmovdqa64 -320(%rsi), %zmm5 + LONG $0x48fdf162; WORD $0x7e6f; BYTE $0xfc // vmovdqa64 -256(%rsi), %zmm7 + LONG $0x48fd7162; WORD $0xf76f // vmovdqa64 %zmm7, %zmm14 + LONG $0x48555362; WORD $0xf325; BYTE $0x96 // vpternlogd $150, %zmm11, %zmm5, 
%zmm14 + LONG $0x48557362; WORD $0xdf25; BYTE $0xe8 // vpternlogd $232, %zmm7, %zmm5, %zmm11 + LONG $0x48fdd162; WORD $0xfb6f // vmovdqa64 %zmm11, %zmm7 + LONG $0x4815d362; WORD $0xf825; BYTE $0x96 // vpternlogd $150, %zmm8, %zmm13, %zmm7 + LONG $0x48155362; WORD $0xd825; BYTE $0xe8 // vpternlogd $232, %zmm8, %zmm13, %zmm11 + LONG $0x48fdf162; WORD $0x6e6f; BYTE $0xfd // vmovdqa64 -192(%rsi), %zmm5 + LONG $0x48fd7162; WORD $0x466f; BYTE $0xfe // vmovdqa64 -128(%rsi), %zmm8 + LONG $0x48fd5162; WORD $0xe86f // vmovdqa64 %zmm8, %zmm13 + LONG $0x48555362; WORD $0xee25; BYTE $0x96 // vpternlogd $150, %zmm14, %zmm5, %zmm13 + LONG $0x48555362; WORD $0xf025; BYTE $0xe8 // vpternlogd $232, %zmm8, %zmm5, %zmm14 + LONG $0x48fd7162; WORD $0x466f; BYTE $0xff // vmovdqa64 -64(%rsi), %zmm8 + LONG $0x48fd7162; WORD $0x3e6f // vmovdqa64 (%rsi), %zmm15 + LONG $0x48fdd162; WORD $0xef6f // vmovdqa64 %zmm15, %zmm5 + LONG $0x483dd362; WORD $0xed25; BYTE $0x96 // vpternlogd $150, %zmm13, %zmm8, %zmm5 + LONG $0x483d5362; WORD $0xef25; BYTE $0xe8 // vpternlogd $232, %zmm15, %zmm8, %zmm13 + LONG $0x48fd5162; WORD $0xc56f // vmovdqa64 %zmm13, %zmm8 + LONG $0x480d7362; WORD $0xc725; BYTE $0x96 // vpternlogd $150, %zmm7, %zmm14, %zmm8 + LONG $0x480d7362; WORD $0xef25; BYTE $0xe8 // vpternlogd $232, %zmm7, %zmm14, %zmm13 + LONG $0x48fdd162; WORD $0xfd6f // vmovdqa64 %zmm13, %zmm7 + LONG $0x4825d362; WORD $0xfa25; BYTE $0x96 // vpternlogd $150, %zmm10, %zmm11, %zmm7 + LONG $0x48255362; WORD $0xea25; BYTE $0xe8 // vpternlogd $232, %zmm10, %zmm11, %zmm13 + LONG $0x48fd5162; WORD $0xd56f // vmovdqa64 %zmm13, %zmm10 + LONG $0x481d5362; WORD $0xd125; BYTE $0x96 // vpternlogd $150, %zmm9, %zmm12, %zmm10 + LONG $0x481d5362; WORD $0xe925; BYTE $0xe8 // vpternlogd $232, %zmm9, %zmm12, %zmm13 + LONG $0x4835d162; WORD $0xd571; BYTE $0x01 // vpsrlw $1, %zmm13, %zmm9 + LONG $0x48ed5162; WORD $0xc9db // vpandq %zmm9, %zmm2, %zmm9 + LONG $0x48155162; WORD $0xc9f8 // vpsubb %zmm9, %zmm13, %zmm9 + LONG 
$0x4825d162; WORD $0xd171; BYTE $0x02 // vpsrlw $2, %zmm9, %zmm11 + LONG $0x48e55162; WORD $0xdbdb // vpandq %zmm11, %zmm3, %zmm11 + LONG $0x48b57162; WORD $0xcbdb // vpandq %zmm3, %zmm9, %zmm9 + LONG $0x48355162; WORD $0xcbfc // vpaddb %zmm11, %zmm9, %zmm9 + LONG $0x4825d162; WORD $0xd171; BYTE $0x04 // vpsrlw $4, %zmm9, %zmm11 + LONG $0x48355162; WORD $0xcbfc // vpaddb %zmm11, %zmm9, %zmm9 + LONG $0x48b57162; WORD $0xccdb // vpandq %zmm4, %zmm9, %zmm9 + LONG $0x48357162; WORD $0xcef6 // vpsadbw %zmm6, %zmm9, %zmm9 + LONG $0x48b5f162; WORD $0xc9d4 // vpaddq %zmm1, %zmm9, %zmm1 + LONG $0x10c18348 // addq $16, %rcx + LONG $0x00c68148; WORD $0x0004; BYTE $0x00 // addq $1024, %rsi # imm = 0x400 + WORD $0x394c; BYTE $0xc1 // cmpq %r8, %rcx + JB LBB4_3 + LONG $0x48ddf162; WORD $0xf173; BYTE $0x04 // vpsllq $4, %zmm1, %zmm4 + JMP LBB4_5 + +LBB4_1: + WORD $0x3145; BYTE $0xc0 // xorl %r8d, %r8d + LONG $0xef2941c4; BYTE $0xd2 // vpxor %xmm10, %xmm10, %xmm10 + LONG $0xffefc1c5 // vpxor %xmm7, %xmm7, %xmm7 + LONG $0xef3941c4; BYTE $0xc0 // vpxor %xmm8, %xmm8, %xmm8 + LONG $0xedefd1c5 // vpxor %xmm5, %xmm5, %xmm5 + LONG $0xe4efd9c5 // vpxor %xmm4, %xmm4, %xmm4 + +LBB4_5: + LONG $0x48fdf262; WORD $0x0a59 // vpbroadcastq (%rdx), %zmm1 + LONG $0x48fdf262; WORD $0x5259; BYTE $0x01 // vpbroadcastq 8(%rdx), %zmm2 + LONG $0x48fdf262; WORD $0x5a59; BYTE $0x02 // vpbroadcastq 16(%rdx), %zmm3 + LONG $0x484dd162; WORD $0xd271; BYTE $0x01 // vpsrlw $1, %zmm10, %zmm6 + LONG $0x48f5f162; WORD $0xf6db // vpandq %zmm6, %zmm1, %zmm6 + LONG $0x482df162; WORD $0xf6f8 // vpsubb %zmm6, %zmm10, %zmm6 + LONG $0x4835f162; WORD $0xd671; BYTE $0x02 // vpsrlw $2, %zmm6, %zmm9 + LONG $0x48ed5162; WORD $0xc9db // vpandq %zmm9, %zmm2, %zmm9 + LONG $0x48cdf162; WORD $0xf2db // vpandq %zmm2, %zmm6, %zmm6 + LONG $0x484dd162; WORD $0xf1fc // vpaddb %zmm9, %zmm6, %zmm6 + LONG $0x4835f162; WORD $0xd671; BYTE $0x04 // vpsrlw $4, %zmm6, %zmm9 + LONG $0x484dd162; WORD $0xf1fc // vpaddb %zmm9, %zmm6, %zmm6 + LONG 
$0x48cdf162; WORD $0xf3db // vpandq %zmm3, %zmm6, %zmm6 + LONG $0x484df162; WORD $0xf0f6 // vpsadbw %zmm0, %zmm6, %zmm6 + LONG $0x4835f162; WORD $0xd771; BYTE $0x01 // vpsrlw $1, %zmm7, %zmm9 + LONG $0x48f55162; WORD $0xc9db // vpandq %zmm9, %zmm1, %zmm9 + LONG $0x4845d162; WORD $0xf9f8 // vpsubb %zmm9, %zmm7, %zmm7 + LONG $0x4835f162; WORD $0xd771; BYTE $0x02 // vpsrlw $2, %zmm7, %zmm9 + LONG $0x48ed5162; WORD $0xc9db // vpandq %zmm9, %zmm2, %zmm9 + LONG $0x48c5f162; WORD $0xfadb // vpandq %zmm2, %zmm7, %zmm7 + LONG $0x4845d162; WORD $0xf9fc // vpaddb %zmm9, %zmm7, %zmm7 + LONG $0x4835f162; WORD $0xd771; BYTE $0x04 // vpsrlw $4, %zmm7, %zmm9 + LONG $0x4845d162; WORD $0xf9fc // vpaddb %zmm9, %zmm7, %zmm7 + LONG $0x48c5f162; WORD $0xfbdb // vpandq %zmm3, %zmm7, %zmm7 + LONG $0x4845f162; WORD $0xf8f6 // vpsadbw %zmm0, %zmm7, %zmm7 + LONG $0x4835d162; WORD $0xd071; BYTE $0x01 // vpsrlw $1, %zmm8, %zmm9 + LONG $0x48f55162; WORD $0xc9db // vpandq %zmm9, %zmm1, %zmm9 + LONG $0x483d5162; WORD $0xc1f8 // vpsubb %zmm9, %zmm8, %zmm8 + LONG $0x4835d162; WORD $0xd071; BYTE $0x02 // vpsrlw $2, %zmm8, %zmm9 + LONG $0x48ed5162; WORD $0xc9db // vpandq %zmm9, %zmm2, %zmm9 + LONG $0x48bd7162; WORD $0xc2db // vpandq %zmm2, %zmm8, %zmm8 + LONG $0x483d5162; WORD $0xc1fc // vpaddb %zmm9, %zmm8, %zmm8 + LONG $0x4835d162; WORD $0xd071; BYTE $0x04 // vpsrlw $4, %zmm8, %zmm9 + LONG $0x483d5162; WORD $0xc1fc // vpaddb %zmm9, %zmm8, %zmm8 + LONG $0x48bd7162; WORD $0xc3db // vpandq %zmm3, %zmm8, %zmm8 + LONG $0x483d7162; WORD $0xc0f6 // vpsadbw %zmm0, %zmm8, %zmm8 + LONG $0x4835f162; WORD $0xd571; BYTE $0x01 // vpsrlw $1, %zmm5, %zmm9 + LONG $0x48f55162; WORD $0xc9db // vpandq %zmm9, %zmm1, %zmm9 + LONG $0x4855d162; WORD $0xe9f8 // vpsubb %zmm9, %zmm5, %zmm5 + LONG $0x4835f162; WORD $0xd571; BYTE $0x02 // vpsrlw $2, %zmm5, %zmm9 + LONG $0x48ed5162; WORD $0xc9db // vpandq %zmm9, %zmm2, %zmm9 + LONG $0x48d5f162; WORD $0xeadb // vpandq %zmm2, %zmm5, %zmm5 + LONG $0x4855d162; WORD $0xe9fc // 
vpaddb %zmm9, %zmm5, %zmm5 + LONG $0x4835f162; WORD $0xd571; BYTE $0x04 // vpsrlw $4, %zmm5, %zmm9 + LONG $0x4855d162; WORD $0xe9fc // vpaddb %zmm9, %zmm5, %zmm5 + LONG $0x48d5f162; WORD $0xebdb // vpandq %zmm3, %zmm5, %zmm5 + LONG $0x4855f162; WORD $0xc0f6 // vpsadbw %zmm0, %zmm5, %zmm0 + LONG $0x48d5f162; WORD $0xf673; BYTE $0x03 // vpsllq $3, %zmm6, %zmm5 + LONG $0x48d5f162; WORD $0xe4d4 // vpaddq %zmm4, %zmm5, %zmm4 + LONG $0x48d5f162; WORD $0xf773; BYTE $0x02 // vpsllq $2, %zmm7, %zmm5 + LONG $0x48bdd162; WORD $0xf0d4 // vpaddq %zmm8, %zmm8, %zmm6 + LONG $0x48d5f162; WORD $0xeed4 // vpaddq %zmm6, %zmm5, %zmm5 + LONG $0x48ddf162; WORD $0xe5d4 // vpaddq %zmm5, %zmm4, %zmm4 + LONG $0x48ddf162; WORD $0xc0d4 // vpaddq %zmm0, %zmm4, %zmm0 + WORD $0x3949; BYTE $0xc0 // cmpq %rax, %r8 + JAE LBB4_8 + WORD $0x294c; BYTE $0xc0 // subq %r8, %rax + LONG $0x06e0c149 // shlq $6, %r8 + WORD $0x014c; BYTE $0xc7 // addq %r8, %rdi + LONG $0xe4efd9c5 // vpxor %xmm4, %xmm4, %xmm4 + +LBB4_7: + LONG $0x48fdf162; WORD $0x2f6f // vmovdqa64 (%rdi), %zmm5 + LONG $0x484df162; WORD $0xd571; BYTE $0x01 // vpsrlw $1, %zmm5, %zmm6 + LONG $0x48f5f162; WORD $0xf6db // vpandq %zmm6, %zmm1, %zmm6 + LONG $0x4855f162; WORD $0xeef8 // vpsubb %zmm6, %zmm5, %zmm5 + LONG $0x484df162; WORD $0xd571; BYTE $0x02 // vpsrlw $2, %zmm5, %zmm6 + LONG $0x48edf162; WORD $0xf6db // vpandq %zmm6, %zmm2, %zmm6 + LONG $0x48d5f162; WORD $0xeadb // vpandq %zmm2, %zmm5, %zmm5 + LONG $0x4855f162; WORD $0xeefc // vpaddb %zmm6, %zmm5, %zmm5 + LONG $0x484df162; WORD $0xd571; BYTE $0x04 // vpsrlw $4, %zmm5, %zmm6 + LONG $0x4855f162; WORD $0xeefc // vpaddb %zmm6, %zmm5, %zmm5 + LONG $0x48d5f162; WORD $0xebdb // vpandq %zmm3, %zmm5, %zmm5 + LONG $0x4855f162; WORD $0xecf6 // vpsadbw %zmm4, %zmm5, %zmm5 + LONG $0x48d5f162; WORD $0xc0d4 // vpaddq %zmm0, %zmm5, %zmm0 + LONG $0x40c78348 // addq $64, %rdi + LONG $0xffc08348 // addq $-1, %rax + JNE LBB4_7 + +LBB4_8: + LONG $0x48fdf362; WORD $0xc13b; BYTE $0x01 // vextracti64x4 $1, 
%zmm0, %ymm1 + LONG $0x48fdf162; WORD $0xc1d4 // vpaddq %zmm1, %zmm0, %zmm0 + LONG $0x397de3c4; WORD $0x01c1 // vextracti128 $1, %ymm0, %xmm1 + LONG $0xc1d4f9c5 // vpaddq %xmm1, %xmm0, %xmm0 + LONG $0xc870f9c5; BYTE $0xee // vpshufd $238, %xmm0, %xmm1 # xmm1 = xmm0[2,3,2,3] + LONG $0xc1d4f9c5 // vpaddq %xmm1, %xmm0, %xmm0 + LONG $0x7ef9e1c4; BYTE $0xc0 // vmovq %xmm0, %rax + WORD $0x8948; BYTE $0xec // movq %rbp, %rsp + BYTE $0x5d // popq %rbp + WORD $0xf8c5; BYTE $0x77 // vzeroupper + BYTE $0xc3 // retq + +TEXT ·popcnt_64bit_512(SB), $0-32 + MOVQ src+0(FP), DI + MOVQ popcnt_constants+8(FP), SI + +TEXT ·hamming_bitwise_512(SB), $0-32 + MOVQ a+0(FP), DI + MOVQ b+8(FP), SI + MOVQ res+16(FP), DX + MOVQ len+24(FP), CX + MOVQ popcnt_constants+32(FP), R8 + BYTE $0x55 // pushq %rbp + WORD $0x8948; BYTE $0xe5 // movq %rsp, %rbp + WORD $0x5741 // pushq %r15 + WORD $0x5641 // pushq %r14 + WORD $0x5541 // pushq %r13 + WORD $0x5441 // pushq %r12 + BYTE $0x53 // pushq %rbx + LONG $0xf8e48348 // andq $-8, %rsp + WORD $0x8b48; BYTE $0x01 // movq (%rcx), %rax + WORD $0xf883; BYTE $0x08 // cmpl $8, %eax + JGE LBB5_1 + WORD $0x8b4d; BYTE $0x08 // movq (%r8), %r9 + LONG $0x08508b4d // movq 8(%r8), %r10 + LONG $0x10588b4d // movq 16(%r8), %r11 + LONG $0x18408b4d // movq 24(%r8), %r8 + WORD $0x8941; BYTE $0xc7 // movl %eax, %r15d + LONG $0xffc78341 // addl $-1, %r15d + LONG $0x1fff8341 // cmpl $31, %r15d + JAE LBB5_5 + WORD $0xc931 // xorl %ecx, %ecx + WORD $0x8949; BYTE $0xf6 // movq %rsi, %r14 + WORD $0x8949; BYTE $0xfd // movq %rdi, %r13 + JMP LBB5_8 + +LBB5_1: + LONG $0x0000803d; BYTE $0x00 // cmpl $128, %eax + JB LBB5_2 + LONG $0x48fdd262; WORD $0x0059 // vpbroadcastq (%r8), %zmm0 + LONG $0x48fdd262; WORD $0x4859; BYTE $0x01 // vpbroadcastq 8(%r8), %zmm1 + LONG $0x48fdd262; WORD $0x5059; BYTE $0x02 // vpbroadcastq 16(%r8), %zmm2 + WORD $0xc931 // xorl %ecx, %ecx + LONG $0xdbefe1c5 // vpxor %xmm3, %xmm3, %xmm3 + +LBB5_20: + LONG $0x48fef162; WORD $0x266f // vmovdqu64 (%rsi), %zmm4 
+ LONG $0x48fef162; WORD $0x6e6f; BYTE $0x01 // vmovdqu64 64(%rsi), %zmm5 + LONG $0x48fef162; WORD $0x766f; BYTE $0x02 // vmovdqu64 128(%rsi), %zmm6 + LONG $0x48fef162; WORD $0x7e6f; BYTE $0x03 // vmovdqu64 192(%rsi), %zmm7 + LONG $0x48fe7162; WORD $0x466f; BYTE $0x04 // vmovdqu64 256(%rsi), %zmm8 + LONG $0x48fe7162; WORD $0x4e6f; BYTE $0x05 // vmovdqu64 320(%rsi), %zmm9 + LONG $0x48fe7162; WORD $0x566f; BYTE $0x06 // vmovdqu64 384(%rsi), %zmm10 + LONG $0x48fe7162; WORD $0x5e6f; BYTE $0x07 // vmovdqu64 448(%rsi), %zmm11 + LONG $0x48fe7162; WORD $0x666f; BYTE $0x08 // vmovdqu64 512(%rsi), %zmm12 + LONG $0x48fe7162; WORD $0x6e6f; BYTE $0x09 // vmovdqu64 576(%rsi), %zmm13 + LONG $0x48fe7162; WORD $0x766f; BYTE $0x0a // vmovdqu64 640(%rsi), %zmm14 + LONG $0x48fe7162; WORD $0x7e6f; BYTE $0x0b // vmovdqu64 704(%rsi), %zmm15 + LONG $0x48fee162; WORD $0x466f; BYTE $0x0c // vmovdqu64 768(%rsi), %zmm16 + LONG $0x48fee162; WORD $0x4e6f; BYTE $0x0d // vmovdqu64 832(%rsi), %zmm17 + LONG $0x48fee162; WORD $0x566f; BYTE $0x0e // vmovdqu64 896(%rsi), %zmm18 + LONG $0x48ddf162; WORD $0x27ef // vpxorq (%rdi), %zmm4, %zmm4 + LONG $0x48d5f162; WORD $0x6fef; BYTE $0x01 // vpxorq 64(%rdi), %zmm5, %zmm5 + LONG $0x48cdf162; WORD $0x77ef; BYTE $0x02 // vpxorq 128(%rdi), %zmm6, %zmm6 + LONG $0x48c5f162; WORD $0x7fef; BYTE $0x03 // vpxorq 192(%rdi), %zmm7, %zmm7 + LONG $0x48bd7162; WORD $0x47ef; BYTE $0x04 // vpxorq 256(%rdi), %zmm8, %zmm8 + LONG $0x48b57162; WORD $0x4fef; BYTE $0x05 // vpxorq 320(%rdi), %zmm9, %zmm9 + LONG $0x48ad7162; WORD $0x57ef; BYTE $0x06 // vpxorq 384(%rdi), %zmm10, %zmm10 + LONG $0x48a57162; WORD $0x5fef; BYTE $0x07 // vpxorq 448(%rdi), %zmm11, %zmm11 + LONG $0x489d7162; WORD $0x67ef; BYTE $0x08 // vpxorq 512(%rdi), %zmm12, %zmm12 + LONG $0x48957162; WORD $0x6fef; BYTE $0x09 // vpxorq 576(%rdi), %zmm13, %zmm13 + LONG $0x488d7162; WORD $0x77ef; BYTE $0x0a // vpxorq 640(%rdi), %zmm14, %zmm14 + LONG $0x48857162; WORD $0x7fef; BYTE $0x0b // vpxorq 704(%rdi), %zmm15, 
%zmm15 + LONG $0x40fde162; WORD $0x47ef; BYTE $0x0c // vpxorq 768(%rdi), %zmm16, %zmm16 + LONG $0x40f5e162; WORD $0x4fef; BYTE $0x0d // vpxorq 832(%rdi), %zmm17, %zmm17 + LONG $0x40ede162; WORD $0x57ef; BYTE $0x0e // vpxorq 896(%rdi), %zmm18, %zmm18 + LONG $0x48fee162; WORD $0x5e6f; BYTE $0x0f // vmovdqu64 960(%rsi), %zmm19 + LONG $0x40e5e162; WORD $0x5fef; BYTE $0x0f // vpxorq 960(%rdi), %zmm19, %zmm19 + LONG $0x48fde162; WORD $0xe56f // vmovdqa64 %zmm5, %zmm20 + LONG $0x485de362; WORD $0xe325; BYTE $0x96 // vpternlogd $150, %zmm3, %zmm4, %zmm20 + LONG $0x485df362; WORD $0xeb25; BYTE $0xe8 // vpternlogd $232, %zmm3, %zmm4, %zmm5 + LONG $0x48fdf162; WORD $0xe76f // vmovdqa64 %zmm7, %zmm4 + LONG $0x484db362; WORD $0xe425; BYTE $0x96 // vpternlogd $150, %zmm20, %zmm6, %zmm4 + LONG $0x484de362; WORD $0xe725; BYTE $0xe8 // vpternlogd $232, %zmm7, %zmm6, %zmm20 + LONG $0x48fdb162; WORD $0xf46f // vmovdqa64 %zmm20, %zmm6 + LONG $0x4855f362; WORD $0xf325; BYTE $0x96 // vpternlogd $150, %zmm3, %zmm5, %zmm6 + LONG $0x4855e362; WORD $0xe325; BYTE $0xe8 // vpternlogd $232, %zmm3, %zmm5, %zmm20 + LONG $0x48fdd162; WORD $0xe96f // vmovdqa64 %zmm9, %zmm5 + LONG $0x483df362; WORD $0xec25; BYTE $0x96 // vpternlogd $150, %zmm4, %zmm8, %zmm5 + LONG $0x48fdd162; WORD $0xfb6f // vmovdqa64 %zmm11, %zmm7 + LONG $0x482df362; WORD $0xfd25; BYTE $0x96 // vpternlogd $150, %zmm5, %zmm10, %zmm7 + LONG $0x483dd362; WORD $0xe125; BYTE $0xe8 // vpternlogd $232, %zmm9, %zmm8, %zmm4 + LONG $0x482dd362; WORD $0xeb25; BYTE $0xe8 // vpternlogd $232, %zmm11, %zmm10, %zmm5 + LONG $0x48fd7162; WORD $0xc56f // vmovdqa64 %zmm5, %zmm8 + LONG $0x485d7362; WORD $0xc625; BYTE $0x96 // vpternlogd $150, %zmm6, %zmm4, %zmm8 + LONG $0x485df362; WORD $0xee25; BYTE $0xe8 // vpternlogd $232, %zmm6, %zmm4, %zmm5 + LONG $0x48fdf162; WORD $0xe56f // vmovdqa64 %zmm5, %zmm4 + LONG $0x48fdd162; WORD $0xf56f // vmovdqa64 %zmm13, %zmm6 + LONG $0x405df362; WORD $0xe325; BYTE $0x96 // vpternlogd $150, %zmm3, %zmm20, %zmm4 + 
LONG $0x481df362; WORD $0xf725; BYTE $0x96 // vpternlogd $150, %zmm7, %zmm12, %zmm6 + LONG $0x481dd362; WORD $0xfd25; BYTE $0xe8 // vpternlogd $232, %zmm13, %zmm12, %zmm7 + LONG $0x48fd5162; WORD $0xcf6f // vmovdqa64 %zmm15, %zmm9 + LONG $0x480d7362; WORD $0xce25; BYTE $0x96 // vpternlogd $150, %zmm6, %zmm14, %zmm9 + LONG $0x405df362; WORD $0xeb25; BYTE $0xe8 // vpternlogd $232, %zmm3, %zmm20, %zmm5 + LONG $0x480dd362; WORD $0xf725; BYTE $0xe8 // vpternlogd $232, %zmm15, %zmm14, %zmm6 + LONG $0x48fd7162; WORD $0xd66f // vmovdqa64 %zmm6, %zmm10 + LONG $0x48455362; WORD $0xd025; BYTE $0x96 // vpternlogd $150, %zmm8, %zmm7, %zmm10 + LONG $0x48fd3162; WORD $0xd96f // vmovdqa64 %zmm17, %zmm11 + LONG $0x407d5362; WORD $0xd925; BYTE $0x96 // vpternlogd $150, %zmm9, %zmm16, %zmm11 + LONG $0x4845d362; WORD $0xf025; BYTE $0xe8 // vpternlogd $232, %zmm8, %zmm7, %zmm6 + LONG $0x407d3362; WORD $0xc925; BYTE $0xe8 // vpternlogd $232, %zmm17, %zmm16, %zmm9 + LONG $0x48fdb162; WORD $0xfb6f // vmovdqa64 %zmm19, %zmm7 + LONG $0x406dd362; WORD $0xfb25; BYTE $0x96 // vpternlogd $150, %zmm11, %zmm18, %zmm7 + LONG $0x406d3362; WORD $0xdb25; BYTE $0xe8 // vpternlogd $232, %zmm19, %zmm18, %zmm11 + LONG $0x48fd5162; WORD $0xc36f // vmovdqa64 %zmm11, %zmm8 + LONG $0x48355362; WORD $0xc225; BYTE $0x96 // vpternlogd $150, %zmm10, %zmm9, %zmm8 + LONG $0x48355362; WORD $0xda25; BYTE $0xe8 // vpternlogd $232, %zmm10, %zmm9, %zmm11 + LONG $0x48fd5162; WORD $0xcb6f // vmovdqa64 %zmm11, %zmm9 + LONG $0x484d7362; WORD $0xcc25; BYTE $0x96 // vpternlogd $150, %zmm4, %zmm6, %zmm9 + LONG $0x484d7362; WORD $0xdc25; BYTE $0xe8 // vpternlogd $232, %zmm4, %zmm6, %zmm11 + LONG $0x48fdd162; WORD $0xe36f // vmovdqa64 %zmm11, %zmm4 + LONG $0x4855f362; WORD $0xe325; BYTE $0x96 // vpternlogd $150, %zmm3, %zmm5, %zmm4 + LONG $0x48557362; WORD $0xdb25; BYTE $0xe8 // vpternlogd $232, %zmm3, %zmm5, %zmm11 + LONG $0x4855d162; WORD $0xd371; BYTE $0x01 // vpsrlw $1, %zmm11, %zmm5 + LONG $0x48fdf162; WORD $0xeddb // 
vpandq %zmm5, %zmm0, %zmm5 + LONG $0x4825f162; WORD $0xedf8 // vpsubb %zmm5, %zmm11, %zmm5 + LONG $0x484df162; WORD $0xd571; BYTE $0x02 // vpsrlw $2, %zmm5, %zmm6 + LONG $0x48d5f162; WORD $0xe9db // vpandq %zmm1, %zmm5, %zmm5 + LONG $0x482df162; WORD $0xd471; BYTE $0x01 // vpsrlw $1, %zmm4, %zmm10 + LONG $0x48f5f162; WORD $0xf6db // vpandq %zmm6, %zmm1, %zmm6 + LONG $0x48fd5162; WORD $0xd2db // vpandq %zmm10, %zmm0, %zmm10 + LONG $0x485dd162; WORD $0xe2f8 // vpsubb %zmm10, %zmm4, %zmm4 + LONG $0x482df162; WORD $0xd471; BYTE $0x02 // vpsrlw $2, %zmm4, %zmm10 + LONG $0x4855f162; WORD $0xeefc // vpaddb %zmm6, %zmm5, %zmm5 + LONG $0x48f5d162; WORD $0xf2db // vpandq %zmm10, %zmm1, %zmm6 + LONG $0x48ddf162; WORD $0xe1db // vpandq %zmm1, %zmm4, %zmm4 + LONG $0x485df162; WORD $0xe6fc // vpaddb %zmm6, %zmm4, %zmm4 + LONG $0x484df162; WORD $0xd571; BYTE $0x04 // vpsrlw $4, %zmm5, %zmm6 + LONG $0x482dd162; WORD $0xd171; BYTE $0x01 // vpsrlw $1, %zmm9, %zmm10 + LONG $0x48fd5162; WORD $0xd2db // vpandq %zmm10, %zmm0, %zmm10 + LONG $0x48355162; WORD $0xcaf8 // vpsubb %zmm10, %zmm9, %zmm9 + LONG $0x482df162; WORD $0xd471; BYTE $0x04 // vpsrlw $4, %zmm4, %zmm10 + LONG $0x4825d162; WORD $0xd171; BYTE $0x02 // vpsrlw $2, %zmm9, %zmm11 + LONG $0x48f55162; WORD $0xdbdb // vpandq %zmm11, %zmm1, %zmm11 + LONG $0x48b57162; WORD $0xc9db // vpandq %zmm1, %zmm9, %zmm9 + LONG $0x4855f162; WORD $0xeefc // vpaddb %zmm6, %zmm5, %zmm5 + LONG $0x4835d162; WORD $0xf3fc // vpaddb %zmm11, %zmm9, %zmm6 + LONG $0x4835f162; WORD $0xd671; BYTE $0x04 // vpsrlw $4, %zmm6, %zmm9 + LONG $0x484dd162; WORD $0xf1fc // vpaddb %zmm9, %zmm6, %zmm6 + LONG $0x485dd162; WORD $0xe2fc // vpaddb %zmm10, %zmm4, %zmm4 + LONG $0x4835d162; WORD $0xd071; BYTE $0x01 // vpsrlw $1, %zmm8, %zmm9 + LONG $0x48fd5162; WORD $0xc9db // vpandq %zmm9, %zmm0, %zmm9 + LONG $0x483d5162; WORD $0xc1f8 // vpsubb %zmm9, %zmm8, %zmm8 + LONG $0x48cdf162; WORD $0xf2db // vpandq %zmm2, %zmm6, %zmm6 + LONG $0x4835d162; WORD $0xd071; BYTE $0x02 // 
vpsrlw $2, %zmm8, %zmm9 + LONG $0x48f55162; WORD $0xc9db // vpandq %zmm9, %zmm1, %zmm9 + LONG $0x48bd7162; WORD $0xc1db // vpandq %zmm1, %zmm8, %zmm8 + LONG $0x48d5f162; WORD $0xeadb // vpandq %zmm2, %zmm5, %zmm5 + LONG $0x483d5162; WORD $0xc1fc // vpaddb %zmm9, %zmm8, %zmm8 + LONG $0x4835d162; WORD $0xd071; BYTE $0x04 // vpsrlw $4, %zmm8, %zmm9 + LONG $0x483d5162; WORD $0xc1fc // vpaddb %zmm9, %zmm8, %zmm8 + LONG $0x48ddf162; WORD $0xe2db // vpandq %zmm2, %zmm4, %zmm4 + LONG $0x48bd7162; WORD $0xc2db // vpandq %zmm2, %zmm8, %zmm8 + LONG $0x483d7162; WORD $0xc3f6 // vpsadbw %zmm3, %zmm8, %zmm8 + LONG $0x4835f162; WORD $0xd771; BYTE $0x01 // vpsrlw $1, %zmm7, %zmm9 + LONG $0x484df162; WORD $0xf3f6 // vpsadbw %zmm3, %zmm6, %zmm6 + LONG $0x48fd5162; WORD $0xc9db // vpandq %zmm9, %zmm0, %zmm9 + LONG $0x4845d162; WORD $0xf9f8 // vpsubb %zmm9, %zmm7, %zmm7 + LONG $0x4835f162; WORD $0xd771; BYTE $0x02 // vpsrlw $2, %zmm7, %zmm9 + LONG $0x4855f162; WORD $0xebf6 // vpsadbw %zmm3, %zmm5, %zmm5 + LONG $0x48f55162; WORD $0xc9db // vpandq %zmm9, %zmm1, %zmm9 + LONG $0x48c5f162; WORD $0xf9db // vpandq %zmm1, %zmm7, %zmm7 + LONG $0x4845d162; WORD $0xf9fc // vpaddb %zmm9, %zmm7, %zmm7 + LONG $0x485df162; WORD $0xe3f6 // vpsadbw %zmm3, %zmm4, %zmm4 + LONG $0x4835f162; WORD $0xd771; BYTE $0x04 // vpsrlw $4, %zmm7, %zmm9 + LONG $0x4845d162; WORD $0xf9fc // vpaddb %zmm9, %zmm7, %zmm7 + LONG $0x48c5f162; WORD $0xfadb // vpandq %zmm2, %zmm7, %zmm7 + LONG $0x48d5f162; WORD $0xf573; BYTE $0x04 // vpsllq $4, %zmm5, %zmm5 + LONG $0x4845f162; WORD $0xfbf6 // vpsadbw %zmm3, %zmm7, %zmm7 + LONG $0x48ddf162; WORD $0xf473; BYTE $0x03 // vpsllq $3, %zmm4, %zmm4 + LONG $0x48cdf162; WORD $0xf673; BYTE $0x02 // vpsllq $2, %zmm6, %zmm6 + LONG $0x48ddf162; WORD $0xe5d4 // vpaddq %zmm5, %zmm4, %zmm4 + LONG $0x48bdd162; WORD $0xe8d4 // vpaddq %zmm8, %zmm8, %zmm5 + LONG $0x48cdf162; WORD $0xedd4 // vpaddq %zmm5, %zmm6, %zmm5 + LONG $0x48d5f162; WORD $0xefd4 // vpaddq %zmm7, %zmm5, %zmm5 + LONG 
$0x48ddf162; WORD $0xe5d4 // vpaddq %zmm5, %zmm4, %zmm4 + LONG $0x48fdf362; WORD $0xe53b; BYTE $0x01 // vextracti64x4 $1, %zmm4, %ymm5 + LONG $0x48ddf162; WORD $0xe5d4 // vpaddq %zmm5, %zmm4, %zmm4 + LONG $0x397de3c4; WORD $0x01e5 // vextracti128 $1, %ymm4, %xmm5 + LONG $0xe5d4d9c5 // vpaddq %xmm5, %xmm4, %xmm4 + LONG $0xec70f9c5; BYTE $0xee // vpshufd $238, %xmm4, %xmm5 # xmm5 = xmm4[2,3,2,3] + LONG $0xe5d4d9c5 // vpaddq %xmm5, %xmm4, %xmm4 + LONG $0x7ef9e1c4; BYTE $0xe3 // vmovq %xmm4, %rbx + WORD $0x0148; BYTE $0xd9 // addq %rbx, %rcx + WORD $0xc083; BYTE $0x80 // addl $-128, %eax + LONG $0x00c78148; WORD $0x0004; BYTE $0x00 // addq $1024, %rdi # imm = 0x400 + LONG $0x00c68148; WORD $0x0004; BYTE $0x00 // addq $1024, %rsi # imm = 0x400 + WORD $0xf883; BYTE $0x7f // cmpl $127, %eax + JA LBB5_20 + WORD $0xc085 // testl %eax, %eax + JNE LBB5_11 + JMP LBB5_18 + +LBB5_5: + LONG $0x01c78349 // addq $1, %r15 + WORD $0x894d; BYTE $0xfc // movq %r15, %r12 + LONG $0xe0e48349 // andq $-32, %r12 + WORD $0x2944; BYTE $0xe0 // subl %r12d, %eax + LONG $0xe6348d4e // leaq (%rsi,%r12,8), %r14 + LONG $0xe72c8d4e // leaq (%rdi,%r12,8), %r13 + LONG $0x48fdd262; WORD $0xc17c // vpbroadcastq %r9, %zmm0 + LONG $0x48fdd262; WORD $0xca7c // vpbroadcastq %r10, %zmm1 + LONG $0x48fdd262; WORD $0xd37c // vpbroadcastq %r11, %zmm2 + LONG $0x48fdd262; WORD $0xd87c // vpbroadcastq %r8, %zmm3 + QUAD $0x00000000fd0c8d4a // leaq (,%r15,8), %rcx + LONG $0x00e18148; WORD $0xffff; BYTE $0xff // andq $-256, %rcx + LONG $0xe4efd9c5 // vpxor %xmm4, %xmm4, %xmm4 + WORD $0xdb31 // xorl %ebx, %ebx + LONG $0xedefd1c5 // vpxor %xmm5, %xmm5, %xmm5 + LONG $0xf6efc9c5 // vpxor %xmm6, %xmm6, %xmm6 + LONG $0xffefc1c5 // vpxor %xmm7, %xmm7, %xmm7 + +LBB5_6: + LONG $0x48fe7162; WORD $0x046f; BYTE $0x1e // vmovdqu64 (%rsi,%rbx), %zmm8 + QUAD $0x011e4c6f48fe7162 // vmovdqu64 64(%rsi,%rbx), %zmm9 + QUAD $0x021e546f48fe7162 // vmovdqu64 128(%rsi,%rbx), %zmm10 + QUAD $0x031e5c6f48fe7162 // vmovdqu64 192(%rsi,%rbx), 
%zmm11 + LONG $0x48bd7162; WORD $0x04ef; BYTE $0x1f // vpxorq (%rdi,%rbx), %zmm8, %zmm8 + QUAD $0x011f4cef48b57162 // vpxorq 64(%rdi,%rbx), %zmm9, %zmm9 + QUAD $0x021f54ef48ad7162 // vpxorq 128(%rdi,%rbx), %zmm10, %zmm10 + QUAD $0x031f5cef48a57162 // vpxorq 192(%rdi,%rbx), %zmm11, %zmm11 + LONG $0x48fd5162; WORD $0xe0db // vpandq %zmm8, %zmm0, %zmm12 + LONG $0x48fd5162; WORD $0xe9db // vpandq %zmm9, %zmm0, %zmm13 + LONG $0x48fd5162; WORD $0xf2db // vpandq %zmm10, %zmm0, %zmm14 + LONG $0x48fd5162; WORD $0xfbdb // vpandq %zmm11, %zmm0, %zmm15 + LONG $0x48bdd162; WORD $0xd073; BYTE $0x01 // vpsrlq $1, %zmm8, %zmm8 + LONG $0x48b5d162; WORD $0xd173; BYTE $0x01 // vpsrlq $1, %zmm9, %zmm9 + LONG $0x48add162; WORD $0xd273; BYTE $0x01 // vpsrlq $1, %zmm10, %zmm10 + LONG $0x48a5d162; WORD $0xd373; BYTE $0x01 // vpsrlq $1, %zmm11, %zmm11 + LONG $0x48bd7162; WORD $0xc0db // vpandq %zmm0, %zmm8, %zmm8 + LONG $0x48bd5162; WORD $0xc4d4 // vpaddq %zmm12, %zmm8, %zmm8 + LONG $0x48b57162; WORD $0xc8db // vpandq %zmm0, %zmm9, %zmm9 + LONG $0x48b55162; WORD $0xcdd4 // vpaddq %zmm13, %zmm9, %zmm9 + LONG $0x48ad7162; WORD $0xd0db // vpandq %zmm0, %zmm10, %zmm10 + LONG $0x48ad5162; WORD $0xd6d4 // vpaddq %zmm14, %zmm10, %zmm10 + LONG $0x48a57162; WORD $0xd8db // vpandq %zmm0, %zmm11, %zmm11 + LONG $0x48a55162; WORD $0xdfd4 // vpaddq %zmm15, %zmm11, %zmm11 + LONG $0x48bd7162; WORD $0xe1db // vpandq %zmm1, %zmm8, %zmm12 + LONG $0x48b57162; WORD $0xe9db // vpandq %zmm1, %zmm9, %zmm13 + LONG $0x48ad7162; WORD $0xf1db // vpandq %zmm1, %zmm10, %zmm14 + LONG $0x48a57162; WORD $0xf9db // vpandq %zmm1, %zmm11, %zmm15 + LONG $0x48bdd162; WORD $0xd073; BYTE $0x02 // vpsrlq $2, %zmm8, %zmm8 + LONG $0x48b5d162; WORD $0xd173; BYTE $0x02 // vpsrlq $2, %zmm9, %zmm9 + LONG $0x48add162; WORD $0xd273; BYTE $0x02 // vpsrlq $2, %zmm10, %zmm10 + LONG $0x48a5d162; WORD $0xd373; BYTE $0x02 // vpsrlq $2, %zmm11, %zmm11 + LONG $0x48bd7162; WORD $0xc1db // vpandq %zmm1, %zmm8, %zmm8 + LONG $0x48bd5162; WORD 
$0xc4d4 // vpaddq %zmm12, %zmm8, %zmm8 + LONG $0x48b57162; WORD $0xc9db // vpandq %zmm1, %zmm9, %zmm9 + LONG $0x48b55162; WORD $0xcdd4 // vpaddq %zmm13, %zmm9, %zmm9 + LONG $0x48ad7162; WORD $0xd1db // vpandq %zmm1, %zmm10, %zmm10 + LONG $0x48ad5162; WORD $0xd6d4 // vpaddq %zmm14, %zmm10, %zmm10 + LONG $0x48a57162; WORD $0xd9db // vpandq %zmm1, %zmm11, %zmm11 + LONG $0x48a55162; WORD $0xdfd4 // vpaddq %zmm15, %zmm11, %zmm11 + LONG $0x48bd7162; WORD $0xe2db // vpandq %zmm2, %zmm8, %zmm12 + LONG $0x48b57162; WORD $0xeadb // vpandq %zmm2, %zmm9, %zmm13 + LONG $0x48ad7162; WORD $0xf2db // vpandq %zmm2, %zmm10, %zmm14 + LONG $0x48a57162; WORD $0xfadb // vpandq %zmm2, %zmm11, %zmm15 + LONG $0x48bdd162; WORD $0xd073; BYTE $0x04 // vpsrlq $4, %zmm8, %zmm8 + LONG $0x48b5d162; WORD $0xd173; BYTE $0x04 // vpsrlq $4, %zmm9, %zmm9 + LONG $0x48add162; WORD $0xd273; BYTE $0x04 // vpsrlq $4, %zmm10, %zmm10 + LONG $0x48a5d162; WORD $0xd373; BYTE $0x04 // vpsrlq $4, %zmm11, %zmm11 + LONG $0x48bd7162; WORD $0xc2db // vpandq %zmm2, %zmm8, %zmm8 + LONG $0x48bd5162; WORD $0xc4d4 // vpaddq %zmm12, %zmm8, %zmm8 + LONG $0x48b57162; WORD $0xcadb // vpandq %zmm2, %zmm9, %zmm9 + LONG $0x48b55162; WORD $0xcdd4 // vpaddq %zmm13, %zmm9, %zmm9 + LONG $0x48ad7162; WORD $0xd2db // vpandq %zmm2, %zmm10, %zmm10 + LONG $0x48ad5162; WORD $0xd6d4 // vpaddq %zmm14, %zmm10, %zmm10 + LONG $0x48a57162; WORD $0xdadb // vpandq %zmm2, %zmm11, %zmm11 + LONG $0x48a55162; WORD $0xdfd4 // vpaddq %zmm15, %zmm11, %zmm11 + LONG $0x48bd7262; WORD $0xc340 // vpmullq %zmm3, %zmm8, %zmm8 + LONG $0x48b57262; WORD $0xcb40 // vpmullq %zmm3, %zmm9, %zmm9 + LONG $0x48ad7262; WORD $0xd340 // vpmullq %zmm3, %zmm10, %zmm10 + LONG $0x48a57262; WORD $0xdb40 // vpmullq %zmm3, %zmm11, %zmm11 + LONG $0x48bdd162; WORD $0xd073; BYTE $0x38 // vpsrlq $56, %zmm8, %zmm8 + LONG $0x48bdf162; WORD $0xe4d4 // vpaddq %zmm4, %zmm8, %zmm4 + LONG $0x48bdd162; WORD $0xd173; BYTE $0x38 // vpsrlq $56, %zmm9, %zmm8 + LONG $0x48bdf162; WORD $0xedd4 // 
vpaddq %zmm5, %zmm8, %zmm5 + LONG $0x48bdd162; WORD $0xd273; BYTE $0x38 // vpsrlq $56, %zmm10, %zmm8 + LONG $0x48bdf162; WORD $0xf6d4 // vpaddq %zmm6, %zmm8, %zmm6 + LONG $0x48bdd162; WORD $0xd373; BYTE $0x38 // vpsrlq $56, %zmm11, %zmm8 + LONG $0x48bdf162; WORD $0xffd4 // vpaddq %zmm7, %zmm8, %zmm7 + LONG $0x00c38148; WORD $0x0001; BYTE $0x00 // addq $256, %rbx # imm = 0x100 + WORD $0x3948; BYTE $0xd9 // cmpq %rbx, %rcx + JNE LBB5_6 + LONG $0x48d5f162; WORD $0xc4d4 // vpaddq %zmm4, %zmm5, %zmm0 + LONG $0x48cdf162; WORD $0xc0d4 // vpaddq %zmm0, %zmm6, %zmm0 + LONG $0x48c5f162; WORD $0xc0d4 // vpaddq %zmm0, %zmm7, %zmm0 + LONG $0x48fdf362; WORD $0xc13b; BYTE $0x01 // vextracti64x4 $1, %zmm0, %ymm1 + LONG $0x48fdf162; WORD $0xc1d4 // vpaddq %zmm1, %zmm0, %zmm0 + LONG $0x397de3c4; WORD $0x01c1 // vextracti128 $1, %ymm0, %xmm1 + LONG $0xc1d4f9c5 // vpaddq %xmm1, %xmm0, %xmm0 + LONG $0xc870f9c5; BYTE $0xee // vpshufd $238, %xmm0, %xmm1 # xmm1 = xmm0[2,3,2,3] + LONG $0xc1d4f9c5 // vpaddq %xmm1, %xmm0, %xmm0 + LONG $0x7ef9e1c4; BYTE $0xc1 // vmovq %xmm0, %rcx + WORD $0x394d; BYTE $0xe7 // cmpq %r12, %r15 + JE LBB5_18 + +LBB5_8: + WORD $0xc089 // movl %eax, %eax + WORD $0xf631 // xorl %esi, %esi + +LBB5_9: + LONG $0xf63c8b49 // movq (%r14,%rsi,8), %rdi + LONG $0xf57c3349; BYTE $0x00 // xorq (%r13,%rsi,8), %rdi + WORD $0x894c; BYTE $0xcb // movq %r9, %rbx + WORD $0x2148; BYTE $0xfb // andq %rdi, %rbx + WORD $0xd148; BYTE $0xef // shrq %rdi + WORD $0x214c; BYTE $0xcf // andq %r9, %rdi + WORD $0x0148; BYTE $0xdf // addq %rbx, %rdi + WORD $0x8948; BYTE $0xfb // movq %rdi, %rbx + WORD $0x214c; BYTE $0xd3 // andq %r10, %rbx + LONG $0x02efc148 // shrq $2, %rdi + WORD $0x214c; BYTE $0xd7 // andq %r10, %rdi + WORD $0x0148; BYTE $0xdf // addq %rbx, %rdi + WORD $0x8948; BYTE $0xfb // movq %rdi, %rbx + WORD $0x214c; BYTE $0xdb // andq %r11, %rbx + LONG $0x04efc148 // shrq $4, %rdi + WORD $0x214c; BYTE $0xdf // andq %r11, %rdi + WORD $0x0148; BYTE $0xdf // addq %rbx, %rdi + LONG 
$0xf8af0f49 // imulq %r8, %rdi + LONG $0x38efc148 // shrq $56, %rdi + WORD $0x0148; BYTE $0xf9 // addq %rdi, %rcx + LONG $0x01c68348 // addq $1, %rsi + WORD $0xf039 // cmpl %esi, %eax + JNE LBB5_9 + JMP LBB5_18 + +LBB5_2: + WORD $0xc931 // xorl %ecx, %ecx + +LBB5_11: + WORD $0x8b4d; BYTE $0x08 // movq (%r8), %r9 + LONG $0x08508b4d // movq 8(%r8), %r10 + LONG $0x10588b4d // movq 16(%r8), %r11 + LONG $0x18408b4d // movq 24(%r8), %r8 + LONG $0xff788d44 // leal -1(%rax), %r15d + LONG $0x1fff8341 // cmpl $31, %r15d + JAE LBB5_13 + WORD $0x8949; BYTE $0xfe // movq %rdi, %r14 + WORD $0x8949; BYTE $0xf4 // movq %rsi, %r12 + JMP LBB5_16 + +LBB5_13: + LONG $0x01c78349 // addq $1, %r15 + WORD $0x894c; BYTE $0xfb // movq %r15, %rbx + LONG $0xe0e38348 // andq $-32, %rbx + LONG $0xdf348d4c // leaq (%rdi,%rbx,8), %r14 + LONG $0xde248d4c // leaq (%rsi,%rbx,8), %r12 + WORD $0xd829 // subl %ebx, %eax + LONG $0x6ef9e1c4; BYTE $0xc1 // vmovq %rcx, %xmm0 + LONG $0x48fdd262; WORD $0xc97c // vpbroadcastq %r9, %zmm1 + LONG $0x48fdd262; WORD $0xd27c // vpbroadcastq %r10, %zmm2 + LONG $0x48fdd262; WORD $0xdb7c // vpbroadcastq %r11, %zmm3 + LONG $0x48fdd262; WORD $0xe87c // vpbroadcastq %r8, %zmm5 + LONG $0xe4efd9c5 // vpxor %xmm4, %xmm4, %xmm4 + WORD $0xc931 // xorl %ecx, %ecx + LONG $0xf6efc9c5 // vpxor %xmm6, %xmm6, %xmm6 + LONG $0xffefc1c5 // vpxor %xmm7, %xmm7, %xmm7 + +LBB5_14: + LONG $0x48fe7162; WORD $0x046f; BYTE $0xce // vmovdqu64 (%rsi,%rcx,8), %zmm8 + QUAD $0x01ce4c6f48fe7162 // vmovdqu64 64(%rsi,%rcx,8), %zmm9 + QUAD $0x02ce546f48fe7162 // vmovdqu64 128(%rsi,%rcx,8), %zmm10 + QUAD $0x03ce5c6f48fe7162 // vmovdqu64 192(%rsi,%rcx,8), %zmm11 + LONG $0x48bd7162; WORD $0x04ef; BYTE $0xcf // vpxorq (%rdi,%rcx,8), %zmm8, %zmm8 + QUAD $0x01cf4cef48b57162 // vpxorq 64(%rdi,%rcx,8), %zmm9, %zmm9 + QUAD $0x02cf54ef48ad7162 // vpxorq 128(%rdi,%rcx,8), %zmm10, %zmm10 + QUAD $0x03cf5cef48a57162 // vpxorq 192(%rdi,%rcx,8), %zmm11, %zmm11 + LONG $0x48f55162; WORD $0xe0db // vpandq %zmm8, %zmm1, 
%zmm12 + LONG $0x48f55162; WORD $0xe9db // vpandq %zmm9, %zmm1, %zmm13 + LONG $0x48f55162; WORD $0xf2db // vpandq %zmm10, %zmm1, %zmm14 + LONG $0x48f55162; WORD $0xfbdb // vpandq %zmm11, %zmm1, %zmm15 + LONG $0x48bdd162; WORD $0xd073; BYTE $0x01 // vpsrlq $1, %zmm8, %zmm8 + LONG $0x48b5d162; WORD $0xd173; BYTE $0x01 // vpsrlq $1, %zmm9, %zmm9 + LONG $0x48add162; WORD $0xd273; BYTE $0x01 // vpsrlq $1, %zmm10, %zmm10 + LONG $0x48a5d162; WORD $0xd373; BYTE $0x01 // vpsrlq $1, %zmm11, %zmm11 + LONG $0x48bd7162; WORD $0xc1db // vpandq %zmm1, %zmm8, %zmm8 + LONG $0x48bd5162; WORD $0xc4d4 // vpaddq %zmm12, %zmm8, %zmm8 + LONG $0x48b57162; WORD $0xc9db // vpandq %zmm1, %zmm9, %zmm9 + LONG $0x48b55162; WORD $0xcdd4 // vpaddq %zmm13, %zmm9, %zmm9 + LONG $0x48ad7162; WORD $0xd1db // vpandq %zmm1, %zmm10, %zmm10 + LONG $0x48ad5162; WORD $0xd6d4 // vpaddq %zmm14, %zmm10, %zmm10 + LONG $0x48a57162; WORD $0xd9db // vpandq %zmm1, %zmm11, %zmm11 + LONG $0x48a55162; WORD $0xdfd4 // vpaddq %zmm15, %zmm11, %zmm11 + LONG $0x48bd7162; WORD $0xe2db // vpandq %zmm2, %zmm8, %zmm12 + LONG $0x48b57162; WORD $0xeadb // vpandq %zmm2, %zmm9, %zmm13 + LONG $0x48ad7162; WORD $0xf2db // vpandq %zmm2, %zmm10, %zmm14 + LONG $0x48a57162; WORD $0xfadb // vpandq %zmm2, %zmm11, %zmm15 + LONG $0x48bdd162; WORD $0xd073; BYTE $0x02 // vpsrlq $2, %zmm8, %zmm8 + LONG $0x48b5d162; WORD $0xd173; BYTE $0x02 // vpsrlq $2, %zmm9, %zmm9 + LONG $0x48add162; WORD $0xd273; BYTE $0x02 // vpsrlq $2, %zmm10, %zmm10 + LONG $0x48a5d162; WORD $0xd373; BYTE $0x02 // vpsrlq $2, %zmm11, %zmm11 + LONG $0x48bd7162; WORD $0xc2db // vpandq %zmm2, %zmm8, %zmm8 + LONG $0x48bd5162; WORD $0xc4d4 // vpaddq %zmm12, %zmm8, %zmm8 + LONG $0x48b57162; WORD $0xcadb // vpandq %zmm2, %zmm9, %zmm9 + LONG $0x48b55162; WORD $0xcdd4 // vpaddq %zmm13, %zmm9, %zmm9 + LONG $0x48ad7162; WORD $0xd2db // vpandq %zmm2, %zmm10, %zmm10 + LONG $0x48ad5162; WORD $0xd6d4 // vpaddq %zmm14, %zmm10, %zmm10 + LONG $0x48a57162; WORD $0xdadb // vpandq %zmm2, 
%zmm11, %zmm11 + LONG $0x48a55162; WORD $0xdfd4 // vpaddq %zmm15, %zmm11, %zmm11 + LONG $0x48bd7162; WORD $0xe3db // vpandq %zmm3, %zmm8, %zmm12 + LONG $0x48b57162; WORD $0xebdb // vpandq %zmm3, %zmm9, %zmm13 + LONG $0x48ad7162; WORD $0xf3db // vpandq %zmm3, %zmm10, %zmm14 + LONG $0x48a57162; WORD $0xfbdb // vpandq %zmm3, %zmm11, %zmm15 + LONG $0x48bdd162; WORD $0xd073; BYTE $0x04 // vpsrlq $4, %zmm8, %zmm8 + LONG $0x48b5d162; WORD $0xd173; BYTE $0x04 // vpsrlq $4, %zmm9, %zmm9 + LONG $0x48add162; WORD $0xd273; BYTE $0x04 // vpsrlq $4, %zmm10, %zmm10 + LONG $0x48a5d162; WORD $0xd373; BYTE $0x04 // vpsrlq $4, %zmm11, %zmm11 + LONG $0x48bd7162; WORD $0xc3db // vpandq %zmm3, %zmm8, %zmm8 + LONG $0x48bd5162; WORD $0xc4d4 // vpaddq %zmm12, %zmm8, %zmm8 + LONG $0x48b57162; WORD $0xcbdb // vpandq %zmm3, %zmm9, %zmm9 + LONG $0x48b55162; WORD $0xcdd4 // vpaddq %zmm13, %zmm9, %zmm9 + LONG $0x48ad7162; WORD $0xd3db // vpandq %zmm3, %zmm10, %zmm10 + LONG $0x48ad5162; WORD $0xd6d4 // vpaddq %zmm14, %zmm10, %zmm10 + LONG $0x48a57162; WORD $0xdbdb // vpandq %zmm3, %zmm11, %zmm11 + LONG $0x48a55162; WORD $0xdfd4 // vpaddq %zmm15, %zmm11, %zmm11 + LONG $0x48bd7262; WORD $0xc540 // vpmullq %zmm5, %zmm8, %zmm8 + LONG $0x48b57262; WORD $0xcd40 // vpmullq %zmm5, %zmm9, %zmm9 + LONG $0x48ad7262; WORD $0xd540 // vpmullq %zmm5, %zmm10, %zmm10 + LONG $0x48a57262; WORD $0xdd40 // vpmullq %zmm5, %zmm11, %zmm11 + LONG $0x48bdd162; WORD $0xd073; BYTE $0x38 // vpsrlq $56, %zmm8, %zmm8 + LONG $0x48bdf162; WORD $0xc0d4 // vpaddq %zmm0, %zmm8, %zmm0 + LONG $0x48bdd162; WORD $0xd173; BYTE $0x38 // vpsrlq $56, %zmm9, %zmm8 + LONG $0x48bdf162; WORD $0xe4d4 // vpaddq %zmm4, %zmm8, %zmm4 + LONG $0x48bdd162; WORD $0xd273; BYTE $0x38 // vpsrlq $56, %zmm10, %zmm8 + LONG $0x48bdf162; WORD $0xf6d4 // vpaddq %zmm6, %zmm8, %zmm6 + LONG $0x48bdd162; WORD $0xd373; BYTE $0x38 // vpsrlq $56, %zmm11, %zmm8 + LONG $0x48bdf162; WORD $0xffd4 // vpaddq %zmm7, %zmm8, %zmm7 + LONG $0x20c18348 // addq $32, %rcx + WORD 
$0x3948; BYTE $0xcb // cmpq %rcx, %rbx + JNE LBB5_14 + LONG $0x48ddf162; WORD $0xc0d4 // vpaddq %zmm0, %zmm4, %zmm0 + LONG $0x48cdf162; WORD $0xc0d4 // vpaddq %zmm0, %zmm6, %zmm0 + LONG $0x48c5f162; WORD $0xc0d4 // vpaddq %zmm0, %zmm7, %zmm0 + LONG $0x48fdf362; WORD $0xc13b; BYTE $0x01 // vextracti64x4 $1, %zmm0, %ymm1 + LONG $0x48fdf162; WORD $0xc1d4 // vpaddq %zmm1, %zmm0, %zmm0 + LONG $0x397de3c4; WORD $0x01c1 // vextracti128 $1, %ymm0, %xmm1 + LONG $0xc1d4f9c5 // vpaddq %xmm1, %xmm0, %xmm0 + LONG $0xc870f9c5; BYTE $0xee // vpshufd $238, %xmm0, %xmm1 # xmm1 = xmm0[2,3,2,3] + LONG $0xc1d4f9c5 // vpaddq %xmm1, %xmm0, %xmm0 + LONG $0x7ef9e1c4; BYTE $0xc1 // vmovq %xmm0, %rcx + WORD $0x3949; BYTE $0xdf // cmpq %rbx, %r15 + JE LBB5_18 + +LBB5_16: + WORD $0xc089 // movl %eax, %eax + WORD $0xf631 // xorl %esi, %esi + +LBB5_17: + LONG $0xf43c8b49 // movq (%r12,%rsi,8), %rdi + LONG $0xf63c3349 // xorq (%r14,%rsi,8), %rdi + WORD $0x894c; BYTE $0xcb // movq %r9, %rbx + WORD $0x2148; BYTE $0xfb // andq %rdi, %rbx + WORD $0xd148; BYTE $0xef // shrq %rdi + WORD $0x214c; BYTE $0xcf // andq %r9, %rdi + WORD $0x0148; BYTE $0xdf // addq %rbx, %rdi + WORD $0x8948; BYTE $0xfb // movq %rdi, %rbx + WORD $0x214c; BYTE $0xd3 // andq %r10, %rbx + LONG $0x02efc148 // shrq $2, %rdi + WORD $0x214c; BYTE $0xd7 // andq %r10, %rdi + WORD $0x0148; BYTE $0xdf // addq %rbx, %rdi + WORD $0x8948; BYTE $0xfb // movq %rdi, %rbx + WORD $0x214c; BYTE $0xdb // andq %r11, %rbx + LONG $0x04efc148 // shrq $4, %rdi + WORD $0x214c; BYTE $0xdf // andq %r11, %rdi + WORD $0x0148; BYTE $0xdf // addq %rbx, %rdi + LONG $0xf8af0f49 // imulq %r8, %rdi + LONG $0x38efc148 // shrq $56, %rdi + WORD $0x0148; BYTE $0xf9 // addq %rdi, %rcx + LONG $0x01c68348 // addq $1, %rsi + WORD $0xf039 // cmpl %esi, %eax + JNE LBB5_17 + +LBB5_18: + WORD $0x8948; BYTE $0x0a // movq %rcx, (%rdx) + LONG $0xd8658d48 // leaq -40(%rbp), %rsp + BYTE $0x5b // popq %rbx + WORD $0x5c41 // popq %r12 + WORD $0x5d41 // popq %r13 + WORD $0x5e41 // 
popq %r14 + WORD $0x5f41 // popq %r15 + BYTE $0x5d // popq %rbp + WORD $0xf8c5; BYTE $0x77 // vzeroupper + BYTE $0xc3 // retq diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/hamming_inline_arm64.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/hamming_inline_arm64.go new file mode 100644 index 0000000000000000000000000000000000000000..97117dde445cacbcfced724723f02e66a8c5a3da --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/hamming_inline_arm64.go @@ -0,0 +1,57 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package asm + +// Experiment with inlining and flattening the L2Squared distancer. +// Theoretically, this should be faster than the loop version for small vectors +// - it avoids the loop overhead +// - it eliminates the bounds check by reversing the iteration +// - it allows dot2, dot4 and dot6 to be inlined (the other ones are too large) +// See go tool compile -d=ssa/check_bce/debug=1 -m dot_inline.go +func hamming2(x []float32, y []float32) float32 { + sum := float32(0) + + if x[1] != y[1] { + sum = sum + 1 + } + if x[0] != y[0] { + sum = sum + 1 + } + + return sum +} + +func hamming4(x []float32, y []float32) float32 { + sum := float32(0) + + if x[3] != y[3] { + sum = sum + 1 + } + if x[2] != y[2] { + sum = sum + 1 + } + + return hamming2(x, y) + sum +} + +func hamming6(x []float32, y []float32) float32 { + sum := float32(0) + + if x[5] != y[5] { + sum = sum + 1.0 + } + if x[4] != y[4] { + sum = sum + 1.0 + } + + return hamming4(x, y) + sum +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/hamming_stub_arm64.go 
b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/hamming_stub_arm64.go new file mode 100644 index 0000000000000000000000000000000000000000..e80dac194d07681b09e4727e66f1272d40dbf905 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/hamming_stub_arm64.go @@ -0,0 +1,119 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package asm + +// To generate the asm code, run: +// go install github.com/gorse-io/goat@v0.1.0 +// go generate + +//go:generate goat ../c/hamming_arm64.c -O3 -e="-mfpu=neon-fp-armv8" -e="-mfloat-abi=hard" -e="--target=arm64" -e="-march=armv8-a+simd+fp" +//go:generate goat ../c/hamming_bitwise_arm64.c -O3 -e="-mfpu=neon-fp-armv8" -e="-mfloat-abi=hard" -e="--target=arm64" -e="-march=armv8-a+simd+fp" + +import ( + "unsafe" +) + +// Hamming calculates the hamming distance between two vectors +// using SIMD instructions. 
+func Hamming(x []float32, y []float32) float32 { + switch len(x) { + case 2: + return hamming2(x, y) + case 4: + return hamming4(x, y) + case 6: + return hamming6(x, y) + case 8: + // manually inlined hamming8(x, y) + sum := float32(0) + + if x[7] != y[7] { + sum = sum + 1.0 + } + if x[6] != y[6] { + sum = sum + 1.0 + } + return hamming6(x, y) + sum + case 10: + // manually inlined hamming10(x, y) + sum := float32(0) + + if x[9] != y[9] { + sum = sum + 1.0 + } + if x[8] != y[8] { + sum = sum + 1.0 + } + + if x[7] != y[7] { + sum = sum + 1.0 + } + + if x[6] != y[6] { + sum = sum + 1.0 + } + return hamming6(x, y) + sum + case 12: + // manually inlined hamming12(x, y) + sum := float32(0) + + if x[11] != y[11] { + sum = sum + 1.0 + } + if x[10] != y[10] { + sum = sum + 1.0 + } + if x[9] != y[9] { + sum = sum + 1.0 + } + if x[8] != y[8] { + sum = sum + 1.0 + } + if x[7] != y[7] { + sum = sum + 1.0 + } + if x[6] != y[6] { + sum = sum + 1.0 + } + return hamming6(x, y) + sum + } + + var res float32 + + l := len(x) + hamming( + // The slice header contains the address of the underlying array. + // We only need to cast it to a pointer. + unsafe.Pointer(unsafe.SliceData(x)), + unsafe.Pointer(unsafe.SliceData(y)), + // The C function expects pointers to the result and the length of the arrays. + unsafe.Pointer(&res), + unsafe.Pointer(&l)) + + return res +} + +func HammingBitwise(x []uint64, y []uint64) float32 { + l := len(x) + + var res uint64 + hamming_bitwise( + // The slice header contains the address of the underlying array. + // We only need to cast it to a pointer. + unsafe.Pointer(unsafe.SliceData(x)), + unsafe.Pointer(unsafe.SliceData(y)), + // The C function expects pointers to the result and the length of the arrays. 
+ unsafe.Pointer(&res), + unsafe.Pointer(&l)) + + return float32(res) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2.go new file mode 100644 index 0000000000000000000000000000000000000000..814d19d45f61917dbafa39c1098c3493e08f3b54 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2.go @@ -0,0 +1,120 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +//go:build ignore +// +build ignore + +package main + +import ( + . "github.com/mmcloughlin/avo/build" + . "github.com/mmcloughlin/avo/operand" + . "github.com/mmcloughlin/avo/reg" +) + +var unroll = 4 + +func main() { + TEXT("L2", NOSPLIT, "func(x, y []float32) float32") + x := Mem{Base: Load(Param("x").Base(), GP64())} + y := Mem{Base: Load(Param("y").Base(), GP64())} + n := Load(Param("x").Len(), GP64()) + + acc := make([]VecVirtual, unroll) + diff := make([]VecVirtual, unroll) + for i := 0; i < unroll; i++ { + acc[i] = YMM() + diff[i] = YMM() + } + + for i := 0; i < unroll; i++ { + VXORPS(acc[i], acc[i], acc[i]) + VXORPS(diff[i], diff[i], diff[i]) + } + + blockitems := 8 * unroll + blocksize := 4 * blockitems + Label("blockloop") + CMPQ(n, U32(blockitems)) + JL(LabelRef("tail")) + + // Load x. 
+ xs := make([]VecVirtual, unroll) + for i := 0; i < unroll; i++ { + xs[i] = YMM() + } + + for i := 0; i < unroll; i++ { + VMOVUPS(x.Offset(32*i), xs[i]) + } + + for i := 0; i < unroll; i++ { + VSUBPS(y.Offset(32*i), xs[i], diff[i]) + } + + for i := 0; i < unroll; i++ { + VFMADD231PS(diff[i], diff[i], acc[i]) + } + + ADDQ(U32(blocksize), x.Base) + ADDQ(U32(blocksize), y.Base) + SUBQ(U32(blockitems), n) + JMP(LabelRef("blockloop")) + + // Process any trailing entries. + Label("tail") + tail := XMM() + VXORPS(tail, tail, tail) + + Label("tailloop") + CMPQ(n, U32(0)) + JE(LabelRef("reduce")) + + xt := XMM() + VMOVSS(x, xt) + + difft := XMM() + VSUBSS(y, xt, difft) + + VFMADD231SS(difft, difft, tail) + + ADDQ(U32(4), x.Base) + ADDQ(U32(4), y.Base) + DECQ(n) + JMP(LabelRef("tailloop")) + + // Reduce the lanes to one. + Label("reduce") + if unroll != 4 { + // we have hard-coded the reduction for this specific unrolling as it + // allows us to do 0+1 and 2+3 and only then have a multiplication which + // touches both. 
+ panic("addition is hard-coded") + } + + // Manual reduction + VADDPS(acc[0], acc[1], acc[0]) + VADDPS(acc[2], acc[3], acc[2]) + VADDPS(acc[0], acc[2], acc[0]) + + result := acc[0].AsX() + top := XMM() + VEXTRACTF128(U8(1), acc[0], top) + VADDPS(result, top, result) + VADDPS(result, tail, result) + VHADDPS(result, result, result) + VHADDPS(result, result, result) + Store(result, ReturnIndex(0)) + + RET() + + Generate() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2_amd64.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2_amd64.go new file mode 100644 index 0000000000000000000000000000000000000000..40d2e8aa892c95d0ad456e5e40fa72b12799ef6e --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2_amd64.go @@ -0,0 +1,153 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package asm + +//go:generate goat ../c/l2_avx256_amd64.c -O3 -mavx2 -mfma -mavx512f -mavx512dq -e="-mfloat-abi=hard" -e="-Rpass-analysis=loop-vectorize" -e="-Rpass=loop-vectorize" -e="-Rpass-missed=loop-vectorize" +//go:generate goat ../c/l2_avx512_amd64.c -O3 -mavx2 -mfma -mavx512f -mavx512dq -e="-mfloat-abi=hard" -e="-Rpass-analysis=loop-vectorize" -e="-Rpass=loop-vectorize" -e="-Rpass-missed=loop-vectorize" + +import "unsafe" + +func L2AVX256(x []float32, y []float32) float32 { + var res float32 + + l := len(x) + l2_256( + unsafe.Pointer(unsafe.SliceData(x)), + unsafe.Pointer(unsafe.SliceData(y)), + unsafe.Pointer(&res), + unsafe.Pointer(&l)) + + return res +} + +func L2AVX512(x []float32, y []float32) float32 { + var res float32 + + l := len(x) + l2_512( + unsafe.Pointer(unsafe.SliceData(x)), + unsafe.Pointer(unsafe.SliceData(y)), + unsafe.Pointer(&res), + unsafe.Pointer(&l)) + + return res +} + +func L2ByteAVX256(x []uint8, y []uint8) uint32 { + switch len(x) { + case 1: + diff := uint32(x[0]) - uint32(y[0]) + return diff * diff + case 2: + return l22[uint8, uint32](x, y) + case 3: + return l23[uint8, uint32](x, y) + case 4: + return l24[uint8, uint32](x, y) + case 5: + return l25[uint8, uint32](x, y) + case 6: + // manually inlined l26(x, y) + diff := uint32(x[5]) - uint32(y[5]) + sum := diff * diff + + diff = uint32(x[4]) - uint32(y[4]) + sum += diff * diff + + return l24[uint8, uint32](x, y) + sum + case 8: + // manually inlined l28(x, y) + diff := uint32(x[7]) - uint32(y[7]) + sum := diff * diff + + diff = uint32(x[6]) - uint32(y[6]) + sum += diff * diff + + diff = uint32(x[5]) - uint32(y[5]) + sum += diff * diff + + diff = uint32(x[4]) - uint32(y[4]) + sum += diff * diff + + return l24[uint8, uint32](x, y) + sum + case 10: + return l210[uint8, uint32](x, y) + case 12: + return l212[uint8, uint32](x, y) + } + + var res uint32 + + l := len(x) + l2_byte_256( + unsafe.Pointer(unsafe.SliceData(x)), + 
unsafe.Pointer(unsafe.SliceData(y)), + unsafe.Pointer(&res), + unsafe.Pointer(&l)) + + return res +} + +func L2FloatByteAVX256(x []float32, y []uint8) float32 { + var res float32 + + switch len(x) { + case 1: + diff := x[0] - float32(y[0]) + return diff * diff + case 2: + return l22FloatByte(x, y) + case 3: + return l23FloatByte(x, y) + case 4: + return l24FloatByte(x, y) + case 5: + return l25FloatByte(x, y) + case 6: + // manually inlined l26(x, y) + diff := x[5] - float32(y[5]) + sum := diff * diff + + diff = x[4] - float32(y[4]) + sum += diff * diff + + return l24FloatByte(x, y) + sum + case 8: + // manually inlined l28(x, y) + diff := x[7] - float32(y[7]) + sum := diff * diff + + diff = x[6] - float32(y[6]) + sum += diff * diff + + diff = x[5] - float32(y[5]) + sum += diff * diff + + diff = x[4] - float32(y[4]) + sum += diff * diff + + return l24FloatByte(x, y) + sum + case 10: + return l210FloatByte(x, y) + case 12: + return l212FloatByte(x, y) + } + + l := len(x) + l2_float_byte_256( + unsafe.Pointer(unsafe.SliceData(x)), + unsafe.Pointer(unsafe.SliceData(y)), + unsafe.Pointer(&res), + unsafe.Pointer(&l)) + + return res +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2_amd64.s b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2_amd64.s new file mode 100644 index 0000000000000000000000000000000000000000..7775020ea55f8c34a3264c82d2b2f5f146f9e95e --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2_amd64.s @@ -0,0 +1,64 @@ +// Code generated by command: go run l2.go -out l2_amd64.s -stubs l2_stub_amd64.go. DO NOT EDIT. 
+ +#include "textflag.h" + +// func L2(x []float32, y []float32) float32 +// Requires: AVX, FMA3, SSE +TEXT ·L2(SB), NOSPLIT, $0-52 + MOVQ x_base+0(FP), AX + MOVQ y_base+24(FP), CX + MOVQ x_len+8(FP), DX + VXORPS Y0, Y0, Y0 + VXORPS Y1, Y1, Y1 + VXORPS Y2, Y2, Y2 + VXORPS Y3, Y3, Y3 + VXORPS Y4, Y4, Y4 + VXORPS Y5, Y5, Y5 + VXORPS Y6, Y6, Y6 + VXORPS Y7, Y7, Y7 + +blockloop: + CMPQ DX, $0x00000020 + JL tail + VMOVUPS (AX), Y1 + VMOVUPS 32(AX), Y3 + VMOVUPS 64(AX), Y5 + VMOVUPS 96(AX), Y7 + VSUBPS (CX), Y1, Y1 + VSUBPS 32(CX), Y3, Y3 + VSUBPS 64(CX), Y5, Y5 + VSUBPS 96(CX), Y7, Y7 + VFMADD231PS Y1, Y1, Y0 + VFMADD231PS Y3, Y3, Y2 + VFMADD231PS Y5, Y5, Y4 + VFMADD231PS Y7, Y7, Y6 + ADDQ $0x00000080, AX + ADDQ $0x00000080, CX + SUBQ $0x00000020, DX + JMP blockloop + +tail: + VXORPS X1, X1, X1 + +tailloop: + CMPQ DX, $0x00000000 + JE reduce + VMOVSS (AX), X3 + VSUBSS (CX), X3, X3 + VFMADD231SS X3, X3, X1 + ADDQ $0x00000004, AX + ADDQ $0x00000004, CX + DECQ DX + JMP tailloop + +reduce: + VADDPS Y0, Y2, Y0 + VADDPS Y4, Y6, Y4 + VADDPS Y0, Y4, Y0 + VEXTRACTF128 $0x01, Y0, X2 + VADDPS X0, X2, X0 + VADDPS X0, X1, X0 + VHADDPS X0, X0, X0 + VHADDPS X0, X0, X0 + MOVSS X0, ret+48(FP) + RET diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2_arm64.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2_arm64.go new file mode 100644 index 0000000000000000000000000000000000000000..1d8fe2327a0a98d0c5df6ad6e501ae6d1a76af96 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2_arm64.go @@ -0,0 +1,214 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package asm + +import ( + "unsafe" +) + +// To generate the asm code, run: +// go install github.com/gorse-io/goat@v0.1.0 +// go generate + +//go:generate goat ../c/l2_neon_arm64.c -O3 -e="--target=arm64" -e="-march=armv8-a+simd+fp" +//go:generate goat ../c/l2_sve_arm64.c -O3 -e="-mcpu=neoverse-v1" -e="--target=arm64" -e="-march=armv8-a+sve" +//go:generate goat ../c/l2_neon_byte_arm64.c -O3 -e="--target=arm64" -e="-march=armv8-a+simd+fp" + +// L2 calculates the L2 distance between two vectors +// using SIMD instructions when possible. +// Vector lengths < 16 are handled by the Go implementation +// because the overhead of using reflection is too high. + +func L2_Neon(x []float32, y []float32) float32 { + switch len(x) { + case 2: + return l22[float32, float32](x, y) + case 4: + return l24[float32, float32](x, y) + case 6: + // manually inlined l26(x, y) + diff := x[5] - y[5] + sum := diff * diff + + diff = x[4] - y[4] + sum += diff * diff + + return l24[float32, float32](x, y) + sum + case 8: + // manually inlined l28(x, y) + diff := x[7] - y[7] + sum := diff * diff + + diff = x[6] - y[6] + sum += diff * diff + + diff = x[5] - y[5] + sum += diff * diff + + diff = x[4] - y[4] + sum += diff * diff + + return l24[float32, float32](x, y) + sum + case 10: + return l210[float32, float32](x, y) + case 12: + return l212[float32, float32](x, y) + } + + // deal with odd lengths and lengths 13, 14, 15 + if len(x) < 16 { + var sum float32 + + for i := range x { + diff := x[i] - y[i] + sum += diff * diff + } + + return sum + } + + var res float32 + + l := len(x) + + l2_neon( + unsafe.Pointer(unsafe.SliceData(x)), + unsafe.Pointer(unsafe.SliceData(y)), + unsafe.Pointer(&res), + unsafe.Pointer(&l)) + + return res +} + +func L2_SVE(x []float32, y []float32) float32 { + switch len(x) { + case 2: + return l22[float32, float32](x, y) + case 4: + return l24[float32, float32](x, y) + case 6: + // manually inlined l26(x, y) + diff := x[5] - y[5] 
+ sum := diff * diff + + diff = x[4] - y[4] + sum += diff * diff + + return l24[float32, float32](x, y) + sum + case 8: + // manually inlined l28(x, y) + diff := x[7] - y[7] + sum := diff * diff + + diff = x[6] - y[6] + sum += diff * diff + + diff = x[5] - y[5] + sum += diff * diff + + diff = x[4] - y[4] + sum += diff * diff + + return l24[float32, float32](x, y) + sum + case 10: + return l210[float32, float32](x, y) + case 12: + return l212[float32, float32](x, y) + } + + // deal with odd lengths and lengths 13, 14, 15 + if len(x) < 16 { + var sum float32 + + for i := range x { + diff := x[i] - y[i] + sum += diff * diff + } + + return sum + } + + var res float32 + + l := len(x) + l2_sve( + unsafe.Pointer(unsafe.SliceData(x)), + unsafe.Pointer(unsafe.SliceData(y)), + unsafe.Pointer(&res), + unsafe.Pointer(&l)) + + return res +} + +func L2ByteARM64(x []uint8, y []uint8) uint32 { + switch len(x) { + case 2: + return l22[uint8, uint32](x, y) + case 4: + return l24[uint8, uint32](x, y) + case 6: + // manually inlined l26(x, y) + diff := x[5] - y[5] + sum := diff * diff + + diff = x[4] - y[4] + sum += diff * diff + + return l24[uint8, uint32](x, y) + uint32(sum) + case 8: + // manually inlined l28(x, y) + diff := x[7] - y[7] + sum := diff * diff + + diff = x[6] - y[6] + sum += diff * diff + + diff = x[5] - y[5] + sum += diff * diff + + diff = x[4] - y[4] + sum += diff * diff + + return l24[uint8, uint32](x, y) + uint32(sum) + case 10: + return l210[uint8, uint32](x, y) + case 12: + return l212[uint8, uint32](x, y) + } + + // deal with odd lengths and lengths 13, 14, 15 + if len(x) < 16 { + var sum uint32 + + for i := range x { + diff := int32(x[i]) - int32(y[i]) + sum += uint32(diff * diff) + } + + return sum + } + + var res uint32 + + l := len(x) + + l2_neon_byte_256( + // The slice header contains the address of the underlying array. + // We only need to cast it to a pointer. 
+ unsafe.Pointer(unsafe.SliceData(x)), + unsafe.Pointer(unsafe.SliceData(y)), + // The C function expects pointers to the result and the length of the arrays. + unsafe.Pointer(&res), + unsafe.Pointer(&l)) + + return res +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2_avx256_amd64.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2_avx256_amd64.go new file mode 100644 index 0000000000000000000000000000000000000000..2f29c058922523741b41ac2927960137ae2017ec --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2_avx256_amd64.go @@ -0,0 +1,21 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +//go:build !noasm && amd64 + +// AUTO-GENERATED BY GOAT -- DO NOT EDIT + +package asm + +import "unsafe" + +//go:noescape +func l2_256(a, b, res, len unsafe.Pointer) diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2_avx256_amd64.s b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2_avx256_amd64.s new file mode 100644 index 0000000000000000000000000000000000000000..947c0ee75dbd5433fe023502d90579c0d524d5dc --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2_avx256_amd64.s @@ -0,0 +1,193 @@ +//go:build !noasm && amd64 +// AUTO-GENERATED BY GOAT -- DO NOT EDIT + +TEXT ·l2_256(SB), $0-32 + MOVQ a+0(FP), DI + MOVQ b+8(FP), SI + MOVQ res+16(FP), DX + MOVQ len+24(FP), CX + BYTE $0x55 // pushq %rbp + WORD $0x8948; BYTE $0xe5 // movq %rsp, %rbp + LONG $0xf8e48348 // andq $-8, %rsp + WORD $0x8b48; BYTE $0x01 // movq (%rcx), %rax + WORD $0xf883; BYTE $0x07 // cmpl $7, %eax + JG LBB0_9 + LONG 
$0xff408d44 // leal -1(%rax), %r8d + WORD $0x03a8 // testb $3, %al + JE LBB0_2 + WORD $0x8941; BYTE $0xc1 // movl %eax, %r9d + LONG $0x03e18341 // andl $3, %r9d + LONG $0xc057f8c5 // vxorps %xmm0, %xmm0, %xmm0 + WORD $0xc931 // xorl %ecx, %ecx + +LBB0_4: + LONG $0x0f10fac5 // vmovss (%rdi), %xmm1 + LONG $0x0e5cf2c5 // vsubss (%rsi), %xmm1, %xmm1 + LONG $0xc959f2c5 // vmulss %xmm1, %xmm1, %xmm1 + LONG $0xc158fac5 // vaddss %xmm1, %xmm0, %xmm0 + LONG $0x04c78348 // addq $4, %rdi + LONG $0x04c68348 // addq $4, %rsi + LONG $0x01c18348 // addq $1, %rcx + WORD $0x3941; BYTE $0xc9 // cmpl %ecx, %r9d + JNE LBB0_4 + WORD $0xc829 // subl %ecx, %eax + LONG $0x03f88341 // cmpl $3, %r8d + JAE LBB0_7 + +LBB0_26: + LONG $0x0211fac5 // vmovss %xmm0, (%rdx) + WORD $0x8948; BYTE $0xec // movq %rbp, %rsp + BYTE $0x5d // popq %rbp + BYTE $0xc3 // retq + +LBB0_9: + LONG $0xc057f8c5 // vxorps %xmm0, %xmm0, %xmm0 + WORD $0xf883; BYTE $0x20 // cmpl $32, %eax + JB LBB0_10 + LONG $0xc057f8c5 // vxorps %xmm0, %xmm0, %xmm0 + LONG $0xc957f0c5 // vxorps %xmm1, %xmm1, %xmm1 + LONG $0xd257e8c5 // vxorps %xmm2, %xmm2, %xmm2 + LONG $0xdb57e0c5 // vxorps %xmm3, %xmm3, %xmm3 + +LBB0_16: + LONG $0x2710fcc5 // vmovups (%rdi), %ymm4 + LONG $0x6f10fcc5; BYTE $0x20 // vmovups 32(%rdi), %ymm5 + LONG $0x7710fcc5; BYTE $0x40 // vmovups 64(%rdi), %ymm6 + LONG $0x7f10fcc5; BYTE $0x60 // vmovups 96(%rdi), %ymm7 + LONG $0x265cdcc5 // vsubps (%rsi), %ymm4, %ymm4 + LONG $0x6e5cd4c5; BYTE $0x20 // vsubps 32(%rsi), %ymm5, %ymm5 + LONG $0x765cccc5; BYTE $0x40 // vsubps 64(%rsi), %ymm6, %ymm6 + LONG $0x7e5cc4c5; BYTE $0x60 // vsubps 96(%rsi), %ymm7, %ymm7 + LONG $0xb85de2c4; BYTE $0xdc // vfmadd231ps %ymm4, %ymm4, %ymm3 + LONG $0xb855e2c4; BYTE $0xd5 // vfmadd231ps %ymm5, %ymm5, %ymm2 + LONG $0xb84de2c4; BYTE $0xce // vfmadd231ps %ymm6, %ymm6, %ymm1 + LONG $0xb845e2c4; BYTE $0xc7 // vfmadd231ps %ymm7, %ymm7, %ymm0 + WORD $0xc083; BYTE $0xe0 // addl $-32, %eax + LONG $0x80ef8348 // subq $-128, %rdi + LONG $0x80ee8348 
// subq $-128, %rsi + WORD $0xf883; BYTE $0x1f // cmpl $31, %eax + JA LBB0_16 + WORD $0xf883; BYTE $0x08 // cmpl $8, %eax + JAE LBB0_11 + JMP LBB0_13 + +LBB0_10: + LONG $0xc957f0c5 // vxorps %xmm1, %xmm1, %xmm1 + LONG $0xd257e8c5 // vxorps %xmm2, %xmm2, %xmm2 + LONG $0xdb57e0c5 // vxorps %xmm3, %xmm3, %xmm3 + +LBB0_11: + LONG $0x2710fcc5 // vmovups (%rdi), %ymm4 + LONG $0x265cdcc5 // vsubps (%rsi), %ymm4, %ymm4 + LONG $0xb85de2c4; BYTE $0xdc // vfmadd231ps %ymm4, %ymm4, %ymm3 + WORD $0xc083; BYTE $0xf8 // addl $-8, %eax + LONG $0x20c78348 // addq $32, %rdi + LONG $0x20c68348 // addq $32, %rsi + WORD $0xf883; BYTE $0x07 // cmpl $7, %eax + JA LBB0_11 + +LBB0_13: + WORD $0xc085 // testl %eax, %eax + JE LBB0_14 + LONG $0xff408d44 // leal -1(%rax), %r8d + WORD $0x03a8 // testb $3, %al + JE LBB0_18 + WORD $0x8941; BYTE $0xc1 // movl %eax, %r9d + LONG $0x03e18341 // andl $3, %r9d + LONG $0xe457d8c5 // vxorps %xmm4, %xmm4, %xmm4 + WORD $0xc931 // xorl %ecx, %ecx + +LBB0_20: + LONG $0x2f10fac5 // vmovss (%rdi), %xmm5 + LONG $0x2e5cd2c5 // vsubss (%rsi), %xmm5, %xmm5 + LONG $0xed59d2c5 // vmulss %xmm5, %xmm5, %xmm5 + LONG $0xe558dac5 // vaddss %xmm5, %xmm4, %xmm4 + LONG $0x04c78348 // addq $4, %rdi + LONG $0x04c68348 // addq $4, %rsi + LONG $0x01c18348 // addq $1, %rcx + WORD $0x3941; BYTE $0xc9 // cmpl %ecx, %r9d + JNE LBB0_20 + WORD $0xc829 // subl %ecx, %eax + LONG $0x03f88341 // cmpl $3, %r8d + JAE LBB0_23 + JMP LBB0_25 + +LBB0_2: + LONG $0xc057f8c5 // vxorps %xmm0, %xmm0, %xmm0 + LONG $0x03f88341 // cmpl $3, %r8d + JB LBB0_26 + +LBB0_7: + WORD $0xc089 // movl %eax, %eax + WORD $0xc931 // xorl %ecx, %ecx + +LBB0_8: + LONG $0x0c10fac5; BYTE $0x8f // vmovss (%rdi,%rcx,4), %xmm1 + LONG $0x5410fac5; WORD $0x048f // vmovss 4(%rdi,%rcx,4), %xmm2 + LONG $0x0c5cf2c5; BYTE $0x8e // vsubss (%rsi,%rcx,4), %xmm1, %xmm1 + LONG $0xc959f2c5 // vmulss %xmm1, %xmm1, %xmm1 + LONG $0xc158fac5 // vaddss %xmm1, %xmm0, %xmm0 + LONG $0x4c5ceac5; WORD $0x048e // vsubss 4(%rsi,%rcx,4), %xmm2, 
%xmm1 + LONG $0xc959f2c5 // vmulss %xmm1, %xmm1, %xmm1 + LONG $0xc158fac5 // vaddss %xmm1, %xmm0, %xmm0 + LONG $0x4c10fac5; WORD $0x088f // vmovss 8(%rdi,%rcx,4), %xmm1 + LONG $0x4c5cf2c5; WORD $0x088e // vsubss 8(%rsi,%rcx,4), %xmm1, %xmm1 + LONG $0xc959f2c5 // vmulss %xmm1, %xmm1, %xmm1 + LONG $0xc158fac5 // vaddss %xmm1, %xmm0, %xmm0 + LONG $0x4c10fac5; WORD $0x0c8f // vmovss 12(%rdi,%rcx,4), %xmm1 + LONG $0x4c5cf2c5; WORD $0x0c8e // vsubss 12(%rsi,%rcx,4), %xmm1, %xmm1 + LONG $0xc959f2c5 // vmulss %xmm1, %xmm1, %xmm1 + LONG $0xc158fac5 // vaddss %xmm1, %xmm0, %xmm0 + LONG $0x04c18348 // addq $4, %rcx + WORD $0xc839 // cmpl %ecx, %eax + JNE LBB0_8 + JMP LBB0_26 + +LBB0_14: + LONG $0xe457d8c5 // vxorps %xmm4, %xmm4, %xmm4 + JMP LBB0_25 + +LBB0_18: + LONG $0xe457d8c5 // vxorps %xmm4, %xmm4, %xmm4 + LONG $0x03f88341 // cmpl $3, %r8d + JB LBB0_25 + +LBB0_23: + WORD $0xc089 // movl %eax, %eax + WORD $0xc931 // xorl %ecx, %ecx + +LBB0_24: + LONG $0x2c10fac5; BYTE $0x8f // vmovss (%rdi,%rcx,4), %xmm5 + LONG $0x7410fac5; WORD $0x048f // vmovss 4(%rdi,%rcx,4), %xmm6 + LONG $0x2c5cd2c5; BYTE $0x8e // vsubss (%rsi,%rcx,4), %xmm5, %xmm5 + LONG $0xed59d2c5 // vmulss %xmm5, %xmm5, %xmm5 + LONG $0xe558dac5 // vaddss %xmm5, %xmm4, %xmm4 + LONG $0x6c5ccac5; WORD $0x048e // vsubss 4(%rsi,%rcx,4), %xmm6, %xmm5 + LONG $0xed59d2c5 // vmulss %xmm5, %xmm5, %xmm5 + LONG $0xe558dac5 // vaddss %xmm5, %xmm4, %xmm4 + LONG $0x6c10fac5; WORD $0x088f // vmovss 8(%rdi,%rcx,4), %xmm5 + LONG $0x6c5cd2c5; WORD $0x088e // vsubss 8(%rsi,%rcx,4), %xmm5, %xmm5 + LONG $0xed59d2c5 // vmulss %xmm5, %xmm5, %xmm5 + LONG $0xe558dac5 // vaddss %xmm5, %xmm4, %xmm4 + LONG $0x6c10fac5; WORD $0x0c8f // vmovss 12(%rdi,%rcx,4), %xmm5 + LONG $0x6c5cd2c5; WORD $0x0c8e // vsubss 12(%rsi,%rcx,4), %xmm5, %xmm5 + LONG $0xed59d2c5 // vmulss %xmm5, %xmm5, %xmm5 + LONG $0xe558dac5 // vaddss %xmm5, %xmm4, %xmm4 + LONG $0x04c18348 // addq $4, %rcx + WORD $0xc839 // cmpl %ecx, %eax + JNE LBB0_24 + +LBB0_25: + LONG 
$0xd358ecc5 // vaddps %ymm3, %ymm2, %ymm2 + LONG $0xc058f4c5 // vaddps %ymm0, %ymm1, %ymm0 + LONG $0xc258fcc5 // vaddps %ymm2, %ymm0, %ymm0 + LONG $0xc07cffc5 // vhaddps %ymm0, %ymm0, %ymm0 + LONG $0xc07cffc5 // vhaddps %ymm0, %ymm0, %ymm0 + LONG $0x197de3c4; WORD $0x01c1 // vextractf128 $1, %ymm0, %xmm1 + LONG $0xc158fac5 // vaddss %xmm1, %xmm0, %xmm0 + LONG $0xc058dac5 // vaddss %xmm0, %xmm4, %xmm0 + LONG $0x0211fac5 // vmovss %xmm0, (%rdx) + WORD $0x8948; BYTE $0xec // movq %rbp, %rsp + BYTE $0x5d // popq %rbp + WORD $0xf8c5; BYTE $0x77 // vzeroupper + BYTE $0xc3 // retq diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2_avx512_amd64.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2_avx512_amd64.go new file mode 100644 index 0000000000000000000000000000000000000000..e1f4e17079cc737c96412a751b15139dc217cbc8 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2_avx512_amd64.go @@ -0,0 +1,21 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +//go:build !noasm && amd64 + +// AUTO-GENERATED BY GOAT -- DO NOT EDIT + +package asm + +import "unsafe" + +//go:noescape +func l2_512(a, b, res, len unsafe.Pointer) diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2_avx512_amd64.s b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2_avx512_amd64.s new file mode 100644 index 0000000000000000000000000000000000000000..c3cec8a6d1315cdb1a4ec6b466a28324e0b3acd3 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2_avx512_amd64.s @@ -0,0 +1,306 @@ +//go:build !noasm && amd64 +// AUTO-GENERATED BY GOAT -- DO NOT EDIT + +TEXT ·l2_512(SB), $0-32 + MOVQ a+0(FP), DI + MOVQ b+8(FP), SI + MOVQ res+16(FP), DX + MOVQ len+24(FP), CX + BYTE $0x55 // pushq %rbp + WORD $0x8948; BYTE $0xe5 // movq %rsp, %rbp + LONG $0xf8e48348 // andq $-8, %rsp + WORD $0x8b48; BYTE $0x01 // movq (%rcx), %rax + WORD $0xf883; BYTE $0x07 // cmpl $7, %eax + JG LBB0_9 + LONG $0xff408d44 // leal -1(%rax), %r8d + WORD $0x03a8 // testb $3, %al + JE LBB0_2 + WORD $0x8941; BYTE $0xc1 // movl %eax, %r9d + LONG $0x03e18341 // andl $3, %r9d + LONG $0xc057f8c5 // vxorps %xmm0, %xmm0, %xmm0 + WORD $0xc931 // xorl %ecx, %ecx + +LBB0_4: + LONG $0x0f10fac5 // vmovss (%rdi), %xmm1 + LONG $0x0e5cf2c5 // vsubss (%rsi), %xmm1, %xmm1 + LONG $0xc959f2c5 // vmulss %xmm1, %xmm1, %xmm1 + LONG $0xc158fac5 // vaddss %xmm1, %xmm0, %xmm0 + LONG $0x04c78348 // addq $4, %rdi + LONG $0x04c68348 // addq $4, %rsi + LONG $0x01c18348 // addq $1, %rcx + WORD $0x3941; BYTE $0xc9 // cmpl %ecx, %r9d + JNE LBB0_4 + WORD $0xc829 // subl %ecx, %eax + LONG $0x03f88341 // cmpl $3, %r8d + JAE LBB0_7 + +LBB0_36: + LONG $0x0211fac5 // vmovss %xmm0, (%rdx) + WORD $0x8948; BYTE $0xec // movq %rbp, %rsp + BYTE $0x5d // popq %rbp + BYTE $0xc3 // retq + +LBB0_9: + LONG $0xc057f8c5 // vxorps %xmm0, %xmm0, %xmm0 + LONG $0x0000803d; BYTE 
$0x00 // cmpl $128, %eax + JB LBB0_10 + LONG $0xc957f0c5 // vxorps %xmm1, %xmm1, %xmm1 + LONG $0xd257e8c5 // vxorps %xmm2, %xmm2, %xmm2 + LONG $0xdb57e0c5 // vxorps %xmm3, %xmm3, %xmm3 + LONG $0xed57d0c5 // vxorps %xmm5, %xmm5, %xmm5 + LONG $0xe457d8c5 // vxorps %xmm4, %xmm4, %xmm4 + LONG $0xf657c8c5 // vxorps %xmm6, %xmm6, %xmm6 + LONG $0xff57c0c5 // vxorps %xmm7, %xmm7, %xmm7 + LONG $0x573841c4; BYTE $0xc0 // vxorps %xmm8, %xmm8, %xmm8 + +LBB0_22: + LONG $0x487c7162; WORD $0x0f10 // vmovups (%rdi), %zmm9 + LONG $0x487c7162; WORD $0x5710; BYTE $0x01 // vmovups 64(%rdi), %zmm10 + LONG $0x487c7162; WORD $0x5f10; BYTE $0x02 // vmovups 128(%rdi), %zmm11 + LONG $0x487c7162; WORD $0x6710; BYTE $0x03 // vmovups 192(%rdi), %zmm12 + LONG $0x487c7162; WORD $0x6f10; BYTE $0x04 // vmovups 256(%rdi), %zmm13 + LONG $0x487c7162; WORD $0x7710; BYTE $0x05 // vmovups 320(%rdi), %zmm14 + LONG $0x487c7162; WORD $0x7f10; BYTE $0x06 // vmovups 384(%rdi), %zmm15 + LONG $0x48347162; WORD $0x0e5c // vsubps (%rsi), %zmm9, %zmm9 + LONG $0x482c7162; WORD $0x565c; BYTE $0x01 // vsubps 64(%rsi), %zmm10, %zmm10 + LONG $0x48247162; WORD $0x5e5c; BYTE $0x02 // vsubps 128(%rsi), %zmm11, %zmm11 + LONG $0x481c7162; WORD $0x665c; BYTE $0x03 // vsubps 192(%rsi), %zmm12, %zmm12 + LONG $0x48147162; WORD $0x6e5c; BYTE $0x04 // vsubps 256(%rsi), %zmm13, %zmm13 + LONG $0x480c7162; WORD $0x765c; BYTE $0x05 // vsubps 320(%rsi), %zmm14, %zmm14 + LONG $0x48047162; WORD $0x7e5c; BYTE $0x06 // vsubps 384(%rsi), %zmm15, %zmm15 + LONG $0x487ce162; WORD $0x4710; BYTE $0x07 // vmovups 448(%rdi), %zmm16 + LONG $0x407ce162; WORD $0x465c; BYTE $0x07 // vsubps 448(%rsi), %zmm16, %zmm16 + LONG $0x4835d262; WORD $0xc9b8 // vfmadd231ps %zmm9, %zmm9, %zmm1 + LONG $0x482dd262; WORD $0xd2b8 // vfmadd231ps %zmm10, %zmm10, %zmm2 + LONG $0x4825d262; WORD $0xdbb8 // vfmadd231ps %zmm11, %zmm11, %zmm3 + LONG $0x481dd262; WORD $0xecb8 // vfmadd231ps %zmm12, %zmm12, %zmm5 + LONG $0x4815d262; WORD $0xe5b8 // vfmadd231ps %zmm13, 
%zmm13, %zmm4 + LONG $0x480dd262; WORD $0xf6b8 // vfmadd231ps %zmm14, %zmm14, %zmm6 + LONG $0x4805d262; WORD $0xffb8 // vfmadd231ps %zmm15, %zmm15, %zmm7 + LONG $0x407d3262; WORD $0xc0b8 // vfmadd231ps %zmm16, %zmm16, %zmm8 + WORD $0xc083; BYTE $0x80 // addl $-128, %eax + LONG $0x00c78148; WORD $0x0002; BYTE $0x00 // addq $512, %rdi + LONG $0x00c68148; WORD $0x0002; BYTE $0x00 // addq $512, %rsi + WORD $0xbe0f; BYTE $0xc8 // movsbl %al, %ecx + WORD $0xc139 // cmpl %eax, %ecx + JNE LBB0_22 + LONG $0x4874f162; WORD $0xca58 // vaddps %zmm2, %zmm1, %zmm1 + LONG $0x4864f162; WORD $0xd558 // vaddps %zmm5, %zmm3, %zmm2 + LONG $0x4874f162; WORD $0xca58 // vaddps %zmm2, %zmm1, %zmm1 + LONG $0x485cf162; WORD $0xd658 // vaddps %zmm6, %zmm4, %zmm2 + LONG $0x4844d162; WORD $0xd858 // vaddps %zmm8, %zmm7, %zmm3 + LONG $0x486cf162; WORD $0xd358 // vaddps %zmm3, %zmm2, %zmm2 + LONG $0x4874f162; WORD $0xca58 // vaddps %zmm2, %zmm1, %zmm1 + LONG $0x48fdf362; WORD $0xcb1b; BYTE $0x01 // vextractf64x4 $1, %zmm1, %ymm3 + LONG $0xd257e8c5 // vxorps %xmm2, %xmm2, %xmm2 + LONG $0xca58f4c5 // vaddps %ymm2, %ymm1, %ymm1 + LONG $0xcb58f4c5 // vaddps %ymm3, %ymm1, %ymm1 + WORD $0xc085 // testl %eax, %eax + JE LBB0_24 + WORD $0xf883; BYTE $0x20 // cmpl $32, %eax + JB LBB0_12 + +LBB0_25: + LONG $0xd257e8c5 // vxorps %xmm2, %xmm2, %xmm2 + LONG $0xdb57e0c5 // vxorps %xmm3, %xmm3, %xmm3 + LONG $0xc057f8c5 // vxorps %xmm0, %xmm0, %xmm0 + +LBB0_26: + LONG $0x2710fcc5 // vmovups (%rdi), %ymm4 + LONG $0x6f10fcc5; BYTE $0x20 // vmovups 32(%rdi), %ymm5 + LONG $0x7710fcc5; BYTE $0x40 // vmovups 64(%rdi), %ymm6 + LONG $0x7f10fcc5; BYTE $0x60 // vmovups 96(%rdi), %ymm7 + LONG $0x265cdcc5 // vsubps (%rsi), %ymm4, %ymm4 + LONG $0x6e5cd4c5; BYTE $0x20 // vsubps 32(%rsi), %ymm5, %ymm5 + LONG $0x765cccc5; BYTE $0x40 // vsubps 64(%rsi), %ymm6, %ymm6 + LONG $0x7e5cc4c5; BYTE $0x60 // vsubps 96(%rsi), %ymm7, %ymm7 + LONG $0xb85de2c4; BYTE $0xcc // vfmadd231ps %ymm4, %ymm4, %ymm1 + LONG $0xb855e2c4; BYTE $0xc5 // 
vfmadd231ps %ymm5, %ymm5, %ymm0 + LONG $0xb84de2c4; BYTE $0xde // vfmadd231ps %ymm6, %ymm6, %ymm3 + LONG $0xb845e2c4; BYTE $0xd7 // vfmadd231ps %ymm7, %ymm7, %ymm2 + WORD $0xc083; BYTE $0xe0 // addl $-32, %eax + LONG $0x80ef8348 // subq $-128, %rdi + LONG $0x80ee8348 // subq $-128, %rsi + WORD $0xf883; BYTE $0x1f // cmpl $31, %eax + JA LBB0_26 + WORD $0xf883; BYTE $0x08 // cmpl $8, %eax + JAE LBB0_14 + JMP LBB0_19 + +LBB0_10: + LONG $0xc957f0c5 // vxorps %xmm1, %xmm1, %xmm1 + WORD $0xf883; BYTE $0x20 // cmpl $32, %eax + JAE LBB0_25 + +LBB0_12: + LONG $0xdb57e0c5 // vxorps %xmm3, %xmm3, %xmm3 + LONG $0xd257e8c5 // vxorps %xmm2, %xmm2, %xmm2 + WORD $0xf883; BYTE $0x08 // cmpl $8, %eax + JB LBB0_19 + +LBB0_14: + LONG $0xf8408d44 // leal -8(%rax), %r8d + WORD $0x8945; BYTE $0xc1 // movl %r8d, %r9d + LONG $0x03e9c141 // shrl $3, %r9d + LONG $0x01498d41 // leal 1(%r9), %ecx + WORD $0xc1f6; BYTE $0x03 // testb $3, %cl + JE LBB0_18 + LONG $0x01c18041 // addb $1, %r9b + LONG $0xc9b60f45 // movzbl %r9b, %r9d + LONG $0x03e18341 // andl $3, %r9d + LONG $0x03e1c149 // shlq $3, %r9 + WORD $0xc931 // xorl %ecx, %ecx + +LBB0_16: + LONG $0x2710fcc5 // vmovups (%rdi), %ymm4 + LONG $0x265cdcc5 // vsubps (%rsi), %ymm4, %ymm4 + LONG $0xb85de2c4; BYTE $0xcc // vfmadd231ps %ymm4, %ymm4, %ymm1 + LONG $0x20c78348 // addq $32, %rdi + LONG $0x20c68348 // addq $32, %rsi + LONG $0x08c18348 // addq $8, %rcx + WORD $0x3941; BYTE $0xc9 // cmpl %ecx, %r9d + JNE LBB0_16 + WORD $0xc829 // subl %ecx, %eax + +LBB0_18: + LONG $0x18f88341 // cmpl $24, %r8d + JB LBB0_19 + +LBB0_37: + LONG $0x2710fcc5 // vmovups (%rdi), %ymm4 + LONG $0x6f10fcc5; BYTE $0x20 // vmovups 32(%rdi), %ymm5 + LONG $0x7710fcc5; BYTE $0x40 // vmovups 64(%rdi), %ymm6 + LONG $0x7f10fcc5; BYTE $0x60 // vmovups 96(%rdi), %ymm7 + LONG $0x265cdcc5 // vsubps (%rsi), %ymm4, %ymm4 + LONG $0x6e5cd4c5; BYTE $0x20 // vsubps 32(%rsi), %ymm5, %ymm5 + LONG $0xa85de2c4; BYTE $0xe1 // vfmadd213ps %ymm1, %ymm4, %ymm4 + LONG $0xa855e2c4; BYTE $0xec 
// vfmadd213ps %ymm4, %ymm5, %ymm5 + LONG $0x665cccc5; BYTE $0x40 // vsubps 64(%rsi), %ymm6, %ymm4 + LONG $0xa85de2c4; BYTE $0xe5 // vfmadd213ps %ymm5, %ymm4, %ymm4 + LONG $0x4e5cc4c5; BYTE $0x60 // vsubps 96(%rsi), %ymm7, %ymm1 + LONG $0xa875e2c4; BYTE $0xcc // vfmadd213ps %ymm4, %ymm1, %ymm1 + WORD $0xc083; BYTE $0xe0 // addl $-32, %eax + LONG $0x80ef8348 // subq $-128, %rdi + LONG $0x80ee8348 // subq $-128, %rsi + WORD $0xf883; BYTE $0x07 // cmpl $7, %eax + JA LBB0_37 + +LBB0_19: + WORD $0xc085 // testl %eax, %eax + JE LBB0_20 + LONG $0xff408d44 // leal -1(%rax), %r8d + WORD $0x03a8 // testb $3, %al + JE LBB0_28 + WORD $0x8941; BYTE $0xc1 // movl %eax, %r9d + LONG $0x03e18341 // andl $3, %r9d + LONG $0xe457d8c5 // vxorps %xmm4, %xmm4, %xmm4 + WORD $0xc931 // xorl %ecx, %ecx + +LBB0_30: + LONG $0x2f10fac5 // vmovss (%rdi), %xmm5 + LONG $0x2e5cd2c5 // vsubss (%rsi), %xmm5, %xmm5 + LONG $0xed59d2c5 // vmulss %xmm5, %xmm5, %xmm5 + LONG $0xe558dac5 // vaddss %xmm5, %xmm4, %xmm4 + LONG $0x04c78348 // addq $4, %rdi + LONG $0x04c68348 // addq $4, %rsi + LONG $0x01c18348 // addq $1, %rcx + WORD $0x3941; BYTE $0xc9 // cmpl %ecx, %r9d + JNE LBB0_30 + WORD $0xc829 // subl %ecx, %eax + LONG $0x03f88341 // cmpl $3, %r8d + JAE LBB0_33 + JMP LBB0_35 + +LBB0_2: + LONG $0xc057f8c5 // vxorps %xmm0, %xmm0, %xmm0 + LONG $0x03f88341 // cmpl $3, %r8d + JB LBB0_36 + +LBB0_7: + WORD $0xc089 // movl %eax, %eax + WORD $0xc931 // xorl %ecx, %ecx + +LBB0_8: + LONG $0x0c10fac5; BYTE $0x8f // vmovss (%rdi,%rcx,4), %xmm1 + LONG $0x5410fac5; WORD $0x048f // vmovss 4(%rdi,%rcx,4), %xmm2 + LONG $0x0c5cf2c5; BYTE $0x8e // vsubss (%rsi,%rcx,4), %xmm1, %xmm1 + LONG $0xc959f2c5 // vmulss %xmm1, %xmm1, %xmm1 + LONG $0xc158fac5 // vaddss %xmm1, %xmm0, %xmm0 + LONG $0x4c5ceac5; WORD $0x048e // vsubss 4(%rsi,%rcx,4), %xmm2, %xmm1 + LONG $0xc959f2c5 // vmulss %xmm1, %xmm1, %xmm1 + LONG $0xc158fac5 // vaddss %xmm1, %xmm0, %xmm0 + LONG $0x4c10fac5; WORD $0x088f // vmovss 8(%rdi,%rcx,4), %xmm1 + LONG 
$0x4c5cf2c5; WORD $0x088e // vsubss 8(%rsi,%rcx,4), %xmm1, %xmm1 + LONG $0xc959f2c5 // vmulss %xmm1, %xmm1, %xmm1 + LONG $0xc158fac5 // vaddss %xmm1, %xmm0, %xmm0 + LONG $0x4c10fac5; WORD $0x0c8f // vmovss 12(%rdi,%rcx,4), %xmm1 + LONG $0x4c5cf2c5; WORD $0x0c8e // vsubss 12(%rsi,%rcx,4), %xmm1, %xmm1 + LONG $0xc959f2c5 // vmulss %xmm1, %xmm1, %xmm1 + LONG $0xc158fac5 // vaddss %xmm1, %xmm0, %xmm0 + LONG $0x04c18348 // addq $4, %rcx + WORD $0xc839 // cmpl %ecx, %eax + JNE LBB0_8 + JMP LBB0_36 + +LBB0_20: + LONG $0xe457d8c5 // vxorps %xmm4, %xmm4, %xmm4 + JMP LBB0_35 + +LBB0_24: + LONG $0xc258f4c5 // vaddps %ymm2, %ymm1, %ymm0 + LONG $0xc07cffc5 // vhaddps %ymm0, %ymm0, %ymm0 + LONG $0xc07cffc5 // vhaddps %ymm0, %ymm0, %ymm0 + LONG $0x197de3c4; WORD $0x01c1 // vextractf128 $1, %ymm0, %xmm1 + LONG $0xc158fac5 // vaddss %xmm1, %xmm0, %xmm0 + LONG $0xc957f0c5 // vxorps %xmm1, %xmm1, %xmm1 + LONG $0xc158fac5 // vaddss %xmm1, %xmm0, %xmm0 + LONG $0x0211fac5 // vmovss %xmm0, (%rdx) + WORD $0x8948; BYTE $0xec // movq %rbp, %rsp + BYTE $0x5d // popq %rbp + WORD $0xf8c5; BYTE $0x77 // vzeroupper + BYTE $0xc3 // retq + +LBB0_28: + LONG $0xe457d8c5 // vxorps %xmm4, %xmm4, %xmm4 + LONG $0x03f88341 // cmpl $3, %r8d + JB LBB0_35 + +LBB0_33: + WORD $0xc089 // movl %eax, %eax + WORD $0xc931 // xorl %ecx, %ecx + +LBB0_34: + LONG $0x2c10fac5; BYTE $0x8f // vmovss (%rdi,%rcx,4), %xmm5 + LONG $0x7410fac5; WORD $0x048f // vmovss 4(%rdi,%rcx,4), %xmm6 + LONG $0x2c5cd2c5; BYTE $0x8e // vsubss (%rsi,%rcx,4), %xmm5, %xmm5 + LONG $0xed59d2c5 // vmulss %xmm5, %xmm5, %xmm5 + LONG $0xe558dac5 // vaddss %xmm5, %xmm4, %xmm4 + LONG $0x6c5ccac5; WORD $0x048e // vsubss 4(%rsi,%rcx,4), %xmm6, %xmm5 + LONG $0xed59d2c5 // vmulss %xmm5, %xmm5, %xmm5 + LONG $0xe558dac5 // vaddss %xmm5, %xmm4, %xmm4 + LONG $0x6c10fac5; WORD $0x088f // vmovss 8(%rdi,%rcx,4), %xmm5 + LONG $0x6c5cd2c5; WORD $0x088e // vsubss 8(%rsi,%rcx,4), %xmm5, %xmm5 + LONG $0xed59d2c5 // vmulss %xmm5, %xmm5, %xmm5 + LONG $0xe558dac5 // 
vaddss %xmm5, %xmm4, %xmm4 + LONG $0x6c10fac5; WORD $0x0c8f // vmovss 12(%rdi,%rcx,4), %xmm5 + LONG $0x6c5cd2c5; WORD $0x0c8e // vsubss 12(%rsi,%rcx,4), %xmm5, %xmm5 + LONG $0xed59d2c5 // vmulss %xmm5, %xmm5, %xmm5 + LONG $0xe558dac5 // vaddss %xmm5, %xmm4, %xmm4 + LONG $0x04c18348 // addq $4, %rcx + WORD $0xc839 // cmpl %ecx, %eax + JNE LBB0_34 + +LBB0_35: + LONG $0xc158fcc5 // vaddps %ymm1, %ymm0, %ymm0 + LONG $0xca58e4c5 // vaddps %ymm2, %ymm3, %ymm1 + LONG $0xc058f4c5 // vaddps %ymm0, %ymm1, %ymm0 + LONG $0xc07cffc5 // vhaddps %ymm0, %ymm0, %ymm0 + LONG $0xc07cffc5 // vhaddps %ymm0, %ymm0, %ymm0 + LONG $0x197de3c4; WORD $0x01c1 // vextractf128 $1, %ymm0, %xmm1 + LONG $0xc158fac5 // vaddss %xmm1, %xmm0, %xmm0 + LONG $0xc058dac5 // vaddss %xmm0, %xmm4, %xmm0 + LONG $0x0211fac5 // vmovss %xmm0, (%rdx) + WORD $0x8948; BYTE $0xec // movq %rbp, %rsp + BYTE $0x5d // popq %rbp + WORD $0xf8c5; BYTE $0x77 // vzeroupper + BYTE $0xc3 // retq diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2_byte_avx256_amd64.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2_byte_avx256_amd64.go new file mode 100644 index 0000000000000000000000000000000000000000..b89a2c5405d39c134b834fb2cb4977440b17bc4c --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2_byte_avx256_amd64.go @@ -0,0 +1,21 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +//go:build !noasm && amd64 + +// AUTO-GENERATED BY GOAT -- DO NOT EDIT + +package asm + +import "unsafe" + +//go:noescape +func l2_byte_256(a, b, res, len unsafe.Pointer) diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2_byte_avx256_amd64.s b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2_byte_avx256_amd64.s new file mode 100644 index 0000000000000000000000000000000000000000..152735b012413dabdcc31c24f42dc69e1877c579 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2_byte_avx256_amd64.s @@ -0,0 +1,193 @@ +//go:build !noasm && amd64 +// AUTO-GENERATED BY GOAT -- DO NOT EDIT + +TEXT ·l2_byte_256(SB), $0-32 + MOVQ a+0(FP), DI + MOVQ b+8(FP), SI + MOVQ res+16(FP), DX + MOVQ len+24(FP), CX + BYTE $0x55 // pushq %rbp + WORD $0x8948; BYTE $0xe5 // movq %rsp, %rbp + BYTE $0x53 // pushq %rbx + LONG $0xf8e48348 // andq $-8, %rsp + WORD $0x8b4c; BYTE $0x11 // movq (%rcx), %r10 + WORD $0x8945; BYTE $0xd0 // movl %r10d, %r8d + LONG $0x20fa8341 // cmpl $32, %r10d + JGE LBB0_1 + WORD $0x8545; BYTE $0xd2 // testl %r10d, %r10d + JLE LBB0_7 + LONG $0x10f88341 // cmpl $16, %r8d + JAE LBB0_10 + WORD $0x3145; BYTE $0xdb // xorl %r11d, %r11d + WORD $0x3145; BYTE $0xc9 // xorl %r9d, %r9d + JMP LBB0_13 + +LBB0_1: + WORD $0x634d; BYTE $0xc8 // movslq %r8d, %r9 + LONG $0xc0eff9c5 // vpxor %xmm0, %xmm0, %xmm0 + WORD $0xdb31 // xorl %ebx, %ebx + WORD $0x894c; BYTE $0xc0 // movq %r8, %rax + LONG $0xc9eff1c5 // vpxor %xmm1, %xmm1, %xmm1 + +LBB0_2: + WORD $0x8948; BYTE $0xd9 // movq %rbx, %rcx + LONG $0x146ffec5; BYTE $0x1f // vmovdqu (%rdi,%rbx), %ymm2 + LONG $0x1c6ffec5; BYTE $0x1e // vmovdqu (%rsi,%rbx), %ymm3 + LONG $0xe060edc5 // vpunpcklbw %ymm0, %ymm2, %ymm4 # ymm4 = 
ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[16],ymm0[16],ymm2[17],ymm0[17],ymm2[18],ymm0[18],ymm2[19],ymm0[19],ymm2[20],ymm0[20],ymm2[21],ymm0[21],ymm2[22],ymm0[22],ymm2[23],ymm0[23] + LONG $0xe860e5c5 // vpunpcklbw %ymm0, %ymm3, %ymm5 # ymm5 = ymm3[0],ymm0[0],ymm3[1],ymm0[1],ymm3[2],ymm0[2],ymm3[3],ymm0[3],ymm3[4],ymm0[4],ymm3[5],ymm0[5],ymm3[6],ymm0[6],ymm3[7],ymm0[7],ymm3[16],ymm0[16],ymm3[17],ymm0[17],ymm3[18],ymm0[18],ymm3[19],ymm0[19],ymm3[20],ymm0[20],ymm3[21],ymm0[21],ymm3[22],ymm0[22],ymm3[23],ymm0[23] + LONG $0xe5f9ddc5 // vpsubw %ymm5, %ymm4, %ymm4 + LONG $0xd068edc5 // vpunpckhbw %ymm0, %ymm2, %ymm2 # ymm2 = ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15],ymm2[24],ymm0[24],ymm2[25],ymm0[25],ymm2[26],ymm0[26],ymm2[27],ymm0[27],ymm2[28],ymm0[28],ymm2[29],ymm0[29],ymm2[30],ymm0[30],ymm2[31],ymm0[31] + LONG $0xd868e5c5 // vpunpckhbw %ymm0, %ymm3, %ymm3 # ymm3 = ymm3[8],ymm0[8],ymm3[9],ymm0[9],ymm3[10],ymm0[10],ymm3[11],ymm0[11],ymm3[12],ymm0[12],ymm3[13],ymm0[13],ymm3[14],ymm0[14],ymm3[15],ymm0[15],ymm3[24],ymm0[24],ymm3[25],ymm0[25],ymm3[26],ymm0[26],ymm3[27],ymm0[27],ymm3[28],ymm0[28],ymm3[29],ymm0[29],ymm3[30],ymm0[30],ymm3[31],ymm0[31] + LONG $0xd3f9edc5 // vpsubw %ymm3, %ymm2, %ymm2 + LONG $0xdcf5ddc5 // vpmaddwd %ymm4, %ymm4, %ymm3 + LONG $0xc9fee5c5 // vpaddd %ymm1, %ymm3, %ymm1 + LONG $0xd2f5edc5 // vpmaddwd %ymm2, %ymm2, %ymm2 + LONG $0xcafef5c5 // vpaddd %ymm2, %ymm1, %ymm1 + LONG $0x20c38348 // addq $32, %rbx + LONG $0xe0c08348 // addq $-32, %rax + LONG $0x3fc18348 // addq $63, %rcx + WORD $0x394c; BYTE $0xc9 // cmpq %r9, %rcx + JL LBB0_2 + LONG $0x397de3c4; WORD $0x01c8 // vextracti128 $1, %ymm1, %xmm0 + LONG $0xc1fef9c5 // vpaddd %xmm1, %xmm0, %xmm0 + LONG $0xc870f9c5; BYTE $0x1b // vpshufd $27, %xmm0, %xmm1 # xmm1 = xmm0[3,2,1,0] + LONG $0xc0fef1c5 // vpaddd %xmm0, 
%xmm1, %xmm0 + LONG $0xc870f9c5; BYTE $0x55 // vpshufd $85, %xmm0, %xmm1 # xmm1 = xmm0[1,1,1,1] + LONG $0xc0fef1c5 // vpaddd %xmm0, %xmm1, %xmm0 + LONG $0x7e79c1c4; BYTE $0xc1 // vmovd %xmm0, %r9d + WORD $0x3944; BYTE $0xd3 // cmpl %r10d, %ebx + JGE LBB0_18 + WORD $0x894d; BYTE $0xc2 // movq %r8, %r10 + WORD $0x2949; BYTE $0xda // subq %rbx, %r10 + LONG $0x20fa8349 // cmpq $32, %r10 + JAE LBB0_14 + WORD $0x8948; BYTE $0xd8 // movq %rbx, %rax + JMP LBB0_17 + +LBB0_7: + WORD $0x3145; BYTE $0xc9 // xorl %r9d, %r9d + JMP LBB0_18 + +LBB0_10: + WORD $0x8945; BYTE $0xc2 // movl %r8d, %r10d + LONG $0x0fe28341 // andl $15, %r10d + WORD $0x8945; BYTE $0xc3 // movl %r8d, %r11d + LONG $0xf0e38341 // andl $-16, %r11d + LONG $0xc0eff9c5 // vpxor %xmm0, %xmm0, %xmm0 + WORD $0xc031 // xorl %eax, %eax + LONG $0xc9eff1c5 // vpxor %xmm1, %xmm1, %xmm1 + LONG $0xd2efe9c5 // vpxor %xmm2, %xmm2, %xmm2 + LONG $0xdbefe1c5 // vpxor %xmm3, %xmm3, %xmm3 + +LBB0_11: + LONG $0x327de2c4; WORD $0x0724 // vpmovzxbq (%rdi,%rax), %ymm4 # ymm4 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero + LONG $0x327de2c4; WORD $0x076c; BYTE $0x04 // vpmovzxbq 4(%rdi,%rax), %ymm5 # ymm5 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero + LONG $0x327de2c4; WORD $0x0774; BYTE $0x08 // vpmovzxbq 8(%rdi,%rax), %ymm6 # ymm6 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero + LONG $0x327de2c4; WORD $0x077c; BYTE $0x0c // vpmovzxbq 12(%rdi,%rax), %ymm7 # ymm7 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero + LONG $0x327d62c4; WORD $0x0604 // 
vpmovzxbq (%rsi,%rax), %ymm8 # ymm8 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero + LONG $0xfb5dc1c4; BYTE $0xe0 // vpsubq %ymm8, %ymm4, %ymm4 + LONG $0x327d62c4; WORD $0x0644; BYTE $0x04 // vpmovzxbq 4(%rsi,%rax), %ymm8 # ymm8 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero + LONG $0xfb55c1c4; BYTE $0xe8 // vpsubq %ymm8, %ymm5, %ymm5 + LONG $0x327d62c4; WORD $0x0644; BYTE $0x08 // vpmovzxbq 8(%rsi,%rax), %ymm8 # ymm8 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero + LONG $0x327d62c4; WORD $0x064c; BYTE $0x0c // vpmovzxbq 12(%rsi,%rax), %ymm9 # ymm9 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero + LONG $0xfb4dc1c4; BYTE $0xf0 // vpsubq %ymm8, %ymm6, %ymm6 + LONG $0xfb45c1c4; BYTE $0xf9 // vpsubq %ymm9, %ymm7, %ymm7 + LONG $0x285de2c4; BYTE $0xe4 // vpmuldq %ymm4, %ymm4, %ymm4 + LONG $0xc0d4ddc5 // vpaddq %ymm0, %ymm4, %ymm0 + LONG $0x2855e2c4; BYTE $0xe5 // vpmuldq %ymm5, %ymm5, %ymm4 + LONG $0xc9d4ddc5 // vpaddq %ymm1, %ymm4, %ymm1 + LONG $0x284de2c4; BYTE $0xe6 // vpmuldq %ymm6, %ymm6, %ymm4 + LONG $0xd2d4ddc5 // vpaddq %ymm2, %ymm4, %ymm2 + LONG $0x2845e2c4; BYTE $0xe7 // vpmuldq %ymm7, %ymm7, %ymm4 + LONG $0xdbd4ddc5 // vpaddq %ymm3, %ymm4, %ymm3 + LONG $0x10c08348 // addq $16, %rax + WORD $0x3949; BYTE $0xc3 // cmpq %rax, %r11 + JNE LBB0_11 + LONG $0xc0d4f5c5 // vpaddq %ymm0, %ymm1, %ymm0 + LONG $0xc0d4edc5 // vpaddq %ymm0, %ymm2, %ymm0 + LONG $0xc0d4e5c5 // vpaddq %ymm0, %ymm3, %ymm0 + LONG $0x397de3c4; WORD $0x01c1 // vextracti128 $1, %ymm0, %xmm1 + LONG $0xc1d4f9c5 // vpaddq 
%xmm1, %xmm0, %xmm0 + LONG $0xc870f9c5; BYTE $0xee // vpshufd $238, %xmm0, %xmm1 # xmm1 = xmm0[2,3,2,3] + LONG $0xc1d4f9c5 // vpaddq %xmm1, %xmm0, %xmm0 + LONG $0x7ef9c1c4; BYTE $0xc1 // vmovq %xmm0, %r9 + WORD $0x854d; BYTE $0xd2 // testq %r10, %r10 + JE LBB0_18 + +LBB0_13: + LONG $0x04b60f42; BYTE $0x1f // movzbl (%rdi,%r11), %eax + LONG $0x0cb60f42; BYTE $0x1e // movzbl (%rsi,%r11), %ecx + WORD $0x2948; BYTE $0xc8 // subq %rcx, %rax + LONG $0xc0af0f48 // imulq %rax, %rax + WORD $0x0149; BYTE $0xc1 // addq %rax, %r9 + LONG $0x01c38349 // addq $1, %r11 + WORD $0x394d; BYTE $0xd8 // cmpq %r11, %r8 + JNE LBB0_13 + JMP LBB0_18 + +LBB0_14: + WORD $0x894d; BYTE $0xd3 // movq %r10, %r11 + LONG $0xe0e38349 // andq $-32, %r11 + LONG $0xe0e08348 // andq $-32, %rax + WORD $0x0148; BYTE $0xd8 // addq %rbx, %rax + LONG $0x18c38348 // addq $24, %rbx + LONG $0x6e79c1c4; BYTE $0xc1 // vmovd %r9d, %xmm0 + LONG $0xc9eff1c5 // vpxor %xmm1, %xmm1, %xmm1 + WORD $0x894d; BYTE $0xd9 // movq %r11, %r9 + LONG $0xd2efe9c5 // vpxor %xmm2, %xmm2, %xmm2 + LONG $0xdbefe1c5 // vpxor %xmm3, %xmm3, %xmm3 + +LBB0_15: + LONG $0x317de2c4; WORD $0x1f64; BYTE $0xe8 // vpmovzxbd -24(%rdi,%rbx), %ymm4 # ymm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero + LONG $0x317de2c4; WORD $0x1f6c; BYTE $0xf0 // vpmovzxbd -16(%rdi,%rbx), %ymm5 # ymm5 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero + LONG $0x317de2c4; WORD $0x1f74; BYTE $0xf8 // vpmovzxbd -8(%rdi,%rbx), %ymm6 # ymm6 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero + LONG $0x317de2c4; WORD $0x1f3c // vpmovzxbd (%rdi,%rbx), %ymm7 # ymm7 = 
mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero + LONG $0x317d62c4; WORD $0x1e44; BYTE $0xe8 // vpmovzxbd -24(%rsi,%rbx), %ymm8 # ymm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero + LONG $0xfa5dc1c4; BYTE $0xe0 // vpsubd %ymm8, %ymm4, %ymm4 + LONG $0x317d62c4; WORD $0x1e44; BYTE $0xf0 // vpmovzxbd -16(%rsi,%rbx), %ymm8 # ymm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero + LONG $0xfa55c1c4; BYTE $0xe8 // vpsubd %ymm8, %ymm5, %ymm5 + LONG $0x317d62c4; WORD $0x1e44; BYTE $0xf8 // vpmovzxbd -8(%rsi,%rbx), %ymm8 # ymm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero + LONG $0x317d62c4; WORD $0x1e0c // vpmovzxbd (%rsi,%rbx), %ymm9 # ymm9 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero + LONG $0xfa4dc1c4; BYTE $0xf0 // vpsubd %ymm8, %ymm6, %ymm6 + LONG $0xfa45c1c4; BYTE $0xf9 // vpsubd %ymm9, %ymm7, %ymm7 + LONG $0x405de2c4; BYTE $0xe4 // vpmulld %ymm4, %ymm4, %ymm4 + LONG $0xc0feddc5 // vpaddd %ymm0, %ymm4, %ymm0 + LONG $0x4055e2c4; BYTE $0xe5 // vpmulld %ymm5, %ymm5, %ymm4 + LONG $0xc9feddc5 // vpaddd %ymm1, %ymm4, %ymm1 + LONG $0x404de2c4; BYTE $0xe6 // vpmulld %ymm6, %ymm6, %ymm4 + LONG $0xd2feddc5 // vpaddd %ymm2, %ymm4, %ymm2 + LONG $0x4045e2c4; BYTE $0xe7 // vpmulld %ymm7, %ymm7, %ymm4 + LONG $0xdbfeddc5 // vpaddd %ymm3, %ymm4, %ymm3 + LONG $0x20c38348 // addq $32, %rbx + LONG $0xe0c18349 // addq $-32, %r9 + JNE LBB0_15 + 
LONG $0xc0fef5c5 // vpaddd %ymm0, %ymm1, %ymm0 + LONG $0xc0feedc5 // vpaddd %ymm0, %ymm2, %ymm0 + LONG $0xc0fee5c5 // vpaddd %ymm0, %ymm3, %ymm0 + LONG $0x397de3c4; WORD $0x01c1 // vextracti128 $1, %ymm0, %xmm1 + LONG $0xc1fef9c5 // vpaddd %xmm1, %xmm0, %xmm0 + LONG $0xc870f9c5; BYTE $0xee // vpshufd $238, %xmm0, %xmm1 # xmm1 = xmm0[2,3,2,3] + LONG $0xc1fef9c5 // vpaddd %xmm1, %xmm0, %xmm0 + LONG $0xc870f9c5; BYTE $0x55 // vpshufd $85, %xmm0, %xmm1 # xmm1 = xmm0[1,1,1,1] + LONG $0xc1fef9c5 // vpaddd %xmm1, %xmm0, %xmm0 + LONG $0x7e79c1c4; BYTE $0xc1 // vmovd %xmm0, %r9d + WORD $0x394d; BYTE $0xda // cmpq %r11, %r10 + JE LBB0_18 + +LBB0_17: + LONG $0x070cb60f // movzbl (%rdi,%rax), %ecx + LONG $0x061cb60f // movzbl (%rsi,%rax), %ebx + WORD $0xd929 // subl %ebx, %ecx + WORD $0xaf0f; BYTE $0xc9 // imull %ecx, %ecx + WORD $0x0141; BYTE $0xc9 // addl %ecx, %r9d + LONG $0x01c08348 // addq $1, %rax + WORD $0x3949; BYTE $0xc0 // cmpq %rax, %r8 + JNE LBB0_17 + +LBB0_18: + WORD $0x8944; BYTE $0x0a // movl %r9d, (%rdx) + LONG $0xf8658d48 // leaq -8(%rbp), %rsp + BYTE $0x5b // popq %rbx + BYTE $0x5d // popq %rbp + WORD $0xf8c5; BYTE $0x77 // vzeroupper + BYTE $0xc3 // retq diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2_float_byte_avx256.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2_float_byte_avx256.go new file mode 100644 index 0000000000000000000000000000000000000000..7fc8e87a7e92c060d499a632ffb710bd4ff5b47b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2_float_byte_avx256.go @@ -0,0 +1,21 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +//go:build !noasm && amd64 + +// Code generated by GoAT. DO NOT EDIT. + +package asm + +import "unsafe" + +//go:noescape +func l2_float_byte_256(a, b, res, len unsafe.Pointer) diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2_float_byte_avx256.s b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2_float_byte_avx256.s new file mode 100644 index 0000000000000000000000000000000000000000..23caf68fa7f9119441b0611b4f8373f7f2ceac42 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2_float_byte_avx256.s @@ -0,0 +1,183 @@ +//go:build !noasm && amd64 +// Code generated by GoAT. DO NOT EDIT. + +TEXT ·l2_float_byte_256(SB), $0-32 + MOVQ a+0(FP), DI + MOVQ b+8(FP), SI + MOVQ res+16(FP), DX + MOVQ len+24(FP), CX + BYTE $0x55 // pushq %rbp + WORD $0x8948; BYTE $0xe5 // movq %rsp, %rbp + LONG $0xf8e48348 // andq $-8, %rsp + WORD $0x018b // movl (%rcx), %eax + WORD $0xf883; BYTE $0x07 // cmpl $7, %eax + JG LBB0_7 + WORD $0x01a8 // testb $1, %al + JNE LBB0_3 + LONG $0xc057f8c5 // vxorps %xmm0, %xmm0, %xmm0 + WORD $0xc189 // movl %eax, %ecx + WORD $0xf883; BYTE $0x01 // cmpl $1, %eax + JNE LBB0_5 + +LBB0_22: + LONG $0x0211fac5 // vmovss %xmm0, (%rdx) + WORD $0x8948; BYTE $0xec // movq %rbp, %rsp + BYTE $0x5d // popq %rbp + BYTE $0xc3 // retq + +LBB0_7: + LONG $0xc057f8c5 // vxorps %xmm0, %xmm0, %xmm0 + WORD $0xf883; BYTE $0x20 // cmpl $32, %eax + JB LBB0_8 + LONG $0xc057f8c5 // vxorps %xmm0, %xmm0, %xmm0 + LONG $0xc957f0c5 // vxorps %xmm1, %xmm1, %xmm1 + LONG $0xd257e8c5 // vxorps %xmm2, %xmm2, %xmm2 + LONG $0xdb57e0c5 // vxorps %xmm3, %xmm3, %xmm3 + +LBB0_14: + LONG $0x2710fcc5 // vmovups (%rdi), %ymm4 + LONG $0x6f10fcc5; BYTE $0x20 // vmovups 32(%rdi), %ymm5 + LONG $0x7710fcc5; BYTE $0x40 // vmovups 64(%rdi), %ymm6 + LONG $0x7f10fcc5; BYTE $0x60 // vmovups 96(%rdi), %ymm7 + LONG $0x317d62c4; BYTE $0x06 // 
vpmovzxbd (%rsi), %ymm8 # ymm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero + LONG $0x5b7c41c4; BYTE $0xc0 // vcvtdq2ps %ymm8, %ymm8 + LONG $0x5c5cc1c4; BYTE $0xe0 // vsubps %ymm8, %ymm4, %ymm4 + LONG $0x317d62c4; WORD $0x0846 // vpmovzxbd 8(%rsi), %ymm8 # ymm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero + LONG $0x5b7c41c4; BYTE $0xc0 // vcvtdq2ps %ymm8, %ymm8 + LONG $0x5c54c1c4; BYTE $0xe8 // vsubps %ymm8, %ymm5, %ymm5 + LONG $0x317d62c4; WORD $0x1046 // vpmovzxbd 16(%rsi), %ymm8 # ymm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero + LONG $0x5b7c41c4; BYTE $0xc0 // vcvtdq2ps %ymm8, %ymm8 + LONG $0x5c4cc1c4; BYTE $0xf0 // vsubps %ymm8, %ymm6, %ymm6 + LONG $0x317d62c4; WORD $0x1846 // vpmovzxbd 24(%rsi), %ymm8 # ymm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero + LONG $0x5b7c41c4; BYTE $0xc0 // vcvtdq2ps %ymm8, %ymm8 + LONG $0x5c44c1c4; BYTE $0xf8 // vsubps %ymm8, %ymm7, %ymm7 + LONG $0xb85de2c4; BYTE $0xdc // vfmadd231ps %ymm4, %ymm4, %ymm3 # ymm3 = (ymm4 * ymm4) + ymm3 + LONG $0xb855e2c4; BYTE $0xd5 // vfmadd231ps %ymm5, %ymm5, %ymm2 # ymm2 = (ymm5 * ymm5) + ymm2 + LONG $0xb84de2c4; BYTE $0xce // vfmadd231ps %ymm6, %ymm6, %ymm1 # ymm1 = (ymm6 * ymm6) + ymm1 + LONG $0xb845e2c4; BYTE $0xc7 // vfmadd231ps %ymm7, %ymm7, %ymm0 # ymm0 = (ymm7 * ymm7) + ymm0 + WORD $0xc083; BYTE $0xe0 // addl $-32, %eax + LONG $0x80ef8348 // subq $-128, %rdi + LONG $0x20c68348 // addq $32, %rsi + WORD $0xf883; BYTE $0x1f // cmpl $31, %eax + JA LBB0_14 + WORD 
$0xf883; BYTE $0x08 // cmpl $8, %eax + JAE LBB0_9 + JMP LBB0_11 + +LBB0_3: + WORD $0xb60f; BYTE $0x0e // movzbl (%rsi), %ecx + LONG $0xc12afac5 // vcvtsi2ss %ecx, %xmm0, %xmm0 + LONG $0x0f10fac5 // vmovss (%rdi), %xmm1 # xmm1 = mem[0],zero,zero,zero + LONG $0xc05cf2c5 // vsubss %xmm0, %xmm1, %xmm0 + LONG $0xc059fac5 // vmulss %xmm0, %xmm0, %xmm0 + LONG $0xc957f0c5 // vxorps %xmm1, %xmm1, %xmm1 + LONG $0xc158fac5 // vaddss %xmm1, %xmm0, %xmm0 + WORD $0x488d; BYTE $0xff // leal -1(%rax), %ecx + LONG $0x04c78348 // addq $4, %rdi + LONG $0x01c68348 // addq $1, %rsi + WORD $0xf883; BYTE $0x01 // cmpl $1, %eax + JE LBB0_22 + +LBB0_5: + WORD $0x8941; BYTE $0xc8 // movl %ecx, %r8d + WORD $0xc931 // xorl %ecx, %ecx + +LBB0_6: + LONG $0x0e04b60f // movzbl (%rsi,%rcx), %eax + LONG $0xc82ae2c5 // vcvtsi2ss %eax, %xmm3, %xmm1 + LONG $0x1410fac5; BYTE $0x8f // vmovss (%rdi,%rcx,4), %xmm2 # xmm2 = mem[0],zero,zero,zero + LONG $0xc95ceac5 // vsubss %xmm1, %xmm2, %xmm1 + LONG $0x5410fac5; WORD $0x048f // vmovss 4(%rdi,%rcx,4), %xmm2 # xmm2 = mem[0],zero,zero,zero + LONG $0xc959f2c5 // vmulss %xmm1, %xmm1, %xmm1 + LONG $0xc158fac5 // vaddss %xmm1, %xmm0, %xmm0 + LONG $0x0e44b60f; BYTE $0x01 // movzbl 1(%rsi,%rcx), %eax + LONG $0xc82ae2c5 // vcvtsi2ss %eax, %xmm3, %xmm1 + LONG $0xc95ceac5 // vsubss %xmm1, %xmm2, %xmm1 + LONG $0xc959f2c5 // vmulss %xmm1, %xmm1, %xmm1 + LONG $0xc158fac5 // vaddss %xmm1, %xmm0, %xmm0 + LONG $0x02c18348 // addq $2, %rcx + WORD $0x3941; BYTE $0xc8 // cmpl %ecx, %r8d + JNE LBB0_6 + JMP LBB0_22 + +LBB0_8: + LONG $0xc957f0c5 // vxorps %xmm1, %xmm1, %xmm1 + LONG $0xd257e8c5 // vxorps %xmm2, %xmm2, %xmm2 + LONG $0xdb57e0c5 // vxorps %xmm3, %xmm3, %xmm3 + +LBB0_9: + LONG $0x317de2c4; BYTE $0x26 // vpmovzxbd (%rsi), %ymm4 # ymm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero + LONG $0x2f10fcc5 // vmovups (%rdi), %ymm5 + LONG 
$0xe45bfcc5 // vcvtdq2ps %ymm4, %ymm4 + LONG $0xe45cd4c5 // vsubps %ymm4, %ymm5, %ymm4 + LONG $0xb85de2c4; BYTE $0xdc // vfmadd231ps %ymm4, %ymm4, %ymm3 # ymm3 = (ymm4 * ymm4) + ymm3 + WORD $0xc083; BYTE $0xf8 // addl $-8, %eax + LONG $0x20c78348 // addq $32, %rdi + LONG $0x08c68348 // addq $8, %rsi + WORD $0xf883; BYTE $0x07 // cmpl $7, %eax + JA LBB0_9 + +LBB0_11: + WORD $0xc085 // testl %eax, %eax + JE LBB0_12 + WORD $0x01a8 // testb $1, %al + JNE LBB0_17 + LONG $0xe457d8c5 // vxorps %xmm4, %xmm4, %xmm4 + WORD $0xc189 // movl %eax, %ecx + WORD $0xf883; BYTE $0x01 // cmpl $1, %eax + JNE LBB0_19 + JMP LBB0_21 + +LBB0_12: + LONG $0xe457d8c5 // vxorps %xmm4, %xmm4, %xmm4 + JMP LBB0_21 + +LBB0_17: + WORD $0xb60f; BYTE $0x0e // movzbl (%rsi), %ecx + LONG $0xe12ab2c5 // vcvtsi2ss %ecx, %xmm9, %xmm4 + LONG $0x2f10fac5 // vmovss (%rdi), %xmm5 # xmm5 = mem[0],zero,zero,zero + LONG $0xe45cd2c5 // vsubss %xmm4, %xmm5, %xmm4 + LONG $0xe459dac5 // vmulss %xmm4, %xmm4, %xmm4 + LONG $0xed57d0c5 // vxorps %xmm5, %xmm5, %xmm5 + LONG $0xe558dac5 // vaddss %xmm5, %xmm4, %xmm4 + WORD $0x488d; BYTE $0xff // leal -1(%rax), %ecx + LONG $0x04c78348 // addq $4, %rdi + LONG $0x01c68348 // addq $1, %rsi + WORD $0xf883; BYTE $0x01 // cmpl $1, %eax + JE LBB0_21 + +LBB0_19: + WORD $0x8941; BYTE $0xc8 // movl %ecx, %r8d + WORD $0xc931 // xorl %ecx, %ecx + +LBB0_20: + LONG $0x0e04b60f // movzbl (%rsi,%rcx), %eax + LONG $0xe82ab2c5 // vcvtsi2ss %eax, %xmm9, %xmm5 + LONG $0x3410fac5; BYTE $0x8f // vmovss (%rdi,%rcx,4), %xmm6 # xmm6 = mem[0],zero,zero,zero + LONG $0xed5ccac5 // vsubss %xmm5, %xmm6, %xmm5 + LONG $0x7410fac5; WORD $0x048f // vmovss 4(%rdi,%rcx,4), %xmm6 # xmm6 = mem[0],zero,zero,zero + LONG $0xed59d2c5 // vmulss %xmm5, %xmm5, %xmm5 + LONG $0xe558dac5 // vaddss %xmm5, %xmm4, %xmm4 + LONG $0x0e44b60f; BYTE $0x01 // movzbl 1(%rsi,%rcx), %eax + LONG $0xe82ab2c5 // vcvtsi2ss %eax, %xmm9, %xmm5 + LONG $0xed5ccac5 // vsubss %xmm5, %xmm6, %xmm5 + LONG $0xed59d2c5 // vmulss %xmm5, %xmm5, 
%xmm5 + LONG $0xe558dac5 // vaddss %xmm5, %xmm4, %xmm4 + LONG $0x02c18348 // addq $2, %rcx + WORD $0x3941; BYTE $0xc8 // cmpl %ecx, %r8d + JNE LBB0_20 + +LBB0_21: + LONG $0xd358ecc5 // vaddps %ymm3, %ymm2, %ymm2 + LONG $0xc058f4c5 // vaddps %ymm0, %ymm1, %ymm0 + LONG $0xc258fcc5 // vaddps %ymm2, %ymm0, %ymm0 + LONG $0xc07cffc5 // vhaddps %ymm0, %ymm0, %ymm0 + LONG $0xc07cffc5 // vhaddps %ymm0, %ymm0, %ymm0 + LONG $0x197de3c4; WORD $0x01c1 // vextractf128 $1, %ymm0, %xmm1 + LONG $0xc158fac5 // vaddss %xmm1, %xmm0, %xmm0 + LONG $0xc058dac5 // vaddss %xmm0, %xmm4, %xmm0 + LONG $0x0211fac5 // vmovss %xmm0, (%rdx) + WORD $0x8948; BYTE $0xec // movq %rbp, %rsp + BYTE $0x5d // popq %rbp + WORD $0xf8c5; BYTE $0x77 // vzeroupper + BYTE $0xc3 // retq diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2_inline.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2_inline.go new file mode 100644 index 0000000000000000000000000000000000000000..3891915d433bb60306359d0aee99b07c6b308915 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2_inline.go @@ -0,0 +1,213 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package asm + +// Experiment with inlining and flattening the L2Squared distancer. 
+// Theoretically, this should be faster than the loop version for small vectors +// - it avoids the loop overhead +// - it eliminates the bounds check by reversing the iteration +// - it allows l22 and l24 to be inlined (the other ones are too large) +// See go tool compile -d=ssa/check_bce/debug=1 -m l2_inline.go + +func l22[T number, U number](x []T, y []T) U { + diff := U(x[1]) - U(y[1]) + sum := diff * diff + + diff = U(x[0]) - U(y[0]) + sum += diff * diff + + return sum +} + +func l23[T number, U number](x []T, y []T) U { + diff := U(x[2]) - U(y[2]) + sum := diff * diff + + return l22[T, U](x, y) + sum +} + +func l24[T number, U number](x []T, y []T) U { + diff := U(x[3]) - U(y[3]) + sum := diff * diff + + diff = U(x[2]) - U(y[2]) + sum += diff * diff + + return l22[T, U](x, y) + sum +} + +func l25[T number, U number](x []T, y []T) U { + diff := U(x[4]) - U(y[4]) + sum := diff * diff + + return l24[T, U](x, y) + sum +} + +func l26[T number, U number](x []T, y []T) U { + diff := U(x[5]) - U(y[5]) + sum := diff * diff + + diff = U(x[4]) - U(y[4]) + sum += diff * diff + + return l24[T, U](x, y) + sum +} + +func l28[T number, U number](x []T, y []T) U { + diff := U(x[7]) - U(y[7]) + sum := diff * diff + + diff = U(x[6]) - U(y[6]) + sum += diff * diff + + diff = U(x[5]) - U(y[5]) + sum += diff * diff + + diff = U(x[4]) - U(y[4]) + sum += diff * diff + + return l24[T, U](x, y) + sum +} + +func l210[T number, U number](x []T, y []T) U { + diff := U(x[9]) - U(y[9]) + sum := diff * diff + + diff = U(x[8]) - U(y[8]) + sum += diff * diff + + diff = U(x[7]) - U(y[7]) + sum += diff * diff + + diff = U(x[6]) - U(y[6]) + sum += diff * diff + + diff = U(x[5]) - U(y[5]) + sum += diff * diff + + diff = U(x[4]) - U(y[4]) + sum += diff * diff + + return l24[T, U](x, y) + sum +} + +func l212[T number, U number](x []T, y []T) U { + diff := U(x[11]) - U(y[11]) + sum := diff * diff + + diff = U(x[10]) - U(y[10]) + sum += diff * diff + + diff = U(x[9]) - U(y[9]) + sum += diff * diff 
+ + diff = U(x[8]) - U(y[8]) + sum += diff * diff + + diff = U(x[7]) - U(y[7]) + sum += diff * diff + + diff = U(x[6]) - U(y[6]) + sum += diff * diff + + diff = U(x[5]) - U(y[5]) + sum += diff * diff + + diff = U(x[4]) - U(y[4]) + sum += diff * diff + + return l24[T, U](x, y) + sum +} + +func l22FloatByte(x []float32, y []byte) float32 { + diff := x[1] - float32(y[1]) + sum := diff * diff + + diff = x[0] - float32(y[0]) + sum += diff * diff + + return sum +} + +func l23FloatByte(x []float32, y []byte) float32 { + diff := x[2] - float32(y[2]) + sum := diff * diff + + return l22FloatByte(x, y) + sum +} + +func l24FloatByte(x []float32, y []byte) float32 { + diff := x[3] - float32(y[3]) + sum := diff * diff + + diff = x[2] - float32(y[2]) + sum += diff * diff + + return l22FloatByte(x, y) + sum +} + +func l25FloatByte(x []float32, y []byte) float32 { + diff := x[4] - float32(y[4]) + sum := diff * diff + + return l24FloatByte(x, y) + sum +} + +func l210FloatByte(x []float32, y []byte) float32 { + diff := x[9] - float32(y[9]) + sum := diff * diff + + diff = x[8] - float32(y[8]) + sum += diff * diff + + diff = x[7] - float32(y[7]) + sum += diff * diff + + diff = x[6] - float32(y[6]) + sum += diff * diff + + diff = x[5] - float32(y[5]) + sum += diff * diff + + diff = x[4] - float32(y[4]) + sum += diff * diff + + return l24FloatByte(x, y) + sum +} + +func l212FloatByte(x []float32, y []byte) float32 { + diff := x[11] - float32(y[11]) + sum := diff * diff + + diff = x[10] - float32(y[10]) + sum += diff * diff + + diff = x[9] - float32(y[9]) + sum += diff * diff + + diff = x[8] - float32(y[8]) + sum += diff * diff + + diff = x[7] - float32(y[7]) + sum += diff * diff + + diff = x[6] - float32(y[6]) + sum += diff * diff + + diff = x[5] - float32(y[5]) + sum += diff * diff + + diff = x[4] - float32(y[4]) + sum += diff * diff + + return l24FloatByte(x, y) + sum +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2_inline_test.go 
b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2_inline_test.go new file mode 100644 index 0000000000000000000000000000000000000000..87bdd4c71b274258d884b19af7a6fc7d6c937c8b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2_inline_test.go @@ -0,0 +1,90 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package asm + +import ( + "fmt" + "math/rand" + "testing" +) + +func l2Loop(a, b []float32) float32 { + var sum float32 + + for i := range a { + diff := a[i] - b[i] + sum += diff * diff + } + + return sum +} + +func BenchmarkL2InlineVsLoop(b *testing.B) { + lengths := []int{2, 4, 6, 8, 10, 12} + for _, length := range lengths { + x := make([]float32, length) + y := make([]float32, length) + + for i := range x { + x[i] = rand.Float32() + y[i] = rand.Float32() + } + + b.Run(fmt.Sprintf("vector dim=%d", length), func(b *testing.B) { + b.Run("loop", func(b *testing.B) { + for i := 0; i < b.N; i++ { + l2Loop(x, y) + } + }) + + b.Run("flat", func(b *testing.B) { + // written to ensure that the compiler + // inlines the function when possible + switch length { + case 2: + b.ResetTimer() + for i := 0; i < b.N; i++ { + l22[float32, float32](x, y) + } + case 4: + b.ResetTimer() + for i := 0; i < b.N; i++ { + l24[float32, float32](x, y) + } + case 6: + b.ResetTimer() + for i := 0; i < b.N; i++ { + l26[float32, float32](x, y) + } + case 8: + b.ResetTimer() + for i := 0; i < b.N; i++ { + l28[float32, float32](x, y) + } + case 10: + b.ResetTimer() + for i := 0; i < b.N; i++ { + l210[float32, float32](x, y) + } + case 12: + b.ResetTimer() + for i := 0; i < b.N; i++ { + l212[float32, float32](x, y) + } + default: + panic("unsupported length") + } + }) + }) 
+ + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2_neon_arm64.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2_neon_arm64.go new file mode 100644 index 0000000000000000000000000000000000000000..aae9dcfbfdec6c3fe8ad9fbcfcbe4baedb3b72e1 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2_neon_arm64.go @@ -0,0 +1,21 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +//go:build !noasm && arm64 + +// AUTO-GENERATED BY GOAT -- DO NOT EDIT + +package asm + +import "unsafe" + +//go:noescape +func l2_neon(a, b, res, len unsafe.Pointer) diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2_neon_arm64.s b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2_neon_arm64.s new file mode 100644 index 0000000000000000000000000000000000000000..7060e4ed163024c54615c7ceafb4b536dd1e11eb --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2_neon_arm64.s @@ -0,0 +1,151 @@ +//go:build !noasm && arm64 +// AUTO-GENERATED BY GOAT -- DO NOT EDIT + +TEXT ·l2_neon(SB), $0-32 + MOVD a+0(FP), R0 + MOVD b+8(FP), R1 + MOVD res+16(FP), R2 + MOVD len+24(FP), R3 + WORD $0xa9bf7bfd // stp x29, x30, [sp, #-16]! 
+ WORD $0xf9400069 // ldr x9, [x3] + WORD $0x910003fd // mov x29, sp + WORD $0x6b0903e8 // negs w8, w9 + WORD $0x1200052a // and w10, w9, #0x3 + WORD $0x12000508 // and w8, w8, #0x3 + WORD $0x5a884548 // csneg w8, w10, w8, mi + WORD $0x4b08012a // sub w10, w9, w8 + WORD $0x7100415f // cmp w10, #16 + WORD $0x540000ea // b.ge .LBB0_2 + WORD $0x6f00e400 // movi v0.2d, #0000000000000000 + WORD $0x6f00e401 // movi v1.2d, #0000000000000000 + WORD $0x2a1f03eb // mov w11, wzr + WORD $0x6f00e403 // movi v3.2d, #0000000000000000 + WORD $0x6f00e402 // movi v2.2d, #0000000000000000 + WORD $0x1400001a // b .LBB0_4 + +LBB0_2: + WORD $0x6f00e402 // movi v2.2d, #0000000000000000 + WORD $0x6f00e403 // movi v3.2d, #0000000000000000 + WORD $0xaa1f03eb // mov x11, xzr + WORD $0x6f00e401 // movi v1.2d, #0000000000000000 + WORD $0x6f00e400 // movi v0.2d, #0000000000000000 + WORD $0xaa0003ec // mov x12, x0 + WORD $0xaa0103ed // mov x13, x1 + +LBB0_3: + WORD $0x4cdf2984 // ld1 { v4.4s, v5.4s, v6.4s, v7.4s }, [x12], #64 + WORD $0x9100816e // add x14, x11, #32 + WORD $0x4cdf29b0 // ld1 { v16.4s, v17.4s, v18.4s, v19.4s }, [x13], #64 + WORD $0xeb0a01df // cmp x14, x10 + WORD $0x9100416b // add x11, x11, #16 + WORD $0x4eb0d494 // fsub v20.4s, v4.4s, v16.4s + WORD $0x4eb1d4b5 // fsub v21.4s, v5.4s, v17.4s + WORD $0x4eb2d4d6 // fsub v22.4s, v6.4s, v18.4s + WORD $0x4eb3d4e4 // fsub v4.4s, v7.4s, v19.4s + WORD $0x6e34de85 // fmul v5.4s, v20.4s, v20.4s + WORD $0x6e35dea6 // fmul v6.4s, v21.4s, v21.4s + WORD $0x6e36dec7 // fmul v7.4s, v22.4s, v22.4s + WORD $0x6e24dc84 // fmul v4.4s, v4.4s, v4.4s + WORD $0x4e25d442 // fadd v2.4s, v2.4s, v5.4s + WORD $0x4e26d463 // fadd v3.4s, v3.4s, v6.4s + WORD $0x4e27d421 // fadd v1.4s, v1.4s, v7.4s + WORD $0x4e24d400 // fadd v0.4s, v0.4s, v4.4s + WORD $0x54fffde9 // b.ls .LBB0_3 + +LBB0_4: + WORD $0x6b0a017f // cmp w11, w10 + WORD $0x540001ea // b.ge .LBB0_7 + WORD $0x2a0b03ec // mov w12, w11 + WORD $0x93407d4a // sxtw x10, w10 + WORD $0x2a0b03eb // mov w11, w11 + 
WORD $0xd37e7d8d // ubfiz x13, x12, #2, #32 + WORD $0x8b0d002c // add x12, x1, x13 + WORD $0x8b0d000d // add x13, x0, x13 + +LBB0_6: + WORD $0x3cc105a4 // ldr q4, [x13], #16 + WORD $0x9100116b // add x11, x11, #4 + WORD $0x3cc10585 // ldr q5, [x12], #16 + WORD $0xeb0a017f // cmp x11, x10 + WORD $0x4ea5d484 // fsub v4.4s, v4.4s, v5.4s + WORD $0x6e24dc84 // fmul v4.4s, v4.4s, v4.4s + WORD $0x4e24d442 // fadd v2.4s, v2.4s, v4.4s + WORD $0x54ffff2b // b.lt .LBB0_6 + +LBB0_7: + WORD $0x6e22d442 // faddp v2.4s, v2.4s, v2.4s + WORD $0x6e23d463 // faddp v3.4s, v3.4s, v3.4s + WORD $0x7100051f // cmp w8, #1 + WORD $0x6e21d421 // faddp v1.4s, v1.4s, v1.4s + WORD $0x6e20d400 // faddp v0.4s, v0.4s, v0.4s + WORD $0x7e30d842 // faddp s2, v2.2s + WORD $0x7e30d863 // faddp s3, v3.2s + WORD $0x7e30d821 // faddp s1, v1.2s + WORD $0x7e30d800 // faddp s0, v0.2s + WORD $0x1e232842 // fadd s2, s2, s3 + WORD $0x1e212841 // fadd s1, s2, s1 + WORD $0x1e202820 // fadd s0, s1, s0 + WORD $0x540006eb // b.lt .LBB0_13 + WORD $0x93407d29 // sxtw x9, w9 + WORD $0x4b0803ec // neg w12, w8 + WORD $0xcb08012a // sub x10, x9, x8 + WORD $0x9100054b // add x11, x10, #1 + WORD $0xeb09017f // cmp x11, x9 + WORD $0x9a8ad52a // csinc x10, x9, x10, le + WORD $0x8b08014a // add x10, x10, x8 + WORD $0xcb09014b // sub x11, x10, x9 + WORD $0x8b2cc12a // add x10, x9, w12, sxtw + WORD $0xf100217f // cmp x11, #8 + WORD $0x54000463 // b.lo .LBB0_12 + WORD $0xd37ef52c // lsl x12, x9, #2 + WORD $0xcb28c98c // sub x12, x12, w8, sxtw #2 + WORD $0x927df168 // and x8, x11, #0xfffffffffffffff8 + WORD $0x8b08014a // add x10, x10, x8 + WORD $0xaa0803ee // mov x14, x8 + WORD $0x9100418d // add x13, x12, #16 + WORD $0x8b0d002c // add x12, x1, x13 + WORD $0x8b0d000d // add x13, x0, x13 + +LBB0_10: + WORD $0xad7f9181 // ldp q1, q4, [x12, #-16] + WORD $0xf10021ce // subs x14, x14, #8 + WORD $0xad7f8da2 // ldp q2, q3, [x13, #-16] + WORD $0x9100818c // add x12, x12, #32 + WORD $0x910081ad // add x13, x13, #32 + WORD $0x4ea1d441 // 
fsub v1.4s, v2.4s, v1.4s + WORD $0x6e21dc21 // fmul v1.4s, v1.4s, v1.4s + WORD $0x5e0c0422 // mov s2, v1.s[1] + WORD $0x1e212800 // fadd s0, s0, s1 + WORD $0x5e140425 // mov s5, v1.s[2] + WORD $0x5e1c0421 // mov s1, v1.s[3] + WORD $0x1e222800 // fadd s0, s0, s2 + WORD $0x4ea4d462 // fsub v2.4s, v3.4s, v4.4s + WORD $0x1e252800 // fadd s0, s0, s5 + WORD $0x6e22dc42 // fmul v2.4s, v2.4s, v2.4s + WORD $0x1e212800 // fadd s0, s0, s1 + WORD $0x5e0c0441 // mov s1, v2.s[1] + WORD $0x5e140443 // mov s3, v2.s[2] + WORD $0x1e222800 // fadd s0, s0, s2 + WORD $0x1e212800 // fadd s0, s0, s1 + WORD $0x5e1c0441 // mov s1, v2.s[3] + WORD $0x1e232800 // fadd s0, s0, s3 + WORD $0x1e212800 // fadd s0, s0, s1 + WORD $0x54fffd21 // b.ne .LBB0_10 + WORD $0xeb08017f // cmp x11, x8 + WORD $0x54000140 // b.eq .LBB0_13 + +LBB0_12: + WORD $0xd37ef548 // lsl x8, x10, #2 + WORD $0x9100054a // add x10, x10, #1 + WORD $0xeb09015f // cmp x10, x9 + WORD $0xbc686801 // ldr s1, [x0, x8] + WORD $0xbc686822 // ldr s2, [x1, x8] + WORD $0x1e223821 // fsub s1, s1, s2 + WORD $0x1e210821 // fmul s1, s1, s1 + WORD $0x1e212800 // fadd s0, s0, s1 + WORD $0x54ffff0b // b.lt .LBB0_12 + +LBB0_13: + WORD $0xbd000040 // str s0, [x2] + WORD $0xa8c17bfd // ldp x29, x30, [sp], #16 + WORD $0xd65f03c0 // ret diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2_neon_byte_arm64.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2_neon_byte_arm64.go new file mode 100644 index 0000000000000000000000000000000000000000..be33ed521a34a6234dcd09c9c04bc8c8336a0b00 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2_neon_byte_arm64.go @@ -0,0 +1,21 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +//go:build !noasm && arm64 + +// AUTO-GENERATED BY GOAT -- DO NOT EDIT + +package asm + +import "unsafe" + +//go:noescape +func l2_neon_byte_256(a, b, res, len unsafe.Pointer) diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2_neon_byte_arm64.s b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2_neon_byte_arm64.s new file mode 100644 index 0000000000000000000000000000000000000000..0a3d7affe1f57e5d0926927cad182edf9340697d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2_neon_byte_arm64.s @@ -0,0 +1,204 @@ +//go:build !noasm && arm64 +// AUTO-GENERATED BY GOAT -- DO NOT EDIT + +TEXT ·l2_neon_byte_256(SB), $0-32 + MOVD a+0(FP), R0 + MOVD b+8(FP), R1 + MOVD res+16(FP), R2 + MOVD len+24(FP), R3 + WORD $0xf9400069 // ldr x9, [x3] + WORD $0x6b0903e8 // negs w8, w9 + WORD $0x12000d2a // and w10, w9, #0xf + WORD $0x12000d08 // and w8, w8, #0xf + WORD $0x5a884548 // csneg w8, w10, w8, mi + WORD $0x4b08012a // sub w10, w9, w8 + WORD $0x7101015f // cmp w10, #64 + WORD $0x540000ea // b.ge .LBB0_2 + WORD $0x6f00e401 // movi v1.2d, #0000000000000000 + WORD $0x6f00e403 // movi v3.2d, #0000000000000000 + WORD $0x2a1f03eb // mov w11, wzr + WORD $0x6f00e402 // movi v2.2d, #0000000000000000 + WORD $0x6f00e400 // movi v0.2d, #0000000000000000 + WORD $0x14000026 // b .LBB0_4 + +LBB0_2: + WORD $0x6f00e400 // movi v0.2d, #0000000000000000 + WORD $0x6f00e402 // movi v2.2d, #0000000000000000 + WORD $0xaa1f03eb // mov x11, xzr + WORD $0x6f00e403 // movi v3.2d, #0000000000000000 + WORD $0x6f00e401 // movi v1.2d, #0000000000000000 + +LBB0_3: + WORD $0x8b0b000c // add x12, x0, x11 + WORD $0x8b0b002d // add x13, x1, x11 + WORD $0x4c402184 // ld1 { v4.16b, v5.16b, v6.16b, v7.16b }, [x12] + WORD $0x4c4021b0 // ld1 { v16.16b, v17.16b, v18.16b, v19.16b }, [x13] + WORD $0x9102016c // add x12, x11, #128 + WORD $0xeb0a019f // 
cmp x12, x10 + WORD $0x9101016b // add x11, x11, #64 + WORD $0x2e302094 // usubl v20.8h, v4.8b, v16.8b + WORD $0x2e3120b5 // usubl v21.8h, v5.8b, v17.8b + WORD $0x2e3220d6 // usubl v22.8h, v6.8b, v18.8b + WORD $0x2e3320f7 // usubl v23.8h, v7.8b, v19.8b + WORD $0x6e302098 // usubl2 v24.8h, v4.16b, v16.16b + WORD $0x6e3120b9 // usubl2 v25.8h, v5.16b, v17.16b + WORD $0x6e3220da // usubl2 v26.8h, v6.16b, v18.16b + WORD $0x6e3320e4 // usubl2 v4.8h, v7.16b, v19.16b + WORD $0x0e748280 // smlal v0.4s, v20.4h, v20.4h + WORD $0x0e7582a2 // smlal v2.4s, v21.4h, v21.4h + WORD $0x0e7682c3 // smlal v3.4s, v22.4h, v22.4h + WORD $0x0e7782e1 // smlal v1.4s, v23.4h, v23.4h + WORD $0x4e748280 // smlal2 v0.4s, v20.8h, v20.8h + WORD $0x4e7582a2 // smlal2 v2.4s, v21.8h, v21.8h + WORD $0x4e7682c3 // smlal2 v3.4s, v22.8h, v22.8h + WORD $0x4e7782e1 // smlal2 v1.4s, v23.8h, v23.8h + WORD $0x0e788300 // smlal v0.4s, v24.4h, v24.4h + WORD $0x0e798322 // smlal v2.4s, v25.4h, v25.4h + WORD $0x0e7a8343 // smlal v3.4s, v26.4h, v26.4h + WORD $0x0e648081 // smlal v1.4s, v4.4h, v4.4h + WORD $0x4e788300 // smlal2 v0.4s, v24.8h, v24.8h + WORD $0x4e798322 // smlal2 v2.4s, v25.8h, v25.8h + WORD $0x4e7a8343 // smlal2 v3.4s, v26.8h, v26.8h + WORD $0x4e648081 // smlal2 v1.4s, v4.8h, v4.8h + WORD $0x54fffc29 // b.ls .LBB0_3 + +LBB0_4: + WORD $0x6b0a017f // cmp w11, w10 + WORD $0x540001ca // b.ge .LBB0_7 + WORD $0x93407d4a // sxtw x10, w10 + WORD $0x2a0b03eb // mov w11, w11 + +LBB0_6: + WORD $0x3ceb6804 // ldr q4, [x0, x11] + WORD $0x3ceb6825 // ldr q5, [x1, x11] + WORD $0x9100416b // add x11, x11, #16 + WORD $0xeb0a017f // cmp x11, x10 + WORD $0x2e252086 // usubl v6.8h, v4.8b, v5.8b + WORD $0x6e252084 // usubl2 v4.8h, v4.16b, v5.16b + WORD $0x0e6680c0 // smlal v0.4s, v6.4h, v6.4h + WORD $0x4e6680c0 // smlal2 v0.4s, v6.8h, v6.8h + WORD $0x0e648080 // smlal v0.4s, v4.4h, v4.4h + WORD $0x4e648080 // smlal2 v0.4s, v4.8h, v4.8h + WORD $0x54fffecb // b.lt .LBB0_6 + +LBB0_7: + WORD $0x4ea38400 // add v0.4s, v0.4s, 
v3.4s + WORD $0x4ea18441 // add v1.4s, v2.4s, v1.4s + WORD $0x7100051f // cmp w8, #1 + WORD $0x4ea08420 // add v0.4s, v1.4s, v0.4s + WORD $0x4eb1b805 // addv s5, v0.4s + WORD $0x1e2600ad // fmov w13, s5 + WORD $0x54000c0b // b.lt .LBB0_21 + WORD $0xa9bf7bfd // stp x29, x30, [sp, #-16]! + WORD $0x93407d29 // sxtw x9, w9 + WORD $0x4b0803ec // neg w12, w8 + WORD $0x910003fd // mov x29, sp + WORD $0xcb08012a // sub x10, x9, x8 + WORD $0x9100054b // add x11, x10, #1 + WORD $0xeb09017f // cmp x11, x9 + WORD $0x9a8ad52a // csinc x10, x9, x10, le + WORD $0x8b08014a // add x10, x10, x8 + WORD $0xcb09014b // sub x11, x10, x9 + WORD $0x8b2cc12a // add x10, x9, w12, sxtw + WORD $0xf100217f // cmp x11, #8 + WORD $0x54000963 // b.lo .LBB0_19 + WORD $0xf100817f // cmp x11, #32 + WORD $0x54000062 // b.hs .LBB0_11 + WORD $0xaa1f03ec // mov x12, xzr + WORD $0x1400002f // b .LBB0_15 + +LBB0_11: + WORD $0x6f00e400 // movi v0.2d, #0000000000000000 + WORD $0x6f00e401 // movi v1.2d, #0000000000000000 + WORD $0xcb28c12d // sub x13, x9, w8, sxtw + WORD $0x6f00e402 // movi v2.2d, #0000000000000000 + WORD $0x6f00e403 // movi v3.2d, #0000000000000000 + WORD $0x927be96c // and x12, x11, #0xffffffffffffffe0 + WORD $0x6f00e404 // movi v4.2d, #0000000000000000 + WORD $0x6f00e406 // movi v6.2d, #0000000000000000 + WORD $0x910041ae // add x14, x13, #16 + WORD $0x6f00e407 // movi v7.2d, #0000000000000000 + WORD $0x8b0e002d // add x13, x1, x14 + WORD $0x8b0e000e // add x14, x0, x14 + WORD $0x6e0404a0 // mov v0.s[0], v5.s[0] + WORD $0x6f00e405 // movi v5.2d, #0000000000000000 + WORD $0xaa0c03ef // mov x15, x12 + +LBB0_12: + WORD $0xad7fcdb0 // ldp q16, q19, [x13, #-16] + WORD $0xf10081ef // subs x15, x15, #32 + WORD $0xad7fc9d1 // ldp q17, q18, [x14, #-16] + WORD $0x910081ad // add x13, x13, #32 + WORD $0x910081ce // add x14, x14, #32 + WORD $0x6e302234 // usubl2 v20.8h, v17.16b, v16.16b + WORD $0x2e302230 // usubl v16.8h, v17.8b, v16.8b + WORD $0x6e332251 // usubl2 v17.8h, v18.16b, v19.16b + WORD 
$0x2e332252 // usubl v18.8h, v18.8b, v19.8b + WORD $0x4e748283 // smlal2 v3.4s, v20.8h, v20.8h + WORD $0x0e748282 // smlal v2.4s, v20.4h, v20.4h + WORD $0x4e708201 // smlal2 v1.4s, v16.8h, v16.8h + WORD $0x0e708200 // smlal v0.4s, v16.4h, v16.4h + WORD $0x4e718227 // smlal2 v7.4s, v17.8h, v17.8h + WORD $0x0e718225 // smlal v5.4s, v17.4h, v17.4h + WORD $0x4e728246 // smlal2 v6.4s, v18.8h, v18.8h + WORD $0x0e728244 // smlal v4.4s, v18.4h, v18.4h + WORD $0x54fffde1 // b.ne .LBB0_12 + WORD $0x4ea184c1 // add v1.4s, v6.4s, v1.4s + WORD $0x4ea384e3 // add v3.4s, v7.4s, v3.4s + WORD $0xeb0c017f // cmp x11, x12 + WORD $0x4ea08480 // add v0.4s, v4.4s, v0.4s + WORD $0x4ea284a2 // add v2.4s, v5.4s, v2.4s + WORD $0x4ea38421 // add v1.4s, v1.4s, v3.4s + WORD $0x4ea28400 // add v0.4s, v0.4s, v2.4s + WORD $0x4ea18400 // add v0.4s, v0.4s, v1.4s + WORD $0x4eb1b800 // addv s0, v0.4s + WORD $0x1e26000d // fmov w13, s0 + WORD $0x54000440 // b.eq .LBB0_20 + WORD $0xf27d057f // tst x11, #0x18 + WORD $0x54000300 // b.eq .LBB0_18 + +LBB0_15: + WORD $0x6f00e400 // movi v0.2d, #0000000000000000 + WORD $0x6f00e401 // movi v1.2d, #0000000000000000 + WORD $0x8b09018e // add x14, x12, x9 + WORD $0xcb28c1ce // sub x14, x14, w8, sxtw + WORD $0x4e041da0 // mov v0.s[0], w13 + WORD $0x927df16d // and x13, x11, #0xfffffffffffffff8 + WORD $0x8b0d014a // add x10, x10, x13 + WORD $0xcb0d0188 // sub x8, x12, x13 + WORD $0x8b0e000c // add x12, x0, x14 + WORD $0x8b0e002e // add x14, x1, x14 + +LBB0_16: + WORD $0xfc408582 // ldr d2, [x12], #8 + WORD $0xb1002108 // adds x8, x8, #8 + WORD $0xfc4085c3 // ldr d3, [x14], #8 + WORD $0x2e232042 // usubl v2.8h, v2.8b, v3.8b + WORD $0x4e628041 // smlal2 v1.4s, v2.8h, v2.8h + WORD $0x0e628040 // smlal v0.4s, v2.4h, v2.4h + WORD $0x54ffff41 // b.ne .LBB0_16 + WORD $0x4ea18400 // add v0.4s, v0.4s, v1.4s + WORD $0xeb0d017f // cmp x11, x13 + WORD $0x4eb1b800 // addv s0, v0.4s + WORD $0x1e26000d // fmov w13, s0 + WORD $0x54000061 // b.ne .LBB0_19 + WORD $0x14000009 // b 
.LBB0_20 + +LBB0_18: + WORD $0x8b0c014a // add x10, x10, x12 + +LBB0_19: + WORD $0x386a6808 // ldrb w8, [x0, x10] + WORD $0x386a682b // ldrb w11, [x1, x10] + WORD $0x9100054a // add x10, x10, #1 + WORD $0xeb09015f // cmp x10, x9 + WORD $0x4b0b0108 // sub w8, w8, w11 + WORD $0x1b08350d // madd w13, w8, w8, w13 + WORD $0x54ffff4b // b.lt .LBB0_19 + +LBB0_20: + WORD $0xa8c17bfd // ldp x29, x30, [sp], #16 + +LBB0_21: + WORD $0xb900004d // str w13, [x2] + WORD $0xd65f03c0 // ret diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2_stub_amd64.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2_stub_amd64.go new file mode 100644 index 0000000000000000000000000000000000000000..226cc14360f843d2dc6e78d4a77e6dd65279f7d7 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2_stub_amd64.go @@ -0,0 +1,16 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by command: go run l2.go -out l2_amd64.s -stubs l2_stub_amd64.go. DO NOT EDIT. 
+ +package asm + +func L2(x []float32, y []float32) float32 diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2_sve_arm64.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2_sve_arm64.go new file mode 100644 index 0000000000000000000000000000000000000000..11913bbda8b1cb270062551dc24f448c4cf6e9f7 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2_sve_arm64.go @@ -0,0 +1,21 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +//go:build !noasm && arm64 + +// AUTO-GENERATED BY GOAT -- DO NOT EDIT + +package asm + +import "unsafe" + +//go:noescape +func l2_sve(a, b, res, len unsafe.Pointer) diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2_sve_arm64.s b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2_sve_arm64.s new file mode 100644 index 0000000000000000000000000000000000000000..dbf46c72d460e35c3bb379669fddf2f3fbfc48a3 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/l2_sve_arm64.s @@ -0,0 +1,133 @@ +//go:build !noasm && arm64 +// AUTO-GENERATED BY GOAT -- DO NOT EDIT + +TEXT ·l2_sve(SB), $0-32 + MOVD a+0(FP), R0 + MOVD b+8(FP), R1 + MOVD res+16(FP), R2 + MOVD len+24(FP), R3 + WORD $0xf9400068 // ldr x8, [x3] + WORD $0x04a0e3ea // cntw x10 + WORD $0xcb0a03e9 // neg x9, x10 + WORD $0x04bf502c // rdvl x12, #1 + WORD $0x2598e3e0 // ptrue p0.s + WORD $0x8a090109 // and x9, x8, x9 + WORD $0xeb09019f // cmp x12, x9 + WORD $0x540000e9 // b.ls .LBB0_2 + WORD $0x25b8c000 // mov z0.s, #0 + WORD $0xaa1f03eb // mov x11, xzr + WORD $0x04603002 // mov z2.d, z0.d + WORD $0x04603003 
// mov z3.d, z0.d + WORD $0x04603001 // mov z1.d, z0.d + WORD $0x14000028 // b .LBB0_5 + +LBB0_2: + WORD $0x25b8c001 // mov z1.s, #0 + WORD $0x04bf5070 // rdvl x16, #3 + WORD $0xaa1f03eb // mov x11, xzr + WORD $0x8b0c000f // add x15, x0, x12 + WORD $0x8b0c0032 // add x18, x1, x12 + WORD $0x04613023 // mov z3.d, z1.d + WORD $0x04bf5051 // rdvl x17, #2 + WORD $0x04613022 // mov z2.d, z1.d + WORD $0x04613020 // mov z0.d, z1.d + WORD $0x8b10000d // add x13, x0, x16 + WORD $0x8b11000e // add x14, x0, x17 + WORD $0x8b100030 // add x16, x1, x16 + WORD $0x8b110031 // add x17, x1, x17 + +LBB0_3: + WORD $0xa54b4004 // ld1w { z4.s }, p0/z, [x0, x11, lsl #2] + WORD $0xa54b41e5 // ld1w { z5.s }, p0/z, [x15, x11, lsl #2] + WORD $0xa54b41c6 // ld1w { z6.s }, p0/z, [x14, x11, lsl #2] + WORD $0xa54b41a7 // ld1w { z7.s }, p0/z, [x13, x11, lsl #2] + WORD $0xa54b4030 // ld1w { z16.s }, p0/z, [x1, x11, lsl #2] + WORD $0x65900484 // fsub z4.s, z4.s, z16.s + WORD $0xa54b4251 // ld1w { z17.s }, p0/z, [x18, x11, lsl #2] + WORD $0xa54b4232 // ld1w { z18.s }, p0/z, [x17, x11, lsl #2] + WORD $0xa54b4213 // ld1w { z19.s }, p0/z, [x16, x11, lsl #2] + WORD $0x659104a5 // fsub z5.s, z5.s, z17.s + WORD $0x659204c6 // fsub z6.s, z6.s, z18.s + WORD $0x659304e7 // fsub z7.s, z7.s, z19.s + WORD $0x8b0c016b // add x11, x11, x12 + WORD $0x8b0b0183 // add x3, x12, x11 + WORD $0xeb09007f // cmp x3, x9 + WORD $0x65a40081 // fmla z1.s, p0/m, z4.s, z4.s + WORD $0x65a500a3 // fmla z3.s, p0/m, z5.s, z5.s + WORD $0x65a600c2 // fmla z2.s, p0/m, z6.s, z6.s + WORD $0x65a700e0 // fmla z0.s, p0/m, z7.s, z7.s + WORD $0x54fffda9 // b.ls .LBB0_3 + WORD $0x14000006 // b .LBB0_5 + +LBB0_4: + WORD $0xa54b4004 // ld1w { z4.s }, p0/z, [x0, x11, lsl #2] + WORD $0xa54b4025 // ld1w { z5.s }, p0/z, [x1, x11, lsl #2] + WORD $0x8b0a016b // add x11, x11, x10 + WORD $0x65850484 // fsub z4.s, z4.s, z5.s + WORD $0x65a40081 // fmla z1.s, p0/m, z4.s, z4.s + +LBB0_5: + WORD $0xeb09017f // cmp x11, x9 + WORD $0x54ffff43 // b.lo .LBB0_4 + 
WORD $0x65802021 // faddv s1, p0, z1.s + WORD $0xeb08013f // cmp x9, x8 + WORD $0x65802063 // faddv s3, p0, z3.s + WORD $0x1e232821 // fadd s1, s1, s3 + WORD $0x65802042 // faddv s2, p0, z2.s + WORD $0x65802000 // faddv s0, p0, z0.s + WORD $0x1e222821 // fadd s1, s1, s2 + WORD $0x1e202820 // fadd s0, s1, s0 + WORD $0x54000580 // b.eq .LBB0_13 + WORD $0xb240012a // orr x10, x9, #0x1 + WORD $0xeb0a011f // cmp x8, x10 + WORD $0x9a8a810a // csel x10, x8, x10, hi + WORD $0xcb09014b // sub x11, x10, x9 + WORD $0x0460e3ea // cnth x10 + WORD $0xeb0a017f // cmp x11, x10 + WORD $0x54000062 // b.hs .LBB0_9 + WORD $0xaa0903ea // mov x10, x9 + WORD $0x1400001b // b .LBB0_12 + +LBB0_9: + WORD $0xa9bf7bfd // stp x29, x30, [sp, #-16]! + WORD $0xcb0a03ed // neg x13, x10 + WORD $0x04bf504f // rdvl x15, #2 + WORD $0x8b09080e // add x14, x0, x9, lsl #2 + WORD $0x910003fd // mov x29, sp + WORD $0x8a0d016c // and x12, x11, x13 + WORD $0x8b0c012a // add x10, x9, x12 + WORD $0x8b090829 // add x9, x1, x9, lsl #2 + WORD $0xaa0c03f0 // mov x16, x12 + +LBB0_10: + WORD $0xa540a1c1 // ld1w { z1.s }, p0/z, [x14] + WORD $0xa540a123 // ld1w { z3.s }, p0/z, [x9] + WORD $0xab0d0210 // adds x16, x16, x13 + WORD $0x65830421 // fsub z1.s, z1.s, z3.s + WORD $0xa541a1c2 // ld1w { z2.s }, p0/z, [x14, #1, mul vl] + WORD $0xa541a124 // ld1w { z4.s }, p0/z, [x9, #1, mul vl] + WORD $0x8b0f01ce // add x14, x14, x15 + WORD $0x8b0f0129 // add x9, x9, x15 + WORD $0x65810821 // fmul z1.s, z1.s, z1.s + WORD $0x65982020 // fadda s0, p0, s0, z1.s + WORD $0x65840441 // fsub z1.s, z2.s, z4.s + WORD $0x65810821 // fmul z1.s, z1.s, z1.s + WORD $0x65982020 // fadda s0, p0, s0, z1.s + WORD $0x54fffe61 // b.ne .LBB0_10 + WORD $0xeb0c017f // cmp x11, x12 + WORD $0xa8c17bfd // ldp x29, x30, [sp], #16 + WORD $0x54000120 // b.eq .LBB0_13 + +LBB0_12: + WORD $0xbc6a7801 // ldr s1, [x0, x10, lsl #2] + WORD $0xbc6a7822 // ldr s2, [x1, x10, lsl #2] + WORD $0x9100054a // add x10, x10, #1 + WORD $0xeb08015f // cmp x10, x8 + WORD 
$0x1e223821 // fsub s1, s1, s2 + WORD $0x1e210821 // fmul s1, s1, s1 + WORD $0x1e212800 // fadd s0, s0, s1 + WORD $0x54ffff23 // b.lo .LBB0_12 + +LBB0_13: + WORD $0xbd000040 // str s0, [x2] + WORD $0xd65f03c0 // ret diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/prefetch.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/prefetch.go new file mode 100644 index 0000000000000000000000000000000000000000..8ae03c6817499abf702b030d78acdb6b75f58624 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/prefetch.go @@ -0,0 +1,33 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +//go:build ignore +// +build ignore + +package main + +import ( + . "github.com/mmcloughlin/avo/build" + . "github.com/mmcloughlin/avo/operand" + // . "github.com/mmcloughlin/avo/reg" +) + +func main() { + TEXT("Prefetch", NOSPLIT, "func(addr uintptr)") + addr := Mem{Base: Load(Param("addr"), GP64())} + _ = addr + + PREFETCHT0(addr) + + RET() + + Generate() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/prefetch_amd64.s b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/prefetch_amd64.s new file mode 100644 index 0000000000000000000000000000000000000000..6f3344b60ab29627ad9113b343a1260b26856f5c --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/prefetch_amd64.s @@ -0,0 +1,10 @@ +// Code generated by command: go run prefetch.go -out prefetch.s -stubs prefetch_stub.go. DO NOT EDIT. 
+ +#include "textflag.h" + +// func Prefetch(addr uintptr) +// Requires: MMX+ +TEXT ·Prefetch(SB), NOSPLIT, $0-8 + MOVQ addr+0(FP), AX + PREFETCHT0 (AX) + RET diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/prefetch_stub_amd64.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/prefetch_stub_amd64.go new file mode 100644 index 0000000000000000000000000000000000000000..f99d86db2c109575954d7f5667442c9925a30d07 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/asm/prefetch_stub_amd64.go @@ -0,0 +1,16 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by command: go run prefetch.go -out prefetch.s -stubs prefetch_stub.go. DO NOT EDIT. + +package asm + +func Prefetch(addr uintptr) diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/bench_amd64_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/bench_amd64_test.go new file mode 100644 index 0000000000000000000000000000000000000000..7a86f0dae725c3cd55690d09035ca88cf239dd6b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/bench_amd64_test.go @@ -0,0 +1,74 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package distancer + +import ( + "fmt" + "testing" + + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer/asm" +) + +func benchmarkDot(b *testing.B, dims int, dotFn func(a, b []float32) float32) { + r := getRandomSeed() + + vec1 := make([]float32, dims) + vec2 := make([]float32, dims) + for i := range vec1 { + vec1[i] = r.Float32() + vec2[i] = r.Float32() + } + + b.ResetTimer() + for n := 0; n < b.N; n++ { + dotFn(vec1, vec2) + } +} + +func BenchmarkDot(b *testing.B) { + dims := []int{2, 4, 6, 8, 10, 12, 16, 24, 30, 32, 128, 256, 300, 384, 512, 768, 1024, 1536} + for _, dim := range dims { + b.Run(fmt.Sprintf("%d dimensions", dim), func(b *testing.B) { + b.Run("pure go", func(b *testing.B) { benchmarkDot(b, dim, DotProductFloatGo) }) + b.Run("avx", func(b *testing.B) { benchmarkDot(b, dim, asm.Dot) }) + b.Run("avx512", func(b *testing.B) { benchmarkDot(b, dim, asm.DotAVX512) }) + }) + } +} + +func benchmarkHamming(b *testing.B, dims int, hammingFn func(a, b []float32) float32) { + r := getRandomSeed() + + vec1 := make([]float32, dims) + vec2 := make([]float32, dims) + for i := range vec1 { + vec1[i] = r.Float32() + vec2[i] = r.Float32() + } + + b.ResetTimer() + for n := 0; n < b.N; n++ { + hammingFn(vec1, vec2) + } +} + +func BenchmarkHamming(b *testing.B) { + dims := []int{2, 4, 6, 8, 10, 12, 16, 24, 30, 32, 128, 256, 300, 384, 512, 768, 1024, 1536} + + for _, dim := range dims { + b.Run(fmt.Sprintf("%d dimensions", dim), func(b *testing.B) { + b.Run("pure go", func(b *testing.B) { benchmarkHamming(b, dim, HammingDistanceGo) }) + b.Run("avx256", func(b *testing.B) { benchmarkHamming(b, dim, asm.DotAVX256) }) + b.Run("avx512", func(b *testing.B) { benchmarkHamming(b, dim, asm.DotAVX512) }) + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/bench_arm64_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/bench_arm64_test.go 
new file mode 100644 index 0000000000000000000000000000000000000000..215229c8613563e4d9481df39e293b2a2f286efd --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/bench_arm64_test.go @@ -0,0 +1,140 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package distancer + +import ( + "fmt" + "testing" + + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer/asm" + "golang.org/x/sys/cpu" +) + +func benchmarkDotGo(b *testing.B, dims int) { + r := getRandomSeed() + + vec1 := make([]float32, dims) + vec2 := make([]float32, dims) + for i := range vec1 { + vec1[i] = r.Float32() + vec2[i] = r.Float32() + } + + b.ResetTimer() + for n := 0; n < b.N; n++ { + DotProductFloatGo(vec1, vec2) + } +} + +func benchmarkDotNeon(b *testing.B, dims int) { + r := getRandomSeed() + + vec1 := make([]float32, dims) + vec2 := make([]float32, dims) + for i := range vec1 { + vec1[i] = r.Float32() + vec2[i] = r.Float32() + } + + b.ResetTimer() + + for n := 0; n < b.N; n++ { + if cpu.ARM64.HasSVE { + asm.Dot_SVE(vec1, vec2) + } else { + asm.Dot_Neon(vec1, vec2) + } + } +} + +func BenchmarkDot(b *testing.B) { + dims := []int{30, 32, 128, 256, 300, 384, 600, 768, 1024, 1536} + for _, dim := range dims { + b.Run(fmt.Sprintf("%d dimensions", dim), func(b *testing.B) { + b.Run("pure go", func(b *testing.B) { benchmarkDotGo(b, dim) }) + b.Run("avx", func(b *testing.B) { benchmarkDotNeon(b, dim) }) + }) + } +} + +func benchmarkHammingGo(b *testing.B, dims int) { + r := getRandomSeed() + + vec1 := make([]float32, dims) + vec2 := make([]float32, dims) + for i := range vec1 { + vec1[i] = r.Float32() + vec2[i] = r.Float32() + } + + b.ResetTimer() + for n := 0; n < b.N; n++ { + HammingDistanceGo(vec1, vec2) + } +} 
+ +func benchmarkHammingNeon(b *testing.B, dims int) { + r := getRandomSeed() + + vec1 := make([]float32, dims) + vec2 := make([]float32, dims) + for i := range vec1 { + vec1[i] = r.Float32() + vec2[i] = r.Float32() + } + + b.ResetTimer() + for n := 0; n < b.N; n++ { + asm.Hamming(vec1, vec2) + } +} + +func BenchmarkHamming(b *testing.B) { + dims := []int{ + 1, + 2, + 3, + 4, + 5, + 6, + 8, + 10, + 12, + 16, + 24, + 30, + 31, + 32, + 64, + 67, + 128, + 256, + 260, + 299, + 300, + 384, + 390, + 600, + 768, + 777, + 784, + 1024, + 1536, + } + + for _, dim := range dims { + b.Run(fmt.Sprintf("%d dimensions", dim), func(b *testing.B) { + b.Run("pure go", func(b *testing.B) { benchmarkHammingGo(b, dim) }) + b.Run("avx", func(b *testing.B) { benchmarkHammingNeon(b, dim) }) + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/bench_dist_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/bench_dist_test.go new file mode 100644 index 0000000000000000000000000000000000000000..31aa6705a2c91cca8af0c6a8f5773786518ca4ca --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/bench_dist_test.go @@ -0,0 +1,42 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package distancer_test + +import ( + "math/rand" + "testing" + + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer" + "github.com/weaviate/weaviate/adapters/repos/db/vector/testinghelpers" +) + +func BenchmarkDist(b *testing.B) { + distancerDot := distancer.NewDotProductProvider() + v, _ := testinghelpers.RandomVecs(2, 0, 1536) + b.ResetTimer() + for i := 0; i < b.N; i++ { + distancerDot.SingleDist(v[0], v[1]) + } +} + +func BenchmarkAllow(b *testing.B) { + ids := make([]uint64, 0, 40_000) + for i := 0; i < 40_000; i++ { + ids = append(ids, rand.Uint64()%300_000_000) + } + allow := helpers.NewAllowList(ids...) + b.ResetTimer() + for i := 0; i < b.N; i++ { + allow.Contains(rand.Uint64() % 300_000_000) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/.gitignore b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..f406623fc69d98956a8381165fa0471e4ab387df --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/.gitignore @@ -0,0 +1,2 @@ +*.o +*.s \ No newline at end of file diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/dot_avx256_amd64.c b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/dot_avx256_amd64.c new file mode 100644 index 0000000000000000000000000000000000000000..2a0e237c1a44e8e5e0ae3e8772c7305492ab55aa --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/dot_avx256_amd64.c @@ -0,0 +1,97 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2023 Weaviate B.V. 
All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +#include + +void dot_256(float *a, float *b, float *res, long *len) +{ + int n = *len; + float sum = 0; + + // fast path for small dimensions + if (n < 8) + { + do + { + sum += a[0] * b[0]; + n--; + a++; + b++; + } while (n); + + *res = sum; + return; + } + + // Create 4 registers to store the results + __m256 acc[4]; + acc[0] = _mm256_setzero_ps(); + acc[1] = _mm256_setzero_ps(); + acc[2] = _mm256_setzero_ps(); + acc[3] = _mm256_setzero_ps(); + + while (n >= 32) + { + // Unroll loop for 32 floats + __m256 a_vec0 = _mm256_loadu_ps(a); + __m256 a_vec1 = _mm256_loadu_ps(a + 8); + __m256 a_vec2 = _mm256_loadu_ps(a + 16); + __m256 a_vec3 = _mm256_loadu_ps(a + 24); + + __m256 b_vec0 = _mm256_loadu_ps(b); + __m256 b_vec1 = _mm256_loadu_ps(b + 8); + __m256 b_vec2 = _mm256_loadu_ps(b + 16); + __m256 b_vec3 = _mm256_loadu_ps(b + 24); + + acc[0] = _mm256_fmadd_ps(a_vec0, b_vec0, acc[0]); + acc[1] = _mm256_fmadd_ps(a_vec1, b_vec1, acc[1]); + acc[2] = _mm256_fmadd_ps(a_vec2, b_vec2, acc[2]); + acc[3] = _mm256_fmadd_ps(a_vec3, b_vec3, acc[3]); + + n -= 32; + a += 32; + b += 32; + } + + // Process 8 floats at a time + while (n >= 8) + { + __m256 a_vec0 = _mm256_loadu_ps(a); + __m256 b_vec0 = _mm256_loadu_ps(b); + + acc[0] = _mm256_fmadd_ps(a_vec0, b_vec0, acc[0]); + + n -= 8; + a += 8; + b += 8; + } + + // Tail + while (n) + { + sum += a[0] * b[0]; + n--; + a++; + b++; + } + + // Reduce and store the result + acc[0] = _mm256_add_ps(acc[1], acc[0]); + acc[2] = _mm256_add_ps(acc[3], acc[2]); + acc[0] = _mm256_add_ps(acc[2], acc[0]); + __m256 t1 = _mm256_hadd_ps(acc[0], acc[0]); + __m256 t2 = _mm256_hadd_ps(t1, t1); + __m128 t3 = _mm256_extractf128_ps(t2, 1); + __m128 t4 = _mm_add_ps(_mm256_castps256_ps128(t2), t3); + sum += _mm_cvtss_f32(t4); + + *res = sum; +} \ No newline at end of file diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/dot_avx512_amd64.c 
b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/dot_avx512_amd64.c new file mode 100644 index 0000000000000000000000000000000000000000..8a9d1b799e8342a5bc6d6376546931c04aba8f16 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/dot_avx512_amd64.c @@ -0,0 +1,177 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2023 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +#include + +void dot_512(float *a, float *b, float *res, long *len) +{ + int n = *len; + float sum = 0; + + // fast path for small dimensions + if (n < 8) + { + do + { + sum += a[0] * b[0]; + n--; + a++; + b++; + } while (n); + + *res = sum; + return; + } + + // Create 4 registers to store the results + __m256 acc[4]; + acc[0] = _mm256_setzero_ps(); + acc[1] = _mm256_setzero_ps(); + acc[2] = _mm256_setzero_ps(); + acc[3] = _mm256_setzero_ps(); + + if (n >= 128) + { + // create 8 registers + __m512 acc5[8]; + acc5[0] = _mm512_setzero_ps(); + acc5[1] = _mm512_setzero_ps(); + acc5[2] = _mm512_setzero_ps(); + acc5[3] = _mm512_setzero_ps(); + acc5[4] = _mm512_setzero_ps(); + acc5[5] = _mm512_setzero_ps(); + acc5[6] = _mm512_setzero_ps(); + acc5[7] = _mm512_setzero_ps(); + + // Process 128 floats at a time + do + { + __m512 a_vec0 = _mm512_loadu_ps(a); + __m512 a_vec1 = _mm512_loadu_ps(a + 16); + __m512 a_vec2 = _mm512_loadu_ps(a + 32); + __m512 a_vec3 = _mm512_loadu_ps(a + 48); + __m512 a_vec4 = _mm512_loadu_ps(a + 64); + __m512 a_vec5 = _mm512_loadu_ps(a + 80); + __m512 a_vec6 = _mm512_loadu_ps(a + 96); + __m512 a_vec7 = _mm512_loadu_ps(a + 112); + + __m512 b_vec0 = _mm512_loadu_ps(b); + __m512 b_vec1 = _mm512_loadu_ps(b + 16); + __m512 b_vec2 = _mm512_loadu_ps(b + 32); + __m512 b_vec3 = _mm512_loadu_ps(b + 48); + __m512 b_vec4 = 
_mm512_loadu_ps(b + 64); + __m512 b_vec5 = _mm512_loadu_ps(b + 80); + __m512 b_vec6 = _mm512_loadu_ps(b + 96); + __m512 b_vec7 = _mm512_loadu_ps(b + 112); + + acc5[0] = _mm512_fmadd_ps(a_vec0, b_vec0, acc5[0]); + acc5[1] = _mm512_fmadd_ps(a_vec1, b_vec1, acc5[1]); + acc5[2] = _mm512_fmadd_ps(a_vec2, b_vec2, acc5[2]); + acc5[3] = _mm512_fmadd_ps(a_vec3, b_vec3, acc5[3]); + acc5[4] = _mm512_fmadd_ps(a_vec4, b_vec4, acc5[4]); + acc5[5] = _mm512_fmadd_ps(a_vec5, b_vec5, acc5[5]); + acc5[6] = _mm512_fmadd_ps(a_vec6, b_vec6, acc5[6]); + acc5[7] = _mm512_fmadd_ps(a_vec7, b_vec7, acc5[7]); + + n -= 128; + a += 128; + b += 128; + } while (n >= 128); + + acc5[0] = _mm512_add_ps(acc5[1], acc5[0]); + acc5[2] = _mm512_add_ps(acc5[3], acc5[2]); + acc5[4] = _mm512_add_ps(acc5[5], acc5[4]); + acc5[6] = _mm512_add_ps(acc5[7], acc5[6]); + acc5[0] = _mm512_add_ps(acc5[2], acc5[0]); + acc5[4] = _mm512_add_ps(acc5[6], acc5[4]); + acc5[0] = _mm512_add_ps(acc5[4], acc5[0]); + + __m256 low = _mm512_castps512_ps256(acc5[0]); + __m256 high = _mm256_castpd_ps(_mm512_extractf64x4_pd(_mm512_castps_pd(acc5[0]), 1)); + + acc[0] = _mm256_add_ps(low, acc[0]); + acc[0] = _mm256_add_ps(high, acc[0]); + + if (!n) + { + // Reduce and store the result + acc[0] = _mm256_add_ps(acc[1], acc[0]); + acc[2] = _mm256_add_ps(acc[3], acc[2]); + acc[0] = _mm256_add_ps(acc[2], acc[0]); + + __m256 t1 = _mm256_hadd_ps(acc[0], acc[0]); + __m256 t2 = _mm256_hadd_ps(t1, t1); + __m128 t3 = _mm256_extractf128_ps(t2, 1); + __m128 t4 = _mm_add_ps(_mm256_castps256_ps128(t2), t3); + sum += _mm_cvtss_f32(t4); + + *res = sum; + return; + } + } + + while (n >= 32) + { + // Unroll loop for 32 floats + __m256 a_vec0 = _mm256_loadu_ps(a); + __m256 a_vec1 = _mm256_loadu_ps(a + 8); + __m256 a_vec2 = _mm256_loadu_ps(a + 16); + __m256 a_vec3 = _mm256_loadu_ps(a + 24); + + __m256 b_vec0 = _mm256_loadu_ps(b); + __m256 b_vec1 = _mm256_loadu_ps(b + 8); + __m256 b_vec2 = _mm256_loadu_ps(b + 16); + __m256 b_vec3 = _mm256_loadu_ps(b + 24); 
+ + acc[0] = _mm256_fmadd_ps(a_vec0, b_vec0, acc[0]); + acc[1] = _mm256_fmadd_ps(a_vec1, b_vec1, acc[1]); + acc[2] = _mm256_fmadd_ps(a_vec2, b_vec2, acc[2]); + acc[3] = _mm256_fmadd_ps(a_vec3, b_vec3, acc[3]); + + n -= 32; + a += 32; + b += 32; + } + + // Process 8 floats at a time + while (n >= 8) + { + __m256 a_vec0 = _mm256_loadu_ps(a); + __m256 b_vec0 = _mm256_loadu_ps(b); + + acc[0] = _mm256_fmadd_ps(a_vec0, b_vec0, acc[0]); + + n -= 8; + a += 8; + b += 8; + } + + // Tail + while (n) + { + sum += a[0] * b[0]; + n--; + a++; + b++; + } + + // Reduce and store the result + acc[0] = _mm256_add_ps(acc[1], acc[0]); + acc[2] = _mm256_add_ps(acc[3], acc[2]); + acc[0] = _mm256_add_ps(acc[2], acc[0]); + __m256 t1 = _mm256_hadd_ps(acc[0], acc[0]); + __m256 t2 = _mm256_hadd_ps(t1, t1); + __m128 t3 = _mm256_extractf128_ps(t2, 1); + __m128 t4 = _mm_add_ps(_mm256_castps256_ps128(t2), t3); + sum += _mm_cvtss_f32(t4); + + *res = sum; +} \ No newline at end of file diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/dot_byte_arm64.c b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/dot_byte_arm64.c new file mode 100644 index 0000000000000000000000000000000000000000..9962164d00c2bbb5d52fec02524d97a8556740c6 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/dot_byte_arm64.c @@ -0,0 +1,129 @@ +#include + +void dot_byte_256(unsigned char *a, unsigned char *b, unsigned int *res, long *len) +{ + int size = *len; + + // Use the vectorized version for the first n - (n % 16) elements + int l = size - (size % 16); + + uint32x4_t res_vec0 = vdupq_n_u32(0); + uint32x4_t res_vec1 = vdupq_n_u32(0); + uint32x4_t res_vec2 = vdupq_n_u32(0); + uint32x4_t res_vec3 = vdupq_n_u32(0); + + int i = 0; + + // Load 4*16 bytes at a time + while (i + 64 <= l) + { + uint8x16x4_t a4 = vld1q_u8_x4(a + i); + uint8x16x4_t b4 = vld1q_u8_x4(b + i); + + // Convert 8-bit vectors to 16-bit vectors 
to prevent overflow + uint16x8_t a0_low = vmovl_u8(vget_low_u8(a4.val[0])); + uint16x8_t a0_high = vmovl_u8(vget_high_u8(a4.val[0])); + uint16x8_t b0_low = vmovl_u8(vget_low_u8(b4.val[0])); + uint16x8_t b0_high = vmovl_u8(vget_high_u8(b4.val[0])); + + uint16x8_t a1_low = vmovl_u8(vget_low_u8(a4.val[1])); + uint16x8_t a1_high = vmovl_u8(vget_high_u8(a4.val[1])); + uint16x8_t b1_low = vmovl_u8(vget_low_u8(b4.val[1])); + uint16x8_t b1_high = vmovl_u8(vget_high_u8(b4.val[1])); + + uint16x8_t a2_low = vmovl_u8(vget_low_u8(a4.val[2])); + uint16x8_t a2_high = vmovl_u8(vget_high_u8(a4.val[2])); + uint16x8_t b2_low = vmovl_u8(vget_low_u8(b4.val[2])); + uint16x8_t b2_high = vmovl_u8(vget_high_u8(b4.val[2])); + + uint16x8_t a3_low = vmovl_u8(vget_low_u8(a4.val[3])); + uint16x8_t a3_high = vmovl_u8(vget_high_u8(a4.val[3])); + uint16x8_t b3_low = vmovl_u8(vget_low_u8(b4.val[3])); + uint16x8_t b3_high = vmovl_u8(vget_high_u8(b4.val[3])); + + // Multiply 16-bit vectors + uint16x8_t product0_low = vmulq_u16(a0_low, b0_low); + uint16x8_t product0_high = vmulq_u16(a0_high, b0_high); + + uint16x8_t product1_low = vmulq_u16(a1_low, b1_low); + uint16x8_t product1_high = vmulq_u16(a1_high, b1_high); + + uint16x8_t product2_low = vmulq_u16(a2_low, b2_low); + uint16x8_t product2_high = vmulq_u16(a2_high, b2_high); + + uint16x8_t product3_low = vmulq_u16(a3_low, b3_low); + uint16x8_t product3_high = vmulq_u16(a3_high, b3_high); + + // Sum the products to 32-bit integers + uint32x4_t sum0_low_32 = vpaddlq_u16(product0_low); + uint32x4_t sum0_high_32 = vpaddlq_u16(product0_high); + + uint32x4_t sum1_low_32 = vpaddlq_u16(product1_low); + uint32x4_t sum1_high_32 = vpaddlq_u16(product1_high); + + uint32x4_t sum2_low_32 = vpaddlq_u16(product2_low); + uint32x4_t sum2_high_32 = vpaddlq_u16(product2_high); + + uint32x4_t sum3_low_32 = vpaddlq_u16(product3_low); + uint32x4_t sum3_high_32 = vpaddlq_u16(product3_high); + + // Add the results to the final vectors + res_vec0 = vaddq_u32(res_vec0, 
sum0_low_32); + res_vec0 = vaddq_u32(res_vec0, sum0_high_32); + + res_vec1 = vaddq_u32(res_vec1, sum1_low_32); + res_vec1 = vaddq_u32(res_vec1, sum1_high_32); + + res_vec2 = vaddq_u32(res_vec2, sum2_low_32); + res_vec2 = vaddq_u32(res_vec2, sum2_high_32); + + res_vec3 = vaddq_u32(res_vec3, sum3_low_32); + res_vec3 = vaddq_u32(res_vec3, sum3_high_32); + + i += 64; + } + + // Process the remaining elements + while (i < l) + { + uint8x16_t a_vec = vld1q_u8(a + i); + uint8x16_t b_vec = vld1q_u8(b + i); + + // Convert 8-bit vectors to 16-bit vectors to prevent overflow + uint16x8_t a_vec_low = vmovl_u8(vget_low_u8(a_vec)); + uint16x8_t a_vec_high = vmovl_u8(vget_high_u8(a_vec)); + uint16x8_t b_vec_low = vmovl_u8(vget_low_u8(b_vec)); + uint16x8_t b_vec_high = vmovl_u8(vget_high_u8(b_vec)); + + // Multiply 16-bit vectors + uint16x8_t product_low = vmulq_u16(a_vec_low, b_vec_low); + uint16x8_t product_high = vmulq_u16(a_vec_high, b_vec_high); + + // Sum the products to 32-bit integers + uint32x4_t sum_low_32 = vpaddlq_u16(product_low); + uint32x4_t sum_high_32 = vpaddlq_u16(product_high); + + // Add the results to the final vector + res_vec0 = vaddq_u32(res_vec0, sum_low_32); + res_vec0 = vaddq_u32(res_vec0, sum_high_32); + + i += 16; + } + + uint32_t sum = 0; + + sum += vaddvq_u32(res_vec0); + sum += vaddvq_u32(res_vec1); + sum += vaddvq_u32(res_vec2); + sum += vaddvq_u32(res_vec3); + + // Process the last few elements manually + int j = l; + while (j < size) + { + sum += (uint32_t)(a[j]) * (uint32_t)(b[j]); + j++; + } + + *res = sum; +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/dot_byte_avx256.c b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/dot_byte_avx256.c new file mode 100644 index 0000000000000000000000000000000000000000..97bab3febf017fc6f2f16310c42c600652ac249f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/dot_byte_avx256.c @@ 
-0,0 +1,78 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2023 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +#include +#include + +void dot_byte_256(unsigned char *a, unsigned char *b, unsigned int *res, long *len) +{ + int n = *len; + + // fast path for small dimensions + if (n < 32) + { + long acc = 0; + for (int i = 0; i < n; i++) + { + acc += (unsigned int)(a[i]) * (unsigned int)(b[i]); + } + + *res = acc; + return; + } + + __m256i acc = _mm256_setzero_si256(); + + int i; + // Process 32 bytes at a time + for (i = 0; i + 31 < n; i += 32) + { + __m256i vec_a, vec_b; + + // Load 32 bytes + vec_a = _mm256_loadu_si256((const __m256i *)(a + i)); + vec_b = _mm256_loadu_si256((const __m256i *)(b + i)); + + // Create two registries for vector a + __m256i a_high = _mm256_srli_epi16(vec_a, 8); // arithmetic right shift + __m256i a_low = _mm256_bslli_epi128(vec_a, 1); // left 1 byte = low to high in each 16-bit element + a_low = _mm256_srli_epi16(a_low, 8); // arithmetic right shift + + // Create two registries for vector b + __m256i b_high = _mm256_srli_epi16(vec_b, 8); + __m256i b_low = _mm256_bslli_epi128(vec_b, 1); + b_low = _mm256_srli_epi16(b_low, 8); + + __m256i prod_hi = _mm256_madd_epi16(a_high, b_high); + __m256i prod_lo = _mm256_madd_epi16(a_low, b_low); + + __m256i quadsum = _mm256_add_epi32(prod_lo, prod_hi); + + acc = _mm256_add_epi32(acc, quadsum); + } + + // Reduce + __m128i acc_low = _mm256_extracti128_si256(acc, 0); + __m128i acc_high = _mm256_extracti128_si256(acc, 1); + __m128i acc128 = _mm_add_epi32(acc_low, acc_high); + acc128 = _mm_add_epi32(acc128, _mm_shuffle_epi32(acc128, _MM_SHUFFLE(0, 1, 2, 3))); + acc128 = _mm_add_epi32(acc128, _mm_shuffle_epi32(acc128, _MM_SHUFFLE(0, 0, 0, 1))); + + unsigned int result = _mm_extract_epi32(acc128, 0); + + // Tail + for 
(; i < n; i++) + { + result += (unsigned int)(a[i]) * (unsigned int)(b[i]); + } + + *res = result; +} \ No newline at end of file diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/dot_float_byte_arm64.c b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/dot_float_byte_arm64.c new file mode 100644 index 0000000000000000000000000000000000000000..ac86f885dcec4e05e09b9ae477be61a3e7366091 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/dot_float_byte_arm64.c @@ -0,0 +1,47 @@ +#include + +// Dot product between float and byte arrays +// Due to limitations with goat, it can only handle arrays of multiple of 16 elements. +// i.e len % 16 == 0 and len >= 16 +void dot_float_byte_neon(float *a, unsigned char *b, float *res, long *len) +{ + int n = *len; + float sum = 0; + + // Create 4*4 registers to store the result + float32x4_t res_vec = vdupq_n_f32(0); + + // Vectorized loop + while (n >= 16) + { + float32x4_t a_vec0 = vld1q_f32(a); + float32x4_t a_vec1 = vld1q_f32(a + 4); + float32x4_t a_vec2 = vld1q_f32(a + 8); + float32x4_t a_vec3 = vld1q_f32(a + 12); + + uint8x16_t byte_vector = vld1q_u8(b); + uint16x8_t byte_vector_low = vmovl_u8(vget_low_u8(byte_vector)); + uint16x8_t byte_vector_high = vmovl_u8(vget_high_u8(byte_vector)); + + float32x4_t b_vec0 = vcvtq_f32_u32(vmovl_u16(vget_low_u16(byte_vector_low))); + float32x4_t b_vec1 = vcvtq_f32_u32(vmovl_u16(vget_high_u16(byte_vector_low))); + float32x4_t b_vec2 = vcvtq_f32_u32(vmovl_u16(vget_low_u16(byte_vector_high))); + float32x4_t b_vec3 = vcvtq_f32_u32(vmovl_u16(vget_high_u16(byte_vector_high))); + + res_vec = vmlaq_f32(res_vec, a_vec0, b_vec0); + res_vec = vmlaq_f32(res_vec, a_vec1, b_vec1); + res_vec = vmlaq_f32(res_vec, a_vec2, b_vec2); + res_vec = vmlaq_f32(res_vec, a_vec3, b_vec3); + + n -= 16; + a += 16; + b += 16; + } + + // Horizontal add + float temp[4]; + vst1q_f32(temp, res_vec); + sum += 
temp[0] + temp[1] + temp[2] + temp[3]; + + *res = sum; +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/dot_float_byte_avx256.c b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/dot_float_byte_avx256.c new file mode 100644 index 0000000000000000000000000000000000000000..aeb48f3a34ad2795cfa98ba63dc360fded5b85ce --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/dot_float_byte_avx256.c @@ -0,0 +1,105 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2023 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +#include +#include + +void dot_float_byte_256(float *a, unsigned char *b, float *res, long *len) +{ + int n = *len; + float sum = 0; + + // fast path for small dimensions + if (n < 8) + { + do + { + sum += a[0] * (float)(b[0]); + n--; + a++; + b++; + } while (n); + + *res = sum; + return; + } + + // Create 4 registers to store the results + __m256 acc[4]; + acc[0] = _mm256_setzero_ps(); + acc[1] = _mm256_setzero_ps(); + acc[2] = _mm256_setzero_ps(); + acc[3] = _mm256_setzero_ps(); + + while (n >= 32) + { + // Unroll loop for 32 floats + __m256 a_vec0 = _mm256_loadu_ps(a); + __m256 a_vec1 = _mm256_loadu_ps(a + 8); + __m256 a_vec2 = _mm256_loadu_ps(a + 16); + __m256 a_vec3 = _mm256_loadu_ps(a + 24); + + // Unroll loop for 32 bytes + __m128i b_byte_vec0 = _mm_loadu_si128((__m128i *)b); + __m128i b_byte_vec1 = _mm_loadu_si128((__m128i *)(b + 8)); + __m128i b_byte_vec2 = _mm_loadu_si128((__m128i *)(b + 16)); + __m128i b_byte_vec3 = _mm_loadu_si128((__m128i *)(b + 24)); + // Convert to floats + __m256 b_vec0 = _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32(b_byte_vec0)); + __m256 b_vec1 = _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32(b_byte_vec1)); + __m256 b_vec2 = 
_mm256_cvtepi32_ps(_mm256_cvtepu8_epi32(b_byte_vec2)); + __m256 b_vec3 = _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32(b_byte_vec3)); + + acc[0] = _mm256_fmadd_ps(a_vec0, b_vec0, acc[0]); + acc[1] = _mm256_fmadd_ps(a_vec1, b_vec1, acc[1]); + acc[2] = _mm256_fmadd_ps(a_vec2, b_vec2, acc[2]); + acc[3] = _mm256_fmadd_ps(a_vec3, b_vec3, acc[3]); + + n -= 32; + a += 32; + b += 32; + } + + // Process 8 floats at a time + while (n >= 8) + { + __m256 a_vec0 = _mm256_loadu_ps(a); + __m128i b_byte_vec0 = _mm_loadl_epi64((__m128i *)b); + __m256 b_vec0 = _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32(b_byte_vec0)); + + acc[0] = _mm256_fmadd_ps(a_vec0, b_vec0, acc[0]); + + n -= 8; + a += 8; + b += 8; + } + + // Tail + while (n) + { + sum += a[0] * (float)(b[0]); + n--; + a++; + b++; + } + + // Reduce and store the result + acc[0] = _mm256_add_ps(acc[1], acc[0]); + acc[2] = _mm256_add_ps(acc[3], acc[2]); + acc[0] = _mm256_add_ps(acc[2], acc[0]); + __m256 t1 = _mm256_hadd_ps(acc[0], acc[0]); + __m256 t2 = _mm256_hadd_ps(t1, t1); + __m128 t3 = _mm256_extractf128_ps(t2, 1); + __m128 t4 = _mm_add_ps(_mm256_castps256_ps128(t2), t3); + sum += _mm_cvtss_f32(t4); + + *res = sum; +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/dot_neon_arm64.c b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/dot_neon_arm64.c new file mode 100644 index 0000000000000000000000000000000000000000..072309fe2a163b56a421b0f02e655a7ce929263b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/dot_neon_arm64.c @@ -0,0 +1,66 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +#include + +// dot only works with length >= 16 +void dot_neon(float *a, float *b, float *res, long *len) +{ + int size = *len; + + // use the vectorized version for the first n - (n % 4) elements + int l = size - (size % 4); + + // create 4*4 registers to store the result + float32x4_t res_vec0 = vdupq_n_f32(0); + float32x4_t res_vec1 = vdupq_n_f32(0); + float32x4_t res_vec2 = vdupq_n_f32(0); + float32x4_t res_vec3 = vdupq_n_f32(0); + + int i = 0; + + // load 4*4 floats at a time + while (i + 16 <= l) + { + float32x4x4_t a4 = vld1q_f32_x4(a + i); + float32x4x4_t b4 = vld1q_f32_x4(b + i); + + res_vec0 += vmulq_f32(a4.val[0], b4.val[0]); + res_vec1 += vmulq_f32(a4.val[1], b4.val[1]); + res_vec2 += vmulq_f32(a4.val[2], b4.val[2]); + res_vec3 += vmulq_f32(a4.val[3], b4.val[3]); + + i += 16; + } + + while (i < l) + { + float32x4_t a_vec = vld1q_f32(a + i); + float32x4_t b_vec = vld1q_f32(b + i); + res_vec0 += vmulq_f32(a_vec, b_vec); + + i += 4; + } + + // convert to scalar + float sum = vaddvq_f32(res_vec0); + sum += vaddvq_f32(res_vec1); + sum += vaddvq_f32(res_vec2); + sum += vaddvq_f32(res_vec3); + + // add the remaining vectors + for (int i = l; i < size; i++) + { + sum += a[i] * b[i]; + } + + res[0] = sum; +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/dot_sve_arm64.c b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/dot_sve_arm64.c new file mode 100644 index 0000000000000000000000000000000000000000..ca7a91eb9cd0682998ee3ac0a505207173c180c4 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/dot_sve_arm64.c @@ -0,0 +1,79 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +#include + +// dot_sve only works with length >= 16 +void dot_sve(float *a, float *b, float *res, long *len) +{ + uint64_t size = *len; + + uint64_t vsize = svcntw(); + uint64_t vsizex4 = vsize * 4; + + // use the vectorized version for the first n - (n % 4) elements + uint64_t l = size - (size % vsize); + + // create 4*4 registers to store the result + svfloat32_t res_vec0 = svdup_n_f32(0.0f); + svfloat32_t res_vec1 = svdup_n_f32(0.0f); + svfloat32_t res_vec2 = svdup_n_f32(0.0f); + svfloat32_t res_vec3 = svdup_n_f32(0.0f); + + svbool_t pred = svptrue_b32(); + + uint64_t i = 0; + + // load 4*vsize floats at a time + while (i + vsizex4 <= l) + { + svfloat32_t a0 = svld1_f32(pred, a + i); + svfloat32_t a1 = svld1_f32(pred, a + i + vsize); + svfloat32_t a2 = svld1_f32(pred, a + i + vsize * 2); + svfloat32_t a3 = svld1_f32(pred, a + i + vsize * 3); + svfloat32_t b0 = svld1_f32(pred, b + i); + svfloat32_t b1 = svld1_f32(pred, b + i + vsize); + svfloat32_t b2 = svld1_f32(pred, b + i + vsize * 2); + svfloat32_t b3 = svld1_f32(pred, b + i + vsize * 3); + + res_vec0 = svmad_f32_m(pred, a0, b0, res_vec0); + res_vec1 = svmad_f32_m(pred, a1, b1, res_vec1); + res_vec2 = svmad_f32_m(pred, a2, b2, res_vec2); + res_vec3 = svmad_f32_m(pred, a3, b3, res_vec3); + + i += vsizex4; + } + + while (i < l) + { + svfloat32_t a_vec = svld1_f32(pred, a + i); + svfloat32_t b_vec = svld1_f32(pred, b + i); + + res_vec0 = svmad_f32_x(pred, a_vec, b_vec, res_vec0); + + i += vsize; + } + + // reduce + float32_t sum = svaddv_f32(pred, res_vec0); + sum += svaddv_f32(pred, res_vec1); + sum += svaddv_f32(pred, res_vec2); + sum += svaddv_f32(pred, res_vec3); + + // add the remaining vectors + for (i = l; i < size; i++) + { + float32_t prod = a[i] * b[i]; + sum += prod; + } + + res[0] = sum; +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/hamming_arm64.c 
b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/hamming_arm64.c new file mode 100644 index 0000000000000000000000000000000000000000..426ec2460d3aee3a024769b858cf228358dfd0e5 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/hamming_arm64.c @@ -0,0 +1,75 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +#include + +// hamming only works with length >= 16 +void hamming(float *a, float *b, float *res, long *len) +{ + int size = *len; + + // use the vectorized version for the first n - (n % 4) elements + int l = size - (size % 4); + + // create 4*4 registers to store the result + uint32x4_t res_vec0 = vdupq_n_u32(0); + uint32x4_t res_vec1 = vdupq_n_u32(0); + uint32x4_t res_vec2 = vdupq_n_u32(0); + uint32x4_t res_vec3 = vdupq_n_u32(0); + + int i = 0; + + uint32x4_t imr_1 = vdupq_n_u32(0); + uint32x4_t imr_2 = vdupq_n_u32(0); + uint32x4_t imr_3 = vdupq_n_u32(0); + uint32x4_t imr_4 = vdupq_n_u32(0); + + // load 4*4 floats at a time + while (i + 16 <= l) + { + float32x4x4_t a4 = vld1q_f32_x4(a + i); + float32x4x4_t b4 = vld1q_f32_x4(b + i); + + res_vec0 -= vreinterpretq_s32_f32(vceqq_f32(a4.val[0], b4.val[0])); + res_vec1 -= vreinterpretq_s32_f32(vceqq_f32(a4.val[1], b4.val[1])); + res_vec2 -= vreinterpretq_s32_f32(vceqq_f32(a4.val[2], b4.val[2])); + res_vec3 -= vreinterpretq_s32_f32(vceqq_f32(a4.val[3], b4.val[3])); + + i += 16; + } + + while (i < l) + { + float32x4_t a_vec = vld1q_f32(a + i); + float32x4_t b_vec = vld1q_f32(b + i); + res_vec0 -= vreinterpretq_s32_f32(vceqq_f32(a_vec, b_vec)); + + i += 4; + } + + // convert to f32 implicitly + int32_t sum = size; + sum -= vaddvq_u32(res_vec0); + sum -= vaddvq_u32(res_vec1); + sum -= vaddvq_u32(res_vec2); 
+ sum -= vaddvq_u32(res_vec3); + + // add the remaining vectors + for (int i = l; i < size; i++) + { + if (a[i] == b[i]) + { + sum--; + } + } + + res[0] = sum; +} \ No newline at end of file diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/hamming_avx256_amd64.c b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/hamming_avx256_amd64.c new file mode 100644 index 0000000000000000000000000000000000000000..2b9fd86a2663fa03ad5668f2e08550a8d91c2e8d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/hamming_avx256_amd64.c @@ -0,0 +1,144 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2023 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +#include + +void hamming_256(float *a, float *b, float *res, long *len) +{ + int n = *len; + int sum = 0; + + // fast path for small dimensions + if (n < 8) + { + do + { + sum += a[0] != b[0] ? 
1 : 0; + n--; + a++; + b++; + } while (n); + + *res = sum; + return; + } + + // Create 4 registers to store the results + __m256i acc[4]; + acc[0] = _mm256_setzero_si256(); + acc[1] = _mm256_setzero_si256(); + acc[2] = _mm256_setzero_si256(); + acc[3] = _mm256_setzero_si256(); + + __m256i ones_256 = _mm256_set1_epi32(1); + __m256i zeros_256 = _mm256_setzero_si256(); + + __m256i blend1_256 = _mm256_setzero_si256(); + __m256i blend2_256 = _mm256_setzero_si256(); + __m256i blend3_256 = _mm256_setzero_si256(); + __m256i blend4_256 = _mm256_setzero_si256(); + + __m256 cmp_result_1 = _mm256_setzero_ps(); + __m256 cmp_result_2 = _mm256_setzero_ps(); + __m256 cmp_result_3 = _mm256_setzero_ps(); + __m256 cmp_result_4 = _mm256_setzero_ps(); + + __m256i cmp_result_i_1 = _mm256_setzero_si256(); + __m256i cmp_result_i_2 = _mm256_setzero_si256(); + __m256i cmp_result_i_3 = _mm256_setzero_si256(); + __m256i cmp_result_i_4 = _mm256_setzero_si256(); + + while (n >= 32) + { + // Unroll loop for 32 floats + __m256 a_vec0 = _mm256_loadu_ps(a); + __m256 a_vec1 = _mm256_loadu_ps(a + 8); + __m256 a_vec2 = _mm256_loadu_ps(a + 16); + __m256 a_vec3 = _mm256_loadu_ps(a + 24); + + __m256 b_vec0 = _mm256_loadu_ps(b); + __m256 b_vec1 = _mm256_loadu_ps(b + 8); + __m256 b_vec2 = _mm256_loadu_ps(b + 16); + __m256 b_vec3 = _mm256_loadu_ps(b + 24); + + cmp_result_1 = _mm256_cmp_ps(a_vec0, b_vec0, _CMP_NEQ_OQ); + cmp_result_2 = _mm256_cmp_ps(a_vec1, b_vec1, _CMP_NEQ_OQ); + cmp_result_3 = _mm256_cmp_ps(a_vec2, b_vec2, _CMP_NEQ_OQ); + cmp_result_4 = _mm256_cmp_ps(a_vec3, b_vec3, _CMP_NEQ_OQ); + + cmp_result_i_1 = _mm256_castps_si256(cmp_result_1); + cmp_result_i_2 = _mm256_castps_si256(cmp_result_2); + cmp_result_i_3 = _mm256_castps_si256(cmp_result_3); + cmp_result_i_4 = _mm256_castps_si256(cmp_result_4); + + blend1_256 = _mm256_blendv_epi8(zeros_256, ones_256, cmp_result_i_1); + blend2_256 = _mm256_blendv_epi8(zeros_256, ones_256, cmp_result_i_2); + blend3_256 = _mm256_blendv_epi8(zeros_256, 
ones_256, cmp_result_i_3); + blend4_256 = _mm256_blendv_epi8(zeros_256, ones_256, cmp_result_i_4); + + acc[0] = _mm256_add_epi32(acc[0], blend1_256); + acc[1] = _mm256_add_epi32(acc[1], blend2_256); + acc[2] = _mm256_add_epi32(acc[2], blend3_256); + acc[3] = _mm256_add_epi32(acc[3], blend4_256); + + n -= 32; + a += 32; + b += 32; + } + + // Process 8 floats at a time + while (n >= 8) + { + __m256 a_vec0 = _mm256_loadu_ps(a); + __m256 b_vec0 = _mm256_loadu_ps(b); + + // Perform comparison. _CMP_NEQ_OQ checks for not-equal (ordered, non-signaling) + cmp_result_1 = _mm256_cmp_ps(a_vec0, b_vec0, _CMP_NEQ_OQ); + + // Cast the comparison result to integer type to use with blendv + cmp_result_i_1 = _mm256_castps_si256(cmp_result_1); + + // Blend based on the comparison result. Note that blendv uses the MSB of each byte. + blend1_256 = _mm256_blendv_epi8(zeros_256, ones_256, cmp_result_i_1); + + // Accumulate the result + + acc[0] = _mm256_add_epi32(acc[0], blend1_256); + + n -= 8; + a += 8; + b += 8; + } + + // Tail + while (n) + { + if (a[0] != b[0]) + { + sum++; + } + n--; + a++; + b++; + } + + // Reduce and store the result + acc[0] = _mm256_add_epi32(acc[1], acc[0]); + acc[2] = _mm256_add_epi32(acc[3], acc[2]); + acc[0] = _mm256_add_epi32(acc[2], acc[0]); + __m256 t1 = _mm256_hadd_epi32(acc[0], acc[0]); + __m256 t2 = _mm256_hadd_epi32(t1, t1); + __m128i t3 = _mm256_extracti128_si256(t2, 1); // Extract the high 128 bits as integer vector + __m128i t4 = _mm_add_epi32(_mm256_castsi256_si128(t2), t3); // Add two __m128i vectors + sum += _mm_extract_epi32(t4, 0); + + *res = sum; +} \ No newline at end of file diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/hamming_avx512_amd64.c b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/hamming_avx512_amd64.c new file mode 100644 index 0000000000000000000000000000000000000000..f969e6194ffb22d5741f5fba42621d8eaf4701e9 --- /dev/null +++ 
b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/hamming_avx512_amd64.c @@ -0,0 +1,239 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2023 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +#include + +void hamming_512(float *a, float *b, float *res, long *len) +{ + int n = *len; + int sum = 0; + + // fast path for small dimensions + if (n < 8) + { + do + { + sum += a[0] != b[0] ? 1 : 0; + n--; + a++; + b++; + } while (n); + + *res = sum; + return; + } + + __mmask16 mask0 = 0; + __mmask16 mask1 = 0; + __mmask16 mask2 = 0; + __mmask16 mask3 = 0; + __mmask16 mask4 = 0; + __mmask16 mask5 = 0; + __mmask16 mask6 = 0; + __mmask16 mask7 = 0; + + __m512i ones = _mm512_set1_epi32(1); + __m512i zeros = _mm512_setzero_si512(); + + __m512 blend0 = _mm512_setzero_ps(); + __m512 blend1 = _mm512_setzero_ps(); + __m512 blend2 = _mm512_setzero_ps(); + __m512 blend3 = _mm512_setzero_ps(); + __m512 blend4 = _mm512_setzero_ps(); + __m512 blend5 = _mm512_setzero_ps(); + __m512 blend6 = _mm512_setzero_ps(); + __m512 blend7 = _mm512_setzero_ps(); + + // Create 4 registers to store the results + __m256i acc[4]; + acc[0] = _mm256_setzero_si256(); + acc[1] = _mm256_setzero_si256(); + acc[2] = _mm256_setzero_si256(); + acc[3] = _mm256_setzero_si256(); + + if (n >= 128) + { + __m512i acc5[8]; + acc5[0] = _mm512_setzero_si512(); + acc5[1] = _mm512_setzero_si512(); + acc5[2] = _mm512_setzero_si512(); + acc5[3] = _mm512_setzero_si512(); + acc5[4] = _mm512_setzero_si512(); + acc5[5] = _mm512_setzero_si512(); + acc5[6] = _mm512_setzero_si512(); + acc5[7] = _mm512_setzero_si512(); + + // Process 128 floats at a time + do + { + __m512 a_vec0 = _mm512_loadu_ps(a); + __m512 a_vec1 = _mm512_loadu_ps(a + 16); + __m512 a_vec2 = _mm512_loadu_ps(a + 32); + __m512 a_vec3 = _mm512_loadu_ps(a 
+ 48); + __m512 a_vec4 = _mm512_loadu_ps(a + 64); + __m512 a_vec5 = _mm512_loadu_ps(a + 80); + __m512 a_vec6 = _mm512_loadu_ps(a + 96); + __m512 a_vec7 = _mm512_loadu_ps(a + 112); + + __m512 b_vec0 = _mm512_loadu_ps(b); + __m512 b_vec1 = _mm512_loadu_ps(b + 16); + __m512 b_vec2 = _mm512_loadu_ps(b + 32); + __m512 b_vec3 = _mm512_loadu_ps(b + 48); + __m512 b_vec4 = _mm512_loadu_ps(b + 64); + __m512 b_vec5 = _mm512_loadu_ps(b + 80); + __m512 b_vec6 = _mm512_loadu_ps(b + 96); + __m512 b_vec7 = _mm512_loadu_ps(b + 112); + + mask0 = _mm512_cmp_ps_mask(a_vec0, b_vec0, _CMP_NEQ_OQ); + mask1 = _mm512_cmp_ps_mask(a_vec1, b_vec1, _CMP_NEQ_OQ); + mask2 = _mm512_cmp_ps_mask(a_vec2, b_vec2, _CMP_NEQ_OQ); + mask3 = _mm512_cmp_ps_mask(a_vec3, b_vec3, _CMP_NEQ_OQ); + mask4 = _mm512_cmp_ps_mask(a_vec4, b_vec4, _CMP_NEQ_OQ); + mask5 = _mm512_cmp_ps_mask(a_vec5, b_vec5, _CMP_NEQ_OQ); + mask6 = _mm512_cmp_ps_mask(a_vec6, b_vec6, _CMP_NEQ_OQ); + mask7 = _mm512_cmp_ps_mask(a_vec7, b_vec7, _CMP_NEQ_OQ); + + blend0 = _mm512_mask_blend_epi32(mask7, zeros, ones); + blend1 = _mm512_mask_blend_epi32(mask0, zeros, ones); + blend2 = _mm512_mask_blend_epi32(mask1, zeros, ones); + blend3 = _mm512_mask_blend_epi32(mask2, zeros, ones); + blend4 = _mm512_mask_blend_epi32(mask3, zeros, ones); + blend5 = _mm512_mask_blend_epi32(mask4, zeros, ones); + blend6 = _mm512_mask_blend_epi32(mask5, zeros, ones); + blend7 = _mm512_mask_blend_epi32(mask6, zeros, ones); + + acc5[0] = _mm512_add_epi32(acc5[0], blend0); + acc5[0] = _mm512_add_epi32(acc5[0], blend1); + acc5[1] = _mm512_add_epi32(acc5[1], blend2); + acc5[2] = _mm512_add_epi32(acc5[2], blend3); + acc5[3] = _mm512_add_epi32(acc5[3], blend4); + acc5[4] = _mm512_add_epi32(acc5[4], blend5); + acc5[5] = _mm512_add_epi32(acc5[5], blend6); + acc5[6] = _mm512_add_epi32(acc5[6], blend7); + + n -= 128; + a += 128; + b += 128; + } while (n >= 128); + + acc5[0] = _mm512_add_epi32(acc5[1], acc5[0]); + acc5[2] = _mm512_add_epi32(acc5[3], acc5[2]); + acc5[4] = 
_mm512_add_epi32(acc5[5], acc5[4]); + acc5[6] = _mm512_add_epi32(acc5[7], acc5[6]); + acc5[0] = _mm512_add_epi32(acc5[2], acc5[0]); + acc5[4] = _mm512_add_epi32(acc5[6], acc5[4]); + acc5[0] = _mm512_add_epi32(acc5[4], acc5[0]); + + __m256i low = _mm512_castsi512_si256(acc5[0]); + __m256i high = _mm512_extracti32x8_epi32(acc5[0], 1); + + acc[0] = _mm256_add_epi32(low, acc[0]); + acc[0] = _mm256_add_epi32(high, acc[0]); + + if (!n) + { + // Reduce and store the result + acc[0] = _mm256_add_epi32(acc[1], acc[0]); + acc[2] = _mm256_add_epi32(acc[3], acc[2]); + acc[0] = _mm256_add_epi32(acc[2], acc[0]); + + __m256i t1 = _mm256_hadd_epi32(acc[0], acc[0]); + __m256i t2 = _mm256_hadd_epi32(t1, t1); + __m128i t3 = _mm256_extracti128_si256(t2, 1); // Extract the high 128 bits as integer vector + __m128i t4 = _mm_add_epi32(_mm256_castsi256_si128(t2), t3); // Add two __m128i vectors + sum += _mm_extract_epi32(t4, 0); + + *res = sum; + return; + } + } + + __m256i ones_256 = _mm256_set1_epi32(1); + __m256i zeros_256 = _mm256_setzero_si256(); // Vector of zeros + // Use mask to blend 1s and 0s: 1s where comparison is true, 0s elsewhere + __m256i blend0_256 = _mm256_setzero_si256(); + __m256i blend1_256 = _mm256_setzero_si256(); + __m256i blend2_256 = _mm256_setzero_si256(); + __m256i blend3_256 = _mm256_setzero_si256(); + + while (n >= 32) + { + // Unroll loop for 32 floats + __m256 a_vec0 = _mm256_loadu_ps(a); + __m256 a_vec1 = _mm256_loadu_ps(a + 8); + __m256 a_vec2 = _mm256_loadu_ps(a + 16); + __m256 a_vec3 = _mm256_loadu_ps(a + 24); + + __m256 b_vec0 = _mm256_loadu_ps(b); + __m256 b_vec1 = _mm256_loadu_ps(b + 8); + __m256 b_vec2 = _mm256_loadu_ps(b + 16); + __m256 b_vec3 = _mm256_loadu_ps(b + 24); + + mask0 = _mm256_cmp_ps_mask(a_vec0, b_vec0, _CMP_NEQ_OQ); + mask1 = _mm256_cmp_ps_mask(a_vec1, b_vec1, _CMP_NEQ_OQ); + mask2 = _mm256_cmp_ps_mask(a_vec2, b_vec2, _CMP_NEQ_OQ); + mask3 = _mm256_cmp_ps_mask(a_vec3, b_vec3, _CMP_NEQ_OQ); + + blend0_256 = 
_mm256_mask_blend_epi32(mask0, zeros_256, ones_256); + blend1_256 = _mm256_mask_blend_epi32(mask1, zeros_256, ones_256); + blend2_256 = _mm256_mask_blend_epi32(mask2, zeros_256, ones_256); + blend3_256 = _mm256_mask_blend_epi32(mask3, zeros_256, ones_256); + + acc[0] = _mm256_add_epi32(acc[0], blend0_256); + acc[1] = _mm256_add_epi32(acc[1], blend1_256); + acc[2] = _mm256_add_epi32(acc[2], blend2_256); + acc[3] = _mm256_add_epi32(acc[3], blend3_256); + + n -= 32; + a += 32; + b += 32; + } + + // Process 8 floats at a time + while (n >= 8) + { + __m256 a_vec0 = _mm256_loadu_ps(a); + __m256 b_vec0 = _mm256_loadu_ps(b); + + mask0 = _mm256_cmp_ps_mask(a_vec0, b_vec0, _CMP_NEQ_OQ); + + blend1_256 = _mm256_mask_blend_epi32(mask0, zeros_256, ones_256); + acc[0] = _mm256_add_epi32(acc[0], blend1_256); + + n -= 8; + a += 8; + b += 8; + } + + // Tail + while (n) + { + if (a[0] != b[0]) + { + sum++; + } + n--; + a++; + b++; + } + + // Reduce and store the result + acc[0] = _mm256_add_epi32(acc[1], acc[0]); + acc[2] = _mm256_add_epi32(acc[3], acc[2]); + acc[0] = _mm256_add_epi32(acc[2], acc[0]); + __m256 t1 = _mm256_hadd_epi32(acc[0], acc[0]); + __m256 t2 = _mm256_hadd_epi32(t1, t1); + __m128i t3 = _mm256_extracti128_si256(t2, 1); // Extract the high 128 bits as integer vector + __m128i t4 = _mm_add_epi32(_mm256_castsi256_si128(t2), t3); // Add two __m128i vectors + sum += _mm_extract_epi32(t4, 0); + + *res = sum; +} \ No newline at end of file diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/hamming_bitwise_arm64.c b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/hamming_bitwise_arm64.c new file mode 100644 index 0000000000000000000000000000000000000000..9f1eb92a632fcc0763d9087c3ddb40897f7511cd --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/hamming_bitwise_arm64.c @@ -0,0 +1,74 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ 
/ / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// +#include + +// hamming only works with length >= 16 + +void hamming_bitwise(unsigned long long *a, unsigned long long *b, + unsigned long long *res, long *len) { + int size = *len; + + // use the vectorized version for the first n - (n % 4) elements + int l = size - (size % 4); + + // create 4*4 registers to store the result + uint32x4_t res_vec0 = vdupq_n_u32(0); + uint32x4_t res_vec1 = vdupq_n_u32(0); + uint32x4_t res_vec2 = vdupq_n_u32(0); + uint32x4_t res_vec3 = vdupq_n_u32(0); + + int i = 0; + + // load 2*4 uint64s at a time + while (i + 8 <= l) { + uint64x2x4_t a4 = vld1q_u64_x4(a + i); + uint64x2x4_t b4 = vld1q_u64_x4(b + i); + + res_vec0 += vpaddlq_u16(vpaddlq_u8( + vcntq_u8(vreinterpretq_u8_u64(veorq_u64(a4.val[0], b4.val[0]))))); + + res_vec1 += vpaddlq_u16(vpaddlq_u8( + vcntq_u8(vreinterpretq_u8_u64(veorq_u64(a4.val[1], b4.val[1]))))); + + res_vec2 += vpaddlq_u16(vpaddlq_u8( + vcntq_u8(vreinterpretq_u8_u64(veorq_u64(a4.val[2], b4.val[2]))))); + + res_vec3 += vpaddlq_u16(vpaddlq_u8( + vcntq_u8(vreinterpretq_u8_u64(veorq_u64(a4.val[3], b4.val[3]))))); + + i += 8; + } + + while (i < l) { + uint64x2_t a4 = vld1q_u64(a + i); + uint64x2_t b4 = vld1q_u64(b + i); + + res_vec0 += vpaddlq_u16( + vpaddlq_u8(vcntq_u8(vreinterpretq_u8_u64(veorq_u64(a4, b4))))); + + i += 2; + } + + // convert to f32 implicitly + int32_t sum = 0; + sum += vaddvq_u32(res_vec0); + sum += vaddvq_u32(res_vec1); + sum += vaddvq_u32(res_vec2); + sum += vaddvq_u32(res_vec3); + + // add the remaining vectors + for (int i = l; i < size; i++) { + uint64_t xor_result = a[i] ^ b[i]; + sum += __builtin_popcountll(xor_result); + } + + res[0] = sum; +} \ No newline at end of file diff --git 
a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/hamming_bitwise_avx256_amd64.c b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/hamming_bitwise_avx256_amd64.c new file mode 100644 index 0000000000000000000000000000000000000000..5c1bc518f3d4f703d99d0dbf96e627f349b376e6 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/hamming_bitwise_avx256_amd64.c @@ -0,0 +1,127 @@ +#include +#include +#include +#include +#include + +static inline uint64_t popcnt_AVX2_lookup(__m256i *vec, __m256i *low_mask_vec, + __m256i *lookup_vec) { + + size_t i = 0; + + __m256i acc = _mm256_setzero_si256(); + + const __m256i lo = _mm256_and_si256(*vec, *low_mask_vec); + const __m256i hi = + _mm256_and_si256(_mm256_srli_epi16(*vec, 4), *low_mask_vec); + const __m256i popcnt1 = _mm256_shuffle_epi8(*lookup_vec, lo); + const __m256i popcnt2 = _mm256_shuffle_epi8(*lookup_vec, hi); + __m256i local = _mm256_setzero_si256(); + local = _mm256_add_epi8(local, popcnt1); + local = _mm256_add_epi8(local, popcnt2); + + acc = _mm256_add_epi64(acc, _mm256_sad_epu8(local, _mm256_setzero_si256())); + + uint64_t result = 0; + + result += (uint64_t)(_mm256_extract_epi64(acc, 0)); + result += (uint64_t)(_mm256_extract_epi64(acc, 1)); + result += (uint64_t)(_mm256_extract_epi64(acc, 2)); + result += (uint64_t)(_mm256_extract_epi64(acc, 3)); + + return result; +} + +static inline uint64_t popcnt_64bit(uint64_t *src, uint64_t *popcnt_constants) { + uint64_t x = *src; + x = (x & popcnt_constants[0]) + ((x >> 1) & popcnt_constants[0]); + x = (x & popcnt_constants[1]) + ((x >> 2) & popcnt_constants[1]); + x = (x & popcnt_constants[2]) + ((x >> 4) & popcnt_constants[2]); + return (x * popcnt_constants[3]) >> 56; +} + +void hamming_bitwise_256(uint64_t *a, uint64_t *b, uint64_t *res, long *len, + uint8_t *lookup_avx, uint64_t *popcnt_constants) { + + int n = *len; + + __m256i lookup_vec = 
_mm256_loadu_si256((__m256i *)lookup_avx); + __m256i low_mask_vec = _mm256_set1_epi64x(popcnt_constants[4]); + + uint64_t sum = 0; + + // fast path for small dimensions + if (n < 8) { + do { + uint64_t xor = a[0] ^ b[0]; + sum += popcnt_64bit(&xor, popcnt_constants); + + n--; + a++; + b++; + } while (n); + + *res = sum; + return; + } + + __m256i zeros_256 = _mm256_setzero_si256(); + + size_t size = 256 / 8; + + while (n >= 16) { + __m256i a_vec0 = _mm256_loadu_si256((__m256i const *)a); + __m256i a_vec1 = _mm256_loadu_si256((__m256i const *)(a + 4)); + __m256i a_vec2 = _mm256_loadu_si256((__m256i const *)(a + 8)); + __m256i a_vec3 = _mm256_loadu_si256((__m256i const *)(a + 12)); + + __m256i b_vec0 = _mm256_loadu_si256((__m256i const *)b); + __m256i b_vec1 = _mm256_loadu_si256((__m256i const *)(b + 4)); + __m256i b_vec2 = _mm256_loadu_si256((__m256i const *)(b + 8)); + __m256i b_vec3 = _mm256_loadu_si256((__m256i const *)(b + 12)); + + __m256i cmp_result_1 = _mm256_xor_si256(a_vec0, b_vec0); + __m256i cmp_result_2 = _mm256_xor_si256(a_vec1, b_vec1); + __m256i cmp_result_3 = _mm256_xor_si256(a_vec2, b_vec2); + __m256i cmp_result_4 = _mm256_xor_si256(a_vec3, b_vec3); + + uint64_t *p1 = (uint64_t *)&cmp_result_1; + uint64_t *p2 = (uint64_t *)&cmp_result_2; + uint64_t *p3 = (uint64_t *)&cmp_result_3; + uint64_t *p4 = (uint64_t *)&cmp_result_4; + + sum += popcnt_AVX2_lookup(&cmp_result_1, &low_mask_vec, &lookup_vec) + + popcnt_AVX2_lookup(&cmp_result_2, &low_mask_vec, &lookup_vec) + + popcnt_AVX2_lookup(&cmp_result_3, &low_mask_vec, &lookup_vec) + + popcnt_AVX2_lookup(&cmp_result_4, &low_mask_vec, &lookup_vec); + + n -= 16; + a += 16; + b += 16; + } + + while (n >= 4) { + __m256i a_vec0 = _mm256_loadu_si256((__m256i const *)a); + __m256i b_vec0 = _mm256_loadu_si256((__m256i const *)b); + + __m256i cmp_result_1 = _mm256_xor_si256(a_vec0, b_vec0); + + uint64_t *p1 = (uint64_t *)&cmp_result_1; + + sum += popcnt_AVX2_lookup(&cmp_result_1, &low_mask_vec, &lookup_vec); + n -= 
4; + a += 4; + b += 4; + } + + while (n) { + uint64_t xor = a[0] ^ b[0]; + sum += popcnt_64bit(&xor, popcnt_constants); + n--; + a++; + b++; + } + + *res = sum; + return; +} \ No newline at end of file diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/hamming_bitwise_avx512_amd64.c b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/hamming_bitwise_avx512_amd64.c new file mode 100644 index 0000000000000000000000000000000000000000..6171331f041936b82ec77612e9116020f67688c8 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/hamming_bitwise_avx512_amd64.c @@ -0,0 +1,248 @@ +#include +#include +#include +#include +#include + +inline uint64_t popcnt_AVX2_lookup_512(__m256i *vec, __m256i *low_mask_vec, + __m256i *lookup_vec) { + + size_t i = 0; + + __m256i acc = _mm256_setzero_si256(); + + const __m256i lo = _mm256_and_si256(*vec, *low_mask_vec); + const __m256i hi = + _mm256_and_si256(_mm256_srli_epi16(*vec, 4), *low_mask_vec); + const __m256i popcnt1 = _mm256_shuffle_epi8(*lookup_vec, lo); + const __m256i popcnt2 = _mm256_shuffle_epi8(*lookup_vec, hi); + __m256i local = _mm256_setzero_si256(); + local = _mm256_add_epi8(local, popcnt1); + local = _mm256_add_epi8(local, popcnt2); + + acc = _mm256_add_epi64(acc, _mm256_sad_epu8(local, _mm256_setzero_si256())); + + uint64_t result = 0; + + result += (uint64_t)(_mm256_extract_epi64(acc, 0)); + result += (uint64_t)(_mm256_extract_epi64(acc, 1)); + result += (uint64_t)(_mm256_extract_epi64(acc, 2)); + result += (uint64_t)(_mm256_extract_epi64(acc, 3)); + + return result; +} + +void popcount(__m512i *v_ptr, __m512i *result, uint64_t *constants_avx512) { + __m512i v = *v_ptr; + const __m512i m1 = _mm512_set1_epi64(constants_avx512[0]); + const __m512i m2 = _mm512_set1_epi64((constants_avx512[1])); + const __m512i m4 = _mm512_set1_epi64(constants_avx512[2]); + + const __m512i t1 = _mm512_sub_epi8(v, 
(_mm512_srli_epi16(v, 1) & m1)); + const __m512i t2 = _mm512_add_epi8(t1 & m2, (_mm512_srli_epi16(t1, 2) & m2)); + const __m512i t3 = _mm512_add_epi8(t2, _mm512_srli_epi16(t2, 4)) & m4; + *result = _mm512_sad_epu8(t3, _mm512_setzero_si512()); +} + +void CSA(__m512i *h, __m512i *l, __m512i *a_ptr, __m512i *b_ptr, + __m512i *c_ptr) { + __m512i a = *a_ptr; + __m512i b = *b_ptr; + __m512i c = *c_ptr; + *l = _mm512_ternarylogic_epi32(c, b, a, 0x96); + *h = _mm512_ternarylogic_epi32(c, b, a, 0xe8); +} + +uint64_t simd_sum_epu64_256(__m256i *v_ptr) { + + __m256i v = *v_ptr; + + return (uint64_t)(_mm256_extract_epi64(v, 0)) + + (uint64_t)(_mm256_extract_epi64(v, 1)) + + (uint64_t)(_mm256_extract_epi64(v, 2)) + + (uint64_t)(_mm256_extract_epi64(v, 3)); +} + +uint64_t simd_sum_epu64_512(__m512i *v_ptr) { + + __m512i v = *v_ptr; + + __m256i lo = _mm512_extracti64x4_epi64(v, 0); + __m256i hi = _mm512_extracti64x4_epi64(v, 1); + + return simd_sum_epu64_256(&lo) + simd_sum_epu64_256(&hi); +} + +uint64_t popcnt_AVX512_harleyseal(__m512i *data, uint64_t *size_ptr, + uint64_t *constants_avx512) { + + uint64_t size = *size_ptr; + + __m512i total = _mm512_setzero_si512(); + __m512i ones = _mm512_setzero_si512(); + __m512i twos = _mm512_setzero_si512(); + __m512i fours = _mm512_setzero_si512(); + __m512i eights = _mm512_setzero_si512(); + __m512i sixteens = _mm512_setzero_si512(); + __m512i twosA, twosB, foursA, foursB, eightsA, eightsB; + const uint64_t limit = size - size % 16; + uint64_t i = 0; + + for (; i < limit; i += 16) { + + CSA(&twosA, &ones, &ones, &data[i + 0], &data[i + 1]); + CSA(&twosB, &ones, &ones, &data[i + 2], &data[i + 3]); + CSA(&foursA, &twos, &twos, &twosA, &twosB); + CSA(&twosA, &ones, &ones, &data[i + 4], &data[i + 5]); + CSA(&twosB, &ones, &ones, &data[i + 6], &data[i + 7]); + CSA(&foursB, &twos, &twos, &twosA, &twosB); + CSA(&eightsA, &fours, &fours, &foursA, &foursB); + CSA(&twosA, &ones, &ones, &data[i + 8], &data[i + 9]); + CSA(&twosB, &ones, &ones, 
&data[i + 10], &data[i + 11]); + CSA(&foursA, &twos, &twos, &twosA, &twosB); + CSA(&twosA, &ones, &ones, &data[i + 12], &data[i + 13]); + CSA(&twosB, &ones, &ones, &data[i + 14], &data[i + 15]); + CSA(&foursB, &twos, &twos, &twosA, &twosB); + CSA(&eightsB, &fours, &fours, &foursA, &foursB); + CSA(&sixteens, &eights, &eights, &eightsA, &eightsB); + __m512i popcount_sixteens = _mm512_setzero_si512(); + popcount(&sixteens, &popcount_sixteens, constants_avx512); + total = _mm512_add_epi64(total, popcount_sixteens); + } + + __m512i popcount_eights = _mm512_setzero_si512(); + popcount(&eights, &popcount_eights, constants_avx512); + __m512i popcount_fours = _mm512_setzero_si512(); + popcount(&fours, &popcount_fours, constants_avx512); + __m512i popcount_twos = _mm512_setzero_si512(); + popcount(&twos, &popcount_twos, constants_avx512); + __m512i popcount_ones = _mm512_setzero_si512(); + popcount(&ones, &popcount_ones, constants_avx512); + + total = _mm512_slli_epi64(total, 4); // * 16 + total = _mm512_add_epi64(total, + _mm512_slli_epi64(popcount_eights, 3)); // += 8 * ... + total = _mm512_add_epi64(total, + _mm512_slli_epi64(popcount_fours, 2)); // += 4 * ... + total = _mm512_add_epi64(total, + _mm512_slli_epi64(popcount_twos, 1)); // += 2 * ... 
+ total = _mm512_add_epi64(total, popcount_ones); + + for (; i < size; i++) { + __m512i result = _mm512_setzero_si512(); + popcount(&data[i], &result, constants_avx512); + total = _mm512_add_epi64(total, result); + } + + return simd_sum_epu64_512(&total); +} + +static inline uint64_t popcnt_64bit_512(uint64_t *src, + uint64_t *popcnt_constants) { + uint64_t x = *src; + x = (x & popcnt_constants[0]) + ((x >> 1) & popcnt_constants[0]); + x = (x & popcnt_constants[1]) + ((x >> 2) & popcnt_constants[1]); + x = (x & popcnt_constants[2]) + ((x >> 4) & popcnt_constants[2]); + return (x * popcnt_constants[3]) >> 56; +} + +void hamming_bitwise_512(uint64_t *a, uint64_t *b, uint64_t *res, long *len, + uint64_t *popcnt_constants) { + + int n = *len; + + uint64_t sum = 0; + + // fast path for small dimensions + if (n < 8) { + do { + uint64_t xor = a[0] ^ b[0]; + sum += popcnt_64bit_512(&xor, popcnt_constants); + + n--; + a++; + b++; + } while (n); + + *res = sum; + return; + } + + __m256i zeros_256 = _mm256_setzero_si256(); + + // process 128 uint64s at a time + while (n >= 128) { + + size_t size = 16; + + __m512i a_vec0 = _mm512_loadu_si512((__m512i const *)a); + __m512i a_vec1 = _mm512_loadu_si512((__m512i const *)(a + 8)); + __m512i a_vec2 = _mm512_loadu_si512((__m512i const *)(a + 16)); + __m512i a_vec3 = _mm512_loadu_si512((__m512i const *)(a + 24)); + __m512i a_vec4 = _mm512_loadu_si512((__m512i const *)(a + 32)); + __m512i a_vec5 = _mm512_loadu_si512((__m512i const *)(a + 40)); + __m512i a_vec6 = _mm512_loadu_si512((__m512i const *)(a + 48)); + __m512i a_vec7 = _mm512_loadu_si512((__m512i const *)(a + 56)); + __m512i a_vec8 = _mm512_loadu_si512((__m512i const *)(a + 64)); + __m512i a_vec9 = _mm512_loadu_si512((__m512i const *)(a + 72)); + __m512i a_vec10 = _mm512_loadu_si512((__m512i const *)(a + 80)); + __m512i a_vec11 = _mm512_loadu_si512((__m512i const *)(a + 88)); + __m512i a_vec12 = _mm512_loadu_si512((__m512i const *)(a + 96)); + __m512i a_vec13 = 
_mm512_loadu_si512((__m512i const *)(a + 104)); + __m512i a_vec14 = _mm512_loadu_si512((__m512i const *)(a + 112)); + __m512i a_vec15 = _mm512_loadu_si512((__m512i const *)(a + 120)); + + __m512i b_vec0 = _mm512_loadu_si512((__m512i const *)b); + __m512i b_vec1 = _mm512_loadu_si512((__m512i const *)(b + 8)); + __m512i b_vec2 = _mm512_loadu_si512((__m512i const *)(b + 16)); + __m512i b_vec3 = _mm512_loadu_si512((__m512i const *)(b + 24)); + __m512i b_vec4 = _mm512_loadu_si512((__m512i const *)(b + 32)); + __m512i b_vec5 = _mm512_loadu_si512((__m512i const *)(b + 40)); + __m512i b_vec6 = _mm512_loadu_si512((__m512i const *)(b + 48)); + __m512i b_vec7 = _mm512_loadu_si512((__m512i const *)(b + 56)); + __m512i b_vec8 = _mm512_loadu_si512((__m512i const *)(b + 64)); + __m512i b_vec9 = _mm512_loadu_si512((__m512i const *)(b + 72)); + __m512i b_vec10 = _mm512_loadu_si512((__m512i const *)(b + 80)); + __m512i b_vec11 = _mm512_loadu_si512((__m512i const *)(b + 88)); + __m512i b_vec12 = _mm512_loadu_si512((__m512i const *)(b + 96)); + __m512i b_vec13 = _mm512_loadu_si512((__m512i const *)(b + 104)); + __m512i b_vec14 = _mm512_loadu_si512((__m512i const *)(b + 112)); + __m512i b_vec15 = _mm512_loadu_si512((__m512i const *)(b + 120)); + + __m512i cmp_results[16]; + + cmp_results[0] = _mm512_xor_si512(a_vec0, b_vec0); + cmp_results[1] = _mm512_xor_si512(a_vec1, b_vec1); + cmp_results[2] = _mm512_xor_si512(a_vec2, b_vec2); + cmp_results[3] = _mm512_xor_si512(a_vec3, b_vec3); + cmp_results[4] = _mm512_xor_si512(a_vec4, b_vec4); + cmp_results[5] = _mm512_xor_si512(a_vec5, b_vec5); + cmp_results[6] = _mm512_xor_si512(a_vec6, b_vec6); + cmp_results[7] = _mm512_xor_si512(a_vec7, b_vec7); + cmp_results[8] = _mm512_xor_si512(a_vec8, b_vec8); + cmp_results[9] = _mm512_xor_si512(a_vec9, b_vec9); + cmp_results[10] = _mm512_xor_si512(a_vec10, b_vec10); + cmp_results[11] = _mm512_xor_si512(a_vec11, b_vec11); + cmp_results[12] = _mm512_xor_si512(a_vec12, b_vec12); + cmp_results[13] = 
_mm512_xor_si512(a_vec13, b_vec13); + cmp_results[14] = _mm512_xor_si512(a_vec14, b_vec14); + cmp_results[15] = _mm512_xor_si512(a_vec15, b_vec15); + + sum += popcnt_AVX512_harleyseal(cmp_results, &size, popcnt_constants); + + n -= 128; + a += 128; + b += 128; + } + + while (n) { + uint64_t xor = a[0] ^ b[0]; + sum += popcnt_64bit_512(&xor, popcnt_constants); + n--; + a++; + b++; + } + + *res = sum; + return; +} \ No newline at end of file diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/l2_avx256_amd64.c b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/l2_avx256_amd64.c new file mode 100644 index 0000000000000000000000000000000000000000..498cf76f2293335bc5a4bc55d7a30acdb39767f0 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/l2_avx256_amd64.c @@ -0,0 +1,107 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2023 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +#include + +void l2_256(float *a, float *b, float *res, long *len) +{ + int n = *len; + float sum = 0; + + // fast path for small dimensions + if (n < 8) + { + do + { + float diff = a[0] - b[0]; + float sq = diff * diff; + sum += sq; + n--; + a++; + b++; + } while (n); + + *res = sum; + return; + } + + // Create 4 registers to store the results + __m256 acc[4]; + acc[0] = _mm256_setzero_ps(); + acc[1] = _mm256_setzero_ps(); + acc[2] = _mm256_setzero_ps(); + acc[3] = _mm256_setzero_ps(); + + while (n >= 32) + { + // Unroll loop for 32 floats + __m256 a_vec0 = _mm256_loadu_ps(a); + __m256 a_vec1 = _mm256_loadu_ps(a + 8); + __m256 a_vec2 = _mm256_loadu_ps(a + 16); + __m256 a_vec3 = _mm256_loadu_ps(a + 24); + + __m256 b_vec0 = _mm256_loadu_ps(b); + __m256 b_vec1 = _mm256_loadu_ps(b + 8); + __m256 b_vec2 = _mm256_loadu_ps(b + 16); + __m256 b_vec3 = _mm256_loadu_ps(b + 24); + + __m256 diff0 = _mm256_sub_ps(a_vec0, b_vec0); + __m256 diff1 = _mm256_sub_ps(a_vec1, b_vec1); + __m256 diff2 = _mm256_sub_ps(a_vec2, b_vec2); + __m256 diff3 = _mm256_sub_ps(a_vec3, b_vec3); + + acc[0] = _mm256_fmadd_ps(diff0, diff0, acc[0]); + acc[1] = _mm256_fmadd_ps(diff1, diff1, acc[1]); + acc[2] = _mm256_fmadd_ps(diff2, diff2, acc[2]); + acc[3] = _mm256_fmadd_ps(diff3, diff3, acc[3]); + + n -= 32; + a += 32; + b += 32; + } + + // Process 8 floats at a time + while (n >= 8) + { + __m256 a_vec0 = _mm256_loadu_ps(a); + __m256 b_vec0 = _mm256_loadu_ps(b); + __m256 diff0 = _mm256_sub_ps(a_vec0, b_vec0); + + acc[0] = _mm256_fmadd_ps(diff0, diff0, acc[0]); + + n -= 8; + a += 8; + b += 8; + } + + // Tail + while (n) + { + float diff = a[0] - b[0]; + float sq = diff * diff; + sum += sq; + n--; + a++; + b++; + } + + // Reduce and store the result + acc[0] = _mm256_add_ps(acc[1], acc[0]); + acc[2] = _mm256_add_ps(acc[3], acc[2]); + acc[0] = _mm256_add_ps(acc[2], acc[0]); + __m256 t1 = _mm256_hadd_ps(acc[0], acc[0]); + __m256 t2 = _mm256_hadd_ps(t1, t1); + __m128 
t3 = _mm256_extractf128_ps(t2, 1); + __m128 t4 = _mm_add_ps(_mm256_castps256_ps128(t2), t3); + sum += _mm_cvtss_f32(t4); + + *res = sum; +} \ No newline at end of file diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/l2_avx512_amd64.c b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/l2_avx512_amd64.c new file mode 100644 index 0000000000000000000000000000000000000000..24b24bfb40512824dafa7fb4b18d05fe78058d02 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/l2_avx512_amd64.c @@ -0,0 +1,196 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2023 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +#include + +void l2_512(float *a, float *b, float *res, long *len) +{ + int n = *len; + float sum = 0; + + // fast path for small dimensions + if (n < 8) + { + do + { + float diff = a[0] - b[0]; + float sq = diff * diff; + sum += sq; + n--; + a++; + b++; + } while (n); + + *res = sum; + return; + } + + // Create 4 registers to store the results + __m256 acc[4]; + acc[0] = _mm256_setzero_ps(); + acc[1] = _mm256_setzero_ps(); + acc[2] = _mm256_setzero_ps(); + acc[3] = _mm256_setzero_ps(); + + if (n >= 128) + { + // create 8 registers + __m512 acc5[8]; + acc5[0] = _mm512_setzero_ps(); + acc5[1] = _mm512_setzero_ps(); + acc5[2] = _mm512_setzero_ps(); + acc5[3] = _mm512_setzero_ps(); + acc5[4] = _mm512_setzero_ps(); + acc5[5] = _mm512_setzero_ps(); + acc5[6] = _mm512_setzero_ps(); + acc5[7] = _mm512_setzero_ps(); + + // Process 128 floats at a time + do + { + __m512 a_vec0 = _mm512_loadu_ps(a); + __m512 a_vec1 = _mm512_loadu_ps(a + 16); + __m512 a_vec2 = _mm512_loadu_ps(a + 32); + __m512 a_vec3 = _mm512_loadu_ps(a + 48); + __m512 a_vec4 = _mm512_loadu_ps(a + 64); + __m512 
a_vec5 = _mm512_loadu_ps(a + 80); + __m512 a_vec6 = _mm512_loadu_ps(a + 96); + __m512 a_vec7 = _mm512_loadu_ps(a + 112); + + __m512 b_vec0 = _mm512_loadu_ps(b); + __m512 b_vec1 = _mm512_loadu_ps(b + 16); + __m512 b_vec2 = _mm512_loadu_ps(b + 32); + __m512 b_vec3 = _mm512_loadu_ps(b + 48); + __m512 b_vec4 = _mm512_loadu_ps(b + 64); + __m512 b_vec5 = _mm512_loadu_ps(b + 80); + __m512 b_vec6 = _mm512_loadu_ps(b + 96); + __m512 b_vec7 = _mm512_loadu_ps(b + 112); + + __m512 diff0 = _mm512_sub_ps(a_vec0, b_vec0); + __m512 diff1 = _mm512_sub_ps(a_vec1, b_vec1); + __m512 diff2 = _mm512_sub_ps(a_vec2, b_vec2); + __m512 diff3 = _mm512_sub_ps(a_vec3, b_vec3); + __m512 diff4 = _mm512_sub_ps(a_vec4, b_vec4); + __m512 diff5 = _mm512_sub_ps(a_vec5, b_vec5); + __m512 diff6 = _mm512_sub_ps(a_vec6, b_vec6); + __m512 diff7 = _mm512_sub_ps(a_vec7, b_vec7); + + acc5[0] = _mm512_fmadd_ps(diff0, diff0, acc5[0]); + acc5[1] = _mm512_fmadd_ps(diff1, diff1, acc5[1]); + acc5[2] = _mm512_fmadd_ps(diff2, diff2, acc5[2]); + acc5[3] = _mm512_fmadd_ps(diff3, diff3, acc5[3]); + acc5[4] = _mm512_fmadd_ps(diff4, diff4, acc5[4]); + acc5[5] = _mm512_fmadd_ps(diff5, diff5, acc5[5]); + acc5[6] = _mm512_fmadd_ps(diff6, diff6, acc5[6]); + acc5[7] = _mm512_fmadd_ps(diff7, diff7, acc5[7]); + + n -= 128; + a += 128; + b += 128; + } while (n >= 128); + + acc5[0] = _mm512_add_ps(acc5[1], acc5[0]); + acc5[2] = _mm512_add_ps(acc5[3], acc5[2]); + acc5[4] = _mm512_add_ps(acc5[5], acc5[4]); + acc5[6] = _mm512_add_ps(acc5[7], acc5[6]); + acc5[0] = _mm512_add_ps(acc5[2], acc5[0]); + acc5[4] = _mm512_add_ps(acc5[6], acc5[4]); + acc5[0] = _mm512_add_ps(acc5[4], acc5[0]); + + __m256 low = _mm512_castps512_ps256(acc5[0]); + __m256 high = _mm256_castpd_ps(_mm512_extractf64x4_pd(_mm512_castps_pd(acc5[0]), 1)); + + acc[0] = _mm256_add_ps(low, acc[0]); + acc[0] = _mm256_add_ps(high, acc[0]); + + if (!n) + { + // Reduce and store the result + acc[0] = _mm256_add_ps(acc[1], acc[0]); + acc[2] = _mm256_add_ps(acc[3], acc[2]); + 
acc[0] = _mm256_add_ps(acc[2], acc[0]); + + __m256 t1 = _mm256_hadd_ps(acc[0], acc[0]); + __m256 t2 = _mm256_hadd_ps(t1, t1); + __m128 t3 = _mm256_extractf128_ps(t2, 1); + __m128 t4 = _mm_add_ps(_mm256_castps256_ps128(t2), t3); + sum += _mm_cvtss_f32(t4); + + *res = sum; + return; + } + } + + while (n >= 32) + { + // Unroll loop for 32 floats + __m256 a_vec0 = _mm256_loadu_ps(a); + __m256 a_vec1 = _mm256_loadu_ps(a + 8); + __m256 a_vec2 = _mm256_loadu_ps(a + 16); + __m256 a_vec3 = _mm256_loadu_ps(a + 24); + + __m256 b_vec0 = _mm256_loadu_ps(b); + __m256 b_vec1 = _mm256_loadu_ps(b + 8); + __m256 b_vec2 = _mm256_loadu_ps(b + 16); + __m256 b_vec3 = _mm256_loadu_ps(b + 24); + + __m256 diff0 = _mm256_sub_ps(a_vec0, b_vec0); + __m256 diff1 = _mm256_sub_ps(a_vec1, b_vec1); + __m256 diff2 = _mm256_sub_ps(a_vec2, b_vec2); + __m256 diff3 = _mm256_sub_ps(a_vec3, b_vec3); + + acc[0] = _mm256_fmadd_ps(diff0, diff0, acc[0]); + acc[1] = _mm256_fmadd_ps(diff1, diff1, acc[1]); + acc[2] = _mm256_fmadd_ps(diff2, diff2, acc[2]); + acc[3] = _mm256_fmadd_ps(diff3, diff3, acc[3]); + + n -= 32; + a += 32; + b += 32; + } + + // Process 8 floats at a time + while (n >= 8) + { + __m256 a_vec0 = _mm256_loadu_ps(a); + __m256 b_vec0 = _mm256_loadu_ps(b); + __m256 diff0 = _mm256_sub_ps(a_vec0, b_vec0); + + acc[0] = _mm256_fmadd_ps(diff0, diff0, acc[0]); + + n -= 8; + a += 8; + b += 8; + } + + // Tail + while (n) + { + float diff = a[0] - b[0]; + float sq = diff * diff; + sum += sq; + n--; + a++; + b++; + } + + // Reduce and store the result + acc[0] = _mm256_add_ps(acc[1], acc[0]); + acc[2] = _mm256_add_ps(acc[3], acc[2]); + acc[0] = _mm256_add_ps(acc[2], acc[0]); + __m256 t1 = _mm256_hadd_ps(acc[0], acc[0]); + __m256 t2 = _mm256_hadd_ps(t1, t1); + __m128 t3 = _mm256_extractf128_ps(t2, 1); + __m128 t4 = _mm_add_ps(_mm256_castps256_ps128(t2), t3); + sum += _mm_cvtss_f32(t4); + + *res = sum; +} \ No newline at end of file diff --git 
a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/l2_byte_avx256_amd64.c b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/l2_byte_avx256_amd64.c new file mode 100644 index 0000000000000000000000000000000000000000..31a963efb8d9daecaabd21ea8309ddd704f9682f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/l2_byte_avx256_amd64.c @@ -0,0 +1,79 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2023 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +#include +#include + +void l2_byte_256(unsigned char *a, unsigned char *b, unsigned int *res, long *len) +{ + int n = *len; + + // Fast path for small dimensions + if (n < 32) + { + long acc = 0; + for (int i = 0; i < n; i++) + { + long diff = a[i] - b[i]; + acc += diff * diff; + } + + *res = acc; + return; + } + + __m256i acc = _mm256_setzero_si256(); + + int i; + // Process 32 bytes at a time + for (i = 0; i + 31 < n; i += 32) + { + // Load 32 bytes + __m256i vec_a = _mm256_loadu_si256((__m256i_u*)(a + i)); + __m256i vec_b = _mm256_loadu_si256((__m256i_u*)(b + i)); + + // Unpack 8 to 16 bits + __m256i va_lo = _mm256_unpacklo_epi8(vec_a, _mm256_setzero_si256()); + __m256i vb_lo = _mm256_unpacklo_epi8(vec_b, _mm256_setzero_si256()); + __m256i va_hi = _mm256_unpackhi_epi8(vec_a, _mm256_setzero_si256()); + __m256i vb_hi = _mm256_unpackhi_epi8(vec_b, _mm256_setzero_si256()); + + // Diff on high and low bits + __m256i diff_lo = _mm256_sub_epi16(va_lo, vb_lo); + __m256i diff_hi = _mm256_sub_epi16(va_hi, vb_hi); + + // Square the diffs + __m256i sq_diff_lo = _mm256_madd_epi16(diff_lo, diff_lo); + __m256i sq_diff_hi = _mm256_madd_epi16(diff_hi, diff_hi); + + // Accumulate the results + acc = _mm256_add_epi32(acc, sq_diff_lo); + acc = 
_mm256_add_epi32(acc, sq_diff_hi); + } + + // Reduce + __m128i acc_low = _mm256_extracti128_si256(acc, 0); + __m128i acc_high = _mm256_extracti128_si256(acc, 1); + __m128i acc128 = _mm_add_epi32(acc_low, acc_high); + acc128 = _mm_add_epi32(acc128, _mm_shuffle_epi32(acc128, _MM_SHUFFLE(0, 1, 2, 3))); + acc128 = _mm_add_epi32(acc128, _mm_shuffle_epi32(acc128, _MM_SHUFFLE(0, 0, 0, 1))); + + int result = _mm_extract_epi32(acc128, 0); + + // Tail + for (; i < n; i++) + { + long diff = a[i] - b[i]; + result += diff * diff; + } + + *res = result; +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/l2_float_byte_avx256.c b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/l2_float_byte_avx256.c new file mode 100644 index 0000000000000000000000000000000000000000..c22cb415429a999c13eb62375c9fb8c6ae28cf94 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/l2_float_byte_avx256.c @@ -0,0 +1,115 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2023 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +#include +#include + +void l2_float_byte_256(float *a, unsigned char *b, float *res, long *len) +{ + int n = *len; + float sum = 0; + + // fast path for small dimensions + if (n < 8) + { + do + { + float diff = a[0] - (float)(b[0]); + float sq = diff * diff; + sum += sq; + n--; + a++; + b++; + } while (n); + + *res = sum; + return; + } + + // Create 4 registers to store the results + __m256 acc[4]; + acc[0] = _mm256_setzero_ps(); + acc[1] = _mm256_setzero_ps(); + acc[2] = _mm256_setzero_ps(); + acc[3] = _mm256_setzero_ps(); + + while (n >= 32) + { + // Unroll loop for 32 floats + __m256 a_vec0 = _mm256_loadu_ps(a); + __m256 a_vec1 = _mm256_loadu_ps(a + 8); + __m256 a_vec2 = _mm256_loadu_ps(a + 16); + __m256 a_vec3 = _mm256_loadu_ps(a + 24); + + // Unroll loop for 32 bytes + __m128i b_byte_vec0 = _mm_loadu_si128((__m128i *)b); + __m128i b_byte_vec1 = _mm_loadu_si128((__m128i *)(b + 8)); + __m128i b_byte_vec2 = _mm_loadu_si128((__m128i *)(b + 16)); + __m128i b_byte_vec3 = _mm_loadu_si128((__m128i *)(b + 24)); + // Convert to floats + __m256 b_vec0 = _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32(b_byte_vec0)); + __m256 b_vec1 = _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32(b_byte_vec1)); + __m256 b_vec2 = _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32(b_byte_vec2)); + __m256 b_vec3 = _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32(b_byte_vec3)); + + __m256 diff0 = _mm256_sub_ps(a_vec0, b_vec0); + __m256 diff1 = _mm256_sub_ps(a_vec1, b_vec1); + __m256 diff2 = _mm256_sub_ps(a_vec2, b_vec2); + __m256 diff3 = _mm256_sub_ps(a_vec3, b_vec3); + + acc[0] = _mm256_fmadd_ps(diff0, diff0, acc[0]); + acc[1] = _mm256_fmadd_ps(diff1, diff1, acc[1]); + acc[2] = _mm256_fmadd_ps(diff2, diff2, acc[2]); + acc[3] = _mm256_fmadd_ps(diff3, diff3, acc[3]); + + n -= 32; + a += 32; + b += 32; + } + + // Process 8 floats at a time + while (n >= 8) + { + __m256 a_vec0 = _mm256_loadu_ps(a); + __m128i b_byte_vec0 = _mm_loadl_epi64((__m128i *)b); + __m256 b_vec0 = 
_mm256_cvtepi32_ps(_mm256_cvtepu8_epi32(b_byte_vec0)); + __m256 diff0 = _mm256_sub_ps(a_vec0, b_vec0); + + acc[0] = _mm256_fmadd_ps(diff0, diff0, acc[0]); + + n -= 8; + a += 8; + b += 8; + } + + // Tail + while (n) + { + float diff = a[0] - b[0]; + float sq = diff * diff; + sum += sq; + n--; + a++; + b++; + } + + // Reduce and store the result + acc[0] = _mm256_add_ps(acc[1], acc[0]); + acc[2] = _mm256_add_ps(acc[3], acc[2]); + acc[0] = _mm256_add_ps(acc[2], acc[0]); + __m256 t1 = _mm256_hadd_ps(acc[0], acc[0]); + __m256 t2 = _mm256_hadd_ps(t1, t1); + __m128 t3 = _mm256_extractf128_ps(t2, 1); + __m128 t4 = _mm_add_ps(_mm256_castps256_ps128(t2), t3); + sum += _mm_cvtss_f32(t4); + + *res = sum; +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/l2_neon_arm64.c b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/l2_neon_arm64.c new file mode 100644 index 0000000000000000000000000000000000000000..47edc80976c9700d3e8c151525b0230a38ed6151 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/l2_neon_arm64.c @@ -0,0 +1,73 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
//
// CONTACT: hello@weaviate.io
//

#include

// l2_neon computes the squared L2 distance between the float vectors a
// and b (*len elements, caller guarantees *len >= 16) with NEON and
// stores the result in res[0].
void l2_neon(float *a, float *b, float *res, long *len)
{
    int n = *len;

    // NEON handles the first n - (n % 4) elements.
    int vec_end = n - (n % 4);

    // Four independent accumulators hide multiply/add latency.
    float32x4_t acc0 = vdupq_n_f32(0);
    float32x4_t acc1 = vdupq_n_f32(0);
    float32x4_t acc2 = vdupq_n_f32(0);
    float32x4_t acc3 = vdupq_n_f32(0);

    int idx = 0;

    // Blocks of 16 floats (4 registers at a time).
    for (; idx + 16 <= vec_end; idx += 16)
    {
        float32x4x4_t va = vld1q_f32_x4(a + idx);
        float32x4x4_t vb = vld1q_f32_x4(b + idx);

        float32x4_t d0 = vsubq_f32(va.val[0], vb.val[0]);
        float32x4_t d1 = vsubq_f32(va.val[1], vb.val[1]);
        float32x4_t d2 = vsubq_f32(va.val[2], vb.val[2]);
        float32x4_t d3 = vsubq_f32(va.val[3], vb.val[3]);

        acc0 = vaddq_f32(acc0, vmulq_f32(d0, d0));
        acc1 = vaddq_f32(acc1, vmulq_f32(d1, d1));
        acc2 = vaddq_f32(acc2, vmulq_f32(d2, d2));
        acc3 = vaddq_f32(acc3, vmulq_f32(d3, d3));
    }

    // Single-register blocks of 4 floats.
    for (; idx < vec_end; idx += 4)
    {
        float32x4_t d = vsubq_f32(vld1q_f32(a + idx), vld1q_f32(b + idx));
        acc0 = vaddq_f32(acc0, vmulq_f32(d, d));
    }

    // Horizontal reduction to a scalar.
    float total = vaddvq_f32(acc0);
    total += vaddvq_f32(acc1);
    total += vaddvq_f32(acc2);
    total += vaddvq_f32(acc3);

    // Scalar tail (< 4 leftover elements).
    for (int k = vec_end; k < n; k++)
    {
        float d = a[k] - b[k];
        total += d * d;
    }

    res[0] = total;
}
\ No newline at end of file
diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/l2_neon_byte_arm64.c b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/l2_neon_byte_arm64.c
new file mode 100644
index 0000000000000000000000000000000000000000..56cd9ec01a0374d48b826e7b85bb640aebbb033f
--- /dev/null
+++
b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/l2_neon_byte_arm64.c
@@ -0,0 +1,112 @@
#include

// Widen-subtract one 16-byte chunk, square the 16-bit differences and
// fold them into acc as unsigned 32-bit partial sums. The square is
// guaranteed non-negative, so reinterpreting signed -> unsigned is safe.
static inline uint32x4_t sq_diff_accum(uint8x16_t x, uint8x16_t y, uint32x4_t acc)
{
    int16x8_t d_lo = vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(x), vget_low_u8(y)));
    int16x8_t d_hi = vreinterpretq_s16_u16(vsubl_u8(vget_high_u8(x), vget_high_u8(y)));

    int32x4_t sq_lo = vmull_s16(vget_low_s16(d_lo), vget_low_s16(d_lo));
    sq_lo += vmull_s16(vget_high_s16(d_lo), vget_high_s16(d_lo));
    int32x4_t sq_hi = vmull_s16(vget_low_s16(d_hi), vget_low_s16(d_hi));
    sq_hi += vmull_s16(vget_high_s16(d_hi), vget_high_s16(d_hi));

    acc += vreinterpretq_u32_s32(sq_lo);
    acc += vreinterpretq_u32_s32(sq_hi);
    return acc;
}

// l2_neon_byte_256 computes the squared L2 distance between two byte
// vectors of *len elements with NEON and writes the 32-bit sum to *res.
void l2_neon_byte_256(unsigned char *a, unsigned char *b, unsigned int *res, long *len)
{
    int n = *len;

    // NEON handles the first n - (n % 16) elements.
    int vec_end = n - (n % 16);

    uint32x4_t acc0 = vdupq_n_u32(0);
    uint32x4_t acc1 = vdupq_n_u32(0);
    uint32x4_t acc2 = vdupq_n_u32(0);
    uint32x4_t acc3 = vdupq_n_u32(0);

    int idx = 0;

    // 64 bytes per iteration, four independent accumulators.
    for (; idx + 64 <= vec_end; idx += 64)
    {
        uint8x16x4_t va = vld1q_u8_x4(a + idx);
        uint8x16x4_t vb = vld1q_u8_x4(b + idx);

        acc0 = sq_diff_accum(va.val[0], vb.val[0], acc0);
        acc1 = sq_diff_accum(va.val[1], vb.val[1], acc1);
        acc2 = sq_diff_accum(va.val[2], vb.val[2], acc2);
        acc3 = sq_diff_accum(va.val[3], vb.val[3], acc3);
    }

    // 16 bytes per iteration for the rest of the vectorizable range.
    for (; idx < vec_end; idx += 16)
    {
        acc0 = sq_diff_accum(vld1q_u8(a + idx), vld1q_u8(b + idx), acc0);
    }

    // Horizontal reduction to a scalar.
    uint32_t total = 0;
    total += vaddvq_u32(acc0);
    total += vaddvq_u32(acc1);
    total += vaddvq_u32(acc2);
    total += vaddvq_u32(acc3);

    // Scalar tail (< 16 leftover elements).
    for (int k = vec_end; k < n; k++)
    {
        int32_t d = (int32_t)(a[k]) - (int32_t)(b[k]);
        total += (uint32_t)(d * d);
    }

    *res = total;
}
diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/l2_sve_arm64.c b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/l2_sve_arm64.c
new file mode 100644
index 0000000000000000000000000000000000000000..62df2223cba76f5b34a3d4f52bfb7870423f94f8
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/c/l2_sve_arm64.c
@@ -0,0 +1,86 @@
//                           _       _
// __      _____  __ ___   ___  __ _| |_ ___
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
//  \ V  V /  __/ (_| |\ V /| | (_| | ||  __/
//   \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
//
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+// +// CONTACT: hello@weaviate.io +// + +#include + +// l2_sve only works with length >= 16 +void l2_sve(float *a, float *b, float *res, long *len) +{ + uint64_t size = *len; + + uint64_t vsize = svcntw(); + uint64_t vsizex4 = vsize * 4; + + // use the vectorized version for the first n - (n % 4) elements + uint64_t l = size - (size % vsize); + + // create 4*4 registers to store the result + svfloat32_t res_vec0 = svdup_n_f32(0.0f); + svfloat32_t res_vec1 = svdup_n_f32(0.0f); + svfloat32_t res_vec2 = svdup_n_f32(0.0f); + svfloat32_t res_vec3 = svdup_n_f32(0.0f); + + svbool_t pred = svptrue_b32(); + + uint64_t i = 0; + + // load 4*vsize floats at a time + while (i + vsizex4 <= l) + { + svfloat32_t a0 = svld1_f32(pred, a + i); + svfloat32_t a1 = svld1_f32(pred, a + i + vsize); + svfloat32_t a2 = svld1_f32(pred, a + i + vsize*2); + svfloat32_t a3 = svld1_f32(pred, a + i + vsize*3); + svfloat32_t b0 = svld1_f32(pred, b + i); + svfloat32_t b1 = svld1_f32(pred, b + i + vsize); + svfloat32_t b2 = svld1_f32(pred, b + i + vsize*2); + svfloat32_t b3 = svld1_f32(pred, b + i + vsize*3); + + svfloat32_t diff0 = svsub_f32_x(pred, a0, b0); + svfloat32_t diff1 = svsub_f32_x(pred, a1, b1); + svfloat32_t diff2 = svsub_f32_x(pred, a2, b2); + svfloat32_t diff3 = svsub_f32_x(pred, a3, b3); + + res_vec0 = svmla_f32_x(pred, res_vec0, diff0, diff0); + res_vec1 = svmla_f32_x(pred, res_vec1, diff1, diff1); + res_vec2 = svmla_f32_x(pred, res_vec2, diff2, diff2); + res_vec3 = svmla_f32_x(pred, res_vec3, diff3, diff3); + + i += vsizex4; + } + + while (i < l) + { + svfloat32_t a_vec = svld1_f32(pred, a + i); + svfloat32_t b_vec = svld1_f32(pred, b + i); + svfloat32_t diff = svsub_f32_x(pred, a_vec, b_vec); + res_vec0 = svmla_f32_x(pred, res_vec0, diff, diff); + + i += vsize; + } + + // reduce + float32_t sum = svaddv_f32(pred, res_vec0); + sum += svaddv_f32(pred, res_vec1); + sum += svaddv_f32(pred, res_vec2); + sum += svaddv_f32(pred, res_vec3); + + // add the remaining vectors + for (i = l; 
i < size; i++) + { + float32_t diff = a[i] - b[i]; + float32_t sq = diff * diff; + sum += sq; + } + + res[0] = sum; +} + diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/cosine_dist.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/cosine_dist.go new file mode 100644 index 0000000000000000000000000000000000000000..bc1931d6418fd3ce8ceab142ac2009590cb836f8 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/cosine_dist.go @@ -0,0 +1,79 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package distancer + +import ( + "github.com/pkg/errors" +) + +type CosineDistance struct { + a []float32 +} + +func (d *CosineDistance) Distance(b []float32) (float32, error) { + if len(d.a) != len(b) { + return 0, errors.Wrapf(ErrVectorLength, "%d vs %d", + len(d.a), len(b)) + } + + dist := 1 - dotProductImplementation(d.a, b) + + if dist < 0 { + return 0, nil + } + return dist, nil +} + +type CosineDistanceProvider struct{} + +func NewCosineDistanceProvider() CosineDistanceProvider { + return CosineDistanceProvider{} +} + +func (d CosineDistanceProvider) SingleDist(a, b []float32) (float32, error) { + if len(a) != len(b) { + return 0, errors.Wrapf(ErrVectorLength, "%d vs %d", + len(a), len(b)) + } + + prod := 1 - dotProductImplementation(a, b) + + if prod < 0 { + return 0, nil + } + return prod, nil +} + +func (d CosineDistanceProvider) Type() string { + return "cosine-dot" +} + +func (d CosineDistanceProvider) New(a []float32) Distancer { + return &CosineDistance{a: a} +} + +func (d CosineDistanceProvider) Step(x, y []float32) float32 { + var sum float32 + for i := range x { + sum += x[i] * y[i] + } + + return sum +} + +func (d 
CosineDistanceProvider) Wrap(x float32) float32 { + w := 1 - x + if w < 0 { + return 0 + } + return w +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/cosine_dist_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/cosine_dist_test.go new file mode 100644 index 0000000000000000000000000000000000000000..3761066dbd56c1fe87518efce5560b42007998c7 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/cosine_dist_test.go @@ -0,0 +1,148 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package distancer + +import ( + "math/rand" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestCosineDistancer(t *testing.T) { + t.Run("identical vectors", func(t *testing.T) { + vec1 := Normalize([]float32{0.1, 0.3, 0.7}) + vec2 := Normalize([]float32{0.1, 0.3, 0.7}) + expectedDistance := float32(0.0) + + dist, err := NewCosineDistanceProvider().New(vec1).Distance(vec2) + require.Nil(t, err) + + control, err := NewCosineDistanceProvider().SingleDist(vec1, vec2) + require.Nil(t, err) + assert.Equal(t, control, dist) + assert.Equal(t, expectedDistance, dist) + }) + + t.Run("different vectors, but identical angle", func(t *testing.T) { + vec1 := Normalize([]float32{0.1, 0.3, 0.7}) + vec2 := Normalize([]float32{0.2, 0.6, 1.4}) + expectedDistance := float32(0.0) + + dist, err := NewCosineDistanceProvider().New(vec1).Distance(vec2) + require.Nil(t, err) + + control, err := NewCosineDistanceProvider().SingleDist(vec1, vec2) + require.Nil(t, err) + assert.Equal(t, control, dist) + assert.Equal(t, expectedDistance, dist) + }) + + t.Run("different vectors", func(t *testing.T) { + vec1 
:= Normalize([]float32{0.1, 0.3, 0.7}) + vec2 := Normalize([]float32{0.2, 0.2, 0.2}) + expectedDistance := float32(0.173) + + dist, err := NewCosineDistanceProvider().New(vec1).Distance(vec2) + require.Nil(t, err) + + control, err := NewCosineDistanceProvider().SingleDist(vec1, vec2) + require.Nil(t, err) + assert.Equal(t, control, dist) + assert.InDelta(t, expectedDistance, dist, 0.01) + }) + + t.Run("opposite vectors", func(t *testing.T) { + // This is unique to cosine/angular distance. + vec1 := Normalize([]float32{0.1, 0.3, 0.7}) + vec2 := Normalize([]float32{-0.1, -0.3, -0.7}) + expectedDistance := float32(2) + + dist, err := NewCosineDistanceProvider().New(vec1).Distance(vec2) + require.Nil(t, err) + + control, err := NewCosineDistanceProvider().SingleDist(vec1, vec2) + require.Nil(t, err) + assert.Equal(t, control, dist) + assert.InDelta(t, expectedDistance, dist, 0.01) + }) +} + +func TestCosineDistancerStepbyStep(t *testing.T) { + t.Run("step by step equals SingleDist", func(t *testing.T) { + vec1 := Normalize([]float32{3, 4, 5}) + vec2 := Normalize([]float32{-3, -4, -5}) + + expectedDistance, err := NewCosineDistanceProvider().New(vec1).Distance(vec2) + require.Nil(t, err) + + distanceProvider := NewCosineDistanceProvider() + sum := float32(0.0) + for i := range vec1 { + sum += distanceProvider.Step([]float32{vec1[i]}, []float32{vec2[i]}) + } + control := distanceProvider.Wrap(sum) + + assert.Equal(t, control, expectedDistance) + }) +} + +func TestNoNegativeDistance(t *testing.T) { + dimensions := 1536 + vectors := make([][]float32, 20) + vectors[0] = make([]float32, dimensions) + for i := 0; i < dimensions; i++ { + vectors[0][i] = rand.Float32() - 0.5 + } + Normalize(vectors[0]) + for i := 1; i < len(vectors); i++ { + vectors[i] = make([]float32, dimensions) + for j := 0; j < dimensions; j++ { + vectors[i][j] = vectors[i][j] + (rand.Float32()-0.5)*0.00001 + } + Normalize(vectors[i]) + } + + t.Run("test single distance", func(t *testing.T) { + 
distanceProvider := NewCosineDistanceProvider() + for _, vec1 := range vectors { + for _, vec2 := range vectors { + dist, err := distanceProvider.SingleDist(vec1, vec2) + require.Nil(t, err) + assert.True(t, dist >= 0) + } + } + }) + + t.Run("test distancer", func(t *testing.T) { + distanceProvider := NewCosineDistanceProvider() + for _, vec1 := range vectors { + distancer := distanceProvider.New(vec1) + for _, vec2 := range vectors { + dist, err := distancer.Distance(vec2) + require.Nil(t, err) + assert.True(t, dist >= 0) + } + } + }) + + t.Run("test wrap", func(t *testing.T) { + distanceProvider := NewCosineDistanceProvider() + for _, vec1 := range vectors { + for _, vec2 := range vectors { + dist := distanceProvider.Wrap(distanceProvider.Step(vec1, vec2)) + assert.True(t, dist >= 0) + } + } + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/dot_product.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/dot_product.go new file mode 100644 index 0000000000000000000000000000000000000000..6946234a65c0129bb211acf631c9db3ce80d80ed --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/dot_product.go @@ -0,0 +1,98 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package distancer + +import ( + "github.com/pkg/errors" +) + +// can be set depending on architecture, e.g. pure go, AVX-enabled assembly, etc. +// Warning: This is not the dot product distance, but the pure product. +// +// This default will always work, regardless of architecture. An init function +// will overwrite it on amd64 if AVX is present. 
+var dotProductImplementation func(a, b []float32) float32 = func(a, b []float32) float32 { + var sum float32 + for i := range a { + sum += a[i] * b[i] + } + + return sum +} + +func DotProductFloatGo(a, b []float32) float32 { + return -dotProductGo[float32, float32](a, b) +} + +func DotProductByteGo(a, b []uint8) uint32 { + return dotProductGo[uint8, uint32](a, b) +} + +func dotProductGo[C uint8 | float32, T uint32 | float32](a, b []C) T { + var sum T + for i := range a { + sum += T(a[i]) * T(b[i]) + } + return sum +} + +type DotProduct struct { + a []float32 +} + +func (d *DotProduct) Distance(b []float32) (float32, error) { + if len(d.a) != len(b) { + return 0, errors.Wrapf(ErrVectorLength, "%d vs %d", + len(d.a), len(b)) + } + + dist := -dotProductImplementation(d.a, b) + return dist, nil +} + +type DotProductProvider struct{} + +func NewDotProductProvider() DotProductProvider { + return DotProductProvider{} +} + +func (d DotProductProvider) SingleDist(a, b []float32) (float32, error) { + if len(a) != len(b) { + return 0, errors.Wrapf(ErrVectorLength, "%d vs %d", + len(a), len(b)) + } + + prod := -dotProductImplementation(a, b) + + return prod, nil +} + +func (d DotProductProvider) Type() string { + return "dot" +} + +func (d DotProductProvider) New(a []float32) Distancer { + return &DotProduct{a: a} +} + +func (d DotProductProvider) Step(x, y []float32) float32 { + var sum float32 + for i := range x { + sum += x[i] * y[i] + } + + return sum +} + +func (d DotProductProvider) Wrap(x float32) float32 { + return -x +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/dot_product_amd64.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/dot_product_amd64.go new file mode 100644 index 0000000000000000000000000000000000000000..fad26bf3594b71b87c5b81d94893c61825194deb --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/dot_product_amd64.go @@ -0,0 +1,25 @@ 
+// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package distancer + +import ( + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer/asm" + "golang.org/x/sys/cpu" +) + +func init() { + if cpu.X86.HasAMXBF16 && cpu.X86.HasAVX512 { + dotProductImplementation = asm.DotAVX512 + } else if cpu.X86.HasAVX2 { + dotProductImplementation = asm.DotAVX256 + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/dot_product_amd64_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/dot_product_amd64_test.go new file mode 100644 index 0000000000000000000000000000000000000000..77aaf53963d1bf8368512658fdb7c904edebe101 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/dot_product_amd64_test.go @@ -0,0 +1,390 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package distancer + +import ( + "fmt" + "math" + "testing" + "unsafe" + + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer/asm" + "golang.org/x/sys/cpu" +) + +var dotByteImpl func(a, b []byte) uint32 = func(a, b []byte) uint32 { + var sum uint32 + + for i := range a { + sum += uint32(a[i]) * uint32(b[i]) + } + + return sum +} + +var dotFloatByteImpl func(a []float32, b []byte) float32 = func(a []float32, b []byte) float32 { + var sum float32 + + for i := range a { + sum += a[i] * float32(b[i]) + } + + return sum +} + +func testDotProductFixedValue(t *testing.T, size uint, dotFn func(x []float32, y []float32) float32) { + count := 100 + countFailed := 0 + for i := 0; i < count; i++ { + vec1 := make([]float32, size) + vec2 := make([]float32, size) + for i := range vec1 { + vec1[i] = 1 + vec2[i] = 1 + } + vec1 = Normalize(vec1) + vec2 = Normalize(vec2) + res := -dotFn(vec1, vec2) + if math.IsNaN(float64(res)) { + panic("NaN") + } + + resControl := DotProductFloatGo(vec1, vec2) + delta := float64(0.01) + diff := float64(resControl) - float64(res) + if diff < -delta || diff > delta { + countFailed++ + + fmt.Printf("run %d: match: %f != %f\n", i, resControl, res) + + t.Fail() + } + } + + fmt.Printf("total failed: %d\n", countFailed) +} + +func testDotProductRandomValue(t *testing.T, size uint, dotFn func(x []float32, y []float32) float32) { + r := getRandomSeed() + count := 100 + countFailed := 0 + + vec1s := make([][]float32, count) + vec2s := make([][]float32, count) + + for i := 0; i < count; i++ { + vec1 := make([]float32, size) + vec2 := make([]float32, size) + for j := range vec1 { + vec1[j] = r.Float32() + vec2[j] = r.Float32() + } + vec1s[i] = Normalize(vec1) + vec2s[i] = Normalize(vec2) + } + + for i := 0; i < count; i++ { + res := -dotFn(vec1s[i], vec2s[i]) + if math.IsNaN(float64(res)) { + panic("NaN") + } + + resControl := DotProductFloatGo(vec1s[i], vec2s[i]) + delta := float64(0.01) + diff := 
float64(resControl) - float64(res) + if diff < -delta || diff > delta { + countFailed++ + + fmt.Printf("run %d: match: %f != %f, %d\n", i, resControl, res, (unsafe.Pointer(&vec1s[i][0]))) + + t.Fail() + } + + } + fmt.Printf("total failed: %d\n", countFailed) +} + +func TestCompareDotProductImplementations(t *testing.T) { + sizes := []uint{ + 1, + 2, + 3, + 4, + 5, + 6, + 8, + 10, + 12, + 16, + 24, + 30, + 31, + 32, + 64, + 67, + 128, + 256, + 260, + 299, + 300, + 384, + 390, + 600, + 768, + 777, + 784, + 1024, + 1536, + } + + for _, size := range sizes { + t.Run(fmt.Sprintf("with size %d", size), func(t *testing.T) { + testDotProductFixedValue(t, size, asm.DotAVX256) + testDotProductRandomValue(t, size, asm.DotAVX256) + if cpu.X86.HasAVX512 { + testDotProductFixedValue(t, size, asm.DotAVX512) + testDotProductRandomValue(t, size, asm.DotAVX512) + } + }) + } +} + +func testDotProductByteFixedValue(t *testing.T, size uint, dotFn func(x []uint8, y []uint8) uint32) { + vec1 := make([]uint8, size) + vec2 := make([]uint8, size) + for i := range vec1 { + vec1[i] = 1 + vec2[i] = 1 + } + res := dotFn(vec1, vec2) + + resControl := dotByteImpl(vec1, vec2) + if uint32(resControl) != res { + t.Logf("for dim: %d -> want: %d, got: %d", size, resControl, res) + t.Fail() + } +} + +func testDotProductByteRandomValue(t *testing.T, size uint, dotFn func(x []byte, y []byte) uint32) { + r := getRandomSeed() + count := 100 + + vec1s := make([][]byte, count) + vec2s := make([][]byte, count) + + for i := 0; i < count; i++ { + vec1 := make([]byte, size) + vec2 := make([]byte, size) + for j := range vec1 { + vec1[j] = byte(r.Uint32() % 256) + vec2[j] = byte(r.Uint32() % 256) + } + + vec1s[i] = vec1 + vec2s[i] = vec2 + } + + for i := 0; i < count; i++ { + res := dotFn(vec1s[i], vec2s[i]) + + resControl := dotByteImpl(vec1s[i], vec2s[i]) + if resControl != res { + t.Logf("for dim: %d -> want: %d, got: %d", size, resControl, res) + t.Fail() + } + } +} + +func TestCompareDotProductByte(t 
*testing.T) { + sizes := []uint{ + 1, + 2, + 3, + 4, + 5, + 6, + 8, + 10, + 12, + 16, + 24, + 30, + 31, + 32, + 64, + 67, + 128, + 256, + 260, + 299, + 300, + 384, + 390, + 600, + 768, + 777, + 784, + 1024, + 1536, + } + + for _, size := range sizes { + t.Run(fmt.Sprintf("with size %d", size), func(t *testing.T) { + testDotProductByteFixedValue(t, size, asm.DotByteAVX256) + testDotProductByteRandomValue(t, size, asm.DotByteAVX256) + }) + } +} + +func benchmarkDotByte(b *testing.B, dims int, dotFn func(a, b []byte) uint32) { + r := getRandomSeed() + + vec1 := make([]byte, dims) + vec2 := make([]byte, dims) + for i := range vec1 { + vec1[i] = byte(r.Uint32() % 256) + vec2[i] = byte(r.Uint32() % 256) + } + + b.ResetTimer() + for n := 0; n < b.N; n++ { + dotFn(vec1, vec2) + } +} + +func BenchmarkDotByte(b *testing.B) { + dims := []int{2, 4, 6, 8, 10, 12, 16, 24, 30, 32, 128, 256, 300, 384, 512, 768, 1024, 1536} + for _, dim := range dims { + b.Run(fmt.Sprintf("%d dimensions", dim), func(b *testing.B) { + // benchmarkDotByte(b, dim, dotByteImpl) + benchmarkDotByte(b, dim, asm.DotByteAVX256) + + // b.Run("pure go", func(b *testing.B) { benchmarkDotByte(b, dim, dotByteImpl) }) + // b.Run("avx", func(b *testing.B) { benchmarkDotByte(b, dim, asm.DotByteAVX256) }) + }) + } +} + +func testDotProductFloatByteFixedValue(t *testing.T, size uint, dotFn func(x []float32, y []uint8) float32) { + vec1 := make([]float32, size) + vec2 := make([]uint8, size) + for i := range vec1 { + vec1[i] = 1 + vec2[i] = 1 + } + res := dotFn(vec1, vec2) + + resControl := dotFloatByteImpl(vec1, vec2) + if resControl != res { + t.Logf("for dim: %d -> want: %f, got: %f", size, resControl, res) + t.Fail() + } +} + +func testDotProductFloatByteRandomValue(t *testing.T, size uint, dotFn func(x []float32, y []byte) float32) { + r := getRandomSeed() + count := 100 + + vec1s := make([][]float32, count) + vec2s := make([][]byte, count) + + for i := 0; i < count; i++ { + vec1 := make([]float32, size) + vec2 := 
make([]byte, size) + for j := range vec1 { + vec1[j] = float32(r.Uint32() % 1000) + vec2[j] = byte(r.Uint32() % 256) + } + + vec1s[i] = Normalize(vec1) + vec2s[i] = vec2 + } + + for i := 0; i < count; i++ { + res := dotFn(vec1s[i], vec2s[i]) + + resControl := dotFloatByteImpl(vec1s[i], vec2s[i]) + delta := float64(0.05) + diff := float64(resControl) - float64(res) + if diff < -delta || diff > delta { + t.Logf("for dim: %d -> want: %f, got: %f, diff: %f", size, resControl, res, diff) + t.Fail() + } + } +} + +func TestCompareDotProductFloatByte(t *testing.T) { + sizes := []uint{ + 1, + 2, + 3, + 4, + 5, + 6, + 8, + 10, + 12, + 16, + 24, + 30, + 31, + 32, + 64, + 67, + 128, + 256, + 260, + 299, + 300, + 384, + 390, + 600, + 768, + 777, + 784, + 1024, + 1536, + } + + for _, size := range sizes { + t.Run(fmt.Sprintf("with size %d", size), func(t *testing.T) { + testDotProductFloatByteFixedValue(t, size, asm.DotFloatByteAVX256) + testDotProductFloatByteRandomValue(t, size, asm.DotFloatByteAVX256) + }) + } +} + +func benchmarkDotFloatByte(b *testing.B, dims int, dotFn func(a []float32, b []byte) float32) { + r := getRandomSeed() + + vec1 := make([]float32, dims) + vec2 := make([]byte, dims) + for i := range vec1 { + vec1[i] = float32(r.Uint32() % 1000) + vec2[i] = byte(r.Uint32() % 256) + } + + b.ResetTimer() + for n := 0; n < b.N; n++ { + dotFn(vec1, vec2) + } +} + +func BenchmarkDotFloatByte(b *testing.B) { + dims := []int{2, 4, 6, 8, 10, 12, 16, 24, 30, 32, 128, 256, 300, 384, 512, 768, 1024, 1536} + for _, dim := range dims { + b.Run(fmt.Sprintf("%d dimensions", dim), func(b *testing.B) { + b.Run("pure go", func(b *testing.B) { benchmarkDotFloatByte(b, dim, dotFloatByteImpl) }) + b.Run("avx", func(b *testing.B) { benchmarkDotFloatByte(b, dim, asm.DotFloatByteAVX256) }) + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/dot_product_arm64.go 
b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/dot_product_arm64.go new file mode 100644 index 0000000000000000000000000000000000000000..29dc87459b68d893ad994d24bb3d8c6ffc46053b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/dot_product_arm64.go @@ -0,0 +1,27 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package distancer + +import ( + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer/asm" + "golang.org/x/sys/cpu" +) + +func init() { + if cpu.ARM64.HasASIMD { + if cpu.ARM64.HasSVE { + dotProductImplementation = asm.Dot_SVE + } else { + dotProductImplementation = asm.Dot_Neon + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/dot_product_arm64_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/dot_product_arm64_test.go new file mode 100644 index 0000000000000000000000000000000000000000..c261bca5bbcdbe56c634a7b7520d6ed3168ffb75 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/dot_product_arm64_test.go @@ -0,0 +1,382 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package distancer + +import ( + "fmt" + "math" + "testing" + "unsafe" + + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer/asm" + "golang.org/x/sys/cpu" +) + +var dotByteImpl func(a, b []byte) uint32 = func(a, b []byte) uint32 { + var sum uint32 + + for i := range a { + sum += uint32(a[i]) * uint32(b[i]) + } + + return uint32(sum) +} + +func dotFloatByteImpl(a []float32, b []byte) float32 { + var sum float32 + + for i := range a { + sum += a[i] * float32(b[i]) + } + + return sum +} + +func testDotProductFixedValue(t *testing.T, size uint) { + count := 100 + countFailed := 0 + for i := 0; i < count; i++ { + vec1 := make([]float32, size) + vec2 := make([]float32, size) + for i := range vec1 { + vec1[i] = 129 + vec2[i] = 129 + } + vec1 = Normalize(vec1) + vec2 = Normalize(vec2) + res := -asm.Dot_Neon(vec1, vec2) + if cpu.ARM64.HasSVE { + res = -asm.Dot_SVE(vec1, vec2) + } + if math.IsNaN(float64(res)) { + panic("NaN") + } + + resControl := DotProductFloatGo(vec1, vec2) + delta := float64(0.01) + diff := float64(resControl) - float64(res) + if diff < -delta || diff > delta { + countFailed++ + + fmt.Printf("run %d: match: %f != %f\n", i, resControl, res) + + t.Fail() + } + } + + fmt.Printf("total failed: %d\n", countFailed) +} + +func testDotProductRandomValue(t *testing.T, size uint) { + r := getRandomSeed() + count := 100 + countFailed := 0 + + vec1s := make([][]float32, count) + vec2s := make([][]float32, count) + + for i := 0; i < count; i++ { + vec1 := make([]float32, size) + vec2 := make([]float32, size) + for j := range vec1 { + vec1[j] = r.Float32() + vec2[j] = r.Float32() + } + vec1s[i] = Normalize(vec1) + vec2s[i] = Normalize(vec2) + } + + for i := 0; i < count; i++ { + res := -asm.Dot_Neon(vec1s[i], vec2s[i]) + if cpu.ARM64.HasSVE { + res = -asm.Dot_SVE(vec1s[i], vec2s[i]) + } + if math.IsNaN(float64(res)) { + panic("NaN") + } + + resControl := DotProductFloatGo(vec1s[i], vec2s[i]) + delta := 
float64(0.01) + diff := float64(resControl) - float64(res) + if diff < -delta || diff > delta { + countFailed++ + + fmt.Printf("run %d: match: %f != %f, %d\n", i, resControl, res, (unsafe.Pointer(&vec1s[i][0]))) + + t.Fail() + } + + } + fmt.Printf("total failed: %d\n", countFailed) +} + +func TestCompareDotProductImplementations(t *testing.T) { + sizes := []uint{ + 1, + 4, + 8, + 16, + 31, + 32, + 35, + 64, + 67, + 128, + 130, + 256, + 260, + 384, + 390, + 768, + 777, + } + + for _, size := range sizes { + t.Run(fmt.Sprintf("with size %d", size), func(t *testing.T) { + testDotProductFixedValue(t, size) + testDotProductRandomValue(t, size) + }) + } +} + +func testDotProductByteFixedValue(t *testing.T, size uint, dotFn func(x []uint8, y []uint8) uint32) { + for num := 0; num < 255; num++ { + vec1 := make([]uint8, size) + vec2 := make([]uint8, size) + for i := range vec1 { + vec1[i] = uint8(num) + vec2[i] = uint8(num) + } + res := dotFn(vec1, vec2) + + resControl := dotByteImpl(vec1, vec2) + if uint32(resControl) != res { + t.Logf("for dim: %d -> want: %d, got: %d", size, resControl, res) + t.Fail() + } + } +} + +func testDotProductByteRandomValue(t *testing.T, size uint, dotFn func(x []byte, y []byte) uint32) { + r := getRandomSeed() + count := 100 + + vec1s := make([][]byte, count) + vec2s := make([][]byte, count) + + for i := 0; i < count; i++ { + vec1 := make([]byte, size) + vec2 := make([]byte, size) + for j := range vec1 { + rand1 := byte(r.Uint32() % 256) + rand2 := byte(r.Uint32() % 256) + + vec1[j] = rand1 + vec2[j] = rand2 + } + } + + for i := 0; i < count; i++ { + res := dotFn(vec1s[i], vec2s[i]) + + resControl := dotByteImpl(vec1s[i], vec2s[i]) + if uint32(resControl) != res { + t.Logf("for dim: %d -> want: %d, got: %d", size, resControl, res) + t.Fail() + } + } +} + +func TestCompareDotProductByte(t *testing.T) { + sizes := []uint{ + 1, + 2, + 3, + 4, + 5, + 6, + 8, + 10, + 12, + 16, + 24, + 30, + 31, + 32, + 64, + 67, + 128, + 256, + 260, + 299, + 300, + 
384, + 390, + 600, + 768, + 777, + 784, + 1024, + 1536, + } + + for _, size := range sizes { + t.Run(fmt.Sprintf("with size %d", size), func(t *testing.T) { + testDotProductByteFixedValue(t, size, asm.DotByteARM64) + testDotProductByteRandomValue(t, size, asm.DotByteARM64) + }) + } +} + +func benchmarkDotByte(b *testing.B, dims int, dotFn func(a, b []byte) uint32) { + r := getRandomSeed() + + vec1 := make([]byte, dims) + vec2 := make([]byte, dims) + for i := range vec1 { + vec1[i] = byte(r.Uint32() % 256) + vec2[i] = byte(r.Uint32() % 256) + } + + b.ResetTimer() + for n := 0; n < b.N; n++ { + dotFn(vec1, vec2) + } +} + +func BenchmarkDotByte(b *testing.B) { + dims := []int{2, 4, 6, 8, 10, 12, 16, 24, 30, 32, 128, 256, 300, 384, 512, 768, 1024, 1536} + for _, dim := range dims { + b.Run(fmt.Sprintf("%d dimensions", dim), func(b *testing.B) { + // benchmarkDotByte(b, dim, dotByteImpl) + benchmarkDotByte(b, dim, asm.DotByteARM64) + + // b.Run("pure go", func(b *testing.B) { benchmarkDotByte(b, dim, dotByteImpl) }) + // b.Run("avx", func(b *testing.B) { benchmarkDotByte(b, dim, asm.DotByteAVX256) }) + }) + } +} + +func testDotProductFloatByteFixedValue(t *testing.T, size uint, dotFn func(x []float32, y []uint8) float32) { + for num := 0; num < 4; num++ { + vec1 := make([]float32, size) + vec2 := make([]uint8, size) + for i := range vec1 { + vec1[i] = float32(num) + vec2[i] = uint8(num) + } + res := dotFn(vec1, vec2) + + resControl := dotFloatByteImpl(vec1, vec2) + if resControl != res { + t.Logf("for dim: %d and %d -> want: %f, got: %f", num, size, resControl, res) + t.Fail() + } + } +} + +func testDotProductFloatByteRandomValue(t *testing.T, size uint, dotFn func(x []float32, y []byte) float32) { + r := getRandomSeed() + count := 100 + + vec1s := make([][]float32, count) + vec2s := make([][]byte, count) + + for i := 0; i < count; i++ { + vec1 := make([]float32, size) + vec2 := make([]byte, size) + for j := range vec1 { + rand1 := r.Float32() + rand2 := byte(r.Uint32() 
% 256) + + vec1[j] = rand1 + vec2[j] = rand2 + } + } + + for i := 0; i < count; i++ { + res := dotFn(vec1s[i], vec2s[i]) + + resControl := dotFloatByteImpl(vec1s[i], vec2s[i]) + if resControl != res { + t.Logf("for dim: %d -> want: %f, got: %f", size, resControl, res) + t.Fail() + } + } +} + +func TestCompareDotProductFloatByte(t *testing.T) { + sizes := []uint{ + 1, + 2, + 3, + 4, + 5, + 6, + 8, + 10, + 12, + 16, + 24, + 30, + 31, + 32, + 64, + 67, + 128, + 256, + 260, + 299, + 300, + 384, + 390, + 600, + 768, + 777, + 784, + 1024, + 1536, + } + + for _, size := range sizes { + t.Run(fmt.Sprintf("with size %d", size), func(t *testing.T) { + testDotProductFloatByteFixedValue(t, size, asm.DotFloatByte_Neon) + testDotProductFloatByteRandomValue(t, size, asm.DotFloatByte_Neon) + }) + } +} + +func benchmarkDotFloatByte(b *testing.B, dims int, dotFn func(a []float32, b []byte) float32) { + r := getRandomSeed() + + vec1 := make([]float32, dims) + vec2 := make([]byte, dims) + for i := range vec1 { + vec1[i] = r.Float32() + vec2[i] = byte(r.Uint32() % 256) + } + + b.ResetTimer() + for n := 0; n < b.N; n++ { + dotFn(vec1, vec2) + } +} + +func BenchmarkDotFloatByte(b *testing.B) { + dims := []int{2, 4, 6, 8, 10, 12, 16, 24, 30, 32, 128, 256, 300, 384, 512, 768, 1024, 1536} + for _, dim := range dims { + b.Run(fmt.Sprintf("%d dimensions", dim), func(b *testing.B) { + b.Run("pure go", func(b *testing.B) { benchmarkDotFloatByte(b, dim, dotFloatByteImpl) }) + b.Run("neon", func(b *testing.B) { benchmarkDotFloatByte(b, dim, asm.DotFloatByte_Neon) }) + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/dot_product_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/dot_product_test.go new file mode 100644 index 0000000000000000000000000000000000000000..89f08de69b94f9cefc9313d592efbc7081bab2a0 --- /dev/null +++ 
b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/dot_product_test.go @@ -0,0 +1,82 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package distancer + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestDotDistancer(t *testing.T) { + t.Run("identical vectors", func(t *testing.T) { + vec1 := []float32{3, 4, 5} + vec2 := []float32{3, 4, 5} + expectedDistance := float32(-50) + + dist, err := NewDotProductProvider().New(vec1).Distance(vec2) + require.Nil(t, err) + + control, err := NewDotProductProvider().SingleDist(vec1, vec2) + require.Nil(t, err) + assert.Equal(t, control, dist) + assert.Equal(t, expectedDistance, dist) + }) + + t.Run("without matching dimensions", func(t *testing.T) { + vec1 := []float32{0, 1, 0, 2, 0, 3} + vec2 := []float32{1, 0, 2, 0, 3, 0} + expectedDistance := float32(0) + + dist, err := NewDotProductProvider().New(vec1).Distance(vec2) + require.Nil(t, err) + + control, err := NewDotProductProvider().SingleDist(vec1, vec2) + require.Nil(t, err) + assert.Equal(t, control, dist) + assert.Equal(t, expectedDistance, dist) + }) + + t.Run("very different vectors", func(t *testing.T) { + vec1 := []float32{3, 4, 5} + vec2 := []float32{-3, -4, -5} + expectedDistance := float32(+50) + + dist, err := NewDotProductProvider().New(vec1).Distance(vec2) + require.Nil(t, err) + + control, err := NewDotProductProvider().SingleDist(vec1, vec2) + require.Nil(t, err) + assert.Equal(t, control, dist) + assert.Equal(t, expectedDistance, dist) + }) +} + +func TestDotDistancerStepbyStep(t *testing.T) { + t.Run("step by step equals SingleDist", func(t *testing.T) { + vec1 := []float32{3, 4, 5} + vec2 := []float32{-3, -4, -5} + + 
expectedDistance, err := NewDotProductProvider().New(vec1).Distance(vec2) + require.Nil(t, err) + + distanceProvider := NewDotProductProvider() + sum := float32(0.0) + for i := range vec1 { + sum += distanceProvider.Step([]float32{vec1[i]}, []float32{vec2[i]}) + } + control := distanceProvider.Wrap(sum) + + assert.Equal(t, control, expectedDistance) + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/errors.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/errors.go new file mode 100644 index 0000000000000000000000000000000000000000..08e3e5fde742dc7ecb5f60dc69859f3210aeeede --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/errors.go @@ -0,0 +1,16 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package distancer + +import "fmt" + +var ErrVectorLength = fmt.Errorf("vector lengths don't match") diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/geo_spatial.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/geo_spatial.go new file mode 100644 index 0000000000000000000000000000000000000000..dbd23e9a92892e25709da6c23cf1d02c879f7129 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/geo_spatial.go @@ -0,0 +1,75 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package distancer + +import ( + "fmt" + "math" +) + +func geoDist(a, b []float32) (float32, error) { + if len(a) != 2 || len(b) != 2 { + return 0, fmt.Errorf("distance vectors must have len 2") + } + + latA := a[0] + latB := b[0] + lonA := a[1] + lonB := b[1] + const R = float64(6371e3) + + latARadian := float64(latA * math.Pi / 180) + latBRadian := float64(latB * math.Pi / 180) + deltaLatRadian := float64(latB-latA) * math.Pi / 180 + deltaLonRadian := float64(lonB-lonA) * math.Pi / 180 + + A := math.Sin(deltaLatRadian/2)*math.Sin(deltaLatRadian/2) + + math.Cos(latARadian)*math.Cos(latBRadian)*math.Sin(deltaLonRadian/2)*math.Sin(deltaLonRadian/2) + + C := 2 * math.Atan2(math.Sqrt(A), math.Sqrt(1-A)) + + return float32(R * C), nil +} + +type GeoDistancer struct { + a []float32 +} + +func (g GeoDistancer) Distance(b []float32) (float32, error) { + return geoDist(g.a, b) +} + +type GeoProvider struct{} + +func (gp GeoProvider) New(vec []float32) Distancer { + return GeoDistancer{a: vec} +} + +func (gp GeoProvider) SingleDist(vec1, vec2 []float32) (float32, error) { + return geoDist(vec1, vec2) +} + +func (gp GeoProvider) Type() string { + return "geo" +} + +func (gp GeoProvider) Step(x, y []float32) float32 { + panic("Not implemented") +} + +func (gp GeoProvider) Wrap(x float32) float32 { + panic("Not implemented") +} + +func NewGeoProvider() Provider { + return GeoProvider{} +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/geo_spatial_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/geo_spatial_test.go new file mode 100644 index 0000000000000000000000000000000000000000..99c6e3fa82b87065ad6c90aa5d51d473c9064e71 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/geo_spatial_test.go @@ -0,0 +1,30 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / 
__/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package distancer + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestGeoSpatialDistance(t *testing.T) { + t.Run("between Munich and Stuttgart", func(t *testing.T) { + munich := []float32{48.137154, 11.576124} + stuttgart := []float32{48.783333, 9.183333} + + dist, err := NewGeoProvider().New(munich).Distance(stuttgart) + require.Nil(t, err) + assert.InDelta(t, 190000, dist, 1000) + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/hamming.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/hamming.go new file mode 100644 index 0000000000000000000000000000000000000000..1a3606e09d5e4f0ef0d0fe281b1e49b05b6564ef --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/hamming.go @@ -0,0 +1,107 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package distancer + +import ( + "math/bits" + + "github.com/pkg/errors" +) + +var hammingImpl func(a, b []float32) float32 = func(a, b []float32) float32 { + var sum float32 // default value of float in golang is 0 + + for i := range a { + if a[i] != b[i] { + sum += 1 + } + } + + return sum +} + +var hammingBitwiseImpl func(a, b []uint64) float32 = func(a, b []uint64) float32 { + total := float32(0) + for segment := range a { + total += float32(bits.OnesCount64(a[segment] ^ b[segment])) + } + return total +} + +type Hamming struct { + a []float32 +} + +func (l Hamming) Distance(b []float32) (float32, error) { + if len(l.a) != len(b) { + return 0, errors.Wrapf(ErrVectorLength, "%d vs %d", + len(l.a), len(b)) + } + + return hammingImpl(l.a, b), nil +} + +func HammingDistanceGo(a, b []float32) float32 { + var sum float32 + for i := range a { + if a[i] != b[i] { + sum += float32(1) + } + } + return sum +} + +func HammingBitwise(x []uint64, y []uint64) (float32, error) { + if len(x) != len(y) { + return 0, errors.New("both vectors should have the same len") + } + return hammingBitwiseImpl(x, y), nil +} + +type HammingProvider struct{} + +func NewHammingProvider() HammingProvider { + return HammingProvider{} +} + +func (l HammingProvider) SingleDist(a, b []float32) (float32, error) { + if len(a) != len(b) { + return 0, errors.Wrapf(ErrVectorLength, "%d vs %d", + len(a), len(b)) + } + + return hammingImpl(a, b), nil +} + +func (l HammingProvider) Type() string { + return "hamming" +} + +func (l HammingProvider) New(a []float32) Distancer { + return &Hamming{a: a} +} + +func (l HammingProvider) Step(x, y []float32) float32 { + var sum float32 // default value of float in golang is 0 + + for i := range x { + if x[i] != y[i] { + sum += float32(1) + } + } + + return sum +} + +func (l HammingProvider) Wrap(x float32) float32 { + return x +} diff --git 
a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/hamming_amd64.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/hamming_amd64.go new file mode 100644 index 0000000000000000000000000000000000000000..dfc7d993d8dd80e0408d1a24831b4189853db410 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/hamming_amd64.go @@ -0,0 +1,28 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package distancer + +import ( + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer/asm" + "golang.org/x/sys/cpu" +) + +func init() { + if cpu.X86.HasAMXBF16 && cpu.X86.HasAVX512 { + hammingImpl = asm.HammingAVX512 + } else if cpu.X86.HasAVX2 { + hammingImpl = asm.HammingAVX256 + } + if cpu.X86.HasAVX2 { + hammingBitwiseImpl = asm.HammingBitwiseAVX256 + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/hamming_amd64_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/hamming_amd64_test.go new file mode 100644 index 0000000000000000000000000000000000000000..19bd8a96c8de105e452bfede09f8f61ec4349ee9 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/hamming_amd64_test.go @@ -0,0 +1,156 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package distancer + +import ( + "fmt" + "math/bits" + "testing" + + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer/asm" + "golang.org/x/sys/cpu" +) + +func HammingBitwiseGo(x, y []uint64) float32 { + total := float32(0) + for segment := range x { + total += float32(bits.OnesCount64(x[segment] ^ y[segment])) + } + return total +} + +func testHammingBitwiseFixedValue(t *testing.T, size uint, hammingBitwiseFn func(x []uint64, y []uint64) float32) { + for num := 0; num < 255; num++ { + vec1 := make([]uint64, size) + vec2 := make([]uint64, size) + for i := range vec1 { + vec1[i] = uint64(num) + vec2[i] = uint64(num + 1) + } + res := hammingBitwiseFn(vec1, vec2) + + resControl := HammingBitwiseGo(vec1, vec2) + if resControl != res { + t.Logf("for dim: %d -> want: %f, got: %f", size, resControl, res) + t.Fail() + } + } +} + +func testHammingBitwiseRandomValue(t *testing.T, size uint, hammingBitwiseFn func(x []uint64, y []uint64) float32) { + r := getRandomSeed() + count := 100 + + vec1s := make([][]uint64, count) + vec2s := make([][]uint64, count) + + for i := 0; i < count; i++ { + vec1 := make([]uint64, size) + vec2 := make([]uint64, size) + for j := range vec1 { + vec1[j] = r.Uint64() + vec2[j] = r.Uint64() + + } + vec1s[i] = vec1 + vec2s[i] = vec2 + } + + for i := 0; i < count; i++ { + res := hammingBitwiseFn(vec1s[i], vec2s[i]) + + resControl := HammingBitwiseGo(vec1s[i], vec2s[i]) + if resControl != res { + t.Logf("for dim: %d -> want: %f, got: %f, factor: %f", size, resControl, res, resControl/res) + t.Fail() + } + } +} + +func TestCompareHammingBitwise(t *testing.T) { + sizes := []uint{ + 1, + 2, + 3, + 4, + 5, + 6, + 8, + 10, + 12, + 16, + 24, + 30, + 31, + 32, + 64, + 67, + 128, + 256, + 260, + 299, + 300, + 384, + 390, + 600, + 768, + 777, + 784, + 1024, + 1536, + } + + for _, size := range sizes { + t.Run(fmt.Sprintf("with size %d", size), func(t *testing.T) { + testHammingBitwiseFixedValue(t, 
size, hammingBitwiseImpl) + testHammingBitwiseRandomValue(t, size, hammingBitwiseImpl) + + if cpu.X86.HasAVX2 { + testHammingBitwiseFixedValue(t, size, asm.HammingBitwiseAVX256) + testHammingBitwiseRandomValue(t, size, asm.HammingBitwiseAVX256) + } + if cpu.X86.HasAMXBF16 && cpu.X86.HasAVX512 { + testHammingBitwiseFixedValue(t, size, asm.HammingBitwiseAVX512) + testHammingBitwiseRandomValue(t, size, asm.HammingBitwiseAVX512) + } + }) + } +} + +func benchmarkHammingBitwise(b *testing.B, dims int, hammingBitwiseFn func(x []uint64, y []uint64) float32) { + r := getRandomSeed() + + vec1 := make([]uint64, dims) + vec2 := make([]uint64, dims) + for i := range vec1 { + vec1[i] = r.Uint64() + vec2[i] = r.Uint64() + } + + b.ResetTimer() + for n := 0; n < b.N; n++ { + hammingBitwiseFn(vec1, vec2) + } +} + +func BenchmarkHammingBitwise(b *testing.B) { + dims := []int{2, 4, 6, 8, 10, 12, 16, 24, 30, 32, 128, 256, 300, 384, 512, 768, 1024, 1536} + for _, dim := range dims { + b.Run(fmt.Sprintf("dim%d-bits%d", dim, 64*dim), func(b *testing.B) { + benchmarkHammingBitwise(b, dim, asm.HammingBitwiseAVX256) + + b.Run("pure go", func(b *testing.B) { benchmarkHammingBitwise(b, dim, HammingBitwiseGo) }) + b.Run("avx-2", func(b *testing.B) { benchmarkHammingBitwise(b, dim, asm.HammingBitwiseAVX256) }) + b.Run("avx-512", func(b *testing.B) { benchmarkHammingBitwise(b, dim, asm.HammingBitwiseAVX512) }) + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/hamming_arm64.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/hamming_arm64.go new file mode 100644 index 0000000000000000000000000000000000000000..707b59f3365f2610c70afdcdc224199e5c06f81f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/hamming_arm64.go @@ -0,0 +1,24 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// 
\_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package distancer + +import ( + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer/asm" + "golang.org/x/sys/cpu" +) + +func init() { + if cpu.ARM64.HasASIMD { + hammingImpl = asm.Hamming + hammingBitwiseImpl = asm.HammingBitwise + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/hamming_arm64_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/hamming_arm64_test.go new file mode 100644 index 0000000000000000000000000000000000000000..6eb2050822a53c38d2bc5c0f67fc6a476fb9fdca --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/hamming_arm64_test.go @@ -0,0 +1,147 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package distancer + +import ( + "fmt" + "math/bits" + "testing" + + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer/asm" + "golang.org/x/sys/cpu" +) + +func HammingBitwiseGo(x, y []uint64) float32 { + total := float32(0) + for segment := range x { + total += float32(bits.OnesCount64(x[segment] ^ y[segment])) + } + return total +} + +func testHammingBitwiseFixedValue(t *testing.T, size uint, hammingBitwiseFn func(x []uint64, y []uint64) float32) { + for num := 0; num < 255; num++ { + vec1 := make([]uint64, size) + vec2 := make([]uint64, size) + for i := range vec1 { + vec1[i] = uint64(num) + vec2[i] = uint64(num + 1) + } + res := hammingBitwiseFn(vec1, vec2) + + resControl := HammingBitwiseGo(vec1, vec2) + if resControl != res { + t.Logf("for dim: %d -> want: %f, got: %f", size, resControl, res) + t.Fail() + } + } +} + +func testHammingBitwiseRandomValue(t *testing.T, size uint, hammingBitwiseFn func(x []uint64, y []uint64) float32) { + r := getRandomSeed() + count := 100 + + vec1s := make([][]uint64, count) + vec2s := make([][]uint64, count) + + for i := 0; i < count; i++ { + vec1 := make([]uint64, size) + vec2 := make([]uint64, size) + for j := range vec1 { + vec1[j] = r.Uint64() + vec2[j] = r.Uint64() + + } + vec1s[i] = vec1 + vec2s[i] = vec2 + } + + for i := 0; i < count; i++ { + res := hammingBitwiseFn(vec1s[i], vec2s[i]) + + resControl := HammingBitwiseGo(vec1s[i], vec2s[i]) + if resControl != res { + t.Logf("for dim: %d -> want: %f, got: %f", size, resControl, res) + t.Fail() + } + } +} + +func TestCompareHammingBitwise(t *testing.T) { + sizes := []uint{ + 1, + 2, + 3, + 4, + 5, + 6, + 8, + 10, + 12, + 16, + 24, + 30, + 31, + 32, + 64, + 67, + 128, + 256, + 260, + 299, + 300, + 384, + 390, + 600, + 768, + 777, + 784, + 1024, + 1536, + } + + for _, size := range sizes { + t.Run(fmt.Sprintf("with size %d", size), func(t *testing.T) { + if cpu.ARM64.HasASIMD { + testHammingBitwiseFixedValue(t, size, 
asm.HammingBitwise) + testHammingBitwiseRandomValue(t, size, asm.HammingBitwise) + } + }) + } +} + +func benchmarkHammingBitwise(b *testing.B, dims int, hammingBitwiseFn func(x []uint64, y []uint64) float32) { + r := getRandomSeed() + + vec1 := make([]uint64, dims) + vec2 := make([]uint64, dims) + for i := range vec1 { + vec1[i] = r.Uint64() + vec2[i] = r.Uint64() + } + + b.ResetTimer() + for n := 0; n < b.N; n++ { + hammingBitwiseFn(vec1, vec2) + } +} + +func BenchmarkHammingBitwise(b *testing.B) { + dims := []int{2, 4, 6, 8, 10, 12, 16, 24, 30, 32, 128, 256, 300, 384, 512, 768, 1024, 1536} + for _, dim := range dims { + b.Run(fmt.Sprintf("dim%d-bits%d", dim, 64*dim), func(b *testing.B) { + benchmarkHammingBitwise(b, dim, asm.HammingBitwise) + b.Run("pure go", func(b *testing.B) { benchmarkHammingBitwise(b, dim, HammingBitwiseGo) }) + b.Run("neon", func(b *testing.B) { benchmarkHammingBitwise(b, dim, asm.HammingBitwise) }) + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/hamming_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/hamming_test.go new file mode 100644 index 0000000000000000000000000000000000000000..114f5f7a9882dfe21f7e9f709a90a387c59f4db1 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/hamming_test.go @@ -0,0 +1,177 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package distancer + +import ( + "fmt" + "testing" + "unsafe" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestHammingDistancer(t *testing.T) { + t.Run("identical vectors", func(t *testing.T) { + vec1 := []float32{3, 4, 5} + vec2 := []float32{3, 4, 5} + expectedDistance := float32(0) + + dist, err := NewHammingProvider().New(vec1).Distance(vec2) + require.Nil(t, err) + + control, err := NewHammingProvider().SingleDist(vec1, vec2) + require.Nil(t, err) + assert.Equal(t, control, dist) + assert.Equal(t, expectedDistance, dist) + }) + + t.Run("same angle, different euclidean position", func(t *testing.T) { + vec1 := []float32{3, 4, 5} + vec2 := []float32{1.5, 2, 2.5} + expectedDistance := float32(3) // all three positions are different + + dist, err := NewHammingProvider().New(vec1).Distance(vec2) + require.Nil(t, err) + + control, err := NewHammingProvider().SingleDist(vec1, vec2) + + require.Nil(t, err) + assert.Equal(t, control, dist) + assert.Equal(t, expectedDistance, dist) + }) + + t.Run("one position different", func(t *testing.T) { + vec1 := []float32{10, 11} + vec2 := []float32{10, 15} + expectedDistance := float32(1) + + dist, err := NewHammingProvider().New(vec1).Distance(vec2) + require.Nil(t, err) + + control, err := NewHammingProvider().SingleDist(vec1, vec2) + require.Nil(t, err) + assert.Equal(t, control, dist) + assert.Equal(t, expectedDistance, dist) + }) + + t.Run("three positions different", func(t *testing.T) { + vec1 := []float32{10, 11, 15, 25, 31} + vec2 := []float32{10, 15, 16, 25, 30} + expectedDistance := float32(3) + + dist, err := NewHammingProvider().New(vec1).Distance(vec2) + require.Nil(t, err) + + control, err := NewHammingProvider().SingleDist(vec1, vec2) + require.Nil(t, err) + assert.Equal(t, control, dist) + assert.Equal(t, expectedDistance, dist) + }) +} + +func TestHammingDistancerStepbyStep(t *testing.T) { + t.Run("step by step equals 
SingleDist", func(t *testing.T) { + vec1 := []float32{10, 11, 15, 25, 31} + vec2 := []float32{10, 15, 16, 25, 30} + + expectedDistance, err := NewHammingProvider().New(vec1).Distance(vec2) + require.Nil(t, err) + + distanceProvider := NewHammingProvider() + sum := float32(0.0) + for i := range vec1 { + sum += distanceProvider.Step([]float32{vec1[i]}, []float32{vec2[i]}) + } + control := distanceProvider.Wrap(sum) + + assert.Equal(t, control, expectedDistance) + }) +} + +func TestCompareHammingDistanceImplementations(t *testing.T) { + sizes := []uint{ + 1, + 2, + 3, + 4, + 5, + 6, + 8, + 10, + 12, + 16, + 24, + 30, + 31, + 32, + 64, + 67, + 128, + 256, + 260, + 299, + 300, + 384, + 390, + 600, + 768, + 777, + 784, + 1024, + 1536, + } + + for _, size := range sizes { + t.Run(fmt.Sprintf("with size %d", size), func(t *testing.T) { + r := getRandomSeed() + count := 1 + countFailed := 0 + + vec1s := make([][]float32, count) + vec2s := make([][]float32, count) + + for i := 0; i < count; i++ { + vec1 := make([]float32, size) + vec2 := make([]float32, size) + for j := range vec1 { + equal := r.Float32() < 0.5 + if equal { + randomValue := r.Float32() + vec1[j] = randomValue + vec2[j] = randomValue + } else { + vec1[j] = r.Float32() + vec2[j] = r.Float32() + 10 + } + } + vec1s[i] = vec1 + vec2s[i] = vec2 + } + + for i := 0; i < count; i++ { + res, err := NewHammingProvider().New(vec1s[i]).Distance(vec2s[i]) + + require.NoError(t, err) + + resControl := HammingDistanceGo(vec1s[i], vec2s[i]) + + if resControl != res { + countFailed++ + t.Fatalf("run %d: match: %f != %f, %d\n", i, resControl, res, (unsafe.Pointer(&vec1s[i][0]))) + } + + } + fmt.Printf("total failed: %d\n", countFailed) + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/helper_for_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/helper_for_test.go new file mode 100644 index 
0000000000000000000000000000000000000000..7b9d1a542df6b85026d1f42363c5a1509aba26ca --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/helper_for_test.go @@ -0,0 +1,21 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package distancer + +import ( + "math/rand" + "time" +) + +func getRandomSeed() *rand.Rand { + return rand.New(rand.NewSource(time.Now().UnixNano())) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/l2.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/l2.go new file mode 100644 index 0000000000000000000000000000000000000000..9566df89d54a25948fe47779d0008d8909c2d28b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/l2.go @@ -0,0 +1,76 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package distancer + +import "github.com/pkg/errors" + +var l2SquaredImpl func(a, b []float32) float32 = func(a, b []float32) float32 { + var sum float32 + + for i := range a { + diff := a[i] - b[i] + sum += diff * diff + } + + return sum +} + +type L2Squared struct { + a []float32 +} + +func (l L2Squared) Distance(b []float32) (float32, error) { + if len(l.a) != len(b) { + return 0, errors.Wrapf(ErrVectorLength, "%d vs %d", + len(l.a), len(b)) + } + + return l2SquaredImpl(l.a, b), nil +} + +type L2SquaredProvider struct{} + +func NewL2SquaredProvider() L2SquaredProvider { + return L2SquaredProvider{} +} + +func (l L2SquaredProvider) SingleDist(a, b []float32) (float32, error) { + if len(a) != len(b) { + return 0, errors.Wrapf(ErrVectorLength, "%d vs %d", + len(a), len(b)) + } + + return l2SquaredImpl(a, b), nil +} + +func (l L2SquaredProvider) Type() string { + return "l2-squared" +} + +func (l L2SquaredProvider) New(a []float32) Distancer { + return &L2Squared{a: a} +} + +func (l L2SquaredProvider) Step(a, b []float32) float32 { + var sum float32 + + for i := range a { + diff := a[i] - b[i] + sum += diff * diff + } + + return sum +} + +func (l L2SquaredProvider) Wrap(x float32) float32 { + return x +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/l2_amd64.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/l2_amd64.go new file mode 100644 index 0000000000000000000000000000000000000000..1c6ea54d0264ec0965edfe9b95f3a8bc6688b816 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/l2_amd64.go @@ -0,0 +1,25 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package distancer + +import ( + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer/asm" + "golang.org/x/sys/cpu" +) + +func init() { + if cpu.X86.HasAMXBF16 && cpu.X86.HasAVX512 { + l2SquaredImpl = asm.L2AVX512 + } else if cpu.X86.HasAVX2 { + l2SquaredImpl = asm.L2AVX256 + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/l2_amd64_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/l2_amd64_test.go new file mode 100644 index 0000000000000000000000000000000000000000..725f93d17bf1b1144dd6301cba332dbc38431ef9 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/l2_amd64_test.go @@ -0,0 +1,326 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package distancer + +import ( + "fmt" + "math/rand" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer/asm" + "golang.org/x/sys/cpu" +) + +func L2PureGo(a, b []float32) float32 { + var sum float32 + + for i := range a { + diff := a[i] - b[i] + sum += diff * diff + } + + return sum +} + +func L2BytePureGo(a, b []uint8) uint32 { + var sum uint32 + + for i := range a { + diff := int32(a[i]) - int32(b[i]) + sum += uint32(diff * diff) + } + + return sum +} + +func L2FloatBytePureGo(a []float32, b []byte) float32 { + var sum float32 + + for i := range a { + diff := a[i] - float32(b[i]) + sum += diff * diff + } + + return sum +} + +func Test_L2_DistanceImplementation(t *testing.T) { + lengths := []int{1, 4, 16, 31, 32, 35, 64, 67, 128, 130, 256, 260, 384, 390, 768, 777} + + for _, length := range lengths { + t.Run(fmt.Sprintf("with vector l=%d", length), func(t *testing.T) { + x := make([]float32, length) + y := make([]float32, length) + for i := range x { + x[i] = rand.Float32() + y[i] = rand.Float32() + } + + control := L2PureGo(x, y) + + asmResult := asm.L2AVX256(x, y) + assert.InEpsilon(t, control, asmResult, 0.01) + + if cpu.X86.HasAVX512 { + asmResult = asm.L2AVX512(x, y) + assert.InEpsilon(t, control, asmResult, 0.01) + } + }) + } +} + +func Test_L2_DistanceImplementation_OneNegativeValue(t *testing.T) { + lengths := []int{1, 4, 16, 31, 32, 35, 64, 67, 128, 130, 256, 260, 384, 390, 768, 777} + + for _, length := range lengths { + t.Run(fmt.Sprintf("with vector l=%d", length), func(t *testing.T) { + x := make([]float32, length) + y := make([]float32, length) + for i := range x { + x[i] = -rand.Float32() + y[i] = rand.Float32() + } + + control := L2PureGo(x, y) + + asmResult := asm.L2AVX256(x, y) + assert.InEpsilon(t, control, asmResult, 0.01) + + if cpu.X86.HasAVX512 { + asmResult = asm.L2AVX512(x, y) + 
assert.InEpsilon(t, control, asmResult, 0.01) + } + }) + } +} + +func Test_L2_Byte_DistanceImplementation(t *testing.T) { + lengths := []int{1, 2, 3, 4, 5, 16, 31, 32, 35, 64, 67, 128, 130, 256, 260, 384, 390, 768, 777, 1000, 1536} + + for _, length := range lengths { + t.Run(fmt.Sprintf("with vector l=%d", length), func(t *testing.T) { + x := make([]uint8, length) + y := make([]uint8, length) + for i := range x { + x[i] = uint8(rand.Uint32() % 256) + y[i] = uint8(rand.Uint32() % 256) + } + + control := L2BytePureGo(x, y) + + asmResult := asm.L2ByteAVX256(x, y) + require.Equal(t, int(control), int(asmResult)) + }) + } +} + +func Test_L2_FloatByte_DistanceImplementation(t *testing.T) { + lengths := []int{1, 2, 3, 4, 5, 16, 31, 32, 35, 64, 67, 128, 130, 256, 260, 384, 390, 768, 777, 1000, 1536} + + for _, length := range lengths { + t.Run(fmt.Sprintf("with vector l=%d", length), func(t *testing.T) { + x := make([]float32, length) + y := make([]uint8, length) + for i := range x { + x[i] = float32(rand.Uint32()) + y[i] = uint8(rand.Uint32() % 256) + } + + control := L2FloatBytePureGo(x, y) + + asmResult := asm.L2FloatByteAVX256(x, y) + assert.InEpsilon(t, control, asmResult, 0.01) + }) + } +} + +func Benchmark_L2(b *testing.B) { + r := getRandomSeed() + lengths := []int{ + 1, + 2, + 3, + 4, + 5, + 6, + 8, + 10, + 12, + 16, + 24, + 30, + 31, + 32, + 64, + 67, + 128, + 256, + 260, + 299, + 300, + 384, + 390, + 600, + 768, + 777, + 784, + 1024, + 1536, + } + for _, length := range lengths { + b.Run(fmt.Sprintf("vector dim=%d", length), func(b *testing.B) { + x := make([]float32, length) + y := make([]float32, length) + for i := range x { + x[i] = -r.Float32() + y[i] = r.Float32() + } + + b.Run("pure go", func(b *testing.B) { + for i := 0; i < b.N; i++ { + L2PureGo(x, y) + } + }) + + b.Run("asm AVX", func(b *testing.B) { + for i := 0; i < b.N; i++ { + asm.L2(x, y) + } + }) + + b.Run("asm AVX512", func(b *testing.B) { + for i := 0; i < b.N; i++ { + asm.L2AVX512(x, y) + } + 
}) + }) + } +} + +func Benchmark_L2Byte(b *testing.B) { + lengths := []int{ + 1, + 2, + 3, + 4, + 5, + 6, + 8, + 10, + 12, + 16, + 24, + 30, + 31, + 32, + 64, + 67, + 128, + 256, + 260, + 299, + 300, + 384, + 390, + 600, + 768, + 777, + 784, + 1024, + 1536, + } + for _, length := range lengths { + b.Run(fmt.Sprintf("vector dim=%d", length), func(b *testing.B) { + x := make([]uint8, length) + y := make([]uint8, length) + for i := range x { + x[i] = uint8(rand.Uint32() % 256) + y[i] = uint8(rand.Uint32() % 256) + } + + b.ResetTimer() + + b.Run("pure go", func(b *testing.B) { + for i := 0; i < b.N; i++ { + L2BytePureGo(x, y) + } + }) + + b.Run("asm AVX", func(b *testing.B) { + for i := 0; i < b.N; i++ { + asm.L2ByteAVX256(x, y) + } + }) + }) + } +} + +func Benchmark_L2FloatByte(b *testing.B) { + r := getRandomSeed() + lengths := []int{ + 1, + 2, + 3, + 4, + 5, + 6, + 8, + 10, + 12, + 16, + 24, + 30, + 31, + 32, + 64, + 67, + 128, + 256, + 260, + 299, + 300, + 384, + 390, + 600, + 768, + 777, + 784, + 1024, + 1536, + } + for _, length := range lengths { + b.Run(fmt.Sprintf("vector dim=%d", length), func(b *testing.B) { + x := make([]float32, length) + y := make([]byte, length) + for i := range x { + x[i] = -r.Float32() + y[i] = uint8(rand.Uint32() % 256) + } + + b.ResetTimer() + + b.Run("pure go", func(b *testing.B) { + for i := 0; i < b.N; i++ { + L2FloatBytePureGo(x, y) + } + }) + + b.Run("asm AVX", func(b *testing.B) { + for i := 0; i < b.N; i++ { + asm.L2FloatByteAVX256(x, y) + } + }) + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/l2_arm64.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/l2_arm64.go new file mode 100644 index 0000000000000000000000000000000000000000..5fea748a63faefdf30a2d5d0010e77b513bb79c0 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/l2_arm64.go @@ -0,0 +1,27 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// 
\ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package distancer + +import ( + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer/asm" + "golang.org/x/sys/cpu" +) + +func init() { + if cpu.ARM64.HasASIMD { + if cpu.ARM64.HasSVE { + l2SquaredImpl = asm.L2_SVE + } else { + l2SquaredImpl = asm.L2_Neon + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/l2_arm64_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/l2_arm64_test.go new file mode 100644 index 0000000000000000000000000000000000000000..e3fe5834598ee493374557def86fbf3117d1fdd9 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/l2_arm64_test.go @@ -0,0 +1,174 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package distancer + +import ( + "fmt" + "math/rand" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer/asm" + "golang.org/x/sys/cpu" +) + +func L2PureGo(a, b []float32) float32 { + var sum float32 + + for i := range a { + diff := a[i] - b[i] + sum += diff * diff + } + + return sum +} + +func L2PureGoByte(a, b []uint8) uint32 { + var sum uint32 + + for i := range a { + diff := int32(a[i]) - int32(b[i]) + sum += uint32(diff * diff) + } + + return sum +} + +func Test_L2_DistanceImplementation(t *testing.T) { + lengths := []int{1, 2, 3, 4, 5, 16, 31, 32, 35, 64, 67, 128, 130, 256, 260, 384, 390, 768, 777, 1000, 1536} + + for _, length := range lengths { + t.Run(fmt.Sprintf("with vector l=%d", length), func(t *testing.T) { + x := make([]float32, length) + y := make([]float32, length) + for i := range x { + x[i] = rand.Float32() + y[i] = rand.Float32() + } + + control := L2PureGo(x, y) + + asmResult := asm.L2_Neon(x, y) + assert.InEpsilon(t, control, asmResult, 0.01) + + if cpu.ARM64.HasSVE { + asmResult := asm.L2_SVE(x, y) + assert.InEpsilon(t, control, asmResult, 0.01) + } + }) + } +} + +func Test_L2_DistanceImplementation_OneNegativeValue(t *testing.T) { + lengths := []int{1, 2, 3, 4, 5, 16, 31, 32, 35, 64, 67, 128, 130, 256, 260, 384, 390, 768, 777, 1000, 1536} + + for _, length := range lengths { + t.Run(fmt.Sprintf("with vector l=%d", length), func(t *testing.T) { + x := make([]float32, length) + y := make([]float32, length) + for i := range x { + x[i] = -rand.Float32() + y[i] = rand.Float32() + } + + control := L2PureGo(x, y) + asmResult := asm.L2_Neon(x, y) + assert.InEpsilon(t, control, asmResult, 0.01) + + if cpu.ARM64.HasSVE { + asmResult := asm.L2_SVE(x, y) + assert.InEpsilon(t, control, asmResult, 0.01) + } + }) + } +} + +func Benchmark_L2_PureGo_VS_SIMD(b *testing.B) { + r := getRandomSeed() + lengths := []int{1, 2, 3, 4, 5, 16, 31, 32, 35, 
64, 67, 128, 130, 256, 260, 384, 390, 768, 777, 1000, 1536} + for _, length := range lengths { + b.Run(fmt.Sprintf("vector dim=%d", length), func(b *testing.B) { + x := make([]float32, length) + y := make([]float32, length) + for i := range x { + x[i] = -r.Float32() + y[i] = r.Float32() + } + + b.ResetTimer() + + b.Run("pure go", func(b *testing.B) { + for i := 0; i < b.N; i++ { + L2PureGo(x, y) + } + }) + + b.Run("asm Neon", func(b *testing.B) { + for i := 0; i < b.N; i++ { + asm.L2_Neon(x, y) + } + }) + + b.Run("asm SVE", func(b *testing.B) { + for i := 0; i < b.N; i++ { + asm.L2_Neon(x, y) + } + }) + }) + } +} + +func Test_L2_Byte_DistanceImplementation_RandomValues(t *testing.T) { + lengths := []int{1, 2, 3, 4, 5, 16, 31, 32, 35, 64, 67, 128, 130, 256, 260, 384, 390, 768, 777, 1000, 1536} + + for _, length := range lengths { + t.Run(fmt.Sprintf("with vector l=%d", length), func(t *testing.T) { + x := make([]uint8, length) + y := make([]uint8, length) + for i := range x { + x[i] = uint8(rand.Uint32() % 256) + y[i] = uint8(rand.Uint32() % 256) + } + + control := L2PureGoByte(x, y) + + asmResult := asm.L2ByteARM64(x, y) + if uint32(control) != asmResult { + t.Logf("for dim: %d -> want: %d, got: %d", length, control, asmResult) + t.Fail() + } + }) + } +} + +func Test_L2_Byte_DistanceImplementation_FixedValues(t *testing.T) { + lengths := []int{1, 2, 3, 4, 5, 16, 31, 32, 35, 64, 67, 128, 130, 256, 260, 384, 390, 768, 777, 1000, 1536} + + for _, length := range lengths { + t.Run(fmt.Sprintf("with vector l=%d", length), func(t *testing.T) { + x := make([]uint8, length) + y := make([]uint8, length) + for i := range x { + x[i] = uint8(251) + y[i] = uint8(251) + } + + control := L2PureGoByte(x, y) + + asmResult := asm.L2ByteARM64(x, y) + if uint32(control) != asmResult { + t.Logf("for dim: %d -> want: %d, got: %d", length, control, asmResult) + t.Fail() + } + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/l2_test.go 
b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/l2_test.go new file mode 100644 index 0000000000000000000000000000000000000000..391dc24f9641ed26f621e88709b7141997969925 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/l2_test.go @@ -0,0 +1,80 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package distancer + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestL2Distancer(t *testing.T) { + t.Run("identical vectors", func(t *testing.T) { + vec1 := []float32{3, 4, 5} + vec2 := []float32{3, 4, 5} + expectedDistance := float32(0) + + dist, err := NewL2SquaredProvider().New(vec1).Distance(vec2) + require.Nil(t, err) + control, err := NewL2SquaredProvider().SingleDist(vec1, vec2) + require.Nil(t, err) + assert.Equal(t, control, dist) + assert.Equal(t, expectedDistance, dist) + }) + + t.Run("same angle, different euclidean position", func(t *testing.T) { + vec1 := []float32{3, 4, 5} + vec2 := []float32{1.5, 2, 2.5} + expectedDistance := float32(12.5) + + dist, err := NewL2SquaredProvider().New(vec1).Distance(vec2) + require.Nil(t, err) + + control, err := NewL2SquaredProvider().SingleDist(vec1, vec2) + require.Nil(t, err) + assert.Equal(t, control, dist) + assert.Equal(t, expectedDistance, dist) + }) + + t.Run("different vectors", func(t *testing.T) { + vec1 := []float32{10, 11} + vec2 := []float32{13, 15} + expectedDistance := float32(25) + + dist, err := NewL2SquaredProvider().New(vec1).Distance(vec2) + require.Nil(t, err) + control, err := NewL2SquaredProvider().SingleDist(vec1, vec2) + require.Nil(t, err) + assert.Equal(t, control, dist) + assert.Equal(t, expectedDistance, dist) + }) +} 
+ +func TestL2DistancerStepbyStep(t *testing.T) { + t.Run("step by step equals SingleDist", func(t *testing.T) { + vec1 := []float32{3, 4, 5} + vec2 := []float32{1.5, 2, 2.5} + + expectedDistance, err := NewL2SquaredProvider().New(vec1).Distance(vec2) + require.Nil(t, err) + + distanceProvider := NewL2SquaredProvider() + sum := float32(0.0) + for i := range vec1 { + sum += distanceProvider.Step([]float32{vec1[i]}, []float32{vec2[i]}) + } + control := distanceProvider.Wrap(sum) + + assert.Equal(t, control, expectedDistance) + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/manhattan.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/manhattan.go new file mode 100644 index 0000000000000000000000000000000000000000..644800884ecad1fd7437be87d1d5cf6c5bd4ca4c --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/manhattan.go @@ -0,0 +1,80 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package distancer + +import ( + "math" + + "github.com/pkg/errors" +) + +var manhattanImpl func(a, b []float32) float32 = func(a, b []float32) float32 { + var sum float32 + + for i := range a { + // take absolute difference, converted to float64 because math.Abs needs that + // convert back to float32 as sum is float32 + sum += float32(math.Abs(float64(a[i] - b[i]))) + } + + return sum +} + +type Manhattan struct { + a []float32 +} + +func (l Manhattan) Distance(b []float32) (float32, error) { + if len(l.a) != len(b) { + return 0, errors.Wrapf(ErrVectorLength, "%d vs %d", len(l.a), len(b)) + } + + return manhattanImpl(l.a, b), nil +} + +type ManhattanProvider struct{} + +func NewManhattanProvider() ManhattanProvider { + return ManhattanProvider{} +} + +func (l ManhattanProvider) SingleDist(a, b []float32) (float32, error) { + if len(a) != len(b) { + return 0, errors.Wrapf(ErrVectorLength, "%d vs %d", len(a), len(b)) + } + + return manhattanImpl(a, b), nil +} + +func (l ManhattanProvider) Type() string { + return "manhattan" +} + +func (l ManhattanProvider) New(a []float32) Distancer { + return &Manhattan{a: a} +} + +func (l ManhattanProvider) Step(x, y []float32) float32 { + var sum float32 + + for i := range x { + // take absolute difference, converted to float64 because math.Abs needs that + // convert back to float32 as sum is float32 + sum += float32(math.Abs(float64(x[i] - y[i]))) + } + + return sum +} + +func (l ManhattanProvider) Wrap(x float32) float32 { + return x +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/manhattan_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/manhattan_test.go new file mode 100644 index 0000000000000000000000000000000000000000..7a58b4c984052fe1119567b0316876c070c1ba60 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/manhattan_test.go @@ -0,0 +1,84 @@ +// _ _ 
+// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package distancer + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestManhattanDistancer(t *testing.T) { + t.Run("identical vectors", func(t *testing.T) { + vec1 := []float32{3, 4, 5} + vec2 := []float32{3, 4, 5} + expectedDistance := float32(0) + + dist, err := NewManhattanProvider().New(vec1).Distance(vec2) + require.Nil(t, err) + + control, err := NewManhattanProvider().SingleDist(vec1, vec2) + require.Nil(t, err) + assert.Equal(t, control, dist) + assert.Equal(t, expectedDistance, dist) + }) + + t.Run("same angle, different euclidean position", func(t *testing.T) { + vec1 := []float32{3, 4, 5} + vec2 := []float32{1.5, 2, 2.5} + // distance will be abs(3-1.5) + abs(4-2) + abs(5-2.5) = 1.5 + 2 + 2.5 = 6 + expectedDistance := float32(6) + + dist, err := NewManhattanProvider().New(vec1).Distance(vec2) + require.Nil(t, err) + + control, err := NewManhattanProvider().SingleDist(vec1, vec2) + require.Nil(t, err) + assert.Equal(t, control, dist) + assert.Equal(t, expectedDistance, dist) + }) + + t.Run("different vectors", func(t *testing.T) { + vec1 := []float32{10, 11} + vec2 := []float32{13, 15} + // distance will be calculated as abs(10-13) + abs(11-15) = 3 + 4 = 7 + expectedDistance := float32(7) + + dist, err := NewManhattanProvider().New(vec1).Distance(vec2) + require.Nil(t, err) + + control, err := NewManhattanProvider().SingleDist(vec1, vec2) + require.Nil(t, err) + assert.Equal(t, control, dist) + assert.Equal(t, expectedDistance, dist) + }) +} + +func TestManhattanDistancerStepbyStep(t *testing.T) { + t.Run("step by step equals SingleDist", func(t *testing.T) { + vec1 := []float32{3, 4, 5} + vec2 := []float32{1.5, 2, 
2.5} + + expectedDistance, err := NewManhattanProvider().New(vec1).Distance(vec2) + require.Nil(t, err) + + distanceProvider := NewManhattanProvider() + sum := float32(0.0) + for i := range vec1 { + sum += distanceProvider.Step([]float32{vec1[i]}, []float32{vec2[i]}) + } + control := distanceProvider.Wrap(sum) + + assert.Equal(t, control, expectedDistance) + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/normalize.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/normalize.go new file mode 100644 index 0000000000000000000000000000000000000000..c9a1738938eb0ee83b07d6a17f0d8ccc0aea8b72 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/normalize.go @@ -0,0 +1,32 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package distancer + +import "math" + +func Normalize(v []float32) []float32 { + var norm float32 + out := make([]float32, len(v)) + for i := range v { + norm += v[i] * v[i] + } + if norm == 0 { + return out + } + + norm = float32(math.Sqrt(float64(norm))) + for i := range v { + out[i] = v[i] / norm + } + + return out +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/provider.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/provider.go new file mode 100644 index 0000000000000000000000000000000000000000..9d666592e707d0c36558ebd2b96d1f838d697ce0 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/distancer/provider.go @@ -0,0 +1,24 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package distancer + +type Provider interface { + New(vec []float32) Distancer + SingleDist(vec1, vec2 []float32) (float32, error) + Step(x, y []float32) float32 + Wrap(x float32) float32 + Type() string +} + +type Distancer interface { + Distance(vec []float32) (float32, error) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/dynamic_ef_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/dynamic_ef_test.go new file mode 100644 index 0000000000000000000000000000000000000000..f0a3c68827e6265ffcb7ccfaa39459f6762364ac --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/dynamic_ef_test.go @@ -0,0 +1,104 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package hnsw + +import ( + "context" + "testing" + + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer" + "github.com/weaviate/weaviate/adapters/repos/db/vector/testinghelpers" + "github.com/weaviate/weaviate/entities/cyclemanager" + ent "github.com/weaviate/weaviate/entities/vectorindex/hnsw" +) + +// To prevent a regression on +// https://github.com/weaviate/weaviate/issues/1878 +func Test_DynamicEF(t *testing.T) { + type test struct { + name string + config ent.UserConfig + limit int + expectedEf int + } + + tests := []test{ + { + name: "all defaults explicitly entered, limit: 100", + config: ent.UserConfig{ + VectorCacheMaxObjects: 10, + EF: -1, + DynamicEFMin: 100, + DynamicEFMax: 500, + DynamicEFFactor: 8, + }, + limit: 100, + expectedEf: 500, + }, + { + name: "limit lower than min", + config: ent.UserConfig{ + 
VectorCacheMaxObjects: 10, + EF: -1, + DynamicEFMin: 100, + DynamicEFMax: 500, + DynamicEFFactor: 8, + }, + limit: 10, + expectedEf: 100, + }, + { + name: "limit within the dynamic range", + config: ent.UserConfig{ + VectorCacheMaxObjects: 10, + EF: -1, + DynamicEFMin: 100, + DynamicEFMax: 500, + DynamicEFFactor: 8, + }, + limit: 23, + expectedEf: 184, + }, + { + name: "explicit ef", + config: ent.UserConfig{ + VectorCacheMaxObjects: 10, + EF: 78, + }, + limit: 5, + expectedEf: 78, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + index, err := New(Config{ + RootPath: "doesnt-matter-as-committlogger-is-mocked-out", + ID: "dynaimc-ef-test", + MakeCommitLoggerThunk: MakeNoopCommitLogger, + DistanceProvider: distancer.NewCosineDistanceProvider(), + VectorForIDThunk: func(ctx context.Context, id uint64) ([]float32, error) { + return nil, errors.Errorf("not implemented") + }, + }, test.config, cyclemanager.NewCallbackGroupNoop(), testinghelpers.NewDummyStore(t)) + require.Nil(t, err) + + actualEF := index.searchTimeEF(test.limit) + assert.Equal(t, test.expectedEf, actualEF) + + require.Nil(t, index.Drop(context.Background())) + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/flat_search.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/flat_search.go new file mode 100644 index 0000000000000000000000000000000000000000..56a4e54a1ec36eef0284042135dc2336ed384a14 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/flat_search.go @@ -0,0 +1,224 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package hnsw + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/pkg/errors" + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/adapters/repos/db/priorityqueue" + "github.com/weaviate/weaviate/adapters/repos/db/vector/compressionhelpers" + enterrors "github.com/weaviate/weaviate/entities/errors" + "github.com/weaviate/weaviate/entities/storobj" +) + +func (h *hnsw) flatSearch(ctx context.Context, queryVector []float32, k, limit int, + allowList helpers.AllowList, +) ([]uint64, []float32, error) { + if !h.shouldRescore() || h.muvera.Load() { + limit = k + } + + h.RLock() + nodeSize := uint64(len(h.nodes)) + h.RUnlock() + + var compressorDistancer compressionhelpers.CompressorDistancer + if h.compressed.Load() { + distancer, returnFn := h.compressor.NewDistancer(queryVector) + defer returnFn() + compressorDistancer = distancer + } + + aggregateMu := &sync.Mutex{} + results := priorityqueue.NewMax[any](limit) + + beforeIter := time.Now() + // first extract all candidates, this reduces the amount of coordination + // needed for the workers + candidates := make([]uint64, 0, allowList.Len()) + it := allowList.Iterator() + for candidate, ok := it.Next(); ok; candidate, ok = it.Next() { + candidates = append(candidates, candidate) + } + + eg := enterrors.NewErrorGroupWrapper(h.logger) + for workerID := 0; workerID < h.flatSearchConcurrency; workerID++ { + workerID := workerID + eg.Go(func() error { + localResults := priorityqueue.NewMax[any](limit) + var e storobj.ErrNotFound + for idPos := workerID; idPos < len(candidates); idPos += h.flatSearchConcurrency { + candidate := candidates[idPos] + + // Hot fix for https://github.com/weaviate/weaviate/issues/1937 + // this if statement mitigates the problem but it doesn't resolve the issue + if candidate >= nodeSize { + h.logger.WithField("action", "flatSearch"). 
+ Debugf("trying to get candidate: %v but we only have: %v elements.", + candidate, nodeSize) + continue + } + + h.shardedNodeLocks.RLock(candidate) + c := h.nodes[candidate] + h.shardedNodeLocks.RUnlock(candidate) + + if c == nil || h.hasTombstone(candidate) { + continue + } + + dist, err := h.distToNode(compressorDistancer, candidate, queryVector) + if errors.As(err, &e) { + h.handleDeletedNode(e.DocID, "flatSearch") + continue + } + if err != nil { + return err + } + + addResult(localResults, candidate, dist, limit) + } + + aggregateMu.Lock() + defer aggregateMu.Unlock() + for localResults.Len() > 0 { + res := localResults.Pop() + addResult(results, res.ID, res.Dist, limit) + } + + return nil + }) + } + + if err := eg.Wait(); err != nil { + return nil, nil, err + } + took := time.Since(beforeIter) + helpers.AnnotateSlowQueryLog(ctx, "flat_search_iteration_took", took) + + beforeRescore := time.Now() + if h.shouldRescore() && !h.multivector.Load() { + compressorDistancer, fn := h.compressor.NewDistancer(queryVector) + if err := h.rescore(ctx, results, k, compressorDistancer); err != nil { + helpers.AnnotateSlowQueryLog(ctx, "context_error", "flat_search_rescore") + took := time.Since(beforeRescore) + helpers.AnnotateSlowQueryLog(ctx, "flat_search_rescore_took", took) + return nil, nil, fmt.Errorf("flat search: %w", err) + } + fn() + took := time.Since(beforeRescore) + helpers.AnnotateSlowQueryLog(ctx, "flat_search_rescore_took", took) + } + + ids := make([]uint64, results.Len()) + dists := make([]float32, results.Len()) + + // results is ordered in reverse, we need to flip the order before presenting + // to the user! 
+ i := len(ids) - 1 + for results.Len() > 0 { + res := results.Pop() + ids[i] = res.ID + dists[i] = res.Dist + i-- + } + + return ids, dists, nil +} + +func (h *hnsw) flatMultiSearch(ctx context.Context, queryVector [][]float32, limit int, + allowList helpers.AllowList, +) ([]uint64, []float32, error) { + aggregateMu := &sync.Mutex{} + results := priorityqueue.NewMax[any](limit) + + beforeIter := time.Now() + // first extract all candidates, this reduces the amount of coordination + // needed for the workers + candidates := allowList.Slice() + + eg := enterrors.NewErrorGroupWrapper(h.logger) + for workerID := 0; workerID < h.flatSearchConcurrency; workerID++ { + workerID := workerID + eg.Go(func() error { + localResults := priorityqueue.NewMax[any](limit) + var e storobj.ErrNotFound + for idPos := workerID; idPos < len(candidates); idPos += h.flatSearchConcurrency { + candidate := candidates[idPos] + + dist, err := h.computeScore(queryVector, candidate) + + if errors.As(err, &e) { + h.RLock() + vecIDs := h.docIDVectors[candidate] + h.RUnlock() + for _, vecID := range vecIDs { + h.handleDeletedNode(vecID, "flatSearch") + } + continue + } + if err != nil { + return err + } + + addResult(localResults, candidate, dist, limit) + } + + aggregateMu.Lock() + defer aggregateMu.Unlock() + for localResults.Len() > 0 { + res := localResults.Pop() + addResult(results, res.ID, res.Dist, limit) + } + + return nil + }) + } + + if err := eg.Wait(); err != nil { + return nil, nil, err + } + took := time.Since(beforeIter) + helpers.AnnotateSlowQueryLog(ctx, "flat_search_iteration_took", took) + + ids := make([]uint64, results.Len()) + dists := make([]float32, results.Len()) + + // results is ordered in reverse, we need to flip the order before presenting + // to the user! 
+ i := len(ids) - 1 + for results.Len() > 0 { + res := results.Pop() + ids[i] = res.ID + dists[i] = res.Dist + i-- + } + + return ids, dists, nil +} + +func addResult(results *priorityqueue.Queue[any], id uint64, dist float32, limit int) { + if results.Len() < limit { + results.Insert(id, dist) + return + } + + if results.Top().Dist > dist { + results.Pop() + results.Insert(id, dist) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/flat_search_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/flat_search_test.go new file mode 100644 index 0000000000000000000000000000000000000000..389e5b7d3d604bf31e5ac9f709f2e120bd580268 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/flat_search_test.go @@ -0,0 +1,161 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +//go:build !race + +package hnsw + +import ( + "context" + "fmt" + "os" + "sync" + "testing" + "time" + + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/adapters/repos/db/vector/common" + "github.com/weaviate/weaviate/adapters/repos/db/vector/compressionhelpers" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer" + "github.com/weaviate/weaviate/adapters/repos/db/vector/testinghelpers" + "github.com/weaviate/weaviate/entities/cyclemanager" + "github.com/weaviate/weaviate/entities/storobj" + ent "github.com/weaviate/weaviate/entities/vectorindex/hnsw" +) + +func Test_NoRaceCompressionRecall(t *testing.T) { + for _, includeDeletes := range []bool{false, true} { + for _, concurrency := range []int{1, 2, 4, 8} { + t.Run(fmt.Sprintf("deletes=%v concurrencys= %d", includeDeletes, concurrency), func(t *testing.T) { + path := t.TempDir() + logger, _ := test.NewNullLogger() + ctx := context.Background() + + efConstruction := 4 + ef := 64 + maxNeighbors := 4 + segments := 4 + dimensions := 16 + vectors_size := 10_000 + queries_size := 100 + before := time.Now() + vectors, queries := testinghelpers.RandomVecs(vectors_size, queries_size, dimensions) + testinghelpers.Normalize(vectors) + testinghelpers.Normalize(queries) + k := 10 + + distancer := distancer.NewCosineDistanceProvider() + + allowList := helpers.NewAllowList() + allowList.Insert(makeRange(0, uint64(vectors_size))...) 
+ + fmt.Printf("generating data took %s\n", time.Since(before)) + + uc := ent.UserConfig{ + MaxConnections: maxNeighbors, + EFConstruction: efConstruction, + EF: ef, + VectorCacheMaxObjects: 10e12, + } + rescored := false + index, _ := New(Config{ + RootPath: path, + ID: "recallbenchmark", + MakeCommitLoggerThunk: MakeNoopCommitLogger, + ClassName: "clasRecallBenchmark", + ShardName: "shardRecallBenchmark", + DistanceProvider: distancer, + VectorForIDThunk: func(ctx context.Context, id uint64) ([]float32, error) { + if int(id) >= len(vectors) { + return nil, storobj.NewErrNotFoundf(id, "out of range") + } + return vectors[int(id)], nil + }, + TempVectorForIDThunk: func(ctx context.Context, id uint64, container *common.VectorSlice) ([]float32, error) { + copy(container.Slice, vectors[int(id)]) + rescored = true + return container.Slice, nil + }, + }, uc, cyclemanager.NewCallbackGroupNoop(), testinghelpers.NewDummyStore(t)) + init := time.Now() + compressionhelpers.Concurrently(logger, uint64(vectors_size), func(id uint64) { + index.Add(ctx, id, vectors[id]) + }) + before = time.Now() + fmt.Println("Start compressing...") + uc.PQ = ent.PQConfig{ + Enabled: true, + Segments: dimensions / segments, + Centroids: 256, + Encoder: ent.NewDefaultUserConfig().PQ.Encoder, + TrainingLimit: 5_000, + } + uc.EF = 256 + wg := sync.WaitGroup{} + wg.Add(1) + if includeDeletes { + for i := uint64(0); i < uint64(vectors_size); i += 3 { + vectors[i] = nil + } + } + truths := make([][]uint64, queries_size) + compressionhelpers.Concurrently(logger, uint64(len(queries)), func(i uint64) { + truths[i], _ = testinghelpers.BruteForce(logger, vectors, queries[i], k, testinghelpers.DistanceWrapper(distancer)) + }) + index.UpdateUserConfig(uc, func() { + fmt.Printf("Time to compress: %s\n", time.Since(before)) + fmt.Printf("Building the index took %s\n", time.Since(init)) + + if includeDeletes { + // delete every 3rd ID from the index + for i := uint64(0); i < uint64(vectors_size); i += 3 { + 
index.Delete(i) + } + } + + var relevant uint64 + var retrieved int + + mutex := sync.Mutex{} + compressionhelpers.Concurrently(logger, uint64(len(queries)), func(i uint64) { + results, _, _ := index.flatSearch(ctx, queries[i], k, 100, allowList) + mutex.Lock() + retrieved += k + relevant += testinghelpers.MatchesInLists(truths[i], results) + mutex.Unlock() + }) + + recall := float32(relevant) / float32(retrieved) + fmt.Println(recall) + assert.True(t, recall > 0.9) + assert.True(t, rescored) + + err := os.RemoveAll(path) + if err != nil { + fmt.Println(err) + } + wg.Done() + }) + wg.Wait() + }) + } + } +} + +func makeRange(min, max uint64) []uint64 { + a := make([]uint64, max-min+1) + for i := range a { + a[i] = min + uint64(i) + } + return a +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/generate_recall_datasets.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/generate_recall_datasets.go new file mode 100644 index 0000000000000000000000000000000000000000..57ed294ec2f9691234100b4fc03391bce847d564 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/generate_recall_datasets.go @@ -0,0 +1,118 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +//go:build ignore +// +build ignore + +package main + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "math" + "math/rand" + "sort" + + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer" +) + +func main() { + dimensions := 256 + size := 10000 + queries := 1000 + + vectors := make([][]float32, size) + queryVectors := make([][]float32, queries) + truths := make([][]uint64, queries) + + fmt.Printf("generating %d vectors", size) + for i := 0; i < size; i++ { + vector := make([]float32, dimensions) + for j := 0; j < dimensions; j++ { + vector[j] = rand.Float32() + } + vectors[i] = Normalize(vector) + + } + fmt.Printf("done\n") + + fmt.Printf("generating %d search queries", queries) + for i := 0; i < queries; i++ { + queryVector := make([]float32, dimensions) + for j := 0; j < dimensions; j++ { + queryVector[j] = rand.Float32() + } + queryVectors[i] = Normalize(queryVector) + } + fmt.Printf("done\n") + + fmt.Printf("defining truth through brute force") + + k := 10 + for i, query := range queryVectors { + truths[i] = bruteForce(vectors, query, k) + } + + vectorsJSON, _ := json.Marshal(vectors) + queriesJSON, _ := json.Marshal(queryVectors) + truthsJSON, _ := json.Marshal(truths) + + ioutil.WriteFile("recall_vectors.json", vectorsJSON, 0o644) + ioutil.WriteFile("recall_queries.json", queriesJSON, 0o644) + ioutil.WriteFile("recall_truths.json", truthsJSON, 0o644) +} + +func Normalize(v []float32) []float32 { + var norm float32 + for i := range v { + norm += v[i] * v[i] + } + + norm = float32(math.Sqrt(float64(norm))) + for i := range v { + v[i] = v[i] / norm + } + + return v +} + +func bruteForce(vectors [][]float32, query []float32, k int) []uint64 { + type distanceAndIndex struct { + distance float32 + index uint64 + } + + distances := make([]distanceAndIndex, len(vectors)) + + for i, vec := range vectors { + dist, _ := distancer.NewCosineDistanceProvider().SingleDist(query, vec) + distances[i] = 
distanceAndIndex{ + index: uint64(i), + distance: dist, + } + } + + sort.Slice(distances, func(a, b int) bool { + return distances[a].distance < distances[b].distance + }) + + if len(distances) < k { + k = len(distances) + } + + out := make([]uint64, k) + for i := 0; i < k; i++ { + out[i] = distances[i].index + } + + return out +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/graph_integrity_integration_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/graph_integrity_integration_test.go new file mode 100644 index 0000000000000000000000000000000000000000..518fe7a57e3efab45082f95f0223b70c24e51f62 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/graph_integrity_integration_test.go @@ -0,0 +1,110 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTestSlow || !race + +package hnsw + +import ( + "context" + "fmt" + "math/rand" + "runtime" + "sync" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer" + "github.com/weaviate/weaviate/entities/cyclemanager" + ent "github.com/weaviate/weaviate/entities/vectorindex/hnsw" +) + +func TestGraphIntegrity(t *testing.T) { + ctx := context.Background() + dimensions := 300 + size := 1000 + efConstruction := 128 + maxNeighbors := 64 + + vectors := make([][]float32, size) + var vectorIndex *hnsw + + t.Run("generate random vectors", func(t *testing.T) { + fmt.Printf("generating %d vectors", size) + for i := 0; i < size; i++ { + vector := make([]float32, dimensions) + for j := 0; j < dimensions; j++ { + vector[j] = rand.Float32() + } + vectors[i] = vector + } + }) + + t.Run("importing into hnsw", func(t *testing.T) { + fmt.Printf("importing into hnsw\n") + index, err := New(Config{ + RootPath: "doesnt-matter-as-committlogger-is-mocked-out", + ID: "graphintegrity", + MakeCommitLoggerThunk: MakeNoopCommitLogger, + VectorForIDThunk: func(ctx context.Context, id uint64) ([]float32, error) { + return vectors[int(id)], nil + }, + DistanceProvider: distancer.NewDotProductProvider(), + }, ent.UserConfig{ + MaxConnections: maxNeighbors, + EFConstruction: efConstruction, + }, cyclemanager.NewCallbackGroupNoop(), nil) + require.Nil(t, err) + vectorIndex = index + + workerCount := runtime.GOMAXPROCS(0) + jobsForWorker := make([][][]float32, workerCount) + + for i, vec := range vectors { + workerID := i % workerCount + jobsForWorker[workerID] = append(jobsForWorker[workerID], vec) + } + + wg := &sync.WaitGroup{} + for workerID, jobs := range jobsForWorker { + wg.Add(1) + go func(workerID int, myJobs [][]float32) { + defer wg.Done() + for i, vec := range myJobs { + originalIndex := uint64(i*workerCount) + 
uint64(workerID) + err := vectorIndex.Add(ctx, originalIndex, vec) + require.Nil(t, err) + } + }(workerID, jobs) + } + + wg.Wait() + }) + + for _, node := range vectorIndex.nodes { + if node == nil { + continue + } + + conlen := len(node.connections.GetLayer(0)) + + // it is debatable how much value this test still adds. It used to check + // that a lot of connections are present before we had the heuristic. But + // with the heuristic it's not uncommon that a node's connections get + // reduced to a slow amount of key connections. We have thus set this value + // to 1 to make sure that no nodes are entirely unconnected, but it's + // questionable if this still adds any value at all + requiredMinimum := 1 + assert.True(t, conlen >= requiredMinimum, fmt.Sprintf( + "have %d connections, but want at least %d", conlen, requiredMinimum)) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/helper_for_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/helper_for_test.go new file mode 100644 index 0000000000000000000000000000000000000000..042a61c415900e753386a8de40915d17574aee71 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/helper_for_test.go @@ -0,0 +1,50 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package hnsw + +import ( + "fmt" + "math/rand" + "strings" + "time" +) + +func dumpIndex(index *hnsw, labels ...string) { + if len(labels) > 0 { + fmt.Printf("--------------------------------------------------\n") + fmt.Printf("-- %s\n", strings.Join(labels, ", ")) + } + fmt.Printf("--------------------------------------------------\n") + fmt.Printf("ID: %s\n", index.id) + fmt.Printf("Entrypoint: %d\n", index.entryPointID) + fmt.Printf("Max Level: %d\n", index.currentMaximumLayer) + fmt.Printf("Tombstones %v\n", index.tombstones) + fmt.Printf("\nNodes and Connections:\n") + for _, node := range index.nodes { + if node == nil { + continue + } + + fmt.Printf(" Node %d\n", node.id) + iter := node.connections.Iterator() + for iter.Next() { + level, conns := iter.Current() + fmt.Printf(" Level %d: Connections: %v\n", level, conns) + } + } + + fmt.Printf("--------------------------------------------------\n") +} + +func getRandomSeed() *rand.Rand { + return rand.New(rand.NewSource(time.Now().UnixNano())) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/heuristic.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/heuristic.go new file mode 100644 index 0000000000000000000000000000000000000000..0f32798d07ccbfd141ec0ca8d75bdb06b30e2704 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/heuristic.go @@ -0,0 +1,135 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package hnsw + +import ( + "context" + + "github.com/pkg/errors" + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/adapters/repos/db/priorityqueue" + "github.com/weaviate/weaviate/entities/storobj" +) + +func (h *hnsw) selectNeighborsHeuristic(input *priorityqueue.Queue[any], + max int, denyList helpers.AllowList, +) error { + if input.Len() < max { + return nil + } + + // TODO, if this solution stays we might need something with fewer allocs + ids := make([]uint64, input.Len()) + + closestFirst := h.pools.pqHeuristic.GetMin(input.Len()) + i := uint64(0) + for input.Len() > 0 { + elem := input.Pop() + closestFirst.InsertWithValue(elem.ID, elem.Dist, i) + ids[i] = elem.ID + i++ + } + + var returnList []priorityqueue.Item[uint64] + + if h.compressed.Load() { + bag := h.compressor.NewBag() + for _, id := range ids { + err := bag.Load(context.Background(), id) + if err != nil { + return err + } + } + + returnList = h.pools.pqItemSlice.Get().([]priorityqueue.Item[uint64]) + for closestFirst.Len() > 0 && len(returnList) < max { + curr := closestFirst.Pop() + if denyList != nil && denyList.Contains(curr.ID) { + continue + } + distToQuery := curr.Dist + + good := true + for _, item := range returnList { + peerDist, err := bag.Distance(curr.ID, item.ID) + if err != nil { + return err + } + + if peerDist < distToQuery { + good = false + break + } + } + + if good { + returnList = append(returnList, curr) + } + + } + } else { + + vecs, errs := h.multiVectorForID(context.TODO(), ids) + + returnList = h.pools.pqItemSlice.Get().([]priorityqueue.Item[uint64]) + + for closestFirst.Len() > 0 && len(returnList) < max { + curr := closestFirst.Pop() + if denyList != nil && denyList.Contains(curr.ID) { + continue + } + distToQuery := curr.Dist + + currVec := vecs[curr.Value] + if err := errs[curr.Value]; err != nil { + var e storobj.ErrNotFound + if errors.As(err, &e) { + h.handleDeletedNode(e.DocID, 
"selectNeighborsHeuristic") + continue + } else { + // not a typed error, we can recover from, return with err + return errors.Wrapf(err, + "unrecoverable error for docID %d", curr.ID) + } + } + good := true + for _, item := range returnList { + peerDist, _ := h.distancerProvider.SingleDist(currVec, + vecs[item.Value]) + + if peerDist < distToQuery { + good = false + break + } + } + + if good { + returnList = append(returnList, curr) + } + + } + } + + h.pools.pqHeuristic.Put(closestFirst) + + for _, retElem := range returnList { + input.Insert(retElem.ID, retElem.Dist) + } + + // rewind and return to pool + returnList = returnList[:0] + + //nolint:staticcheck + h.pools.pqItemSlice.Put(returnList) + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/hnsw_stress_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/hnsw_stress_test.go new file mode 100644 index 0000000000000000000000000000000000000000..3c152f1c0450f36dc77e130b95b65e4659c101e3 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/hnsw_stress_test.go @@ -0,0 +1,524 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package hnsw + +import ( + "context" + "encoding/binary" + "fmt" + "io" + "log" + "math" + "math/rand" + "os" + "runtime/pprof" + "sync" + "testing" + "time" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/require" + + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer" + "github.com/weaviate/weaviate/adapters/repos/db/vector/testinghelpers" + "github.com/weaviate/weaviate/entities/cyclemanager" + enterrors "github.com/weaviate/weaviate/entities/errors" + ent "github.com/weaviate/weaviate/entities/vectorindex/hnsw" +) + +const ( + vectorSize = 128 + vectorsPerGoroutine = 100 + parallelGoroutines = 100 + parallelSearchGoroutines = 8 +) + +func idVector(ctx context.Context, id uint64) ([]float32, error) { + vector := make([]float32, vectorSize) + for i := 0; i < vectorSize; i++ { + vector[i] = float32(id) + } + return vector, nil +} + +func idVectorSize(size int) func(ctx context.Context, id uint64) ([]float32, error) { + return func(ctx context.Context, id uint64) ([]float32, error) { + vector := make([]float32, size) + for i := 0; i < size; i++ { + vector[i] = float32(id) + } + return vector, nil + } +} + +func float32FromBytes(bytes []byte) float32 { + bits := binary.LittleEndian.Uint32(bytes) + float := math.Float32frombits(bits) + return float +} + +func int32FromBytes(bytes []byte) int { + return int(binary.LittleEndian.Uint32(bytes)) +} + +func BenchmarkConcurrentSearch(b *testing.B) { + ctx := context.Background() + siftFile := "datasets/ann-benchmarks/sift/sift_base.fvecs" + siftFileQuery := "datasets/ann-benchmarks/sift/sift_query.fvecs" + + _, err2 := os.Stat(siftFileQuery) + if _, err := os.Stat(siftFile); err != nil || err2 != nil { + b.Skip(`Sift data needs to be present.`) + } + + vectors := readSiftFloat(siftFile, 1000000) + vectorsQuery := readSiftFloat(siftFileQuery, 10000) + + index := createEmptyHnswIndexForTests(b, idVector) + // add elements + 
for k, vec := range vectors { + err := index.Add(ctx, uint64(k), vec) + require.Nil(b, err) + } + + // Start profiling + f, err := os.Create("cpu.out") + if err != nil { + b.Fatal(err) + } + if err := pprof.StartCPUProfile(f); err != nil { + b.Fatal(err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + + vectorsPerGoroutineSearch := len(vectorsQuery) / parallelSearchGoroutines + wg := sync.WaitGroup{} + + for k := 0; k < parallelSearchGoroutines; k++ { + wg.Add(1) + k := k + go func() { + goroutineIndex := k * vectorsPerGoroutineSearch + for j := 0; j < vectorsPerGoroutineSearch; j++ { + _, _, err := index.SearchByVector(ctx, vectors[goroutineIndex+j], 0, nil) + require.Nil(b, err) + + } + wg.Done() + }() + } + wg.Wait() + } + // Stop profiling + pprof.StopCPUProfile() +} + +func TestHnswStress(t *testing.T) { + ctx := context.Background() + siftFile := "datasets/ann-benchmarks/siftsmall/siftsmall_base.fvecs" + siftFileQuery := "datasets/ann-benchmarks/siftsmall/sift_query.fvecs" + _, err2 := os.Stat(siftFileQuery) + if _, err := os.Stat(siftFile); err != nil || err2 != nil { + if !*download { + t.Skip(`Sift data needs to be present. +Run test with -download to automatically download the dataset. +Ex: go test -v -run TestHnswStress . 
-download +`) + } + downloadDatasetFile(t, siftFile) + } + vectors := readSiftFloat(siftFile, parallelGoroutines*vectorsPerGoroutine) + vectorsQuery := readSiftFloat(siftFileQuery, parallelGoroutines*vectorsPerGoroutine) + + t.Run("Insert and search and maybe delete", func(t *testing.T) { + for n := 0; n < 1; n++ { // increase if you don't want to reread SIFT for every run + wg := sync.WaitGroup{} + index := createEmptyHnswIndexForTests(t, idVector) + for k := 0; k < parallelGoroutines; k++ { + wg.Add(2) + goroutineIndex := k * vectorsPerGoroutine + go func() { + for i := 0; i < vectorsPerGoroutine; i++ { + + err := index.Add(ctx, uint64(goroutineIndex+i), vectors[goroutineIndex+i]) + require.Nil(t, err) + } + wg.Done() + }() + + go func() { + for i := 0; i < vectorsPerGoroutine; i++ { + for j := 0; j < 5; j++ { // try a couple of times to delete if found + _, dists, err := index.SearchByVector(ctx, vectors[goroutineIndex+i], 0, nil) + require.Nil(t, err) + + if len(dists) > 0 && dists[0] == 0 { + err := index.Delete(uint64(goroutineIndex + i)) + require.Nil(t, err) + break + } else { + continue + } + } + } + wg.Done() + }() + } + wg.Wait() + } + }) + + t.Run("Insert and delete", func(t *testing.T) { + for i := 0; i < 1; i++ { // increase if you don't want to reread SIFT for every run + wg := sync.WaitGroup{} + index := createEmptyHnswIndexForTests(t, idVector) + for k := 0; k < parallelGoroutines; k++ { + wg.Add(1) + goroutineIndex := k * vectorsPerGoroutine + go func() { + for i := 0; i < vectorsPerGoroutine; i++ { + + err := index.Add(ctx, uint64(goroutineIndex+i), vectors[goroutineIndex+i]) + require.Nil(t, err) + err = index.Delete(uint64(goroutineIndex + i)) + require.Nil(t, err) + + } + wg.Done() + }() + + } + wg.Wait() + + } + }) + + t.Run("Concurrent search", func(t *testing.T) { + index := createEmptyHnswIndexForTests(t, idVector) + // add elements + for k, vec := range vectors { + err := index.Add(ctx, uint64(k), vec) + require.Nil(t, err) + } + + 
vectorsPerGoroutineSearch := len(vectorsQuery) / parallelSearchGoroutines + wg := sync.WaitGroup{} + + for i := 0; i < 10; i++ { // increase if you don't want to reread SIFT for every run + for k := 0; k < parallelSearchGoroutines; k++ { + wg.Add(1) + k := k + go func() { + goroutineIndex := k * vectorsPerGoroutineSearch + for j := 0; j < vectorsPerGoroutineSearch; j++ { + _, _, err := index.SearchByVector(ctx, vectors[goroutineIndex+j], 0, nil) + require.Nil(t, err) + + } + wg.Done() + }() + } + } + wg.Wait() + }) + + t.Run("Concurrent deletes", func(t *testing.T) { + for i := 0; i < 10; i++ { // increase if you don't want to reread SIFT for every run + wg := sync.WaitGroup{} + + index := createEmptyHnswIndexForTests(t, idVector) + deleteIds := make([]uint64, 50) + for j := 0; j < len(deleteIds); j++ { + err := index.Add(ctx, uint64(j), vectors[j]) + require.Nil(t, err) + deleteIds[j] = uint64(j) + } + wg.Add(2) + + go func() { + err := index.Delete(deleteIds[25:]...) + require.Nil(t, err) + wg.Done() + }() + go func() { + err := index.Delete(deleteIds[:24]...) 
+ require.Nil(t, err) + wg.Done() + }() + + wg.Wait() + + time.Sleep(time.Microsecond * 100) + index.Lock() + require.NotNil(t, index.nodes[24]) + index.Unlock() + + } + }) + + t.Run("Random operations", func(t *testing.T) { + for i := 0; i < 1; i++ { // increase if you don't want to reread SIFT for every run + index := createEmptyHnswIndexForTests(t, idVector) + + var inserted struct { + sync.Mutex + ids []uint64 + set map[uint64]struct{} + } + inserted.set = make(map[uint64]struct{}) + + claimUnusedID := func() (uint64, bool) { + inserted.Lock() + defer inserted.Unlock() + + if len(inserted.ids) == len(vectors) { + return 0, false + } + + try := 0 + for { + id := uint64(rand.Intn(len(vectors))) + if _, ok := inserted.set[id]; !ok { + inserted.ids = append(inserted.ids, id) + inserted.set[id] = struct{}{} + return id, true + } + + try++ + if try > 50 { + log.Printf("[WARN] tried %d times, retrying...\n", try) + } + } + } + + getInsertedIDs := func(n int) []uint64 { + inserted.Lock() + defer inserted.Unlock() + + if len(inserted.ids) < n { + return nil + } + + if n > len(inserted.ids) { + n = len(inserted.ids) + } + + ids := make([]uint64, n) + copy(ids, inserted.ids[:n]) + + return ids + } + + removeInsertedIDs := func(ids ...uint64) { + inserted.Lock() + defer inserted.Unlock() + + for _, id := range ids { + delete(inserted.set, id) + for i, insertedID := range inserted.ids { + if insertedID == id { + inserted.ids = append(inserted.ids[:i], inserted.ids[i+1:]...) + break + } + } + } + } + + ops := []func(){ + // Add + func() { + id, ok := claimUnusedID() + if !ok { + return + } + + err := index.Add(ctx, id, vectors[id]) + require.Nil(t, err) + }, + // Delete + func() { + // delete 5% of the time + if rand.Int31()%20 == 0 { + return + } + + ids := getInsertedIDs(rand.Intn(100) + 1) + + err := index.Delete(ids...) + require.Nil(t, err) + + removeInsertedIDs(ids...) 
+ }, + // Search + func() { + // search 50% of the time + if rand.Int31()%2 == 0 { + return + } + + id := rand.Intn(len(vectors)) + + _, _, err := index.SearchByVector(ctx, vectors[id], 0, nil) + require.Nil(t, err) + }, + } + + ctx, cancel := context.WithTimeout(context.Background(), 100*time.Second) + defer cancel() + + g, ctx := enterrors.NewErrorGroupWithContextWrapper(logrus.New(), ctx) + + // run parallelGoroutines goroutines + for i := 0; i < parallelGoroutines; i++ { + g.Go(func() error { + for { + select { + case <-ctx.Done(): + return ctx.Err() + default: + ops[rand.Intn(len(ops))]() + } + } + }) + } + + g.Wait() + } + }) +} + +func readSiftFloat(file string, maxObjects int) [][]float32 { + var vectors [][]float32 + + f, err := os.Open(file) + if err != nil { + panic(errors.Wrap(err, "Could not open SIFT file")) + } + defer f.Close() + + fi, err := f.Stat() + if err != nil { + panic(errors.Wrap(err, "Could not get SIFT file properties")) + } + fileSize := fi.Size() + if fileSize < 1000000 { + panic("The file is only " + fmt.Sprint(fileSize) + " bytes long. Did you forgot to install git lfs?") + } + + // The sift data is a binary file containing floating point vectors + // For each entry, the first 4 bytes is the length of the vector (in number of floats, not in bytes) + // which is followed by the vector data with vector length * 4 bytes. 
+ // |-length-vec1 (4bytes)-|-Vec1-data-(4*length-vector-1 bytes)-|-length-vec2 (4bytes)-|-Vec2-data-(4*length-vector-2 bytes)-| + // The vector length needs to be converted from bytes to int + // The vector data needs to be converted from bytes to float + // Note that the vector entries are of type float but are integer numbers eg 2.0 + bytesPerF := 4 + vectorBytes := make([]byte, bytesPerF+vectorSize*bytesPerF) + for i := 0; i >= 0; i++ { + _, err = f.Read(vectorBytes) + if errors.Is(err, io.EOF) { + break + } else if err != nil { + panic(err) + } + if int32FromBytes(vectorBytes[0:bytesPerF]) != vectorSize { + panic("Each vector must have 128 entries.") + } + vectorFloat := make([]float32, 0, vectorSize) + for j := 0; j < vectorSize; j++ { + start := (j + 1) * bytesPerF // first bytesPerF are length of vector + vectorFloat = append(vectorFloat, float32FromBytes(vectorBytes[start:start+bytesPerF])) + } + + vectors = append(vectors, vectorFloat) + + if i >= maxObjects { + break + } + } + if len(vectors) < maxObjects { + panic("Could not load all elements.") + } + + return vectors +} + +func TestConcurrentDelete(t *testing.T) { + ctx := context.Background() + siftFile := "datasets/ann-benchmarks/siftsmall/siftsmall_base.fvecs" + if _, err := os.Stat(siftFile); err != nil { + if !*download { + t.Skip(`Sift data needs to be present. +Run test with -download to automatically download the dataset. +Ex: go test -v -run TestHnswStress . 
-download +`) + } + downloadDatasetFile(t, siftFile) + } + numGoroutines := 10 + numVectors := 10 + vectors := readSiftFloat(siftFile, numGoroutines*numVectors) + + for n := 0; n < 50; n++ { // increase if you don't want to reread SIFT for every run + store := testinghelpers.NewDummyStore(t) + + index, err := New(Config{ + RootPath: "doesnt-matter-as-committlogger-is-mocked-out", + ID: "delete-test", + MakeCommitLoggerThunk: MakeNoopCommitLogger, + DistanceProvider: distancer.NewCosineDistanceProvider(), + VectorForIDThunk: func(ctx context.Context, id uint64) ([]float32, error) { + return vectors[int(id)], nil + }, + TempVectorForIDThunk: TempVectorForIDThunk(vectors), + }, ent.UserConfig{ + MaxConnections: 30, + EFConstruction: 128, + VectorCacheMaxObjects: 100000, + }, cyclemanager.NewCallbackGroupNoop(), store) + require.Nil(t, err) + + var wg sync.WaitGroup + for i := 0; i < numGoroutines; i++ { + wg.Add(1) + goroutineIndex := i * numVectors + go func() { + defer wg.Done() + for j := 0; j < numVectors; j++ { + require.Nil(t, index.Add(ctx, uint64(goroutineIndex+j), vectors[goroutineIndex+j])) + } + }() + } + + for i := 0; i < numGoroutines; i++ { + wg.Add(1) + goroutineIndex := i * numVectors + go func() { + defer wg.Done() + for j := 0; j < numVectors; j++ { + require.Nil(t, index.Delete(uint64(goroutineIndex+j))) + } + }() + } + + for i := 0; i < numGoroutines; i++ { + for i := 0; i < 10; i++ { + err := index.CleanUpTombstonedNodes(neverStop) + require.Nil(t, err) + } + } + wg.Wait() + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/index.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/index.go new file mode 100644 index 0000000000000000000000000000000000000000..9e47f147763850c2eb0bd90704256bbb97b7fdd2 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/index.go @@ -0,0 +1,1026 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | 
__/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package hnsw + +import ( + "context" + "fmt" + "io" + "math" + "math/rand" + "runtime" + "strings" + "sync" + "sync/atomic" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv" + "github.com/weaviate/weaviate/adapters/repos/db/priorityqueue" + "github.com/weaviate/weaviate/adapters/repos/db/vector/cache" + "github.com/weaviate/weaviate/adapters/repos/db/vector/common" + "github.com/weaviate/weaviate/adapters/repos/db/vector/compressionhelpers" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer" + "github.com/weaviate/weaviate/adapters/repos/db/vector/multivector" + "github.com/weaviate/weaviate/entities/cyclemanager" + "github.com/weaviate/weaviate/entities/schema/config" + "github.com/weaviate/weaviate/entities/storobj" + ent "github.com/weaviate/weaviate/entities/vectorindex/hnsw" + "github.com/weaviate/weaviate/usecases/memwatch" +) + +type hnsw struct { + // global lock to prevent concurrent map read/write, etc. 
+ sync.RWMutex + + // certain operations related to deleting, such as finding a new entrypoint + // can only run sequentially, this separate lock helps assuring this without + // blocking the general usage of the hnsw index + deleteLock *sync.Mutex + + tombstoneLock *sync.RWMutex + + // prevents tombstones cleanup to be performed in parallel with index reset operation + resetLock *sync.RWMutex + // indicates whether reset operation occurred or not - if so tombstones cleanup method + // is aborted as it makes no sense anymore + resetCtx context.Context + resetCtxCancel context.CancelFunc + + // indicates the index is shutting down + shutdownCtx context.Context + shutdownCtxCancel context.CancelFunc + + // make sure the very first insert happens just once, otherwise we + // accidentally overwrite previous entrypoints on parallel imports on an + // empty graph + initialInsertOnce *sync.Once + + // Each node should not have more edges than this number + maximumConnections int + + // Nodes in the lowest level have a separate (usually higher) max connection + // limit + maximumConnectionsLayerZero int + + // the current maximum can be smaller than the configured maximum because of + // the exponentially decaying layer function. The initial entry is started at + // layer 0, but this has the chance to grow with every subsequent entry + currentMaximumLayer int + + // this is a point on the highest level, if we insert a new point with a + // higher level it will become the new entry point. 
Note tat the level of + // this point is always currentMaximumLayer + entryPointID uint64 + + // ef parameter used in construction phases, should be higher than ef during querying + efConstruction int + + // ef at search time + ef int64 + + // only used if ef=-1 + efMin int64 + efMax int64 + efFactor int64 + + // on filtered searches with less than n elements, perform flat search + flatSearchCutoff int64 + flatSearchConcurrency int + + levelNormalizer float64 + + nodes []*vertex + + vectorForID common.VectorForID[float32] + TempVectorForIDThunk common.TempVectorForID[float32] + TempMultiVectorForIDThunk common.TempVectorForID[[]float32] + multiVectorForID common.MultiVectorForID + trackDimensionsOnce sync.Once + trackMuveraOnce sync.Once + trackRQOnce sync.Once + dims int32 + + cache cache.Cache[float32] + waitForCachePrefill bool + + commitLog CommitLogger + + // a lookup of current tombstones (i.e. nodes that have received a tombstone, + // but have not been cleaned up yet) Cleanup is the process of removal of all + // outgoing edges to the tombstone as well as deleting the tombstone itself. + // This process should happen periodically. + tombstones map[uint64]struct{} + + tombstoneCleanupCallbackCtrl cyclemanager.CycleCallbackCtrl + + // // for distributed spike, can be used to call a insertExternal on a different graph + // insertHook func(node, targetLevel int, neighborsAtLevel map[int][]uint32) + + id string + rootPath string + + logger logrus.FieldLogger + distancerProvider distancer.Provider + multiDistancerProvider distancer.Provider + pools *pools + + forbidFlat bool // mostly used in testing scenarios where we want to use the index even in scenarios where we typically wouldn't + + metrics *Metrics + insertMetrics *insertMetrics + + randFunc func() float64 // added to temporarily get rid on flakiness in tombstones related tests. 
to be removed after fixing WEAVIATE-179 + + // The deleteVsInsertLock makes sure that there are no concurrent delete and + // insert operations happening. It uses an RW-Mutex with: + // + // RLock -> Insert operations, this means any number of import operations can + // happen concurrently. + // + // Lock -> Delete operation. This means only a single delete operation can + // occur at a time, no insert operation can occur simultaneously with a + // delete. Since the delete is cheap (just marking the node as deleted), the + // single-threadedness of deletes is not a big problem. + // + // This lock was introduced as part of + // https://github.com/weaviate/weaviate/issues/2194 + // + // See + // https://github.com/weaviate/weaviate/pull/2191#issuecomment-1242726787 + // where we ran performance tests to make sure introducing this lock has no + // negative impact on performance. + deleteVsInsertLock sync.RWMutex + + compressed atomic.Bool + doNotRescore bool + acornSearch atomic.Bool + acornFilterRatio float64 + + disableSnapshots bool + snapshotOnStartup bool + + compressor compressionhelpers.VectorCompressor + pqConfig ent.PQConfig + bqConfig ent.BQConfig + sqConfig ent.SQConfig + rqConfig ent.RQConfig + rqActive bool + // rescoring compressed vectors is disk-bound. On cold starts, we cannot + // rescore sequentially, as that would take very long. This setting allows us + // to define the rescoring concurrency. 
+ rescoreConcurrency int + + compressActionLock *sync.RWMutex + className string + shardName string + VectorForIDThunk common.VectorForID[float32] + MultiVectorForIDThunk common.VectorForID[[]float32] + shardedNodeLocks *common.ShardedRWLocks + store *lsmkv.Store + + allocChecker memwatch.AllocChecker + tombstoneCleanupRunning atomic.Bool + + visitedListPoolMaxSize int + + // only used for multivector mode + multivector atomic.Bool + muvera atomic.Bool + muveraEncoder *multivector.MuveraEncoder + docIDVectors map[uint64][]uint64 + vecIDcounter uint64 + maxDocID uint64 +} + +type CommitLogger interface { + ID() string + AddNode(node *vertex) error + SetEntryPointWithMaxLayer(id uint64, level int) error + AddLinkAtLevel(nodeid uint64, level int, target uint64) error + ReplaceLinksAtLevel(nodeid uint64, level int, targets []uint64) error + AddTombstone(nodeid uint64) error + RemoveTombstone(nodeid uint64) error + DeleteNode(nodeid uint64) error + ClearLinks(nodeid uint64) error + ClearLinksAtLevel(nodeid uint64, level uint16) error + Reset() error + Drop(ctx context.Context) error + Flush() error + Shutdown(ctx context.Context) error + RootPath() string + SwitchCommitLogs(bool) error + AddPQCompression(compressionhelpers.PQData) error + AddSQCompression(compressionhelpers.SQData) error + AddMuvera(multivector.MuveraData) error + AddRQCompression(compressionhelpers.RQData) error + AddBRQCompression(compressionhelpers.BRQData) error + InitMaintenance() + + CreateSnapshot() (bool, int64, error) + CreateAndLoadSnapshot() (*DeserializationResult, int64, error) + LoadSnapshot() (*DeserializationResult, int64, error) +} + +type BufferedLinksLogger interface { + AddLinkAtLevel(nodeid uint64, level int, target uint64) error + ReplaceLinksAtLevel(nodeid uint64, level int, targets []uint64) error + Close() error // Close should Flush and Close +} + +type MakeCommitLogger func() (CommitLogger, error) + +// New creates a new HNSW index, the commit logger is provided through a 
thunk +// (a function which can be deferred). This is because creating a commit logger +// opens files for writing. However, checking whether a file is present, is a +// criterium for the index to see if it has to recover from disk or if its a +// truly new index. So instead the index is initialized, with un-biased disk +// checks first and only then is the commit logger created +func New(cfg Config, uc ent.UserConfig, + tombstoneCallbacks cyclemanager.CycleCallbackGroup, store *lsmkv.Store, +) (*hnsw, error) { + if err := cfg.Validate(); err != nil { + return nil, errors.Wrap(err, "invalid config") + } + + if cfg.Logger == nil { + logger := logrus.New() + logger.Out = io.Discard + cfg.Logger = logger + } + + normalizeOnRead := cfg.DistanceProvider.Type() == "cosine-dot" + + var vectorCache cache.Cache[float32] + + var muveraEncoder *multivector.MuveraEncoder + if uc.Multivector.Enabled && !uc.Multivector.MuveraConfig.Enabled { + vectorCache = cache.NewShardedMultiFloat32LockCache(cfg.MultiVectorForIDThunk, uc.VectorCacheMaxObjects, + cfg.Logger, normalizeOnRead, cache.DefaultDeletionInterval, cfg.AllocChecker) + } else { + if uc.Multivector.MuveraConfig.Enabled { + muveraEncoder = multivector.NewMuveraEncoder(uc.Multivector.MuveraConfig, store) + err := store.CreateOrLoadBucket( + context.Background(), + cfg.ID+"_muvera_vectors", + lsmkv.WithStrategy(lsmkv.StrategyReplace), + lsmkv.WithWriteSegmentInfoIntoFileName(cfg.WriteSegmentInfoIntoFileName), + lsmkv.WithWriteMetadata(cfg.WriteMetadataFilesEnabled), + ) + if err != nil { + return nil, errors.Wrapf(err, "Create or load bucket (muvera store)") + } + muveraVectorForID := func(ctx context.Context, id uint64) ([]float32, error) { + return muveraEncoder.GetMuveraVectorForID(id, cfg.ID+"_muvera_vectors") + } + vectorCache = cache.NewShardedFloat32LockCache( + muveraVectorForID, cfg.MultiVectorForIDThunk, uc.VectorCacheMaxObjects, 1, cfg.Logger, + normalizeOnRead, cache.DefaultDeletionInterval, cfg.AllocChecker) + + 
} else { + vectorCache = cache.NewShardedFloat32LockCache(cfg.VectorForIDThunk, cfg.MultiVectorForIDThunk, uc.VectorCacheMaxObjects, 1, cfg.Logger, + normalizeOnRead, cache.DefaultDeletionInterval, cfg.AllocChecker) + } + } + resetCtx, resetCtxCancel := context.WithCancel(context.Background()) + shutdownCtx, shutdownCtxCancel := context.WithCancel(context.Background()) + index := &hnsw{ + maximumConnections: uc.MaxConnections, + + // inspired by original paper and other implementations + maximumConnectionsLayerZero: 2 * uc.MaxConnections, + + // inspired by c++ implementation + levelNormalizer: 1 / math.Log(float64(uc.MaxConnections)), + efConstruction: uc.EFConstruction, + flatSearchCutoff: int64(uc.FlatSearchCutoff), + flatSearchConcurrency: max(cfg.FlatSearchConcurrency, 1), + acornFilterRatio: cfg.AcornFilterRatio, + disableSnapshots: cfg.DisableSnapshots, + snapshotOnStartup: cfg.SnapshotOnStartup, + nodes: make([]*vertex, cache.InitialSize), + cache: vectorCache, + waitForCachePrefill: cfg.WaitForCachePrefill, + vectorForID: vectorCache.Get, + multiVectorForID: vectorCache.MultiGet, + id: cfg.ID, + rootPath: cfg.RootPath, + tombstones: map[uint64]struct{}{}, + logger: cfg.Logger, + distancerProvider: cfg.DistanceProvider, + deleteLock: &sync.Mutex{}, + tombstoneLock: &sync.RWMutex{}, + resetLock: &sync.RWMutex{}, + resetCtx: resetCtx, + resetCtxCancel: resetCtxCancel, + shutdownCtx: shutdownCtx, + shutdownCtxCancel: shutdownCtxCancel, + initialInsertOnce: &sync.Once{}, + + ef: int64(uc.EF), + efMin: int64(uc.DynamicEFMin), + efMax: int64(uc.DynamicEFMax), + efFactor: int64(uc.DynamicEFFactor), + + metrics: NewMetrics(cfg.PrometheusMetrics, cfg.ClassName, cfg.ShardName), + shardName: cfg.ShardName, + + randFunc: rand.Float64, + compressActionLock: &sync.RWMutex{}, + className: cfg.ClassName, + VectorForIDThunk: cfg.VectorForIDThunk, + MultiVectorForIDThunk: cfg.MultiVectorForIDThunk, + TempVectorForIDThunk: cfg.TempVectorForIDThunk, + 
TempMultiVectorForIDThunk: cfg.TempMultiVectorForIDThunk, + pqConfig: uc.PQ, + bqConfig: uc.BQ, + sqConfig: uc.SQ, + rqConfig: uc.RQ, + rescoreConcurrency: 2 * runtime.GOMAXPROCS(0), // our default for IO-bound activties + shardedNodeLocks: common.NewDefaultShardedRWLocks(), + + store: store, + allocChecker: cfg.AllocChecker, + visitedListPoolMaxSize: cfg.VisitedListPoolMaxSize, + + docIDVectors: make(map[uint64][]uint64), + muveraEncoder: muveraEncoder, + } + index.acornSearch.Store(uc.FilterStrategy == ent.FilterStrategyAcorn) + + index.multivector.Store(uc.Multivector.Enabled) + index.muvera.Store(uc.Multivector.MuveraConfig.Enabled) + + if uc.BQ.Enabled { + var err error + if uc.Multivector.Enabled && !uc.Multivector.MuveraConfig.Enabled { + index.compressor, err = compressionhelpers.NewBQMultiCompressor( + index.distancerProvider, uc.VectorCacheMaxObjects, cfg.Logger, store, + cfg.AllocChecker) + } else { + index.compressor, err = compressionhelpers.NewBQCompressor( + index.distancerProvider, uc.VectorCacheMaxObjects, cfg.Logger, store, + cfg.AllocChecker) + } + if err != nil { + return nil, err + } + index.compressed.Store(true) + index.cache.Drop() + index.cache = nil + } + + if uc.RQ.Enabled { + index.rqActive = true + } + + if uc.Multivector.Enabled { + index.multiDistancerProvider = distancer.NewDotProductProvider() + if !uc.Multivector.MuveraConfig.Enabled { + err := index.store.CreateOrLoadBucket( + context.Background(), + cfg.ID+"_mv_mappings", + lsmkv.WithStrategy(lsmkv.StrategyReplace), + lsmkv.WithLazySegmentLoading(cfg.LazyLoadSegments), + lsmkv.WithWriteSegmentInfoIntoFileName(cfg.WriteSegmentInfoIntoFileName), + lsmkv.WithWriteMetadata(cfg.WriteMetadataFilesEnabled), + ) + if err != nil { + return nil, errors.Wrapf(err, "Create or load bucket (multivector store)") + } + } + } + + if err := index.init(cfg); err != nil { + return nil, errors.Wrapf(err, "init index %q", index.id) + } + + // TODO common_cycle_manager move to poststartup? 
+ id := strings.Join([]string{ + "hnsw", "tombstone_cleanup", + index.className, index.shardName, index.id, + }, "/") + index.tombstoneCleanupCallbackCtrl = tombstoneCallbacks.Register(id, index.tombstoneCleanup) + index.insertMetrics = newInsertMetrics(index.metrics) + + return index, nil +} + +// TODO: use this for incoming replication +// func (h *hnsw) insertFromExternal(nodeId, targetLevel int, neighborsAtLevel map[int][]uint32) { +// defer m.addBuildingReplication(time.Now()) + +// // randomly introduce up to 50ms delay to account for network slowness +// time.Sleep(time.Duration(rand.Intn(500)) * time.Millisecond) + +// var node *hnswVertex +// h.RLock() +// total := len(h.nodes) +// if total > nodeId { +// node = h.nodes[nodeId] // it could be that we implicitly added this node already because it was referenced +// } +// h.RUnlock() + +// if node == nil { +// node = &hnswVertex{ +// id: nodeId, +// connections: make(map[int][]uint32), +// level: targetLevel, +// } +// } else { +// node.level = targetLevel +// } + +// if total == 0 { +// h.Lock() +// h.commitLog.SetEntryPointWithMaxLayer(node.id, 0) +// h.entryPointID = node.id +// h.currentMaximumLayer = 0 +// node.connections = map[int][]uint32{} +// node.level = 0 +// // h.nodes = make([]*hnswVertex, 100000) +// h.commitLog.AddNode(node) +// h.nodes[node.id] = node +// h.Unlock() +// return +// } + +// currentMaximumLayer := h.currentMaximumLayer +// h.Lock() +// h.nodes[nodeId] = node +// h.commitLog.AddNode(node) +// h.Unlock() + +// for level := min(targetLevel, currentMaximumLayer); level >= 0; level-- { +// neighbors := neighborsAtLevel[level] + +// for _, neighborID := range neighbors { +// h.RLock() +// neighbor := h.nodes[neighborID] +// if neighbor == nil { +// // due to everything being parallel it could be that the linked neighbor +// // doesn't exist yet +// h.nodes[neighborID] = &hnswVertex{ +// id: int(neighborID), +// connections: make(map[int][]uint32), +// } +// neighbor = 
h.nodes[neighborID] +// } +// h.RUnlock() + +// neighbor.linkAtLevel(level, uint32(nodeId), h.commitLog) +// node.linkAtLevel(level, uint32(neighbor.id), h.commitLog) + +// neighbor.RLock() +// currentConnections := neighbor.connections[level] +// neighbor.RUnlock() + +// maximumConnections := h.maximumConnections +// if level == 0 { +// maximumConnections = h.maximumConnectionsLayerZero +// } + +// if len(currentConnections) <= maximumConnections { +// // nothing to do, skip +// continue +// } + +// // TODO: support both neighbor selection algos +// updatedConnections := h.selectNeighborsSimpleFromId(nodeId, currentConnections, maximumConnections) + +// neighbor.Lock() +// h.commitLog.ReplaceLinksAtLevel(neighbor.id, level, updatedConnections) +// neighbor.connections[level] = updatedConnections +// neighbor.Unlock() +// } +// } + +// if targetLevel > h.currentMaximumLayer { +// h.Lock() +// h.commitLog.SetEntryPointWithMaxLayer(nodeId, targetLevel) +// h.entryPointID = nodeId +// h.currentMaximumLayer = targetLevel +// h.Unlock() +// } + +// } + +func (h *hnsw) findBestEntrypointForNode(ctx context.Context, currentMaxLevel, targetLevel int, + entryPointID uint64, nodeVec []float32, distancer compressionhelpers.CompressorDistancer, +) (uint64, error) { + // in case the new target is lower than the current max, we need to search + // each layer for a better candidate and update the candidate + for level := currentMaxLevel; level > targetLevel; level-- { + eps := priorityqueue.NewMin[any](1) + var dist float32 + var err error + if h.compressed.Load() { + dist, err = distancer.DistanceToNode(entryPointID) + } else { + dist, err = h.distToNode(distancer, entryPointID, nodeVec) + } + + var e storobj.ErrNotFound + if errors.As(err, &e) { + h.handleDeletedNode(e.DocID, "findBestEntrypointForNode") + continue + } + if err != nil { + return 0, errors.Wrapf(err, + "calculate distance between insert node and entry point at level %d", level) + } + + eps.Insert(entryPointID, 
dist)
		res, err := h.searchLayerByVectorWithDistancer(ctx, nodeVec, eps, 1, level, nil, distancer)
		if err != nil {
			return 0,
				errors.Wrapf(err, "update candidate: search layer at level %d", level)
		}
		if res.Len() > 0 {
			// if we could find a new entrypoint, use it
			// in case everything was tombstoned, stick with the existing one
			elem := res.Pop()
			n := h.nodeByID(elem.ID)
			if n != nil && !n.isUnderMaintenance() {
				// but not if the entrypoint is under maintenance
				entryPointID = elem.ID
			}
		}

		// return the result queue to the shared pool for reuse
		h.pools.pqResults.Put(res)
	}

	return entryPointID, nil
}

// distBetweenNodes returns the distance between the stored vectors of nodes a
// and b. When compression is active (h.compressed), the distance is computed
// on the compressed representations via h.compressor; otherwise both vectors
// are fetched through h.vectorForID and passed to the distancer provider.
//
// If either vector can no longer be found (storobj.ErrNotFound), the node is
// reported via handleDeletedNode and the method returns (0, nil) — callers
// receive a zero distance rather than an error for deleted nodes.
func (h *hnsw) distBetweenNodes(a, b uint64) (float32, error) {
	if h.compressed.Load() {
		dist, err := h.compressor.DistanceBetweenCompressedVectorsFromIDs(context.Background(), a, b)
		if err != nil {
			return 0, err
		}

		return dist, nil
	}

	// TODO: introduce single search/transaction context instead of spawning new
	// ones
	vecA, errA := h.vectorForID(context.Background(), a)

	if errA != nil {
		var e storobj.ErrNotFound
		if errors.As(errA, &e) {
			h.handleDeletedNode(e.DocID, "distBetweenNodes")
			return 0, nil
		}
		// not a typed error, we can recover from, return with err
		return 0, errors.Wrapf(errA,
			"could not get vector of object at docID %d", a)
	}

	if len(vecA) == 0 {
		return 0, fmt.Errorf("got a nil or zero-length vector at docID %d", a)
	}

	vecB, errB := h.vectorForID(context.Background(), b)

	if errB != nil {
		var e storobj.ErrNotFound
		if errors.As(errB, &e) {
			h.handleDeletedNode(e.DocID, "distBetweenNodes")
			return 0, nil
		}
		// not a typed error, we can recover from, return with err
		return 0, errors.Wrapf(errB,
			"could not get vector of object at docID %d", b)
	}

	if len(vecB) == 0 {
		return 0, fmt.Errorf("got a nil or zero-length vector at docID %d", b)
	}

	return h.distancerProvider.SingleDist(vecA, vecB)
}

// distToNode returns the distance between the stored vector of the given node
// and the query vector vecB, using the supplied compressed-vector distancer
// when compression is active. Like distBetweenNodes, a deleted node yields
// (0, nil) rather than an error.
func (h *hnsw) distToNode(distancer compressionhelpers.CompressorDistancer, node uint64, vecB []float32)
(float32, error) { + if h.compressed.Load() { + dist, err := distancer.DistanceToNode(node) + if err != nil { + return 0, err + } + + return dist, nil + } + + // TODO: introduce single search/transaction context instead of spawning new + // ones + var vecA []float32 + var err error + vecA, err = h.vectorForID(context.Background(), node) + if err != nil { + var e storobj.ErrNotFound + if errors.As(err, &e) { + h.handleDeletedNode(e.DocID, "distBetweenNodeAndVec") + return 0, nil + } + // not a typed error, we can recover from, return with err + return 0, errors.Wrapf(err, + "could not get vector of object at docID %d", node) + } + + if len(vecA) == 0 { + return 0, fmt.Errorf( + "got a nil or zero-length vector at docID %d", node) + } + + if len(vecB) == 0 { + return 0, fmt.Errorf( + "got a nil or zero-length vector as search vector") + } + + return h.distancerProvider.SingleDist(vecA, vecB) +} + +func (h *hnsw) isEmpty() bool { + h.RLock() + defer h.RUnlock() + h.shardedNodeLocks.RLock(h.entryPointID) + defer h.shardedNodeLocks.RUnlock(h.entryPointID) + + return h.isEmptyUnlocked() +} + +func (h *hnsw) isEmptyUnlocked() bool { + return h.entryPointID > uint64(len(h.nodes)) || h.nodes[h.entryPointID] == nil +} + +func (h *hnsw) nodeByID(id uint64) *vertex { + h.RLock() + defer h.RUnlock() + + if id >= uint64(len(h.nodes)) { + // See https://github.com/weaviate/weaviate/issues/1838 for details. 
+ // This could be after a crash recovery when the object store is "further + // ahead" than the hnsw index and we receive a delete request + return nil + } + + h.shardedNodeLocks.RLock(id) + defer h.shardedNodeLocks.RUnlock(id) + + return h.nodes[id] +} + +func (h *hnsw) Drop(ctx context.Context) error { + // cancel tombstone cleanup goroutine + if err := h.tombstoneCleanupCallbackCtrl.Unregister(ctx); err != nil { + return errors.Wrap(err, "hnsw drop") + } + + if h.compressed.Load() { + err := h.compressor.Drop() + if err != nil { + return fmt.Errorf("failed to shutdown compressed store") + } + } else { + // cancel vector cache goroutine + h.cache.Drop() + } + + // cancel commit logger last, as the tombstone cleanup cycle might still + // write while it's still running + err := h.commitLog.Drop(ctx) + if err != nil { + return errors.Wrap(err, "commit log drop") + } + + return nil +} + +func (h *hnsw) Shutdown(ctx context.Context) error { + h.shutdownCtxCancel() + + if err := h.commitLog.Shutdown(ctx); err != nil { + return errors.Wrap(err, "hnsw shutdown") + } + + if err := h.tombstoneCleanupCallbackCtrl.Unregister(ctx); err != nil { + return errors.Wrap(err, "hnsw shutdown") + } + + if h.compressed.Load() { + err := h.compressor.Drop() + if err != nil { + return errors.Wrap(err, "hnsw shutdown") + } + } else { + h.cache.Drop() + } + + return nil +} + +func (h *hnsw) Flush() error { + return h.commitLog.Flush() +} + +func (h *hnsw) Entrypoint() uint64 { + h.RLock() + defer h.RUnlock() + + return h.entryPointID +} + +func (h *hnsw) ContainsDoc(docID uint64) bool { + if h.Multivector() && !h.muvera.Load() { + h.RLock() + vecIds, exists := h.docIDVectors[docID] + h.RUnlock() + return exists && !h.hasTombstones(vecIds) + } + + h.RLock() + h.shardedNodeLocks.RLock(docID) + exists := len(h.nodes) > int(docID) && h.nodes[docID] != nil + h.shardedNodeLocks.RUnlock(docID) + h.RUnlock() + + return exists && !h.hasTombstone(docID) +} + +func (h *hnsw) Iterate(fn func(docID 
uint64) bool) { + if h.Multivector() && !h.muvera.Load() { + h.iterateMulti(fn) + return + } + h.iterate(fn) +} + +func (h *hnsw) iterate(fn func(docID uint64) bool) { + var id uint64 + + for { + if h.shutdownCtx.Err() != nil { + return + } + if h.resetCtx.Err() != nil { + return + } + + h.RLock() + h.shardedNodeLocks.RLock(id) + stop := int(id) >= len(h.nodes) + exists := !stop && h.nodes[id] != nil + h.shardedNodeLocks.RUnlock(id) + h.RUnlock() + + if stop { + return + } + + if exists && !h.hasTombstone(id) { + if !fn(id) { + return + } + } + + id++ + } +} + +func (h *hnsw) iterateMulti(fn func(docID uint64) bool) { + h.RLock() + indexedDocIDs := make([]uint64, 0, len(h.docIDVectors)) + for docID := range h.docIDVectors { + indexedDocIDs = append(indexedDocIDs, docID) + } + h.RUnlock() + + for _, docID := range indexedDocIDs { + if h.shutdownCtx.Err() != nil || h.resetCtx.Err() != nil { + return + } + + h.RLock() + nodes, ok := h.docIDVectors[docID] + h.RUnlock() + + if ok && !h.hasTombstones(nodes) { + if !fn(docID) { + return + } + } + } +} + +func (h *hnsw) ShouldUpgrade() (bool, int) { + if h.sqConfig.Enabled { + return h.sqConfig.Enabled, h.sqConfig.TrainingLimit + } + if h.rqConfig.Enabled { + return h.rqConfig.Enabled, 1 + } + return h.pqConfig.Enabled, h.pqConfig.TrainingLimit +} + +func (h *hnsw) ShouldCompressFromConfig(config config.VectorIndexConfig) (bool, int) { + hnswConfig := config.(ent.UserConfig) + if hnswConfig.SQ.Enabled { + return hnswConfig.SQ.Enabled, hnswConfig.SQ.TrainingLimit + } + if hnswConfig.RQ.Enabled { + return hnswConfig.RQ.Enabled, 1 + } + return hnswConfig.PQ.Enabled, hnswConfig.PQ.TrainingLimit +} + +func (h *hnsw) Compressed() bool { + return h.compressed.Load() +} + +func (h *hnsw) Multivector() bool { + return h.multivector.Load() +} + +func (h *hnsw) Upgraded() bool { + return h.Compressed() +} + +func (h *hnsw) AlreadyIndexed() uint64 { + return uint64(h.cache.CountVectors()) +} + +func (h *hnsw) normalizeVec(vec 
[]float32) []float32 { + if h.distancerProvider.Type() == "cosine-dot" { + // cosine-dot requires normalized vectors, as the dot product and cosine + // similarity are only identical if the vector is normalized + return distancer.Normalize(vec) + } + return vec +} + +func (h *hnsw) normalizeVecs(vecs [][]float32) [][]float32 { + if h.distancerProvider.Type() == "cosine-dot" { + normalized := make([][]float32, len(vecs)) + for i, vec := range vecs { + normalized[i] = distancer.Normalize(vec) + } + return normalized + } + return vecs +} + +func IsHNSWIndex(index any) bool { + _, ok := index.(*hnsw) + return ok +} + +func AsHNSWIndex(index any) Index { + h, _ := index.(*hnsw) + return h +} + +// This interface exposes public methods of the HNSW index +// that are not part of the VectorIndex interface. +// It is a workaround to avoid circular dependencies. +type Index interface { + CleanUpTombstonedNodes(shouldAbort cyclemanager.ShouldAbortCallback) error +} + +type nodeLevel struct { + nodeId uint64 + level int +} + +func (h *hnsw) calculateUnreachablePoints() []uint64 { + h.RLock() + defer h.RUnlock() + + visitedPairs := make(map[nodeLevel]bool) + candidateList := []nodeLevel{{h.entryPointID, h.currentMaximumLayer}} + + for len(candidateList) > 0 { + currentNode := candidateList[len(candidateList)-1] + candidateList = candidateList[:len(candidateList)-1] + if !visitedPairs[currentNode] { + visitedPairs[currentNode] = true + h.shardedNodeLocks.RLock(currentNode.nodeId) + node := h.nodes[currentNode.nodeId] + if node != nil { + node.Lock() + neighbors := node.connectionsAtLowerLevelsNoLock(currentNode.level, visitedPairs) + node.Unlock() + candidateList = append(candidateList, neighbors...) 
+ } + h.shardedNodeLocks.RUnlock(currentNode.nodeId) + } + } + + visitedNodes := make(map[uint64]bool, len(visitedPairs)) + for k, v := range visitedPairs { + if v { + visitedNodes[k.nodeId] = true + } + } + + unvisitedNodes := []uint64{} + for i := 0; i < len(h.nodes); i++ { + var id uint64 + h.shardedNodeLocks.RLock(uint64(i)) + if h.nodes[i] != nil { + id = h.nodes[i].id + } + h.shardedNodeLocks.RUnlock(uint64(i)) + if id == 0 { + continue + } + if !visitedNodes[uint64(i)] { + unvisitedNodes = append(unvisitedNodes, id) + } + + } + return unvisitedNodes +} + +type HnswStats struct { + Dimensions int32 `json:"dimensions"` + EntryPointID uint64 `json:"entryPointID"` + DistributionLayers map[int]uint `json:"distributionLayers"` + UnreachablePoints []uint64 `json:"unreachablePoints"` + NumTombstones int `json:"numTombstones"` + CacheSize int32 `json:"cacheSize"` + Compressed bool `json:"compressed"` + CompressorStats compressionhelpers.CompressionStats `json:"compressionStats"` + CompressionType string `json:"compressionType"` +} + +func (s *HnswStats) IndexType() common.IndexType { + return common.IndexTypeHNSW +} + +func (h *hnsw) Stats() (*HnswStats, error) { + h.RLock() + defer h.RUnlock() + distributionLayers := map[int]uint{} + + for _, node := range h.nodes { + func() { + if node == nil { + return + } + node.Lock() + defer node.Unlock() + l := node.level + if l == 0 && node.connections.Layers() == 0 { + return + } + c, ok := distributionLayers[l] + if !ok { + distributionLayers[l] = 0 + } + + distributionLayers[l] = c + 1 + }() + } + + stats := HnswStats{ + Dimensions: h.dims, + EntryPointID: h.entryPointID, + DistributionLayers: distributionLayers, + UnreachablePoints: h.calculateUnreachablePoints(), + NumTombstones: len(h.tombstones), + CacheSize: h.cache.Len(), + Compressed: h.compressed.Load(), + } + + if stats.Compressed { + stats.CompressorStats = h.compressor.Stats() + } else { + stats.CompressorStats = compressionhelpers.UncompressedStats{} + } + + 
stats.CompressionType = stats.CompressorStats.CompressionType() + + return &stats, nil +} + +func (h *hnsw) Type() common.IndexType { + return common.IndexTypeHNSW +} + +func (h *hnsw) CompressionStats() compressionhelpers.CompressionStats { + if h.compressed.Load() { + return h.compressor.Stats() + } + return compressionhelpers.UncompressedStats{} +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/index_corrupt_commitlogs_integration_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/index_corrupt_commitlogs_integration_test.go new file mode 100644 index 0000000000000000000000000000000000000000..0d3689c6c7cce59ed7bf40fb989deb03a2aa0cd7 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/index_corrupt_commitlogs_integration_test.go @@ -0,0 +1,132 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest +// +build integrationTest + +package hnsw + +import ( + "context" + "fmt" + "os" + "path" + "testing" + + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer" + "github.com/weaviate/weaviate/adapters/repos/db/vector/testinghelpers" + "github.com/weaviate/weaviate/entities/cyclemanager" + hnswent "github.com/weaviate/weaviate/entities/vectorindex/hnsw" +) + +func TestStartupWithCorruptCondenseFiles(t *testing.T) { + ctx := context.Background() + rootPath := t.TempDir() + + logger, _ := test.NewNullLogger() + _, err := NewCommitLogger(rootPath, "corrupt_test", logger, + cyclemanager.NewCallbackGroupNoop()) + require.Nil(t, err) + + data := [][]float32{ + {0.1, 0.2}, + {0.12, 0.2}, + {0.13, 0.2}, + {0.14, 0.2}, + {0.15, 0.2}, + {0.16, 0.2}, + {0.16, 0.2}, + {0.17, 0.2}, + } + + var index *hnsw + + t.Run("set up an index with the specified commit logger", func(t *testing.T) { + idx, err := New(Config{ + MakeCommitLoggerThunk: func() (CommitLogger, error) { + return NewCommitLogger(rootPath, "corrupt_test", logger, + cyclemanager.NewCallbackGroupNoop()) + }, + ID: "corrupt_test", + RootPath: rootPath, + DistanceProvider: distancer.NewCosineDistanceProvider(), + Logger: logger, + VectorForIDThunk: func(ctx context.Context, id uint64) ([]float32, error) { + return data[int(id)], nil + }, + }, hnswent.UserConfig{ + MaxConnections: 100, + EFConstruction: 100, + CleanupIntervalSeconds: 0, + }, cyclemanager.NewCallbackGroupNoop(), testinghelpers.NewDummyStore(t)) + require.Nil(t, err) + index = idx + }) + + t.Run("add data", func(t *testing.T) { + for i, vec := range data { + err := index.Add(ctx, uint64(i), vec) + require.Nil(t, err) + } + }) + + index.Flush() + + t.Run("create a corrupt commit log file without deleting the original", + func(t *testing.T) { + input, ok, 
err := getCurrentCommitLogFileName(commitLogDirectory(rootPath, + "corrupt_test")) + require.Nil(t, err) + require.True(t, ok) + + f, err := os.Create(path.Join(commitLogDirectory(rootPath, "corrupt_test"), + fmt.Sprintf("%s.condensed", input))) + require.Nil(t, err) + + // write random non-sense to make sure the file is corrupt + _, err = f.Write([]uint8{0xa8, 0x07, 0x34, 0x77, 0xf8, 0xff}) + require.Nil(t, err) + f.Close() + }) + + t.Run("destroy the old index", func(t *testing.T) { + // kill the index + index = nil + }) + + t.Run("create a new one from the disk files", func(t *testing.T) { + idx, err := New(Config{ + MakeCommitLoggerThunk: MakeNoopCommitLogger, // no longer need a real one + ID: "corrupt_test", + RootPath: rootPath, + DistanceProvider: distancer.NewCosineDistanceProvider(), + Logger: logger, + VectorForIDThunk: func(ctx context.Context, id uint64) ([]float32, error) { + return data[int(id)], nil + }, + }, hnswent.UserConfig{ + MaxConnections: 100, + EFConstruction: 100, + CleanupIntervalSeconds: 0, + }, cyclemanager.NewCallbackGroupNoop(), testinghelpers.NewDummyStore(t)) + require.Nil(t, err) + index = idx + }) + + t.Run("verify querying works", func(t *testing.T) { + res, _, err := index.SearchByVector(ctx, []float32{0.08, 0.08}, 100, nil) + require.Nil(t, err) + assert.Len(t, res, 8) + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/index_slowdown_bug_intergration_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/index_slowdown_bug_intergration_test.go new file mode 100644 index 0000000000000000000000000000000000000000..b9c5d9bb11ccabf454e7009ca38416b21446f40f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/index_slowdown_bug_intergration_test.go @@ -0,0 +1,118 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| 
+// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTestBug +// +build integrationTestBug + +package hnsw + +import ( + "context" + "fmt" + "math" + "math/rand" + "runtime" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer" + "github.com/weaviate/weaviate/adapters/repos/db/vector/testinghelpers" +) + +func Normalize(v []float32) []float32 { + var norm float32 + for i := range v { + norm += v[i] * v[i] + } + + norm = float32(math.Sqrt(float64(norm))) + for i := range v { + v[i] = v[i] / norm + } + + return v +} + +func TestSlowDownBugAtHighEF(t *testing.T) { + dimensions := 256 + size := 25000 + efConstruction := 2000 + maxNeighbors := 100 + + vectors := make([][]float32, size) + var vectorIndex *hnsw + + t.Run("generate random vectors", func(t *testing.T) { + fmt.Printf("generating %d vectors", size) + for i := 0; i < size; i++ { + vector := make([]float32, dimensions) + for j := 0; j < dimensions; j++ { + vector[j] = rand.Float32() + } + vectors[i] = Normalize(vector) + } + fmt.Printf("done\n") + }) + + t.Run("importing into hnsw", func(t *testing.T) { + fmt.Printf("importing into hnsw\n") + index, err := New(Config{ + RootPath: "doesnt-matter-as-committlogger-is-mocked-out", + ID: "recallbenchmark", + MakeCommitLoggerThunk: MakeNoopCommitLogger, + DistanceProvider: distancer.NewDotProductProvider(), + // DistanceProvider: distancer.NewCosineDistanceProvider(), + VectorForIDThunk: func(ctx context.Context, id uint64) ([]float32, error) { + return nil, nil + }, + }, UserConfig{ + MaxConnections: maxNeighbors, + EFConstruction: efConstruction, + }, testinghelpers.NewDummyStore(t)) + + require.Nil(t, err) + vectorIndex = index + + workerCount := runtime.GOMAXPROCS(0) + // workerCount := 1 + jobsForWorker := make([][][]float32, workerCount) + + for i, vec := range vectors { + workerID := i % 
workerCount + jobsForWorker[workerID] = append(jobsForWorker[workerID], vec) + } + + beforeImport := time.Now() + wg := &sync.WaitGroup{} + for workerID, jobs := range jobsForWorker { + wg.Add(1) + go func(workerID int, myJobs [][]float32) { + defer wg.Done() + for i, vec := range myJobs { + originalIndex := (i * workerCount) + workerID + err := vectorIndex.Add(uint64(originalIndex), vec) + require.Nil(t, err) + } + }(workerID, jobs) + } + + wg.Wait() + // neighbor := bruteForceCosine(vectors, vectors[0], 2) + // dist, _, _ := distancer.NewCosineDistanceProvider().SingleDist(vectors[0], vectors[neighbor[1]]) + // fmt.Printf("distance between 0 and %d is %f\n", neighbor[1], dist) + fmt.Printf("import took %s\n", time.Since(beforeImport)) + // vectorIndex.Dump() + + t.Fail() + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/index_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/index_test.go new file mode 100644 index 0000000000000000000000000000000000000000..57969b12102a38845e6c7f5ff1fe459f77bb2486 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/index_test.go @@ -0,0 +1,422 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package hnsw + +import ( + "context" + "sync" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/repos/db/vector/cache" + "github.com/weaviate/weaviate/adapters/repos/db/vector/common" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer" + "github.com/weaviate/weaviate/adapters/repos/db/vector/testinghelpers" + "github.com/weaviate/weaviate/entities/cyclemanager" + ent "github.com/weaviate/weaviate/entities/vectorindex/hnsw" +) + +func TestHnswIndex(t *testing.T) { + ctx := context.Background() + index := createEmptyHnswIndexForTests(t, testVectorForID) + + for i, vec := range testVectors { + err := index.Add(ctx, uint64(i), vec) + require.Nil(t, err) + } + + t.Run("searching within cluster 1", func(t *testing.T) { + position := 0 + res, _, err := index.SearchByVector(ctx, testVectors[position], 3, nil) + require.Nil(t, err) + assert.ElementsMatch(t, []uint64{0, 1, 2}, res) + }) + + t.Run("searching within cluster 2", func(t *testing.T) { + position := 3 + res, _, err := index.SearchByVector(ctx, testVectors[position], 3, nil) + require.Nil(t, err) + assert.ElementsMatch(t, []uint64{3, 4, 5}, res) + }) + + t.Run("searching within cluster 3", func(t *testing.T) { + position := 6 + res, _, err := index.SearchByVector(ctx, testVectors[position], 3, nil) + require.Nil(t, err) + assert.ElementsMatch(t, []uint64{6, 7, 8}, res) + }) + + t.Run("searching within cluster 2 with a scope larger than the cluster", func(t *testing.T) { + position := 3 + res, _, err := index.SearchByVector(ctx, testVectors[position], 50, nil) + require.Nil(t, err) + assert.Equal(t, []uint64{ + 3, 5, 4, // cluster 2 + 7, 8, 6, // cluster 3 + 2, 1, 0, // cluster 1 + }, res) + }) + + t.Run("searching with negative value of k", func(t *testing.T) { + position := 0 + _, _, err := index.SearchByVector(ctx, testVectors[position], -1, nil) + require.Error(t, 
err) + }) +} + +func TestHnswIndexGrow(t *testing.T) { + ctx := context.Background() + vector := []float32{0.1, 0.2} + vecForIDFn := func(ctx context.Context, id uint64) ([]float32, error) { + return vector, nil + } + index := createEmptyHnswIndexForTests(t, vecForIDFn) + + t.Run("should grow initial empty index", func(t *testing.T) { + // when we invoke Add method suggesting a size bigger then the default + // initial size, then if we don't grow an index at initial state + // we get: panic: runtime error: index out of range [25001] with length 25000 + // in order to avoid this, insertInitialElement method is now able + // to grow it's size at initial state + err := index.Add(ctx, uint64(cache.InitialSize+1), vector) + require.Nil(t, err) + }) + + t.Run("should grow index without panic", func(t *testing.T) { + // This test shows that we had an edge case that was not covered + // in growIndexToAccomodateNode method which was leading to panic: + // panic: runtime error: index out of range [170001] with length 170001 + vector := []float32{0.11, 0.22} + id := uint64(5*cache.InitialSize + 1) + err := index.Add(ctx, id, vector) + require.Nil(t, err) + // index should grow to 5001 + assert.Equal(t, int(id)+cache.MinimumIndexGrowthDelta, len(index.nodes)) + assert.Equal(t, int32(id+2*cache.MinimumIndexGrowthDelta), index.cache.Len()) + // try to add a vector with id: 8001 + id = uint64(6*cache.InitialSize + cache.MinimumIndexGrowthDelta + 1) + err = index.Add(ctx, id, vector) + require.Nil(t, err) + // index should grow to at least 8001 + assert.GreaterOrEqual(t, len(index.nodes), 8001) + assert.GreaterOrEqual(t, index.cache.Len(), int32(8001)) + }) + + t.Run("should grow index", func(t *testing.T) { + // should not increase the nodes size + sizeBefore := len(index.nodes) + cacheBefore := index.cache.Len() + idDontGrowIndex := uint64(6*cache.InitialSize - 1) + err := index.Add(ctx, idDontGrowIndex, vector) + require.Nil(t, err) + assert.Equal(t, sizeBefore, 
len(index.nodes)) + assert.Equal(t, cacheBefore, index.cache.Len()) + // should increase nodes + id := uint64(8*cache.InitialSize + 1) + err = index.Add(ctx, id, vector) + require.Nil(t, err) + assert.GreaterOrEqual(t, len(index.nodes), int(id)) + assert.GreaterOrEqual(t, index.cache.Len(), int32(id)) + // should increase nodes when a much greater id is passed + id = uint64(20*cache.InitialSize + 22) + err = index.Add(ctx, id, vector) + require.Nil(t, err) + assert.Equal(t, int(id)+cache.MinimumIndexGrowthDelta, len(index.nodes)) + assert.Equal(t, int32(id+2*cache.MinimumIndexGrowthDelta), index.cache.Len()) + }) +} + +func TestHnswIndexGrowSafely(t *testing.T) { + vector := []float32{0.1, 0.2} + vecForIDFn := func(ctx context.Context, id uint64) ([]float32, error) { + return vector, nil + } + index := createEmptyHnswIndexForTests(t, vecForIDFn) + + t.Run("concurrently add nodes to grow index", func(t *testing.T) { + growAttempts := 20 + var wg sync.WaitGroup + offset := uint64(len(index.nodes)) + ctx := context.Background() + + addVectorPair := func(ids []uint64) { + defer wg.Done() + err := index.AddBatch(ctx, ids, [][]float32{vector, vector}) + require.Nil(t, err) + } + + for i := 0; i < growAttempts; i++ { + wg.Add(4) + go addVectorPair([]uint64{offset - 4, offset - 5}) + go addVectorPair([]uint64{offset - 3, offset}) + go addVectorPair([]uint64{offset - 2, offset + 2}) + go addVectorPair([]uint64{offset - 1, offset + 3}) + wg.Wait() + offset = uint64(len(index.nodes)) + } + + // Calculate non-nil nodes + nonNilNodes := 0 + for _, node := range index.nodes { + if node != nil { + nonNilNodes++ + } + } + + assert.Equal(t, growAttempts*8, nonNilNodes) + }) +} + +func createEmptyHnswIndexForTests(t testing.TB, vecForIDFn common.VectorForID[float32]) *hnsw { + cfg := createVectorHnswIndexTestConfig() + cfg.VectorForIDThunk = vecForIDFn + + index, err := New(cfg, ent.UserConfig{ + MaxConnections: 30, + EFConstruction: 60, + EF: 36, + }, 
cyclemanager.NewCallbackGroupNoop(), testinghelpers.NewDummyStore(t)) + require.Nil(t, err) + return index +} + +func createEmptyMultiVectorHnswIndexForTests(t testing.TB, vecForIDFn common.VectorForID[[]float32]) *hnsw { + cfg := createVectorHnswIndexTestConfig() + cfg.MultiVectorForIDThunk = vecForIDFn + + index, err := New(cfg, ent.UserConfig{ + MaxConnections: 30, + EFConstruction: 60, + Multivector: ent.MultivectorConfig{ + Enabled: true, + }, + }, cyclemanager.NewCallbackGroupNoop(), testinghelpers.NewDummyStore(t)) + require.Nil(t, err) + return index +} + +func createVectorHnswIndexTestConfig() Config { + // mock out commit logger before adding data so we don't leave a disk + // footprint. Commit logging and deserializing from a (condensed) commit log + // is tested in a separate integration test that takes care of providing and + // cleaning up the correct place on disk to write test files + return Config{ + RootPath: "doesnt-matter-as-committlogger-is-mocked-out", + ID: "unittest", + MakeCommitLoggerThunk: MakeNoopCommitLogger, + DistanceProvider: distancer.NewCosineDistanceProvider(), + VectorForIDThunk: testVectorForID, + } +} + +func TestHnswIndexContainsDoc(t *testing.T) { + testHnswIndexContainsDoc(t, genericVecTestHelperSingle()) +} + +func TestHnswIndexContainsDoc_MultiVector(t *testing.T) { + testHnswIndexContainsDoc(t, genericVecTestHelperMulti()) +} + +func testHnswIndexContainsDoc[T float32 | []float32](t *testing.T, h genericVecTestHelper[T]) { + ctx := context.Background() + + t.Run("should return false if index is empty", func(t *testing.T) { + vecForIDFn := func(ctx context.Context, id uint64) ([]T, error) { + t.Fatalf("vecForID should not be called on empty index") + return nil, nil + } + index := h.createIndex(t, vecForIDFn) + require.False(t, index.ContainsDoc(1)) + }) + + t.Run("should return true if node is in the index", func(t *testing.T) { + index := h.createIndex(t, h.vecForIDFn) + for i, vec := range h.testVectors { + err := 
h.addDocToIndex(index, ctx, uint64(i), vec) + require.NoError(t, err) + } + require.True(t, index.ContainsDoc(5)) + }) + + t.Run("should return false if node is not in the index", func(t *testing.T) { + index := h.createIndex(t, h.vecForIDFn) + for i, vec := range h.testVectors { + err := h.addDocToIndex(index, ctx, uint64(i), vec) + require.NoError(t, err) + } + require.False(t, index.ContainsDoc(100)) + }) + + t.Run("should return false if node is deleted", func(t *testing.T) { + index := h.createIndex(t, h.vecForIDFn) + for i, vec := range h.testVectors { + err := h.addDocToIndex(index, ctx, uint64(i), vec) + require.NoError(t, err) + } + err := h.deleteDocFromIndex(index, ctx, uint64(5)) + require.Nil(t, err) + require.False(t, index.ContainsDoc(5)) + }) +} + +func TestHnswIndexIterate(t *testing.T) { + testHnswIndexIterate(t, genericVecTestHelperSingle()) +} + +func TestHnswIndexIterate_MultiVector(t *testing.T) { + testHnswIndexIterate(t, genericVecTestHelperMulti()) +} + +func testHnswIndexIterate[T float32 | []float32](t *testing.T, h genericVecTestHelper[T]) { + ctx := context.Background() + t.Run("should not run callback on empty index", func(t *testing.T) { + vecForIDFn := func(ctx context.Context, id uint64) ([]T, error) { + t.Fatalf("vecForID should not be called on empty index") + return nil, nil + } + index := h.createIndex(t, vecForIDFn) + index.Iterate(func(id uint64) bool { + t.Fatalf("callback should not be called on empty index") + return true + }) + }) + + t.Run("should iterate over all nodes", func(t *testing.T) { + index := h.createIndex(t, h.vecForIDFn) + for i, vec := range h.testVectors { + err := h.addDocToIndex(index, ctx, uint64(i), vec) + require.NoError(t, err) + } + + visited := make([]bool, len(h.testVectors)) + index.Iterate(func(id uint64) bool { + visited[id] = true + return true + }) + for i, v := range visited { + assert.True(t, v, "node %d was not visited", i) + } + }) + + t.Run("should stop iteration when callback returns 
false", func(t *testing.T) { + index := h.createIndex(t, h.vecForIDFn) + for i, vec := range h.testVectors { + err := h.addDocToIndex(index, ctx, uint64(i), vec) + require.NoError(t, err) + } + + counter := 0 + index.Iterate(func(id uint64) bool { + counter++ + return counter < 5 + }) + require.Equal(t, 5, counter) + }) + + t.Run("should stop iteration when shutdownCtx is canceled", func(t *testing.T) { + index := h.createIndex(t, h.vecForIDFn) + for i, vec := range h.testVectors { + err := h.addDocToIndex(index, ctx, uint64(i), vec) + require.NoError(t, err) + } + + counter := 0 + index.Iterate(func(id uint64) bool { + counter++ + if counter == 5 { + err := index.Shutdown(context.Background()) + require.NoError(t, err) + } + return true + }) + require.Equal(t, 5, counter) + }) + + t.Run("should stop iteration when resetCtx is canceled", func(t *testing.T) { + index := h.createIndex(t, h.vecForIDFn) + for i, vec := range h.testVectors { + err := h.addDocToIndex(index, ctx, uint64(i), vec) + require.NoError(t, err) + } + + counter := 0 + index.Iterate(func(id uint64) bool { + counter++ + if counter == 5 { + index.resetCtxCancel() + } + return true + }) + require.Equal(t, 5, counter) + }) + + t.Run("should skip deleted nodes", func(t *testing.T) { + index := h.createIndex(t, h.vecForIDFn) + for i, vec := range h.testVectors { + err := h.addDocToIndex(index, ctx, uint64(i), vec) + require.NoError(t, err) + } + + err := h.deleteDocFromIndex(index, ctx, uint64(5)) + require.NoError(t, err) + + visited := make([]bool, len(h.testVectors)) + index.Iterate(func(id uint64) bool { + visited[id] = true + return true + }) + for i, v := range visited { + if i == 5 { + assert.False(t, v, "node %d was visited", i) + } else { + assert.True(t, v, "node %d was not visited", i) + } + } + }) +} + +type genericVecTestHelper[T float32 | []float32] struct { + createIndex func(t testing.TB, vecForIDFn common.VectorForID[T]) *hnsw + vecForIDFn common.VectorForID[T] + testVectors [][]T + 
addDocToIndex func(i *hnsw, ctx context.Context, docID uint64, vec []T) error + deleteDocFromIndex func(i *hnsw, ctx context.Context, docIDs ...uint64) error +} + +func genericVecTestHelperSingle() genericVecTestHelper[float32] { + return genericVecTestHelper[float32]{ + createIndex: createEmptyHnswIndexForTests, + vecForIDFn: testVectorForID, + testVectors: testVectors, + addDocToIndex: func(i *hnsw, ctx context.Context, docID uint64, vec []float32) error { + return i.Add(ctx, docID, vec) + }, + deleteDocFromIndex: func(i *hnsw, ctx context.Context, docIDs ...uint64) error { + return i.Delete(docIDs...) + }, + } +} + +func genericVecTestHelperMulti() genericVecTestHelper[[]float32] { + return genericVecTestHelper[[]float32]{ + createIndex: createEmptyMultiVectorHnswIndexForTests, + vecForIDFn: testMultiVectorForID, + testVectors: testMultiVectors, + addDocToIndex: func(i *hnsw, ctx context.Context, docID uint64, vec [][]float32) error { + return i.AddMulti(ctx, docID, vec) + }, + deleteDocFromIndex: func(i *hnsw, ctx context.Context, docIDs ...uint64) error { + return i.DeleteMulti(docIDs...) + }, + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/index_too_many_links_bug_integration_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/index_too_many_links_bug_integration_test.go new file mode 100644 index 0000000000000000000000000000000000000000..fe3a1f72296ec5a04e1b94b04bc7bcb5164aae01 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/index_too_many_links_bug_integration_test.go @@ -0,0 +1,269 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest && !race +// +build integrationTest,!race + +package hnsw + +import ( + "context" + "runtime" + "sync" + "testing" + + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer" + "github.com/weaviate/weaviate/adapters/repos/db/vector/testinghelpers" + "github.com/weaviate/weaviate/entities/cyclemanager" + ent "github.com/weaviate/weaviate/entities/vectorindex/hnsw" +) + +// The !race build tag makes sure that this test is EXCLUDED from running with +// the race detector on, but now we also need to make sure that it runs in the +// separate no-race test run. To INCLUDE it there we use the Test_NoRace_ +// prefix. +// This test imports 10,000 objects concurrently which is extremely expensive +// with the race detector on. +// It prevents a regression on +// https://github.com/weaviate/weaviate/issues/1868 +func Test_NoRace_ManySmallCommitlogs(t *testing.T) { + n := 10000 + dim := 16 + m := 8 + + r := getRandomSeed() + rootPath := t.TempDir() + + logger, _ := test.NewNullLogger() + ctx := context.Background() + + parentCommitLoggerCallbacks := cyclemanager.NewCallbackGroup("parentCommitLogger", logger, 1) + parentCommitLoggerCycle := cyclemanager.NewManager( + cyclemanager.HnswCommitLoggerCycleTicker(), + parentCommitLoggerCallbacks.CycleCallback, logger) + parentCommitLoggerCycle.Start() + defer parentCommitLoggerCycle.StopAndWait(ctx) + commitLoggerCallbacks := cyclemanager.NewCallbackGroup("childCommitLogger", logger, 1) + commitLoggerCallbacksCtrl := parentCommitLoggerCallbacks.Register("commitLogger", commitLoggerCallbacks.CycleCallback) + + parentTombstoneCleanupCallbacks := cyclemanager.NewCallbackGroup("parentTombstoneCleanup", logger, 1) + parentTombstoneCleanupCycle := cyclemanager.NewManager( + cyclemanager.NewFixedTicker(1), + 
parentTombstoneCleanupCallbacks.CycleCallback, logger) + parentTombstoneCleanupCycle.Start() + defer parentTombstoneCleanupCycle.StopAndWait(ctx) + tombstoneCleanupCallbacks := cyclemanager.NewCallbackGroup("childTombstoneCleanup", logger, 1) + tombstoneCleanupCallbacksCtrl := parentTombstoneCleanupCallbacks.Register("tombstoneCleanup", tombstoneCleanupCallbacks.CycleCallback) + + original, err := NewCommitLogger(rootPath, "too_many_links_test", logger, commitLoggerCallbacks, + WithCommitlogThreshold(1e5), + WithCommitlogThresholdForCombining(5e5)) + require.Nil(t, err) + + data := make([][]float32, n) + for i := range data { + data[i] = make([]float32, dim) + for j := range data[i] { + data[i][j] = r.Float32() + } + + } + + var index *hnsw + + t.Run("set up an index with the specified commit logger", func(t *testing.T) { + idx, err := New(Config{ + MakeCommitLoggerThunk: func() (CommitLogger, error) { + return original, nil + }, + ID: "too_many_links_test", + RootPath: rootPath, + DistanceProvider: distancer.NewCosineDistanceProvider(), + Logger: logger, + VectorForIDThunk: func(ctx context.Context, id uint64) ([]float32, error) { + return data[int(id)], nil + }, + }, ent.UserConfig{ + MaxConnections: m, + EFConstruction: 128, + CleanupIntervalSeconds: 0, + + // The actual size does not matter for this test, but if it defaults to + // zero it will constantly think it's full and needs to be deleted - even + // after just being deleted, so make sure to use a positive number here. 
+ VectorCacheMaxObjects: 2 * n, + }, tombstoneCleanupCallbacks, testinghelpers.NewDummyStore(t)) + require.Nil(t, err) + idx.PostStartup() + index = idx + }) + + t.Run("add data", func(t *testing.T) { + type tuple struct { + vec []float32 + id uint64 + } + + jobs := make(chan tuple, n) + + wg := sync.WaitGroup{} + worker := func(jobs chan tuple) { + for job := range jobs { + index.Add(ctx, job.id, job.vec) + } + + wg.Done() + } + + for i := 0; i < runtime.GOMAXPROCS(0); i++ { + wg.Add(1) + go worker(jobs) + } + + for i, vec := range data { + jobs <- tuple{id: uint64(i), vec: vec} + } + + close(jobs) + + wg.Wait() + }) + + index.Flush() + + t.Run("verify there are no nodes with too many links - control", func(t *testing.T) { + for i, node := range index.nodes { + if node == nil { + continue + } + + node.connections.IterateOnLayers(func(level uint8, conns []uint64) { + m := index.maximumConnections + if level == 0 { + m = index.maximumConnectionsLayerZero + } + + assert.LessOrEqualf(t, len(conns), m, "node %d at level %d with %d conns", + i, level, len(conns)) + }) + } + }) + + t.Run("delete 10 percent of data", func(t *testing.T) { + type tuple struct { + vec []float32 + id uint64 + } + + jobs := make(chan tuple, n) + + wg := sync.WaitGroup{} + worker := func(jobs chan tuple) { + for job := range jobs { + index.Delete(job.id) + } + + wg.Done() + } + + for i := 0; i < runtime.GOMAXPROCS(0); i++ { + wg.Add(1) + go worker(jobs) + } + + for i, vec := range data[:n/10] { + jobs <- tuple{id: uint64(i), vec: vec} + } + + close(jobs) + + wg.Wait() + }) + + index.Flush() + + t.Run("verify there are no nodes with too many links - post deletion", func(t *testing.T) { + for i, node := range index.nodes { + if node == nil { + continue + } + + node.connections.IterateOnLayers(func(level uint8, conns []uint64) { + m := index.maximumConnections + if level == 0 { + m = index.maximumConnectionsLayerZero + } + + assert.LessOrEqualf(t, len(conns), m, "node %d at level %d with %d 
conns", + i, level, len(conns)) + }) + } + }) + + t.Run("destroy the old index", func(t *testing.T) { + // kill the commit loger and index + require.Nil(t, original.Shutdown(context.Background())) + index = nil + original = nil + }) + + t.Run("create a new one from the disk files", func(t *testing.T) { + idx, err := New(Config{ + MakeCommitLoggerThunk: MakeNoopCommitLogger, // no longer need a real one + ID: "too_many_links_test", + RootPath: rootPath, + DistanceProvider: distancer.NewCosineDistanceProvider(), + Logger: logger, + VectorForIDThunk: func(ctx context.Context, id uint64) ([]float32, error) { + return data[int(id)], nil + }, + }, ent.UserConfig{ + MaxConnections: m, + EFConstruction: 128, + CleanupIntervalSeconds: 1, + + // The actual size does not matter for this test, but if it defaults to + // zero it will constantly think it's full and needs to be deleted - even + // after just being deleted, so make sure to use a positive number here. + VectorCacheMaxObjects: 2 * n, + }, tombstoneCleanupCallbacks, testinghelpers.NewDummyStore(t)) + require.Nil(t, err) + idx.PostStartup() + index = idx + }) + + t.Run("verify there are no nodes with too many links - after restart", func(t *testing.T) { + for i, node := range index.nodes { + if node == nil { + continue + } + + node.connections.IterateOnLayers(func(level uint8, conns []uint64) { + m := index.maximumConnections + if level == 0 { + m = index.maximumConnectionsLayerZero + } + + require.LessOrEqualf(t, len(conns), m, "node %d at level %d with %d conns", + i, level, len(conns)) + }) + } + }) + + t.Run("destroy the index", func(t *testing.T) { + require.Nil(t, index.Drop(context.Background())) + require.Nil(t, commitLoggerCallbacksCtrl.Unregister(ctx)) + require.Nil(t, tombstoneCleanupCallbacksCtrl.Unregister(ctx)) + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/insert.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/insert.go new file mode 
100644 index 0000000000000000000000000000000000000000..6aeea394edc87c6201a06f48179671c0f2b9881c --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/insert.go @@ -0,0 +1,527 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package hnsw + +import ( + "context" + "encoding/binary" + "fmt" + "math" + "sync/atomic" + "time" + + "github.com/pkg/errors" + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/adapters/repos/db/vector/common" + "github.com/weaviate/weaviate/adapters/repos/db/vector/compressionhelpers" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/packedconn" + "github.com/weaviate/weaviate/adapters/repos/db/vector/multivector" +) + +func (h *hnsw) ValidateBeforeInsert(vector []float32) error { + dims := int(atomic.LoadInt32(&h.dims)) + + // no vectors exist + if dims == 0 { + return nil + } + + // check if vector length is the same as existing nodes + if dims != len(vector) { + return errors.Wrapf(common.ErrWrongDimensions, "new node has a vector with length %v. 
"+ + "Existing nodes have vectors with length %v", len(vector), dims) + } + + return nil +} + +func (h *hnsw) ValidateMultiBeforeInsert(vector [][]float32) error { + dims := int(atomic.LoadInt32(&h.dims)) + + if h.muvera.Load() { + return nil + } + + // no vectors exist + if dims == 0 { + vecDimensions := make(map[int]struct{}) + for i := range vector { + vecDimensions[len(vector[i])] = struct{}{} + } + if len(vecDimensions) > 1 { + return fmt.Errorf("multi vector array consists of vectors with varying dimensions") + } + return nil + } + + // check if vector length is the same as existing nodes + for i := range vector { + if dims != len(vector[i]) { + return fmt.Errorf("new node has a multi vector with length %v at position %v. "+ + "Existing nodes have vectors with length %v", len(vector[i]), i, dims) + } + } + + return nil +} + +func (h *hnsw) AddBatch(ctx context.Context, ids []uint64, vectors [][]float32) error { + if err := ctx.Err(); err != nil { + return err + } + if h.multivector.Load() && !h.muvera.Load() { + return errors.Errorf("AddBatch called on multivector index") + } + if len(ids) != len(vectors) { + return errors.Errorf("ids and vectors sizes does not match") + } + if len(ids) == 0 { + return errors.Errorf("insertBatch called with empty lists") + } + + var err error + h.trackDimensionsOnce.Do(func() { + dims := len(vectors[0]) + for _, vec := range vectors { + if len(vec) != dims { + err = errors.Errorf("addBatch called with vectors of different lengths") + return + } + } + if err == nil { + atomic.StoreInt32(&h.dims, int32(len(vectors[0]))) + } + }) + + if err != nil { + return err + } + + if h.rqConfig.Enabled && h.rqActive { + h.trackRQOnce.Do(func() { + h.compressor, err = compressionhelpers.NewRQCompressor( + h.distancerProvider, 1e12, h.logger, h.store, + h.allocChecker, int(h.rqConfig.Bits), int(h.dims)) + + if err == nil { + h.compressed.Store(true) + h.cache.Drop() + h.compressor.PersistCompression(h.commitLog) + } + }) + if err != nil { + 
return err + } + } + + levels := make([]int, len(ids)) + maxId := uint64(0) + for i, id := range ids { + if maxId < id { + maxId = id + } + levels[i] = int(h.generateLevel()) // TODO: represent level as uint8 + } + h.RLock() + if maxId >= uint64(len(h.nodes)) { + h.RUnlock() + h.Lock() + if maxId >= uint64(len(h.nodes)) { + err := h.growIndexToAccomodateNode(maxId, h.logger) + if err != nil { + h.Unlock() + return errors.Wrapf(err, "grow HNSW index to accommodate node %d", maxId) + } + } + h.Unlock() + } else { + h.RUnlock() + } + + for i := range ids { + if err := ctx.Err(); err != nil { + return err + } + + vector := vectors[i] + node := &vertex{ + id: ids[i], + level: levels[i], + } + globalBefore := time.Now() + if len(vector) == 0 { + return errors.Errorf("insert called with nil-vector") + } + + h.metrics.InsertVector() + + vector = h.normalizeVec(vector) + err := h.addOne(ctx, vector, node) + if err != nil { + return err + } + + h.insertMetrics.total(globalBefore) + } + return nil +} + +func (h *hnsw) AddMultiBatch(ctx context.Context, docIDs []uint64, vectors [][][]float32) error { + if err := ctx.Err(); err != nil { + return err + } + if !h.multivector.Load() { + return errors.Errorf("addMultiBatch called on non-multivector index") + } + if len(docIDs) != len(vectors) { + return errors.Errorf("ids and vectors sizes does not match") + } + if len(docIDs) == 0 { + return errors.Errorf("addMultiBatch called with empty lists") + } + + if h.muvera.Load() { + h.trackMuveraOnce.Do(func() { + h.muveraEncoder.InitEncoder(len(vectors[0][0])) + h.Lock() + if err := h.muveraEncoder.PersistMuvera(h.commitLog); err != nil { + h.Unlock() + h.logger.WithField("action", "persist muvera").Error(err) + return + } + h.Unlock() + }) + // Process all vectors + processedVectors := make([][]float32, len(vectors)) + for i, v := range vectors { + processedVectors[i] = h.muveraEncoder.EncodeDoc(v) + docIDBytes := make([]byte, 8) + binary.BigEndian.PutUint64(docIDBytes, docIDs[i]) + 
muveraBytes := multivector.MuveraBytesFromFloat32(processedVectors[i]) + if err := h.store.Bucket(h.id+"_muvera_vectors").Put(docIDBytes, muveraBytes); err != nil { + return errors.Wrap(err, fmt.Sprintf("failed to put %s_muvera_vectors into the bucket", h.id)) + } + } + // Replace original vectors with processed ones + return h.AddBatch(ctx, docIDs, processedVectors) + } + + var err error + h.trackDimensionsOnce.Do(func() { + dim := len(vectors[0][0]) + for _, doc := range vectors { + for _, vec := range doc { + if len(vec) != dim { + err = errors.Errorf("addMultiBatch called with vectors of different lengths") + return + } + } + } + if err == nil { + atomic.StoreInt32(&h.dims, int32(len(vectors[0][0]))) + } + }) + + if err != nil { + return err + } + if h.rqConfig.Enabled && h.rqActive { + h.trackRQOnce.Do(func() { + h.compressor, err = compressionhelpers.NewRQMultiCompressor( + h.distancerProvider, 1e12, h.logger, h.store, + h.allocChecker, int(h.rqConfig.Bits), int(h.dims)) + + if err == nil { + h.Lock() + data := h.cache.All() + h.compressor.GrowCache(h.vecIDcounter) + compressionhelpers.Concurrently(h.logger, uint64(len(data)), + func(index uint64) { + if len(data[index]) == 0 { + return + } + docID, relativeID := h.cache.GetKeys(index) + h.compressor.PreloadPassage(index, docID, relativeID, data[index]) + }) + h.compressed.Store(true) + h.cache.Drop() + h.compressor.PersistCompression(h.commitLog) + h.Unlock() + } + }) + if err != nil { + return err + } + } + if err != nil { + return err + } + + for i, docID := range docIDs { + numVectors := len(vectors[i]) + levels := make([]int, numVectors) + for j := range numVectors { + levels[j] = int(h.generateLevel()) // TODO: represent level as uint8 + } + + h.Lock() + counter := h.vecIDcounter + h.vecIDcounter += uint64(numVectors) + h.Unlock() + + maxId := counter + uint64(numVectors) + + h.RLock() + if maxId >= uint64(len(h.nodes)) { + h.RUnlock() + h.Lock() + if maxId >= uint64(len(h.nodes)) { + err := 
h.growIndexToAccomodateNode(maxId, h.logger) + if err != nil { + h.Unlock() + return errors.Wrapf(err, "grow HNSW index to accommodate node %d", maxId) + } + } + h.Unlock() + } else { + h.RUnlock() + } + + ids := make([]uint64, numVectors) + for id := range ids { + ids[id] = counter + uint64(id) + } + if h.compressed.Load() { + h.compressor.PreloadMulti(docID, ids, vectors[i]) + } else { + h.cache.PreloadMulti(docID, ids, vectors[i]) + } + for j := range numVectors { + if err := ctx.Err(); err != nil { + return err + } + + vector := vectors[i][j] + + globalBefore := time.Now() + if len(vector) == 0 { + return errors.Errorf("insert called with nil-vector") + } + + h.metrics.InsertVector() + + vector = h.normalizeVec(vector) + + nodeId := counter + counter++ + + node := &vertex{ + id: uint64(nodeId), + level: levels[j], + } + + h.Lock() + h.docIDVectors[docID] = append(h.docIDVectors[docIDs[i]], nodeId) + h.Unlock() + + nodeIDBytes := make([]byte, 8) + binary.BigEndian.PutUint64(nodeIDBytes, nodeId) + docIDBytes := make([]byte, 8) + binary.BigEndian.PutUint64(docIDBytes, docID) + err := h.store.Bucket(h.id+"_mv_mappings").Put(nodeIDBytes, docIDBytes) + if err != nil { + return errors.Wrap(err, fmt.Sprintf("failed to put %s_mv_mappings into the bucket", h.id)) + } + + err = h.addOne(ctx, vector, node) + if err != nil { + return err + } + + h.insertMetrics.total(globalBefore) + } + + } + + return nil +} + +func (h *hnsw) addOne(ctx context.Context, vector []float32, node *vertex) error { + h.compressActionLock.RLock() + h.deleteVsInsertLock.RLock() + + before := time.Now() + + defer func() { + h.deleteVsInsertLock.RUnlock() + h.compressActionLock.RUnlock() + h.insertMetrics.updateGlobalEntrypoint(before) + }() + + wasFirst := false + var firstInsertError error + h.initialInsertOnce.Do(func() { + if h.isEmpty() { + wasFirst = true + firstInsertError = h.insertInitialElement(node, vector) + } + }) + if wasFirst { + if firstInsertError != nil { + return firstInsertError + 
} + return nil + } + + node.markAsMaintenance() + + h.RLock() + // initially use the "global" entrypoint which is guaranteed to be on the + // currently highest layer + entryPointID := h.entryPointID + // initially use the level of the entrypoint which is the highest level of + // the h-graph in the first iteration + currentMaximumLayer := h.currentMaximumLayer + h.RUnlock() + + targetLevel := node.level + var err error + node.connections, err = packedconn.NewWithMaxLayer(uint8(targetLevel)) + if err != nil { + return err + } + + if err = h.commitLog.AddNode(node); err != nil { + return err + } + + nodeId := node.id + + h.shardedNodeLocks.Lock(nodeId) + h.nodes[nodeId] = node + h.shardedNodeLocks.Unlock(nodeId) + + singleVector := !h.multivector.Load() || h.muvera.Load() + if singleVector { + if h.compressed.Load() { + h.compressor.Preload(nodeId, vector) + } else { + h.cache.Preload(nodeId, vector) + } + } + + h.insertMetrics.prepareAndInsertNode(before) + before = time.Now() + + var distancer compressionhelpers.CompressorDistancer + var returnFn compressionhelpers.ReturnDistancerFn + if h.compressed.Load() { + distancer, returnFn = h.compressor.NewDistancer(vector) + defer returnFn() + } + entryPointID, err = h.findBestEntrypointForNode(ctx, currentMaximumLayer, targetLevel, + entryPointID, vector, distancer) + if err != nil { + return errors.Wrap(err, "find best entrypoint") + } + + h.insertMetrics.findEntrypoint(before) + before = time.Now() + + // TODO: check findAndConnectNeighbors... 
+ if err := h.findAndConnectNeighbors(ctx, node, entryPointID, vector, distancer, + targetLevel, currentMaximumLayer, helpers.NewAllowList()); err != nil { + return errors.Wrap(err, "find and connect neighbors") + } + + h.insertMetrics.findAndConnectTotal(before) + before = time.Now() + + node.unmarkAsMaintenance() + + h.RLock() + if targetLevel > h.currentMaximumLayer { + h.RUnlock() + h.Lock() + // check again to avoid changes from RUnlock to Lock again + if targetLevel > h.currentMaximumLayer { + if err := h.commitLog.SetEntryPointWithMaxLayer(nodeId, targetLevel); err != nil { + h.Unlock() + return err + } + + h.entryPointID = nodeId + h.currentMaximumLayer = targetLevel + } + h.Unlock() + } else { + h.RUnlock() + } + + return nil +} + +func (h *hnsw) Add(ctx context.Context, id uint64, vector []float32) error { + return h.AddBatch(ctx, []uint64{id}, [][]float32{vector}) +} + +func (h *hnsw) AddMulti(ctx context.Context, id uint64, vector [][]float32) error { + return h.AddMultiBatch(ctx, []uint64{id}, [][][]float32{vector}) +} + +func (h *hnsw) insertInitialElement(node *vertex, nodeVec []float32) error { + h.Lock() + defer h.Unlock() + + if err := h.commitLog.SetEntryPointWithMaxLayer(node.id, 0); err != nil { + return err + } + + h.entryPointID = node.id + h.currentMaximumLayer = 0 + conns, err := packedconn.NewWithElements([][]uint64{ + make([]uint64, 0, h.maximumConnectionsLayerZero), + }) + if err != nil { + return err + } + node.connections = conns + node.level = 0 + if err := h.commitLog.AddNode(node); err != nil { + return err + } + + err = h.growIndexToAccomodateNode(node.id, h.logger) + if err != nil { + return errors.Wrapf(err, "grow HNSW index to accommodate node %d", node.id) + } + + h.shardedNodeLocks.Lock(node.id) + h.nodes[node.id] = node + h.shardedNodeLocks.Unlock(node.id) + + singleVector := !h.multivector.Load() || h.muvera.Load() + if singleVector { + if h.compressed.Load() { + h.compressor.Preload(node.id, nodeVec) + } else { + 
h.cache.Preload(node.id, nodeVec) + } + } + + // go h.insertHook(node.id, 0, node.connections) + return nil +} + +func (h *hnsw) generateLevel() uint8 { + return uint8(math.Floor(-math.Log(max(h.randFunc(), 1e-19)) * h.levelNormalizer)) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/insert_metrics.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/insert_metrics.go new file mode 100644 index 0000000000000000000000000000000000000000..6a4b8e9a0f14dd2aa7a4e1099a74f040eb1678e7 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/insert_metrics.go @@ -0,0 +1,38 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package hnsw + +type insertMetrics struct { + total Observer + prepareAndInsertNode Observer + findEntrypoint Observer + updateGlobalEntrypoint Observer + findAndConnectTotal Observer + findAndConnectSearch Observer + findAndConnectHeuristic Observer + findAndConnectUpdateConnections Observer +} + +// newInsertMetrics curries the prometheus observers just once at creation time +// and therefore avoids having to make a lot of allocations on the hot path +func newInsertMetrics(metrics *Metrics) *insertMetrics { + return &insertMetrics{ + total: metrics.TrackInsertObserver("total"), + prepareAndInsertNode: metrics.TrackInsertObserver("prepare_and_insert_node"), + findEntrypoint: metrics.TrackInsertObserver("find_entrypoint"), + updateGlobalEntrypoint: metrics.TrackInsertObserver("update_global_entrypoint"), + findAndConnectTotal: metrics.TrackInsertObserver("find_and_connect_total"), + findAndConnectSearch: metrics.TrackInsertObserver("find_and_connect_search"), + findAndConnectHeuristic: 
metrics.TrackInsertObserver("find_and_connect_heuristic"), + findAndConnectUpdateConnections: metrics.TrackInsertObserver("find_and_connect_update_connections"), + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/insert_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/insert_test.go new file mode 100644 index 0000000000000000000000000000000000000000..f8b5a03bea265ff277021688d2d84dd4d799d031 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/insert_test.go @@ -0,0 +1,148 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package hnsw + +import ( + "math" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestHnswCappedLevel(t *testing.T) { + type testCase struct { + name string + maxConnections int + randomValue float64 + expectedLevel uint8 + } + + testCases := []testCase{ + { + name: "test max function with normal value", + maxConnections: 64, + randomValue: 0.3, + expectedLevel: 0, + }, + { + name: "randFunc returning 1-eps (very close to 1)", + maxConnections: 16, + randomValue: 1.0 - 1e-15, + expectedLevel: 0, + }, + { + name: "randFunc returning 1", + maxConnections: 16, + randomValue: 1.0, + expectedLevel: 0, + }, + { + name: "randFunc returning 0.5", + maxConnections: 32, + randomValue: 0.5, + expectedLevel: 0, + }, + { + name: "randFunc returning 0.1", + maxConnections: 16, + randomValue: 0.1, + expectedLevel: 0, + }, + { + name: "randFunc returning 0.01", + maxConnections: 16, + randomValue: 0.01, + expectedLevel: 1, + }, + { + name: "randFunc returning 1e-10", + maxConnections: 16, + randomValue: 1e-10, + expectedLevel: 8, + }, + { + name: "randFunc returning exactly 1e-20", + maxConnections: 16, + 
randomValue: 1e-20, + expectedLevel: 15, + }, + { + name: "randFunc returning value less than 1e-20", + maxConnections: 16, + randomValue: 1e-25, + expectedLevel: 15, + }, + { + name: "different maxConnections: 8 with 0.1", + maxConnections: 8, + randomValue: 0.1, + expectedLevel: 1, + }, + { + name: "different maxConnections: 32 with 0.1", + maxConnections: 32, + randomValue: 0.1, + expectedLevel: 0, + }, + { + name: "different maxConnections: 8 with 0.01", + maxConnections: 8, + randomValue: 0.01, + expectedLevel: 2, + }, + { + name: "different maxConnections: 32 with 0.01", + maxConnections: 32, + randomValue: 0.01, + expectedLevel: 1, + }, + { + name: "test max function with very small value", + maxConnections: 2, + randomValue: 1e-50, + expectedLevel: 63, // Should use 1e-20 due to max function, same as 1e-20 case + }, + { + name: "test max function with very small value", + maxConnections: 2, + randomValue: 1e-100, + expectedLevel: 63, // Should use 1e-20 due to max function, same as 1e-20 case + }, + { + name: "test max function with zero small value", + maxConnections: 2, + randomValue: 0.0, + expectedLevel: 63, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + // Create minimal HNSW struct with only the fields needed for level() function + h := &hnsw{ + randFunc: func() float64 { return tc.randomValue }, + levelNormalizer: 1.0 / math.Log(float64(tc.maxConnections)), + } + + level := h.generateLevel() + + oldLevel := 0 + if tc.randomValue != 0 { + oldLevel = int(math.Floor(-math.Log(tc.randomValue) * h.levelNormalizer)) + } + + assert.Equal(t, tc.expectedLevel, level, + "Test case: %s\nRandom value: %f\nMaxConnections: %d\nLevelNormalizer: %f\nExpected: %d, Got: %d, OldLevel: %d", + tc.name, tc.randomValue, tc.maxConnections, h.levelNormalizer, tc.expectedLevel, level, oldLevel) + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/maintenance.go 
b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/maintenance.go new file mode 100644 index 0000000000000000000000000000000000000000..c9540c0154a66d3d973913d91d3bfaf7a624908b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/maintenance.go @@ -0,0 +1,117 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package hnsw + +import ( + "time" + + "github.com/sirupsen/logrus" + "github.com/weaviate/weaviate/adapters/repos/db/vector/cache" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/visited" +) + +const ( + indexGrowthRate = 1.25 +) + +// growIndexToAccomodateNode is a wrapper around the growIndexToAccomodateNode +// function growing the index of the hnsw struct. It does not do any locking on +// its own, make sure that this function is called from a single-thread or +// locked situation +func (h *hnsw) growIndexToAccomodateNode(id uint64, logger logrus.FieldLogger) error { + defer func() { + h.metrics.SetSize(len(h.nodes)) + }() + + before := time.Now() + + // check whether h.nodes slice needs growing + // not to unnecessarily lock h.shardedNodeLocks + if id < uint64(len(h.nodes)) { + return nil + } + + // lock h.nodes' individual elements to avoid race between writing to elements + // and copying entire slice in growIndexToAccomodateNode method + h.shardedNodeLocks.LockAll() + + newIndex, _, err := growIndexToAccomodateNode(h.nodes, id, logger) + if err != nil { + h.shardedNodeLocks.UnlockAll() + return err + } + + defer h.metrics.GrowDuration(before) + + if h.compressed.Load() { + h.compressor.GrowCache(uint64(len(newIndex))) + } else { + h.cache.Grow(uint64(len(newIndex))) + } + + h.pools.visitedListsLock.Lock() + h.pools.visitedLists.Destroy() + 
h.pools.visitedLists = nil + h.pools.visitedLists = visited.NewPool(1, len(newIndex)+512, h.visitedListPoolMaxSize) + h.pools.visitedListsLock.Unlock() + + h.nodes = newIndex + h.shardedNodeLocks.UnlockAll() + + return nil +} + +// growIndexToAccomodateNode does not lock the graph for writes as the +// assumption is that it is called as part of an operation that is already +// wrapped inside a lock, such as inserting a node into the graph. If +// growIndexToAccomodateNode is ever called outside of such an operation, the +// caller must make sure to lock the graph as concurrent reads/write would +// otherwise be possible +func growIndexToAccomodateNode(index []*vertex, id uint64, + logger logrus.FieldLogger, +) ([]*vertex, bool, error) { + previousSize := uint64(len(index)) + if id < previousSize { + // node will fit, nothing to do + return nil, false, nil + } + before := time.Now() + + var newSize uint64 + + if (indexGrowthRate-1)*float64(previousSize) < float64(cache.MinimumIndexGrowthDelta) { + // typically grow the index by the delta + newSize = previousSize + cache.MinimumIndexGrowthDelta + } else { + newSize = uint64(float64(previousSize) * indexGrowthRate) + } + + if newSize <= id { + // There are situations were docIDs are not in order. For example, if the + // default size is 10k and the default delta is 10k. Imagine the user + // imports 21 objects, then deletes the first 20,500. When rebuilding the + // index from disk the first id to be imported would be 20,501, however the + // index default size and default delta would only reach up to 20,000. + newSize = id + cache.MinimumIndexGrowthDelta + } + + newIndex := make([]*vertex, newSize) + copy(newIndex, index) + + took := time.Since(before) + logger.WithField("action", "hnsw_grow_index"). + WithField("took", took). + WithField("previous_size", previousSize). + WithField("new_size", newSize). 
+ Debugf("index grown from %d to %d, took %s\n", previousSize, newSize, took) + return newIndex, true, nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/maintenance_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/maintenance_test.go new file mode 100644 index 0000000000000000000000000000000000000000..192aaa3daff7d1860b86aa514606e06ffea3774c --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/maintenance_test.go @@ -0,0 +1,133 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package hnsw + +import ( + "testing" + + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/repos/db/vector/cache" +) + +func Test_growIndexToAccomodateNode(t *testing.T) { + createVertexSlice := func(size int) []*vertex { + index := make([]*vertex, size) + for i := 0; i < len(index); i++ { + index[i] = &vertex{id: uint64(i)} + } + return index + } + type args struct { + index []*vertex + id uint64 + } + tests := []struct { + name string + args args + wantIndexSize int + changed bool + err error + }{ + { + name: "is one before the initial size", + args: args{ + id: cache.InitialSize - 1, + index: createVertexSlice(cache.InitialSize), + }, + wantIndexSize: 0, + changed: false, + }, + { + name: "exactly equals the initial size", + args: args{ + id: cache.InitialSize, + index: createVertexSlice(cache.InitialSize), + }, + wantIndexSize: cache.InitialSize + cache.MinimumIndexGrowthDelta, + changed: true, + }, + { + name: "is one after the initial size", + args: args{ + id: cache.InitialSize + 1, + index: createVertexSlice(cache.InitialSize), + }, + 
wantIndexSize: cache.InitialSize + cache.MinimumIndexGrowthDelta, + changed: true, + }, + { + name: "4 times the initial size minus 1", + args: args{ + id: 4*cache.InitialSize - 1, + index: createVertexSlice(cache.InitialSize), + }, + wantIndexSize: 4*cache.InitialSize - 1 + cache.MinimumIndexGrowthDelta, + changed: true, + }, + { + name: "4 times the initial size", + args: args{ + id: 4 * cache.InitialSize, + index: createVertexSlice(cache.InitialSize), + }, + wantIndexSize: 4*cache.InitialSize + cache.MinimumIndexGrowthDelta, + changed: true, + }, + { + name: "4 times the initial size plus 1", + args: args{ + id: 4*cache.InitialSize + 1, + index: createVertexSlice(cache.InitialSize), + }, + wantIndexSize: 4*cache.InitialSize + 1 + cache.MinimumIndexGrowthDelta, + changed: true, + }, + { + name: "14160016 case", + args: args{ + id: uint64(14160016), + index: createVertexSlice(14160016), + }, + wantIndexSize: int(14160016 * indexGrowthRate), + changed: true, + }, + { + name: "panic case", + args: args{ + id: uint64(cache.InitialSize + cache.MinimumIndexGrowthDelta + 1), + index: createVertexSlice(cache.InitialSize + 1), + }, + wantIndexSize: cache.InitialSize + 1 + 2*cache.MinimumIndexGrowthDelta, + changed: true, + }, + } + for _, tt := range tests { + logger, _ := test.NewNullLogger() + t.Run(tt.name, func(t *testing.T) { + newNodes, changed, err := growIndexToAccomodateNode(tt.args.index, tt.args.id, logger) + assert.Len(t, newNodes, tt.wantIndexSize) + assert.Equal(t, tt.changed, changed) + if err != nil { + require.NotNil(t, tt.err) + assert.EqualError(t, err, tt.err.Error()) + } + // check the newly grown index + index := tt.args.index + if changed { + index = newNodes + } + assert.Greater(t, len(index), int(tt.args.id)) + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/metrics.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/metrics.go new file mode 100644 index 
0000000000000000000000000000000000000000..f9ac39b53e8e2c7ebfcc2fb3d2fe1f8c92b6ebd2 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/metrics.go @@ -0,0 +1,397 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package hnsw + +import ( + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/weaviate/weaviate/usecases/monitoring" +) + +type Metrics struct { + enabled bool + tombstones prometheus.Gauge + threads prometheus.Gauge + insert prometheus.Gauge + insertTime prometheus.ObserverVec + delete prometheus.Gauge + deleteTime prometheus.ObserverVec + cleaned prometheus.Counter + size prometheus.Gauge + grow prometheus.Observer + startupProgress prometheus.Gauge + startupDurations prometheus.ObserverVec + startupDiskIO prometheus.ObserverVec + tombstoneReassignNeighbors prometheus.Counter + tombstoneFindGlobalEntrypoint prometheus.Counter + tombstoneFindLocalEntrypoint prometheus.Counter + tombstoneDeleteListSize prometheus.Gauge + tombstoneUnexpected prometheus.CounterVec + tombstoneStart prometheus.Gauge + tombstoneEnd prometheus.Gauge + tombstoneProgress prometheus.Gauge +} + +func NewMetrics(prom *monitoring.PrometheusMetrics, + className, shardName string, +) *Metrics { + if prom == nil { + return &Metrics{enabled: false} + } + + if prom.Group { + className = "n/a" + shardName = "n/a" + } + + tombstones := prom.VectorIndexTombstones.With(prometheus.Labels{ + "class_name": className, + "shard_name": shardName, + }) + + threads := prom.VectorIndexTombstoneCleanupThreads.With(prometheus.Labels{ + "class_name": className, + "shard_name": shardName, + }) + + cleaned := prom.VectorIndexTombstoneCleanedCount.With(prometheus.Labels{ + "class_name": className, + "shard_name": 
shardName, + }) + + insert := prom.VectorIndexOperations.With(prometheus.Labels{ + "class_name": className, + "shard_name": shardName, + "operation": "create", + }) + + insertTime := prom.VectorIndexDurations.MustCurryWith(prometheus.Labels{ + "class_name": className, + "shard_name": shardName, + "operation": "create", + }) + + del := prom.VectorIndexOperations.With(prometheus.Labels{ + "class_name": className, + "shard_name": shardName, + "operation": "delete", + }) + + deleteTime := prom.VectorIndexDurations.MustCurryWith(prometheus.Labels{ + "class_name": className, + "shard_name": shardName, + "operation": "delete", + }) + + size := prom.VectorIndexSize.With(prometheus.Labels{ + "class_name": className, + "shard_name": shardName, + }) + + grow := prom.VectorIndexMaintenanceDurations.With(prometheus.Labels{ + "class_name": className, + "shard_name": shardName, + "operation": "grow", + }) + + startupProgress := prom.StartupProgress.With(prometheus.Labels{ + "class_name": className, + "shard_name": shardName, + "operation": "hnsw_read_commitlogs", + }) + + startupDurations := prom.StartupDurations.MustCurryWith(prometheus.Labels{ + "class_name": className, + "shard_name": shardName, + }) + + startupDiskIO := prom.StartupDiskIO.MustCurryWith(prometheus.Labels{ + "class_name": className, + "shard_name": shardName, + }) + + tombstoneReassignNeighbors := prom.TombstoneReassignNeighbors.With(prometheus.Labels{ + "class_name": className, + "shard_name": shardName, + }) + + tombstoneUnexpected := prom.VectorIndexTombstoneUnexpected.MustCurryWith(prometheus.Labels{ + "class_name": className, + "shard_name": shardName, + }) + + tombstoneStart := prom.VectorIndexTombstoneCycleStart.With(prometheus.Labels{ + "class_name": className, + "shard_name": shardName, + }) + + tombstoneEnd := prom.VectorIndexTombstoneCycleEnd.With(prometheus.Labels{ + "class_name": className, + "shard_name": shardName, + }) + + tombstoneProgress := 
prom.VectorIndexTombstoneCycleProgress.With(prometheus.Labels{ + "class_name": className, + "shard_name": shardName, + }) + + tombstoneFindGlobalEntrypoint := prom.TombstoneFindGlobalEntrypoint.With(prometheus.Labels{ + "class_name": className, + "shard_name": shardName, + }) + + tombstoneFindLocalEntrypoint := prom.TombstoneFindLocalEntrypoint.With(prometheus.Labels{ + "class_name": className, + "shard_name": shardName, + }) + + tombstoneDeleteListSize := prom.TombstoneDeleteListSize.With(prometheus.Labels{ + "class_name": className, + "shard_name": shardName, + }) + + return &Metrics{ + enabled: true, + tombstones: tombstones, + threads: threads, + cleaned: cleaned, + insert: insert, + insertTime: insertTime, + delete: del, + deleteTime: deleteTime, + size: size, + grow: grow, + startupProgress: startupProgress, + startupDurations: startupDurations, + startupDiskIO: startupDiskIO, + tombstoneReassignNeighbors: tombstoneReassignNeighbors, + tombstoneFindGlobalEntrypoint: tombstoneFindGlobalEntrypoint, + tombstoneFindLocalEntrypoint: tombstoneFindLocalEntrypoint, + tombstoneDeleteListSize: tombstoneDeleteListSize, + tombstoneUnexpected: *tombstoneUnexpected, + tombstoneStart: tombstoneStart, + tombstoneEnd: tombstoneEnd, + tombstoneProgress: tombstoneProgress, + } +} + +func (m *Metrics) TombstoneReassignNeighbor() { + if !m.enabled { + return + } + + m.tombstoneReassignNeighbors.Inc() +} + +func (m *Metrics) TombstoneFindGlobalEntrypoint() { + if !m.enabled { + return + } + + m.tombstoneFindGlobalEntrypoint.Inc() +} + +func (m *Metrics) TombstoneFindLocalEntrypoint() { + if !m.enabled { + return + } + + m.tombstoneFindLocalEntrypoint.Inc() +} + +func (m *Metrics) SetTombstoneDeleteListSize(size int) { + if !m.enabled { + return + } + + m.tombstoneDeleteListSize.Set(float64(size)) +} + +func (m *Metrics) AddTombstone() { + if !m.enabled { + return + } + + m.tombstones.Inc() +} + +func (m *Metrics) SetTombstone(count int) { + if !m.enabled { + return + } + + 
m.tombstones.Set(float64(count)) +} + +func (m *Metrics) AddUnexpectedTombstone(operation string) { + if !m.enabled { + return + } + + m.tombstoneUnexpected.With(prometheus.Labels{"operation": operation}).Inc() +} + +func (m *Metrics) StartTombstoneCycle() { + if !m.enabled { + return + } + + m.tombstoneStart.Set(float64(time.Now().Unix())) + m.tombstoneProgress.Set(0) + m.tombstoneEnd.Set(-1) +} + +func (m *Metrics) EndTombstoneCycle() { + if !m.enabled { + return + } + + m.tombstoneEnd.Set(float64(time.Now().Unix())) +} + +func (m *Metrics) TombstoneCycleProgress(progress float64) { + if !m.enabled { + return + } + + m.tombstoneProgress.Set(progress) +} + +func (m *Metrics) RemoveTombstone() { + if !m.enabled { + return + } + + m.tombstones.Dec() +} + +func (m *Metrics) StartCleanup(threads int) { + if !m.enabled { + return + } + + m.threads.Add(float64(threads)) +} + +func (m *Metrics) EndCleanup(threads int) { + if !m.enabled { + return + } + + m.threads.Sub(float64(threads)) +} + +func (m *Metrics) CleanedUp() { + if !m.enabled { + return + } + + m.cleaned.Inc() +} + +func (m *Metrics) InsertVector() { + if !m.enabled { + return + } + + m.insert.Inc() +} + +func (m *Metrics) DeleteVector() { + if !m.enabled { + return + } + + m.delete.Inc() +} + +func (m *Metrics) SetSize(size int) { + if !m.enabled { + return + } + + m.size.Set(float64(size)) +} + +func (m *Metrics) GrowDuration(start time.Time) { + if !m.enabled { + return + } + + took := float64(time.Since(start)) / float64(time.Millisecond) + m.grow.Observe(took) +} + +type Observer func(start time.Time) + +func noOpObserver(start time.Time) { + // do nothing +} + +func (m *Metrics) TrackInsertObserver(step string) Observer { + if !m.enabled { + return noOpObserver + } + + curried := m.insertTime.With(prometheus.Labels{"step": step}) + + return func(start time.Time) { + took := float64(time.Since(start)) / float64(time.Millisecond) + curried.Observe(took) + } +} + +func (m *Metrics) TrackDelete(start 
time.Time, step string) { + if !m.enabled { + return + } + + took := float64(time.Since(start)) / float64(time.Millisecond) + m.deleteTime.With(prometheus.Labels{"step": step}).Observe(took) +} + +func (m *Metrics) StartupProgress(ratio float64) { + if !m.enabled { + return + } + + m.startupProgress.Set(ratio) +} + +func (m *Metrics) TrackStartupTotal(start time.Time) { + if !m.enabled { + return + } + + took := float64(time.Since(start)) / float64(time.Millisecond) + m.startupDurations.With(prometheus.Labels{"operation": "hnsw_read_all_commitlogs"}).Observe(took) +} + +func (m *Metrics) TrackStartupIndividual(start time.Time) { + if !m.enabled { + return + } + + took := float64(time.Since(start)) / float64(time.Millisecond) + m.startupDurations.With(prometheus.Labels{"operation": "hnsw_read_single_commitlog"}).Observe(took) +} + +func (m *Metrics) TrackStartupReadCommitlogDiskIO(read int64, nanoseconds int64) { + if !m.enabled { + return + } + + seconds := float64(nanoseconds) / float64(time.Second) + throughput := float64(read) / float64(seconds) + m.startupDiskIO.With(prometheus.Labels{"operation": "hnsw_read_commitlog"}).Observe(throughput) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/multivector_hnsw_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/multivector_hnsw_test.go new file mode 100644 index 0000000000000000000000000000000000000000..ffbd9d8d8054146950102eb82fd4cfb1786ea3d3 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/multivector_hnsw_test.go @@ -0,0 +1,415 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest +// +build integrationTest + +package hnsw + +import ( + "context" + "errors" + "fmt" + "testing" + + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/repos/db/vector/common" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer" + "github.com/weaviate/weaviate/adapters/repos/db/vector/testinghelpers" + "github.com/weaviate/weaviate/entities/cyclemanager" + ent "github.com/weaviate/weaviate/entities/vectorindex/hnsw" +) + +var multiVectors = [][][]float32{ + // Document ID: 0 + { + {0.3546, 0.3751, 0.8565}, // Relative ID: 0 + {0.7441, 0.6594, 0.1069}, // Relative ID: 1 + {0.3224, 0.9466, 0.0006}, // Relative ID: 2 + }, + + // Document ID: 1 + { + {0.9017, 0.3555, 0.2460}, // Relative ID: 0 + {0.5278, 0.1360, 0.8384}, // Relative ID: 1 + }, + + // Document ID: 2 + { + {0.0817, 0.9565, 0.2802}, // Relative ID: 0 + }, +} + +var multiQueries = [][][]float32{ + // Query 0 + { + {0.9054, 0.4201, 0.0613}, + }, + + // Query 1 + { + {0.3491, 0.8591, 0.3742}, + {0.0613, 0.4201, 0.9054}, + }, +} + +// Expected results for each query +var expectedResults = [][]uint64{ + {1, 0, 2}, + {0, 2, 1}, +} + +func TestMultiVectorHnsw(t *testing.T) { + var vectorIndex *hnsw + ctx := context.Background() + maxConnections := 8 + efConstruction := 64 + ef := 64 + k := 10 + + t.Run("importing into hnsw", func(t *testing.T) { + index, err := New(Config{ + RootPath: "doesnt-matter-as-committlogger-is-mocked-out", + ID: "recallbenchmark", + MakeCommitLoggerThunk: MakeNoopCommitLogger, + DistanceProvider: distancer.NewDotProductProvider(), + VectorForIDThunk: func(ctx context.Context, id uint64) ([]float32, error) { + return []float32{0}, errors.New("can not use VectorForIDThunk with multivector") + }, + MultiVectorForIDThunk: func(ctx context.Context, id uint64) ([][]float32, error) { + return multiVectors[id], nil + }, + }, 
ent.UserConfig{ + VectorCacheMaxObjects: 1e12, + MaxConnections: maxConnections, + EFConstruction: efConstruction, + EF: ef, + Multivector: ent.MultivectorConfig{Enabled: true}, + }, cyclemanager.NewCallbackGroupNoop(), testinghelpers.NewDummyStore(t)) + require.Nil(t, err) + vectorIndex = index + + for i, vec := range multiVectors { + err := vectorIndex.AddMulti(ctx, uint64(i), vec) + require.Nil(t, err) + } + }) + + t.Run("inspect a query", func(t *testing.T) { + for i, query := range multiQueries { + ids, _, err := vectorIndex.SearchByMultiVector(ctx, query, k, nil) + require.Nil(t, err) + require.Equal(t, expectedResults[i], ids) + } + }) + + t.Run("delete some nodes", func(t *testing.T) { + // Delete the second node and then add back + newExpectedResults := [][]uint64{ + {0, 2}, + {0, 2}, + } + err := vectorIndex.DeleteMulti(1) + require.Nil(t, err) + for i, query := range multiQueries { + ids, _, err := vectorIndex.SearchByMultiVector(ctx, query, k, nil) + require.Nil(t, err) + require.Equal(t, newExpectedResults[i], ids) + } + err = vectorIndex.AddMulti(ctx, 1, multiVectors[1]) + require.Nil(t, err) + for i, query := range multiQueries { + ids, _, err := vectorIndex.SearchByMultiVector(ctx, query, k, nil) + require.Nil(t, err) + require.Equal(t, expectedResults[i], ids) + } + + // Delete the third node and then add back + newExpectedResults = [][]uint64{ + {1, 0}, + {0, 1}, + } + err = vectorIndex.DeleteMulti(2) + require.Nil(t, err) + for i, query := range multiQueries { + ids, _, err := vectorIndex.SearchByMultiVector(ctx, query, k, nil) + require.Nil(t, err) + require.Equal(t, newExpectedResults[i], ids) + } + err = vectorIndex.AddMulti(ctx, 2, multiVectors[2]) + require.Nil(t, err) + for i, query := range multiQueries { + ids, _, err := vectorIndex.SearchByMultiVector(ctx, query, k, nil) + require.Nil(t, err) + require.Equal(t, expectedResults[i], ids) + } + }) +} + +func TestMultiVectorCompressHnsw(t *testing.T) { + var vectorIndex *hnsw + 
maxConnections := 8 + efConstruction := 64 + ef := 64 + + userConfigTest := []ent.UserConfig{ + { + MaxConnections: maxConnections, + EFConstruction: efConstruction, + EF: ef, + VectorCacheMaxObjects: 1e12, + Multivector: ent.MultivectorConfig{Enabled: true}, + PQ: ent.PQConfig{ + Enabled: true, + Encoder: ent.PQEncoder{ + Type: ent.PQEncoderTypeKMeans, + Distribution: ent.PQEncoderDistributionLogNormal, + }, + Centroids: 256, + TrainingLimit: 100_000, + }, + }, + { + MaxConnections: maxConnections, + EFConstruction: efConstruction, + EF: ef, + VectorCacheMaxObjects: 1e12, + Multivector: ent.MultivectorConfig{Enabled: true}, + SQ: ent.SQConfig{Enabled: true}, + }, + } + + t.Run("creating hnsw with compression", func(t *testing.T) { + for _, userConfig := range userConfigTest { + _, err := New(Config{ + RootPath: "doesnt-matter-as-committlogger-is-mocked-out", + ID: "recallbenchmark", + MakeCommitLoggerThunk: MakeNoopCommitLogger, + DistanceProvider: distancer.NewDotProductProvider(), + VectorForIDThunk: func(ctx context.Context, id uint64) ([]float32, error) { + return []float32{0}, errors.New("can not use VectorForIDThunk with multivector") + }, + MultiVectorForIDThunk: func(ctx context.Context, id uint64) ([][]float32, error) { + return multiVectors[id], nil + }, + TempMultiVectorForIDThunk: func(ctx context.Context, id uint64, container *common.VectorSlice) ([][]float32, error) { + return multiVectors[id], nil + }, + }, userConfig, cyclemanager.NewCallbackGroupNoop(), testinghelpers.NewDummyStore(t)) + require.Nil(t, err) + } + }) + + t.Run("compressing hnsw after creation", func(t *testing.T) { + for _, userConfig := range userConfigTest { + index, err := New(Config{ + RootPath: "doesnt-matter-as-committlogger-is-mocked-out", + ID: "recallbenchmark", + MakeCommitLoggerThunk: MakeNoopCommitLogger, + DistanceProvider: distancer.NewDotProductProvider(), + VectorForIDThunk: func(ctx context.Context, id uint64) ([]float32, error) { + return []float32{0}, 
errors.New("can not use VectorForIDThunk with multivector") + }, + MultiVectorForIDThunk: func(ctx context.Context, id uint64) ([][]float32, error) { + return multiVectors[id], nil + }, + TempMultiVectorForIDThunk: func(ctx context.Context, id uint64, container *common.VectorSlice) ([][]float32, error) { + return multiVectors[id], nil + }, + }, ent.UserConfig{ + VectorCacheMaxObjects: 1e12, + MaxConnections: maxConnections, + EFConstruction: efConstruction, + EF: ef, + Multivector: ent.MultivectorConfig{Enabled: true}, + }, + cyclemanager.NewCallbackGroupNoop(), testinghelpers.NewDummyStore(t)) + require.Nil(t, err) + vectorIndex = index + err = vectorIndex.AddMulti(context.Background(), 0, multiVectors[0]) + require.Nil(t, err) + err = vectorIndex.compress(userConfig) + require.Nil(t, err) + } + }) +} + +func TestMultiVectorBQHnsw(t *testing.T) { + var vectorIndex *hnsw + ctx := context.Background() + maxConnections := 8 + efConstruction := 64 + ef := 64 + k := 10 + + t.Run("importing into hnsw", func(t *testing.T) { + index, err := New(Config{ + RootPath: "doesnt-matter-as-committlogger-is-mocked-out", + ID: "recallbenchmark", + MakeCommitLoggerThunk: MakeNoopCommitLogger, + DistanceProvider: distancer.NewDotProductProvider(), + VectorForIDThunk: func(ctx context.Context, id uint64) ([]float32, error) { + docID, relativeID := vectorIndex.cache.GetKeys(id) + return multiVectors[docID][relativeID], nil + }, + TempMultiVectorForIDThunk: func(ctx context.Context, id uint64, container *common.VectorSlice) ([][]float32, error) { + return multiVectors[id], nil + }, + }, ent.UserConfig{ + VectorCacheMaxObjects: 1e12, + MaxConnections: maxConnections, + EFConstruction: efConstruction, + EF: ef, + Multivector: ent.MultivectorConfig{Enabled: true}, + // BQ: ent.BQConfig{Enabled: true}, + }, cyclemanager.NewCallbackGroupNoop(), testinghelpers.NewDummyStore(t)) + require.Nil(t, err) + vectorIndex = index + + for i, vec := range multiVectors { + err := 
vectorIndex.AddMulti(ctx, uint64(i), vec) + require.Nil(t, err) + } + uc := ent.UserConfig{ + VectorCacheMaxObjects: 1e12, + MaxConnections: maxConnections, + EFConstruction: efConstruction, + EF: ef, + Multivector: ent.MultivectorConfig{ + Enabled: true, + }, + BQ: ent.BQConfig{ + Enabled: true, + }, + } + err = vectorIndex.compress(uc) + require.Nil(t, err) + }) + + t.Run("inspect a query", func(t *testing.T) { + for i, query := range multiQueries { + ids, _, err := vectorIndex.SearchByMultiVector(ctx, query, k, nil) + require.Nil(t, err) + require.Equal(t, expectedResults[i], ids) + } + }) + + t.Run("delete some nodes", func(t *testing.T) { + // Delete the first node and then add back + newExpectedResults := [][]uint64{ + {1, 2}, + {2, 1}, + } + err := vectorIndex.DeleteMulti(0) + require.Nil(t, err) + for i, query := range multiQueries { + ids, _, err := vectorIndex.SearchByMultiVector(ctx, query, k, nil) + require.Nil(t, err) + require.Equal(t, newExpectedResults[i], ids) + } + err = vectorIndex.AddMulti(ctx, 0, multiVectors[0]) + require.Nil(t, err) + for i, query := range multiQueries { + ids, _, err := vectorIndex.SearchByMultiVector(ctx, query, k, nil) + require.Nil(t, err) + require.Equal(t, expectedResults[i], ids) + } + }) +} + +func TestMultivectorPersistence(t *testing.T) { + dirName := t.TempDir() + ctx := context.Background() + indexID := "integrationtest" + maxConnections := 8 + efConstruction := 64 + ef := 64 + k := 10 + + logger, _ := test.NewNullLogger() + cl, clErr := NewCommitLogger(dirName, indexID, logger, + cyclemanager.NewCallbackGroupNoop()) + makeCL := func() (CommitLogger, error) { + return cl, clErr + } + store := testinghelpers.NewDummyStore(t) + + index, err := New(Config{ + RootPath: dirName, + ID: indexID, + MakeCommitLoggerThunk: makeCL, + DistanceProvider: distancer.NewDotProductProvider(), + VectorForIDThunk: func(ctx context.Context, id uint64) ([]float32, error) { + return []float32{0}, errors.New("can not use VectorForIDThunk 
with multivector") + }, + MultiVectorForIDThunk: func(ctx context.Context, id uint64) ([][]float32, error) { + return multiVectors[id], nil + }, + }, ent.UserConfig{ + VectorCacheMaxObjects: 1e12, + MaxConnections: maxConnections, + EFConstruction: efConstruction, + EF: ef, + Multivector: ent.MultivectorConfig{ + Enabled: true, + }, + }, cyclemanager.NewCallbackGroupNoop(), store) + require.Nil(t, err) + + t.Run("adding nodes", func(t *testing.T) { + for i, vec := range multiVectors { + err := index.AddMulti(ctx, uint64(i), vec) + require.Nil(t, err) + } + }) + + for i, query := range multiQueries { + ids, _, err := index.SearchByMultiVector(ctx, query, k, nil) + require.Nil(t, err) + require.Equal(t, expectedResults[i], ids) + } + + require.Nil(t, index.Flush()) + + // destroy the index + index = nil + + fmt.Println("building the second index") + // build a new index from the (uncondensed) commit log + secondIndex, err := New(Config{ + RootPath: dirName, + ID: indexID, + MakeCommitLoggerThunk: makeCL, + DistanceProvider: distancer.NewDotProductProvider(), + VectorForIDThunk: func(ctx context.Context, id uint64) ([]float32, error) { + return []float32{0}, errors.New("can not use VectorForIDThunk with multivector") + }, + MultiVectorForIDThunk: func(ctx context.Context, id uint64) ([][]float32, error) { + return multiVectors[id], nil + }, + }, ent.UserConfig{ + VectorCacheMaxObjects: 1e12, + MaxConnections: maxConnections, + EFConstruction: efConstruction, + EF: ef, + Multivector: ent.MultivectorConfig{ + Enabled: true, + }, + }, cyclemanager.NewCallbackGroupNoop(), store) + require.Nil(t, err) + + for i, query := range multiQueries { + ids, _, err := secondIndex.SearchByMultiVector(ctx, query, k, nil) + require.Nil(t, err) + require.Equal(t, expectedResults[i], ids) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/neighbor_connections.go 
b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/neighbor_connections.go new file mode 100644 index 0000000000000000000000000000000000000000..7593040dddea176be6f536da59e6269597507cd1 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/neighbor_connections.go @@ -0,0 +1,547 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package hnsw + +import ( + "context" + "fmt" + "math" + "sync" + "time" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/adapters/repos/db/priorityqueue" + "github.com/weaviate/weaviate/adapters/repos/db/vector/compressionhelpers" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/visited" + "github.com/weaviate/weaviate/entities/storobj" +) + +func (h *hnsw) findAndConnectNeighbors(ctx context.Context, node *vertex, + entryPointID uint64, nodeVec []float32, distancer compressionhelpers.CompressorDistancer, targetLevel, currentMaxLevel int, + denyList helpers.AllowList, +) error { + nfc := newNeighborFinderConnector(h, node, entryPointID, nodeVec, distancer, targetLevel, + currentMaxLevel, denyList, false, nil) + + return nfc.Do(ctx) +} + +func (h *hnsw) reconnectNeighboursOf(ctx context.Context, node *vertex, + entryPointID uint64, nodeVec []float32, distancer compressionhelpers.CompressorDistancer, targetLevel, currentMaxLevel int, + denyList helpers.AllowList, processedIDs *sync.Map, +) error { + nfc := newNeighborFinderConnector(h, node, entryPointID, nodeVec, distancer, targetLevel, + currentMaxLevel, denyList, true, processedIDs) + + return nfc.Do(ctx) +} + +type neighborFinderConnector struct { + ctx context.Context + graph *hnsw + node 
*vertex + entryPointID uint64 + entryPointDist float32 + nodeVec []float32 + distancer compressionhelpers.CompressorDistancer + targetLevel int + currentMaxLevel int + denyList helpers.AllowList + // bufLinksLog BufferedLinksLogger + tombstoneCleanupNodes bool + processedIDs *sync.Map +} + +func newNeighborFinderConnector(graph *hnsw, node *vertex, entryPointID uint64, + nodeVec []float32, distancer compressionhelpers.CompressorDistancer, targetLevel, currentMaxLevel int, + denyList helpers.AllowList, tombstoneCleanupNodes bool, processedIDs *sync.Map, +) *neighborFinderConnector { + return &neighborFinderConnector{ + ctx: graph.shutdownCtx, + graph: graph, + node: node, + entryPointID: entryPointID, + nodeVec: nodeVec, + distancer: distancer, + targetLevel: targetLevel, + currentMaxLevel: currentMaxLevel, + denyList: denyList, + tombstoneCleanupNodes: tombstoneCleanupNodes, + processedIDs: processedIDs, + } +} + +func (n *neighborFinderConnector) Do(ctx context.Context) error { + for level := min(n.targetLevel, n.currentMaxLevel); level >= 0; level-- { + err := n.doAtLevel(ctx, level) + if err != nil { + return errors.Wrapf(err, "at level %d", level) + } + } + + return nil +} + +func (n *neighborFinderConnector) processNode(id uint64) (float32, error) { + var dist float32 + var err error + + if n.distancer == nil { + dist, err = n.graph.distToNode(n.distancer, id, n.nodeVec) + } else { + dist, err = n.distancer.DistanceToNode(id) + } + + var e storobj.ErrNotFound + if errors.As(err, &e) { + n.graph.handleDeletedNode(e.DocID, "processNode") + return math.MaxFloat32, nil + } + if err != nil { + return math.MaxFloat32, fmt.Errorf( + "calculate distance between insert node and entrypoint: %w", err) + } + return dist, nil +} + +func (n *neighborFinderConnector) processRecursively(from uint64, results *priorityqueue.Queue[any], visited visited.ListSet, level, top int) error { + if top <= 0 { + return nil + } + if err := n.ctx.Err(); err != nil { + return err + } + + 
n.graph.RLock() + nodesLen := uint64(len(n.graph.nodes)) + n.graph.RUnlock() + var pending []uint64 + // Check if already completed (not just started) + if n.processedIDs != nil { + if _, alreadyProcessed := n.processedIDs.Load(from); alreadyProcessed { + return nil + } + } + + // lock the nodes slice + n.graph.shardedNodeLocks.Lock(from) + // Double-check after acquiring lock + if n.processedIDs != nil { + if _, alreadyProcessed := n.processedIDs.Load(from); alreadyProcessed { + n.graph.shardedNodeLocks.Unlock(from) + return nil + } + } + if nodesLen < from || n.graph.nodes[from] == nil { + n.graph.handleDeletedNode(from, "processRecursively") + if n.processedIDs != nil { + n.processedIDs.Store(from, struct{}{}) + } + n.graph.shardedNodeLocks.Unlock(from) + return nil + } + // lock the node itself + n.graph.nodes[from].Lock() + if level >= int(n.graph.nodes[from].connections.Layers()) { + n.graph.nodes[from].Unlock() + n.graph.shardedNodeLocks.Unlock(from) + return nil + } + connections := make([]uint64, n.graph.nodes[from].connections.LenAtLayer(uint8(level))) + n.graph.nodes[from].connections.CopyLayer(connections, uint8(level)) + n.graph.nodes[from].Unlock() + n.graph.shardedNodeLocks.Unlock(from) + for _, id := range connections { + if visited.Visited(id) { + continue + } + visited.Visit(id) + if n.denyList.Contains(id) { + pending = append(pending, id) + continue + } + + dist, err := n.processNode(id) + if err != nil { + var e storobj.ErrNotFound + if errors.As(err, &e) { + // node was deleted in the meantime + continue + } else { + return err + } + } + if results.Len() >= top && dist < results.Top().Dist { + results.Pop() + results.Insert(id, dist) + } else if results.Len() < top { + results.Insert(id, dist) + } + } + for _, id := range pending { + if results.Len() >= top { + dist, err := n.processNode(id) + if err != nil { + var e storobj.ErrNotFound + if errors.As(err, &e) { + // node was deleted in the meantime + continue + } + return err + } + if dist > 
results.Top().Dist { + continue + } + } + err := n.processRecursively(id, results, visited, level, top) + if err != nil { + return err + } + } + return nil +} + +func (n *neighborFinderConnector) doAtLevel(ctx context.Context, level int) error { + before := time.Now() + + var results *priorityqueue.Queue[any] + var extraIDs []uint64 = nil + total := 0 + maxConnections := n.graph.maximumConnections + + if n.tombstoneCleanupNodes { + results = n.graph.pools.pqResults.GetMax(n.graph.efConstruction) + + n.graph.pools.visitedListsLock.RLock() + visited := n.graph.pools.visitedLists.Borrow() + n.graph.pools.visitedListsLock.RUnlock() + n.node.Lock() + connections := make([]uint64, n.node.connections.LenAtLayer(uint8(level))) + n.node.connections.CopyLayer(connections, uint8(level)) + n.node.Unlock() + visited.Visit(n.node.id) + top := n.graph.efConstruction + var pending []uint64 = nil + + for _, id := range connections { + visited.Visit(id) + if n.denyList.Contains(id) { + pending = append(pending, id) + continue + } + extraIDs = append(extraIDs, id) + top-- + total++ + } + for _, id := range pending { + visited.Visit(id) + err := n.processRecursively(id, results, visited, level, top) + if err != nil { + n.graph.pools.visitedListsLock.RLock() + n.graph.pools.visitedLists.Return(visited) + n.graph.pools.visitedListsLock.RUnlock() + return err + } + } + n.graph.pools.visitedListsLock.RLock() + n.graph.pools.visitedLists.Return(visited) + n.graph.pools.visitedListsLock.RUnlock() + // use dynamic max connections only during tombstone cleanup + maxConnections = n.maximumConnections(level) + } else { + if err := n.pickEntrypoint(); err != nil { + return errors.Wrap(err, "pick entrypoint at level beginning") + } + eps := priorityqueue.NewMin[any](1) + eps.Insert(n.entryPointID, n.entryPointDist) + var err error + + results, err = n.graph.searchLayerByVectorWithDistancer(ctx, n.nodeVec, eps, n.graph.efConstruction, + level, nil, n.distancer) + if err != nil { + return 
errors.Wrapf(err, "search layer at level %d", level) + } + + n.graph.insertMetrics.findAndConnectSearch(before) + before = time.Now() + } + + if err := n.graph.selectNeighborsHeuristic(results, maxConnections-total, n.denyList); err != nil { + return errors.Wrap(err, "heuristic") + } + + n.graph.insertMetrics.findAndConnectHeuristic(before) + before = time.Now() + + // // for distributed spike + // neighborsAtLevel[level] = neighbors + + neighbors := make([]uint64, total, total+results.Len()) + copy(neighbors, extraIDs) + for results.Len() > 0 { + id := results.Pop().ID + neighbors = append(neighbors, id) + } + + n.graph.pools.pqResults.Put(results) + + neighborsCpy := neighbors + // the node will potentially own the neighbors slice (cf. hnsw.vertex#setConnectionsAtLevel). + // if so, we need to create a copy + n.node.setConnectionsAtLevel(level, neighbors) + + if err := n.graph.commitLog.ReplaceLinksAtLevel(n.node.id, level, neighborsCpy); err != nil { + return errors.Wrapf(err, "ReplaceLinksAtLevel node %d at level %d", n.node.id, level) + } + + for _, neighborID := range neighborsCpy { + if err := n.connectNeighborAtLevel(neighborID, level); err != nil { + return errors.Wrapf(err, "connect neighbor %d", neighborID) + } + } + + if len(neighbors) > 0 { + // there could be no neighbors left, if all are marked deleted, in this + // case, don't change the entrypoint + nextEntryPointID := neighborsCpy[len(neighbors)-1] + if nextEntryPointID == n.node.id { + return nil + } + + n.entryPointID = nextEntryPointID + } + + n.graph.insertMetrics.findAndConnectUpdateConnections(before) + return nil +} + +func (n *neighborFinderConnector) connectNeighborAtLevel(neighborID uint64, + level int, +) error { + neighbor := n.graph.nodeByID(neighborID) + if skip := n.skipNeighbor(neighbor); skip { + return nil + } + + neighbor.Lock() + defer neighbor.Unlock() + if level > neighbor.level { + // upgrade neighbor level if the level is out of sync due to a delete re-assign + 
neighbor.upgradeToLevelNoLock(level) + } + currentConnections := neighbor.connectionsAtLevelNoLock(level) + + maximumConnections := n.maximumConnections(level) + if len(currentConnections) < maximumConnections { + // we can simply append + // updatedConnections = append(currentConnections, n.node.id) + neighbor.appendConnectionAtLevelNoLock(level, n.node.id, maximumConnections) + if err := n.graph.commitLog.AddLinkAtLevel(neighbor.id, level, n.node.id); err != nil { + return err + } + } else { + // we need to run the heuristic + + dist, err := n.graph.distBetweenNodes(n.node.id, neighborID) + var e storobj.ErrNotFound + if err != nil && errors.As(err, &e) { + n.graph.handleDeletedNode(e.DocID, "connectNeighborAtLevel") + // it seems either the node or the neighbor were deleted in the meantime, + // there is nothing we can do now + return nil + } + if err != nil { + return errors.Wrapf(err, "dist between %d and %d", n.node.id, neighborID) + } + + candidates := priorityqueue.NewMax[any](len(currentConnections) + 1) + candidates.Insert(n.node.id, dist) + + for _, existingConnection := range currentConnections { + dist, err := n.graph.distBetweenNodes(existingConnection, neighborID) + var e storobj.ErrNotFound + if errors.As(err, &e) { + n.graph.handleDeletedNode(e.DocID, "connectNeighborAtLevel") + // was deleted in the meantime + continue + } + if err != nil { + return errors.Wrapf(err, "dist between %d and %d", existingConnection, neighborID) + } + + candidates.Insert(existingConnection, dist) + } + + err = n.graph.selectNeighborsHeuristic(candidates, maximumConnections, n.denyList) + if err != nil { + return errors.Wrap(err, "connect neighbors") + } + + neighbor.resetConnectionsAtLevelNoLock(level) + if err := n.graph.commitLog.ClearLinksAtLevel(neighbor.id, uint16(level)); err != nil { + return err + } + + ids := make([]uint64, 0, candidates.Len()) + for candidates.Len() > 0 { + id := candidates.Pop().ID + ids = append(ids, id) + if err := 
n.graph.commitLog.AddLinkAtLevel(neighbor.id, level, id); err != nil { + return err + } + } + neighbor.appendConnectionsAtLevelNoLock(level, ids, maximumConnections) + } + + return nil +} + +func (n *neighborFinderConnector) skipNeighbor(neighbor *vertex) bool { + if neighbor == n.node { + // don't connect to self + return true + } + + if neighbor == nil || n.graph.hasTombstone(neighbor.id) { + // don't connect to tombstoned nodes. This would only increase the + // cleanup that needs to be done. Even worse: A tombstoned node can be + // cleaned up at any time, also while we are connecting to it. So, + // while the node still exists right now, it might already be nil in + // the next line, which would lead to a nil-pointer panic. + return true + } + + return false +} + +func (n *neighborFinderConnector) maximumConnections(level int) int { + if level == 0 { + return n.graph.maximumConnectionsLayerZero + } + + return n.graph.maximumConnections +} + +func (n *neighborFinderConnector) pickEntrypoint() error { + // the neighborFinderConnector always has a suggestion for an entrypoint that + // it got from the outside, most of the times we can use this, but in some + // cases we can't. To see if we can use it, three conditions need to be met: + // + // 1. it needs to exist in the graph, i.e. be not nil + // + // 2. it can't be under maintenance + // + // 3. we need to be able to obtain a vector for it + + candidate := n.entryPointID + + // for our search we will need a copy of the current deny list, however, the + // cost of that copy can be significant. Let's first verify if the global + // entrypoint candidate is usable. If yes, we can return early and skip the + // copy. 
+ success, err := n.tryEpCandidate(candidate) + if err != nil { + var e storobj.ErrNotFound + if !errors.As(err, &e) { + return err + } + + // node was deleted in the meantime + // ignore the error and move to the logic below which will try more candidates + } + + if success { + // the global ep candidate is usable, let's skip the following logic (and + // therefore avoid the copy) + return nil + } + + // The global candidate is not usable, we need to find a new one. + localDeny := n.denyList.WrapOnWrite() + + // make sure the loop cannot block forever. In most cases, results should be + // found within micro to milliseconds, this is just a last resort to handle + // the unknown somewhat gracefully, for example if there is a bug in the + // underlying object store and we cannot retrieve the vector in time, etc. + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel() + n.graph.logger.WithFields(logrus.Fields{ + "action": "pick_entrypoint", + "duration": 60 * time.Second, + }).Debug("context.WithTimeout") + + for { + if err := ctx.Err(); err != nil { + return err + } + + success, err := n.tryEpCandidate(candidate) + if err != nil { + var e storobj.ErrNotFound + if !errors.As(err, &e) { + return err + } + + // node was deleted in the meantime + // ignore the error and try the next candidate + } + + if success { + return nil + } + + // no success so far, we need to keep going and find a better candidate + // make sure we never visit this candidate again + localDeny.Insert(candidate) + // now find a new one + + alternative, err := n.graph.findNewLocalEntrypoint(localDeny, candidate) + if err != nil { + return err + } + candidate = alternative + } +} + +func (n *neighborFinderConnector) tryEpCandidate(candidate uint64) (bool, error) { + node := n.graph.nodeByID(candidate) + if node == nil { + return false, nil + } + + if node.isUnderMaintenance() { + return false, nil + } + + var dist float32 + var err error + if n.distancer == nil { + 
dist, err = n.graph.distToNode(n.distancer, candidate, n.nodeVec) + } else { + dist, err = n.distancer.DistanceToNode(candidate) + } + var e storobj.ErrNotFound + if errors.As(err, &e) { + n.graph.handleDeletedNode(e.DocID, "tryEpCandidate") + return false, nil + } + if err != nil { + return false, fmt.Errorf("calculate distance between insert node and entrypoint: %w", err) + } + + // we were able to calculate a distance, we're good + n.entryPointDist = dist + n.entryPointID = candidate + return true, nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/packedconn/connections.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/packedconn/connections.go new file mode 100644 index 0000000000000000000000000000000000000000..45b6ecc1c19d4d9bf73e1e7228c0d18073d8d442 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/packedconn/connections.go @@ -0,0 +1,684 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package packedconn + +import ( + "fmt" + "math" +) + +const ( + InitialCapacity = 16 + DefaultMaxCapacity = 64 + // Simple encoding schemes - trade some compression for speed + SCHEME_2BYTE = 0 // 2 bytes per value (0-65535) + SCHEME_3BYTE = 1 // 3 bytes per value (0-16777215) + SCHEME_4BYTE = 2 // 4 bytes per value (0-4294967295) + SCHEME_5BYTE = 3 // 5 bytes per value (0-1099511627775) + SCHEME_8BYTE = 4 // 8 bytes per value (full uint64) +) + +type LayerData struct { + data []byte + // Packed scheme (4 bits) and count (12 bits) + // scheme is in lower 4 bits, count in upper 12 bits + packed uint16 +} + +type Connections struct { + layers []LayerData + layerCount uint8 +} + +func NewWithMaxLayer(maxLayer uint8) (*Connections, error) { + if int(maxLayer)+1 > math.MaxUint8 { + return nil, fmt.Errorf("max supported layer is %d", math.MaxUint8-1) + } + + layerCount := maxLayer + 1 + c := &Connections{ + layers: make([]LayerData, layerCount), + layerCount: layerCount, + } + for i := uint8(0); i < layerCount; i++ { + c.layers[i].data = make([]byte, 0, InitialCapacity) + } + return c, nil +} + +func NewWithData(data []byte) *Connections { + if len(data) == 0 { + return &Connections{ + layers: make([]LayerData, 0), + layerCount: 0, + } + } + + offset := 0 + layerCount := data[offset] + offset++ + + c := &Connections{ + layers: make([]LayerData, layerCount), + layerCount: layerCount, + } + + for i := uint8(0); i < layerCount; i++ { + if offset+6 > len(data) { // 2 for packed, 4 for dataLen + break // Malformed data + } + + // Read packed (2 bytes, little endian) + packed := uint16(data[offset]) | uint16(data[offset+1])<<8 + offset += 2 + + // Read data length (4 bytes, little endian) + dataLen := uint32(data[offset]) | + uint32(data[offset+1])<<8 | + uint32(data[offset+2])<<16 | + uint32(data[offset+3])<<24 + offset += 4 + + if offset+int(dataLen) > len(data) { + break // Malformed data + } + + layerData := make([]byte, dataLen) + 
copy(layerData, data[offset:offset+int(dataLen)]) + offset += int(dataLen) + + c.layers[i] = LayerData{ + data: layerData, + packed: packed, + } + } + + return c +} + +func NewWithElements(elements [][]uint64) (*Connections, error) { + c, err := NewWithMaxLayer(uint8(len(elements)) - 1) + if err != nil { + return nil, err + } + + for index, conns := range elements { + c.ReplaceLayer(uint8(index), conns) + } + return c, nil +} + +func (c *Connections) AddLayer() { + c.layerCount++ + c.layers = append(c.layers, LayerData{}) +} + +func (c *Connections) GrowLayersTo(newLayers uint8) { + targetCount := newLayers + 1 + if targetCount <= c.layerCount { + return + } + + // Optimize for the common case: 1 layer + if c.layerCount == 0 && targetCount == 1 { + c.layers = make([]LayerData, 1) + c.layerCount = 1 + return + } + + for c.layerCount < targetCount { + c.AddLayer() + } +} + +// determineOptimalScheme analyzes values to pick the most efficient encoding +func determineOptimalScheme(values []uint64) uint8 { + if len(values) == 0 { + return SCHEME_2BYTE + } + + maxVal := uint64(0) + for _, val := range values { + if val > maxVal { + maxVal = val + } + } + + if maxVal <= 65535 { + return SCHEME_2BYTE + } else if maxVal <= 16777215 { + return SCHEME_3BYTE + } else if maxVal <= 4294967295 { + return SCHEME_4BYTE + } else if maxVal <= 1099511627775 { + return SCHEME_5BYTE + } + return SCHEME_8BYTE +} + +// encodeValues encodes values using the specified scheme +func encodeValues(values []uint64, scheme uint8) []byte { + switch scheme { + case SCHEME_2BYTE: + data := make([]byte, len(values)*2) + for i, val := range values { + data[i*2] = byte(val) + data[i*2+1] = byte(val >> 8) + } + return data + + case SCHEME_3BYTE: + data := make([]byte, len(values)*3) + for i, val := range values { + data[i*3] = byte(val) + data[i*3+1] = byte(val >> 8) + data[i*3+2] = byte(val >> 16) + } + return data + + case SCHEME_4BYTE: + data := make([]byte, len(values)*4) + for i, val := range 
values { + data[i*4] = byte(val) + data[i*4+1] = byte(val >> 8) + data[i*4+2] = byte(val >> 16) + data[i*4+3] = byte(val >> 24) + } + return data + + case SCHEME_5BYTE: + data := make([]byte, len(values)*5) + for i, val := range values { + data[i*5] = byte(val) + data[i*5+1] = byte(val >> 8) + data[i*5+2] = byte(val >> 16) + data[i*5+3] = byte(val >> 24) + data[i*5+4] = byte(val >> 32) + } + return data + + case SCHEME_8BYTE: + data := make([]byte, len(values)*8) + for i, val := range values { + for j := 0; j < 8; j++ { + data[i*8+j] = byte(val >> (j * 8)) + } + } + return data + + default: + return encodeValues(values, SCHEME_8BYTE) + } +} + +// decodeInto decodes values directly into the provided slice using the specified scheme +// +//go:inline +func decodeInto(data []byte, scheme uint8, count uint32, result []uint64) { + switch scheme { + case SCHEME_2BYTE: + for i := uint32(0); i < count; i++ { + result[i] = uint64(data[i*2]) | uint64(data[i*2+1])<<8 + } + + case SCHEME_3BYTE: + for i := uint32(0); i < count; i++ { + result[i] = uint64(data[i*3]) | uint64(data[i*3+1])<<8 | uint64(data[i*3+2])<<16 + } + + case SCHEME_4BYTE: + for i := uint32(0); i < count; i++ { + result[i] = uint64(data[i*4]) | + uint64(data[i*4+1])<<8 | + uint64(data[i*4+2])<<16 | + uint64(data[i*4+3])<<24 + } + + case SCHEME_5BYTE: + for i := uint32(0); i < count; i++ { + result[i] = uint64(data[i*5]) | + uint64(data[i*5+1])<<8 | + uint64(data[i*5+2])<<16 | + uint64(data[i*5+3])<<24 | + uint64(data[i*5+4])<<32 + } + + case SCHEME_8BYTE: + for i := uint32(0); i < count; i++ { + val := uint64(0) + for j := uint32(0); j < 8; j++ { + val |= uint64(data[i*8+j]) << (j * 8) + } + result[i] = val + } + } +} + +// decodeValues decodes values using the specified scheme +func decodeValues(data []byte, scheme uint8, count uint32) []uint64 { + result := make([]uint64, count) + decodeInto(data, scheme, count, result) + return result +} + +// Helper functions for packed scheme and count +func 
packSchemeAndCount(scheme uint8, count uint32) uint16 { + if count > 4095 { // 2^12 - 1 + count = 4095 + } + return uint16(scheme) | uint16(count)<<4 +} + +func unpackScheme(packed uint16) uint8 { + return uint8(packed & 0xF) +} + +func unpackCount(packed uint16) uint32 { + return uint32(packed >> 4) +} + +func (c *Connections) ReplaceLayer(layer uint8, conns []uint64) { + if layer >= c.layerCount { + c.GrowLayersTo(layer) + } + + if len(conns) == 0 { + c.layers[layer] = LayerData{} + return + } + + scheme := determineOptimalScheme(conns) + data := encodeValues(conns, scheme) + + c.layers[layer] = LayerData{ + data: data, + packed: packSchemeAndCount(scheme, uint32(len(conns))), + } +} + +// Fast insertion optimized for append-only operations +func (c *Connections) InsertAtLayer(conn uint64, layer uint8) { + if layer >= c.layerCount { + c.GrowLayersTo(layer) + } + + layerData := &c.layers[layer] + + // If layer is empty, start with optimal scheme for this value + if layerData.packed == 0 { + scheme := determineOptimalScheme([]uint64{conn}) + layerData.packed = packSchemeAndCount(scheme, 1) + layerData.data = encodeValues([]uint64{conn}, scheme) + return + } + + // Check if current scheme can handle the new value + requiredScheme := determineOptimalScheme([]uint64{conn}) + currentScheme := unpackScheme(layerData.packed) + if requiredScheme > currentScheme { + // Need to upgrade scheme - decode, append, re-encode + values := decodeValues(layerData.data, currentScheme, unpackCount(layerData.packed)) + values = append(values, conn) + layerData.packed = packSchemeAndCount(requiredScheme, uint32(len(values))) + layerData.data = encodeValues(values, requiredScheme) + return + } + + // Can use current scheme - just append encoded bytes + c.appendToLayer(conn, layer) +} + +// appendToLayer appends a single value using the current scheme +func (c *Connections) appendToLayer(conn uint64, layer uint8) { + layerData := &c.layers[layer] + scheme := unpackScheme(layerData.packed) 
+ count := unpackCount(layerData.packed) + + var bytesNeeded int + switch scheme { + case SCHEME_2BYTE: + bytesNeeded = 2 + case SCHEME_3BYTE: + bytesNeeded = 3 + case SCHEME_4BYTE: + bytesNeeded = 4 + case SCHEME_5BYTE: + bytesNeeded = 5 + case SCHEME_8BYTE: + bytesNeeded = 8 + default: + bytesNeeded = 8 // Safe fallback + } + + // Smart capacity management in limits - grow more conservatively than Go's default doubling + if len(layerData.data)+bytesNeeded > cap(layerData.data) && len(layerData.data)+bytesNeeded <= DefaultMaxCapacity*bytesNeeded { + + currentLen := len(layerData.data) + // We can assume this due to previous check + maxCapacity := DefaultMaxCapacity * bytesNeeded + + // Use growth strategy based on quantile data from real world data + // p25=0.39, p50=0.52, p75=0.69, p90=0.84, p95=0.92, p99=0.98 + ratio := float64(currentLen) / float64(maxCapacity) + var target int + + switch { + case ratio < 0.25: + target = int(0.25 * float64(maxCapacity)) + case ratio < 0.52: + target = int(0.52 * float64(maxCapacity)) + case ratio < 0.84: + target = int(0.84 * float64(maxCapacity)) + default: + target = maxCapacity + } + + if target < currentLen+bytesNeeded { + target = currentLen + bytesNeeded + } + + // Cap at maximum capacity + if target > maxCapacity { + target = maxCapacity + } + + newData := make([]byte, currentLen, target) + copy(newData, layerData.data) + layerData.data = newData + } + + switch scheme { + case SCHEME_2BYTE: + layerData.data = append(layerData.data, + byte(conn), + byte(conn>>8)) + + case SCHEME_3BYTE: + layerData.data = append(layerData.data, + byte(conn), + byte(conn>>8), + byte(conn>>16)) + + case SCHEME_4BYTE: + layerData.data = append(layerData.data, + byte(conn), + byte(conn>>8), + byte(conn>>16), + byte(conn>>24)) + + case SCHEME_5BYTE: + layerData.data = append(layerData.data, + byte(conn), + byte(conn>>8), + byte(conn>>16), + byte(conn>>24), + byte(conn>>32)) + + case SCHEME_8BYTE: + for j := 0; j < 8; j++ { + layerData.data = 
append(layerData.data, byte(conn>>(j*8))) + } + } + + layerData.packed = packSchemeAndCount(scheme, count+1) +} + +func (c *Connections) BulkInsertAtLayer(conns []uint64, layer uint8) { + if layer >= c.layerCount { + c.GrowLayersTo(layer) + } + + if len(conns) == 0 { + return + } + + layerData := &c.layers[layer] + + if layerData.packed == 0 { + // Empty layer - just encode all values + scheme := determineOptimalScheme(conns) + layerData.packed = packSchemeAndCount(scheme, uint32(len(conns))) + layerData.data = encodeValues(conns, scheme) + return + } + + // Check if current scheme can handle the new values + currentScheme := unpackScheme(layerData.packed) + requiredScheme := determineOptimalScheme(conns) + + if requiredScheme <= currentScheme { + // Current scheme is sufficient - just append encoded bytes + currentCount := unpackCount(layerData.packed) + newCount := currentCount + uint32(len(conns)) + + // Encode new values using current scheme and append + newData := encodeValues(conns, currentScheme) + layerData.data = append(layerData.data, newData...) + layerData.packed = packSchemeAndCount(currentScheme, newCount) + return + } + + // Need to upgrade scheme - decode existing, merge, and re-encode + existing := decodeValues(layerData.data, currentScheme, unpackCount(layerData.packed)) + all := append(existing, conns...) 
+ + scheme := determineOptimalScheme(all) + layerData.packed = packSchemeAndCount(scheme, uint32(len(all))) + layerData.data = encodeValues(all, scheme) +} + +func (c *Connections) Data() []byte { + if c.layerCount == 0 { + return []byte{0} + } + + // Calculate total size + totalSize := 1 // layer count + for i := uint8(0); i < c.layerCount; i++ { + totalSize += 2 // packed scheme and count + totalSize += 4 // data length + totalSize += len(c.layers[i].data) // data + } + + data := make([]byte, totalSize) + offset := 0 + + data[offset] = c.layerCount + offset++ + + for i := uint8(0); i < c.layerCount; i++ { + layer := &c.layers[i] + + // Write packed scheme and count (2 bytes, little endian) + data[offset] = byte(layer.packed) + data[offset+1] = byte(layer.packed >> 8) + offset += 2 + + // Write data length (4 bytes, little endian) + dataLen := uint32(len(layer.data)) + data[offset] = byte(dataLen) + data[offset+1] = byte(dataLen >> 8) + data[offset+2] = byte(dataLen >> 16) + data[offset+3] = byte(dataLen >> 24) + offset += 4 + + // Write data + copy(data[offset:], layer.data) + offset += len(layer.data) + } + + return data +} + +func (c *Connections) LenAtLayer(layer uint8) int { + if layer >= c.layerCount { + return 0 + } + return int(unpackCount(c.layers[layer].packed)) +} + +func (c *Connections) GetLayer(layer uint8) []uint64 { + if layer >= c.layerCount || c.layers[layer].packed == 0 { + return nil + } + + layerData := &c.layers[layer] + return decodeValues(layerData.data, unpackScheme(layerData.packed), unpackCount(layerData.packed)) +} + +func (c *Connections) CopyLayer(conns []uint64, layer uint8) []uint64 { + if layer >= c.layerCount || c.layers[layer].packed == 0 { + return conns[:0] + } + + layerData := &c.layers[layer] + count := int(unpackCount(layerData.packed)) + + if cap(conns) < count { + conns = make([]uint64, count) + } else { + conns = conns[:count] + } + + decodeInto(layerData.data, unpackScheme(layerData.packed), uint32(count), conns) + 
return conns +} + +func (c *Connections) Layers() uint8 { + return c.layerCount +} + +func (c *Connections) IterateOnLayers(f func(layer uint8, conns []uint64)) { + for layer := uint8(0); layer < c.layerCount; layer++ { + conns := c.GetLayer(layer) + f(layer, conns) + } +} + +func (c *Connections) GetAllLayers() [][]uint64 { + result := make([][]uint64, c.layerCount) + for i := uint8(0); i < c.layerCount; i++ { + result[i] = c.GetLayer(i) + } + return result +} + +// Iterator implementations remain similar but work with the new structure +type LayerIterator struct { + connections *Connections + currentLayer uint8 + maxLayers uint8 +} + +func (c *Connections) Iterator() *LayerIterator { + return &LayerIterator{ + connections: c, + currentLayer: 0, + maxLayers: c.layerCount, + } +} + +func (iter *LayerIterator) Next() bool { + return iter.currentLayer < iter.maxLayers +} + +func (iter *LayerIterator) Current() (uint8, []uint64) { + if iter.currentLayer >= iter.maxLayers { + return 0, nil + } + + index := iter.currentLayer + connections := iter.connections.GetLayer(index) + iter.currentLayer++ + + return index, connections +} + +func (iter *LayerIterator) Reset() { + iter.currentLayer = 0 +} + +// Element iterator for a specific layer +type LayerElementIterator struct { + connections *Connections + layer uint8 + index int + maxIndex int + values []uint64 // cached decoded values for performance +} + +func (c *Connections) ElementIterator(layer uint8) *LayerElementIterator { + maxIndex := 0 + var values []uint64 + + if layer < c.layerCount && c.layers[layer].packed != 0 { + maxIndex = int(unpackCount(c.layers[layer].packed)) + // Decode values once for the iterator's lifetime + layerData := &c.layers[layer] + values = decodeValues(layerData.data, unpackScheme(layerData.packed), uint32(maxIndex)) + } + + return &LayerElementIterator{ + connections: c, + layer: layer, + index: 0, + maxIndex: maxIndex, + values: values, + } +} + +func (iter *LayerElementIterator) Next() 
bool { + if iter.index >= iter.maxIndex { + return false + } + iter.index++ + return true +} + +func (iter *LayerElementIterator) Current() (index int, value uint64) { + if iter.index <= 0 || iter.index > iter.maxIndex { + return -1, 0 + } + + currentIndex := iter.index - 1 + value = iter.values[currentIndex] + + return currentIndex, value +} + +func (iter *LayerElementIterator) Value() uint64 { + _, value := iter.Current() + return value +} + +func (iter *LayerElementIterator) Index() int { + return iter.index - 1 +} + +func (iter *LayerElementIterator) Reset() { + iter.index = 0 +} + +func (iter *LayerElementIterator) HasElements() bool { + return iter.maxIndex > 0 +} + +func (iter *LayerElementIterator) Count() int { + return iter.maxIndex +} + +func (c *Connections) ClearLayer(layer uint8) { + if layer < c.layerCount { + c.layers[layer].data = c.layers[layer].data[:0] + c.layers[layer].packed = 0 + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/packedconn/connections_benchmark_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/packedconn/connections_benchmark_test.go new file mode 100644 index 0000000000000000000000000000000000000000..4ce2c5cffd3da3c6f9381f6b6c955ee629623562 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/packedconn/connections_benchmark_test.go @@ -0,0 +1,279 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package packedconn + +import ( + "math/rand" + "testing" +) + +// Original implementation for comparison +func (c *Connections) bulkInsertAtLayerOriginal(conns []uint64, layer uint8) { + if layer >= c.layerCount { + c.GrowLayersTo(layer) + } + + if len(conns) == 0 { + return + } + + layerData := &c.layers[layer] + + if layerData.packed == 0 { + // Empty layer - just encode all values + scheme := determineOptimalScheme(conns) + layerData.packed = packSchemeAndCount(scheme, uint32(len(conns))) + layerData.data = encodeValues(conns, scheme) + return + } + + // Always decode, merge, and re-encode (original behavior) + currentScheme := unpackScheme(layerData.packed) + existing := decodeValues(layerData.data, currentScheme, unpackCount(layerData.packed)) + all := append(existing, conns...) + + scheme := determineOptimalScheme(all) + layerData.packed = packSchemeAndCount(scheme, uint32(len(all))) + layerData.data = encodeValues(all, scheme) +} + +// Generate test data with different value ranges +func generateTestData(size int, maxValue uint64) []uint64 { + data := make([]uint64, size) + for i := 0; i < size; i++ { + data[i] = uint64(rand.Int63n(int64(maxValue))) + } + return data +} + +// Benchmark: Same scheme scenario (most common case) +// Initial data fits in 2-byte scheme, new data also fits in 2-byte scheme +func BenchmarkBulkInsert_SameScheme_32(b *testing.B) { + initialData := generateTestData(32, 65535) // 2-byte scheme + newData := generateTestData(16, 65535) // Also 2-byte scheme + + b.Run("Original", func(b *testing.B) { + for i := 0; i < b.N; i++ { + c, _ := NewWithMaxLayer(0) + c.ReplaceLayer(0, initialData) + c.bulkInsertAtLayerOriginal(newData, 0) + } + }) + + b.Run("Optimized", func(b *testing.B) { + for i := 0; i < b.N; i++ { + c, _ := NewWithMaxLayer(0) + c.ReplaceLayer(0, initialData) + c.BulkInsertAtLayer(newData, 0) + } + }) +} + +func BenchmarkBulkInsert_SameScheme_64(b *testing.B) { + initialData := 
generateTestData(64, 65535) // 2-byte scheme + newData := generateTestData(32, 65535) // Also 2-byte scheme + + b.Run("Original", func(b *testing.B) { + for i := 0; i < b.N; i++ { + c, _ := NewWithMaxLayer(0) + c.ReplaceLayer(0, initialData) + c.bulkInsertAtLayerOriginal(newData, 0) + } + }) + + b.Run("Optimized", func(b *testing.B) { + for i := 0; i < b.N; i++ { + c, _ := NewWithMaxLayer(0) + c.ReplaceLayer(0, initialData) + c.BulkInsertAtLayer(newData, 0) + } + }) +} + +// Benchmark: Scheme upgrade scenario +// Initial data fits in 2-byte scheme, new data requires 4-byte scheme +func BenchmarkBulkInsert_SchemeUpgrade_32(b *testing.B) { + initialData := generateTestData(32, 65535) // 2-byte scheme + newData := generateTestData(16, 4294967295) // 4-byte scheme + + b.Run("Original", func(b *testing.B) { + for i := 0; i < b.N; i++ { + c, _ := NewWithMaxLayer(0) + c.ReplaceLayer(0, initialData) + c.bulkInsertAtLayerOriginal(newData, 0) + } + }) + + b.Run("Optimized", func(b *testing.B) { + for i := 0; i < b.N; i++ { + c, _ := NewWithMaxLayer(0) + c.ReplaceLayer(0, initialData) + c.BulkInsertAtLayer(newData, 0) + } + }) +} + +func BenchmarkBulkInsert_SchemeUpgrade_64(b *testing.B) { + initialData := generateTestData(64, 65535) // 2-byte scheme + newData := generateTestData(32, 4294967295) // 4-byte scheme + + b.Run("Original", func(b *testing.B) { + for i := 0; i < b.N; i++ { + c, _ := NewWithMaxLayer(0) + c.ReplaceLayer(0, initialData) + c.bulkInsertAtLayerOriginal(newData, 0) + } + }) + + b.Run("Optimized", func(b *testing.B) { + for i := 0; i < b.N; i++ { + c, _ := NewWithMaxLayer(0) + c.ReplaceLayer(0, initialData) + c.BulkInsertAtLayer(newData, 0) + } + }) +} + +// Benchmark: Different scheme combinations +func BenchmarkBulkInsert_Various_Schemes(b *testing.B) { + scenarios := []struct { + name string + initialMax uint64 + newMax uint64 + initialSize int + newSize int + }{ + {"2byte_to_2byte_32", 65535, 65535, 32, 16}, + {"2byte_to_2byte_64", 65535, 65535, 64, 
32}, + {"2byte_to_3byte_32", 65535, 16777215, 32, 16}, + {"2byte_to_3byte_64", 65535, 16777215, 64, 32}, + {"3byte_to_3byte_32", 16777215, 16777215, 32, 16}, + {"3byte_to_3byte_64", 16777215, 16777215, 64, 32}, + {"4byte_to_4byte_32", 4294967295, 4294967295, 32, 16}, + {"4byte_to_4byte_64", 4294967295, 4294967295, 64, 32}, + } + + for _, scenario := range scenarios { + initialData := generateTestData(scenario.initialSize, scenario.initialMax) + newData := generateTestData(scenario.newSize, scenario.newMax) + + b.Run(scenario.name+"_Original", func(b *testing.B) { + for i := 0; i < b.N; i++ { + c, _ := NewWithMaxLayer(0) + c.ReplaceLayer(0, initialData) + c.bulkInsertAtLayerOriginal(newData, 0) + } + }) + + b.Run(scenario.name+"_Optimized", func(b *testing.B) { + for i := 0; i < b.N; i++ { + c, _ := NewWithMaxLayer(0) + c.ReplaceLayer(0, initialData) + c.BulkInsertAtLayer(newData, 0) + } + }) + } +} + +// Benchmark memory allocations +func BenchmarkBulkInsert_Allocations(b *testing.B) { + initialData := generateTestData(64, 65535) // 2-byte scheme + newData := generateTestData(32, 65535) // Also 2-byte scheme + + b.Run("Original_Allocs", func(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + c, _ := NewWithMaxLayer(0) + c.ReplaceLayer(0, initialData) + c.bulkInsertAtLayerOriginal(newData, 0) + } + }) + + b.Run("Optimized_Allocs", func(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + c, _ := NewWithMaxLayer(0) + c.ReplaceLayer(0, initialData) + c.BulkInsertAtLayer(newData, 0) + } + }) +} + +// Benchmark with realistic workload patterns +func BenchmarkBulkInsert_RealisticWorkload(b *testing.B) { + // Simulate building a connection list incrementally + // Most additions don't require scheme upgrades + + b.Run("Realistic_32_Original", func(b *testing.B) { + for i := 0; i < b.N; i++ { + c, _ := NewWithMaxLayer(0) + + // Initial small list + initial := generateTestData(8, 65535) + c.ReplaceLayer(0, initial) + + // Add data in chunks 
(realistic pattern) + for j := 0; j < 4; j++ { + chunk := generateTestData(8, 65535) // Same scheme + c.bulkInsertAtLayerOriginal(chunk, 0) + } + } + }) + + b.Run("Realistic_32_Optimized", func(b *testing.B) { + for i := 0; i < b.N; i++ { + c, _ := NewWithMaxLayer(0) + + // Initial small list + initial := generateTestData(8, 65535) + c.ReplaceLayer(0, initial) + + // Add data in chunks (realistic pattern) + for j := 0; j < 4; j++ { + chunk := generateTestData(8, 65535) // Same scheme + c.BulkInsertAtLayer(chunk, 0) + } + } + }) + + b.Run("Realistic_64_Original", func(b *testing.B) { + for i := 0; i < b.N; i++ { + c, _ := NewWithMaxLayer(0) + + // Initial medium list + initial := generateTestData(16, 65535) + c.ReplaceLayer(0, initial) + + // Add data in chunks + for j := 0; j < 4; j++ { + chunk := generateTestData(12, 65535) // Same scheme + c.bulkInsertAtLayerOriginal(chunk, 0) + } + } + }) + + b.Run("Realistic_64_Optimized", func(b *testing.B) { + for i := 0; i < b.N; i++ { + c, _ := NewWithMaxLayer(0) + + // Initial medium list + initial := generateTestData(16, 65535) + c.ReplaceLayer(0, initial) + + // Add data in chunks + for j := 0; j < 4; j++ { + chunk := generateTestData(12, 65535) // Same scheme + c.BulkInsertAtLayer(chunk, 0) + } + } + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/packedconn/connections_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/packedconn/connections_test.go new file mode 100644 index 0000000000000000000000000000000000000000..bf94086c376b664cacf1b8089f24140e9f51d69d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/packedconn/connections_test.go @@ -0,0 +1,1345 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package packedconn + +import ( + "math/rand" + "sort" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + connsSlice1 = []uint64{ + 4477, 83, 6777, 13118, 12903, 12873, 14397, 15034, 15127, 15162, 15219, 15599, 17627, + 18624, 18844, 19359, 22981, 23099, 36188, 37400, 39724, 39810, 47254, 58047, 59647, 61746, + 64635, 66528, 70470, 73936, 86283, 86697, 120033, 129098, 131345, 137609, 140937, 186468, + 191226, 199803, 206818, 223456, 271063, 278598, 288539, 395876, 396785, 452103, 487237, + 506431, 507230, 554813, 572566, 595572, 660562, 694477, 728865, 730031, 746368, 809331, + 949338, + } + connsSlice2 = []uint64{ + 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, + } + connsSlice3 = []uint64{ + 9999, 10000, 10001, + } +) + +func TestConnections_ReplaceLayers(t *testing.T) { + c, err := NewWithMaxLayer(2) + require.Nil(t, err) + + // Initially all layers should have length==0 and return no results + assert.Equal(t, 0, c.LenAtLayer(0)) + assert.Len(t, c.GetLayer(0), 0) + assert.Equal(t, 0, c.LenAtLayer(1)) + assert.Len(t, c.GetLayer(1), 0) + assert.Equal(t, 0, c.LenAtLayer(2)) + assert.Len(t, c.GetLayer(2), 0) + + // replace layer 0, it should return the correct results, all others should + // still be empty + c.ReplaceLayer(0, connsSlice1) + assert.ElementsMatch(t, connsSlice1, c.GetLayer(0)) + assert.Len(t, c.GetLayer(1), 0) + assert.Len(t, c.GetLayer(2), 0) + + // replace layer 1+2, other layers should be unaffected + c.ReplaceLayer(1, connsSlice2) + c.ReplaceLayer(2, connsSlice3) + assert.ElementsMatch(t, connsSlice1, c.GetLayer(0)) + assert.ElementsMatch(t, connsSlice2, c.GetLayer(1)) + assert.ElementsMatch(t, connsSlice3, c.GetLayer(2)) + + // replace a layer with a smaller list to trigger a shrinking operation + c.ReplaceLayer(2, []uint64{768}) + assert.ElementsMatch(t, []uint64{768}, c.GetLayer(2)) + assert.ElementsMatch(t, connsSlice1, c.GetLayer(0)) + 
assert.ElementsMatch(t, connsSlice2, c.GetLayer(1)) + + // replace the other layers with smaller lists + c.ReplaceLayer(0, connsSlice1[:5]) + c.ReplaceLayer(1, connsSlice2[:5]) + assert.ElementsMatch(t, connsSlice1[:5], c.GetLayer(0)) + assert.ElementsMatch(t, connsSlice2[:5], c.GetLayer(1)) + + // finally grow all layers back to their original sizes again, to verify what + // previous shrinking does not hinder future growing + c.ReplaceLayer(1, connsSlice2) + c.ReplaceLayer(2, connsSlice3) + c.ReplaceLayer(0, connsSlice1) + assert.ElementsMatch(t, connsSlice1, c.GetLayer(0)) + assert.ElementsMatch(t, connsSlice2, c.GetLayer(1)) + assert.ElementsMatch(t, connsSlice3, c.GetLayer(2)) +} + +func TestConnections_ConstructorWithData(t *testing.T) { + c, err := NewWithElements([][]uint64{ + connsSlice1, + connsSlice2, + connsSlice3, + }) + require.Nil(t, err) + + assert.ElementsMatch(t, connsSlice1, c.GetLayer(0)) + assert.ElementsMatch(t, connsSlice2, c.GetLayer(1)) + assert.ElementsMatch(t, connsSlice3, c.GetLayer(2)) +} + +func TestConnections_CopyLayers(t *testing.T) { + c, err := NewWithMaxLayer(2) + require.Nil(t, err) + + conns := make([]uint64, 0, 100) + + // Initially all layers should have length==0 and return no results + assert.Equal(t, 0, c.LenAtLayer(0)) + assert.Len(t, c.CopyLayer(conns, 0), 0) + assert.Equal(t, 0, c.LenAtLayer(1)) + assert.Len(t, c.CopyLayer(conns, 1), 0) + assert.Equal(t, 0, c.LenAtLayer(2)) + assert.Len(t, c.CopyLayer(conns, 2), 0) + + // replace layer 0, it should return the correct results, all others should + // still be empty + c.ReplaceLayer(0, connsSlice1) + assert.ElementsMatch(t, connsSlice1, c.CopyLayer(conns, 0)) + assert.Len(t, c.CopyLayer(conns, 1), 0) + assert.Len(t, c.CopyLayer(conns, 2), 0) + + // replace layer 1+2, other layers should be unaffected + c.ReplaceLayer(1, connsSlice2) + c.ReplaceLayer(2, connsSlice3) + assert.ElementsMatch(t, connsSlice1, c.CopyLayer(conns, 0)) + assert.ElementsMatch(t, connsSlice2, 
c.CopyLayer(conns, 1)) + assert.ElementsMatch(t, connsSlice3, c.CopyLayer(conns, 2)) + + // replace a layer with a smaller list to trigger a shrinking operation + c.ReplaceLayer(2, []uint64{768}) + assert.ElementsMatch(t, []uint64{768}, c.CopyLayer(conns, 2)) + assert.ElementsMatch(t, connsSlice1, c.CopyLayer(conns, 0)) + assert.ElementsMatch(t, connsSlice2, c.CopyLayer(conns, 1)) + + // replace the other layers with smaller lists + c.ReplaceLayer(0, connsSlice1[:5]) + c.ReplaceLayer(1, connsSlice2[:5]) + assert.ElementsMatch(t, connsSlice1[:5], c.CopyLayer(conns, 0)) + assert.ElementsMatch(t, connsSlice2[:5], c.CopyLayer(conns, 1)) + + // finally grow all layers back to their original sizes again, to verify what + // previous shrinking does not hinder future growing + c.ReplaceLayer(1, connsSlice2) + c.ReplaceLayer(2, connsSlice3) + c.ReplaceLayer(0, connsSlice1) + assert.ElementsMatch(t, connsSlice1, c.CopyLayer(conns, 0)) + assert.ElementsMatch(t, connsSlice2, c.CopyLayer(conns, 1)) + assert.ElementsMatch(t, connsSlice3, c.CopyLayer(conns, 2)) +} + +func TestConnections_InsertLayers(t *testing.T) { + c, err := NewWithMaxLayer(2) + require.Nil(t, err) + + assert.Equal(t, 0, c.LenAtLayer(0)) + assert.Len(t, c.GetLayer(0), 0) + assert.Equal(t, 0, c.LenAtLayer(1)) + assert.Len(t, c.GetLayer(1), 0) + assert.Equal(t, 0, c.LenAtLayer(2)) + assert.Len(t, c.GetLayer(2), 0) + + c.ReplaceLayer(0, connsSlice1) + c.ReplaceLayer(1, connsSlice2) + c.ReplaceLayer(2, connsSlice3) + + c.ReplaceLayer(1, []uint64{}) + shuffled := make([]uint64, len(connsSlice2)) + copy(shuffled, connsSlice2) + shuffled = append(shuffled, 10000) + rand.Shuffle(len(shuffled), func(i, j int) { shuffled[i], shuffled[j] = shuffled[j], shuffled[i] }) + for _, item := range shuffled { + c.InsertAtLayer(item, 1) + } + + conns2 := c.GetLayer(1) + assert.ElementsMatch(t, connsSlice1, c.GetLayer(0)) + assert.ElementsMatch(t, shuffled, conns2) + assert.ElementsMatch(t, connsSlice3, c.GetLayer(2)) +} + +func 
TestConnections_InsertLayersAtEnd(t *testing.T) { + c, err := NewWithMaxLayer(2) + require.Nil(t, err) + + assert.Equal(t, 0, c.LenAtLayer(0)) + assert.Len(t, c.GetLayer(0), 0) + assert.Equal(t, 0, c.LenAtLayer(1)) + assert.Len(t, c.GetLayer(1), 0) + assert.Equal(t, 0, c.LenAtLayer(2)) + assert.Len(t, c.GetLayer(2), 0) + + c.ReplaceLayer(0, connsSlice1) + c.ReplaceLayer(1, connsSlice2) + c.ReplaceLayer(2, connsSlice3) + + c.ReplaceLayer(0, []uint64{}) + shuffled := make([]uint64, len(connsSlice1)) + copy(shuffled, connsSlice1) + shuffled = append(shuffled, 10000) + rand.Shuffle(len(shuffled), func(i, j int) { shuffled[i], shuffled[j] = shuffled[j], shuffled[i] }) + for _, item := range shuffled { + c.InsertAtLayer(item, 0) + } + + conns1 := c.GetLayer(0) + assert.ElementsMatch(t, shuffled, conns1) + assert.ElementsMatch(t, connsSlice2, c.GetLayer(1)) + assert.ElementsMatch(t, connsSlice3, c.GetLayer(2)) +} + +func TestConnections_InsertLayerAfterAddingLayer(t *testing.T) { + c, err := NewWithMaxLayer(1) + require.Nil(t, err) + + assert.Equal(t, 0, c.LenAtLayer(0)) + assert.Len(t, c.GetLayer(0), 0) + assert.Equal(t, 0, c.LenAtLayer(1)) + assert.Len(t, c.GetLayer(1), 0) + + c.ReplaceLayer(0, connsSlice1) + c.ReplaceLayer(1, connsSlice2) + + assert.ElementsMatch(t, connsSlice1, c.GetLayer(0)) + assert.ElementsMatch(t, connsSlice2, c.GetLayer(1)) + + c.AddLayer() + + c.ReplaceLayer(0, []uint64{}) + shuffled := make([]uint64, len(connsSlice1)) + copy(shuffled, connsSlice1) + rand.Shuffle(len(shuffled), func(i, j int) { shuffled[i], shuffled[j] = shuffled[j], shuffled[i] }) + for _, item := range shuffled { + c.InsertAtLayer(item, 0) + } + + c.ReplaceLayer(2, []uint64{}) + shuffled = make([]uint64, len(connsSlice3)) + copy(shuffled, connsSlice3) + rand.Shuffle(len(shuffled), func(i, j int) { shuffled[i], shuffled[j] = shuffled[j], shuffled[i] }) + for _, item := range shuffled { + c.InsertAtLayer(item, 2) + } + + assert.ElementsMatch(t, connsSlice1, c.GetLayer(0)) + 
assert.ElementsMatch(t, connsSlice2, c.GetLayer(1)) + assert.ElementsMatch(t, connsSlice3, c.GetLayer(2)) +} + +func TestConnections_AccessHigherLayersDoesNotReturnData(t *testing.T) { + c, err := NewWithMaxLayer(1) + require.Nil(t, err) + + assert.Equal(t, 0, c.LenAtLayer(0)) + assert.Len(t, c.GetLayer(0), 0) + assert.Equal(t, 0, c.LenAtLayer(1)) + assert.Len(t, c.GetLayer(1), 0) + + c.ReplaceLayer(0, connsSlice1) + c.ReplaceLayer(1, connsSlice2) + + assert.ElementsMatch(t, connsSlice1, c.GetLayer(0)) + assert.ElementsMatch(t, connsSlice2, c.GetLayer(1)) + assert.ElementsMatch(t, nil, c.GetLayer(2)) +} + +func TestConnections_InsertLayersByNumber(t *testing.T) { + c, err := NewWithMaxLayer(1) + require.Nil(t, err) + + assert.Equal(t, 0, c.LenAtLayer(0)) + assert.Len(t, c.GetLayer(0), 0) + assert.Equal(t, 0, c.LenAtLayer(1)) + assert.Len(t, c.GetLayer(1), 0) + + c.ReplaceLayer(0, connsSlice1) + c.ReplaceLayer(1, connsSlice2) + + assert.ElementsMatch(t, connsSlice1, c.GetLayer(0)) + assert.ElementsMatch(t, connsSlice2, c.GetLayer(1)) + + c.GrowLayersTo(4) + + c.ReplaceLayer(0, []uint64{}) + shuffled := make([]uint64, len(connsSlice1)) + copy(shuffled, connsSlice1) + rand.Shuffle(len(shuffled), func(i, j int) { shuffled[i], shuffled[j] = shuffled[j], shuffled[i] }) + for _, item := range shuffled { + c.InsertAtLayer(item, 0) + } + + c.ReplaceLayer(2, []uint64{}) + shuffled = make([]uint64, len(connsSlice3)) + copy(shuffled, connsSlice3) + rand.Shuffle(len(shuffled), func(i, j int) { shuffled[i], shuffled[j] = shuffled[j], shuffled[i] }) + for _, item := range shuffled { + c.InsertAtLayer(item, 2) + } + + assert.ElementsMatch(t, connsSlice1, c.GetLayer(0)) + assert.ElementsMatch(t, connsSlice2, c.GetLayer(1)) + assert.ElementsMatch(t, connsSlice3, c.GetLayer(2)) + assert.ElementsMatch(t, []uint64{}, c.GetLayer(3)) + assert.ElementsMatch(t, []uint64{}, c.GetLayer(4)) +} + +func TestConnections_GrowsLayersSuccessfully(t *testing.T) { + c, err := NewWithMaxLayer(0) + 
require.Nil(t, err) + + assert.Equal(t, 0, c.LenAtLayer(0)) + assert.Len(t, c.GetLayer(0), 0) + + c.ReplaceLayer(0, connsSlice3) + + assert.ElementsMatch(t, connsSlice3, c.GetLayer(0)) + + c.GrowLayersTo(1) + + c.ReplaceLayer(1, connsSlice2) + assert.ElementsMatch(t, connsSlice2, c.GetLayer(1)) +} + +func randomArray(size int) []uint64 { + res := make([]uint64, 0, size) + for i := 0; i < size; i++ { + res = append(res, uint64(rand.Uint32()/10000)) + } + return res +} + +func TestConnections_stress(t *testing.T) { + layers := uint8(10) + c, err := NewWithMaxLayer(layers) + require.Nil(t, err) + + slices := make([][]uint64, 0, layers+1) + for i := uint8(0); i <= layers; i++ { + assert.Equal(t, 0, c.LenAtLayer(i)) + assert.Len(t, c.GetLayer(i), 0) + slices = append(slices, randomArray(32)) + } + + for i := uint8(0); i <= layers; i++ { + c.ReplaceLayer(i, slices[i]) + } + + randomArray(32) + randomArray(32) + + for i := uint8(0); i <= layers; i++ { + newNumbers := randomArray(5) + slices[i] = append(slices[i], newNumbers...) 
+ for j := range newNumbers { + c.InsertAtLayer(newNumbers[j], i) + } + } + + for i := uint8(0); int(i) < len(slices); i++ { + sort.Slice(slices[i], func(i2, j int) bool { + return slices[i][i2] < slices[i][j] + }) + assert.Equal(t, len(slices[i]), c.LenAtLayer(i)) + if !assert.ElementsMatch(t, slices[i], c.GetLayer(i)) { + return + } + } +} + +func TestInitialSizeShouldAccommodateLayers(t *testing.T) { + _, err := NewWithMaxLayer(50) + require.Nil(t, err) +} + +func TestConnections_LayerRange(t *testing.T) { + c, err := NewWithMaxLayer(2) + require.Nil(t, err) + + layerCount := 0 + iter := c.Iterator() + for iter.Next() { + layer, connections := iter.Current() + assert.Equal(t, uint8(layerCount), layer) + assert.Len(t, connections, 0) + layerCount++ + } + assert.Equal(t, 3, layerCount) + + c.ReplaceLayer(0, connsSlice1) + c.ReplaceLayer(1, connsSlice2) + c.ReplaceLayer(2, connsSlice3) + + expectedData := [][]uint64{connsSlice1, connsSlice2, connsSlice3} + layerCount = 0 + + iter = c.Iterator() + for iter.Next() { + layer, connections := iter.Current() + assert.Equal(t, uint8(layerCount), layer) + assert.ElementsMatch(t, expectedData[layerCount], connections) + layerCount++ + } + assert.Equal(t, 3, layerCount) +} + +func TestConnections_LayerRangeWithSingleLayer(t *testing.T) { + c, err := NewWithMaxLayer(0) + require.Nil(t, err) + + c.ReplaceLayer(0, connsSlice1) + + layerCount := 0 + iter := c.Iterator() + for iter.Next() { + layer, connections := iter.Current() + assert.Equal(t, uint8(0), layer) + assert.ElementsMatch(t, connsSlice1, connections) + layerCount++ + } + assert.Equal(t, 1, layerCount) +} + +func TestConnections_LayerRangeAfterAddingLayers(t *testing.T) { + c, err := NewWithMaxLayer(1) + require.Nil(t, err) + + c.ReplaceLayer(0, connsSlice1) + c.ReplaceLayer(1, connsSlice2) + + c.AddLayer() + c.ReplaceLayer(2, connsSlice3) + + expectedData := [][]uint64{connsSlice1, connsSlice2, connsSlice3} + layerCount := 0 + + iter := c.Iterator() + for 
iter.Next() { + layer, connections := iter.Current() + assert.Equal(t, uint8(layerCount), layer) + assert.ElementsMatch(t, expectedData[layerCount], connections) + layerCount++ + } + assert.Equal(t, 3, layerCount) +} + +func TestConnections_LayerRangeAfterGrowingLayers(t *testing.T) { + c, err := NewWithMaxLayer(1) + require.Nil(t, err) + + c.ReplaceLayer(0, connsSlice1) + c.ReplaceLayer(1, connsSlice2) + + c.GrowLayersTo(4) + c.ReplaceLayer(2, connsSlice3) + + expectedLayers := 5 + layerCount := 0 + + iter := c.Iterator() + for iter.Next() { + layer, connections := iter.Current() + assert.Equal(t, uint8(layerCount), layer) + + switch layerCount { + case 0: + assert.ElementsMatch(t, connsSlice1, connections) + case 1: + assert.ElementsMatch(t, connsSlice2, connections) + case 2: + assert.ElementsMatch(t, connsSlice3, connections) + case 3, 4: + assert.Len(t, connections, 0) + } + layerCount++ + } + assert.Equal(t, expectedLayers, layerCount) +} + +func TestConnections_LayerRangeWithDynamicModifications(t *testing.T) { + c, err := NewWithMaxLayer(2) + require.Nil(t, err) + + c.ReplaceLayer(0, connsSlice1) + c.ReplaceLayer(1, connsSlice2) + c.ReplaceLayer(2, connsSlice3) + + layerCount := 0 + iter := c.Iterator() + for iter.Next() { + layer, connections := iter.Current() + if layerCount == 1 { + c.ReplaceLayer(2, []uint64{999, 1000, 1001}) + } + + assert.Equal(t, uint8(layerCount), layer) + assert.True(t, len(connections) >= 0) + layerCount++ + } + assert.Equal(t, 3, layerCount) +} + +func TestConnections_LayerRangeCompareWithIterateOnLayers(t *testing.T) { + c, err := NewWithMaxLayer(3) + require.Nil(t, err) + + testData := [][]uint64{ + randomArray(10), + randomArray(15), + randomArray(8), + randomArray(12), + } + + for i, data := range testData { + c.ReplaceLayer(uint8(i), data) + } + + rangeResults := make(map[uint8][]uint64) + iter := c.Iterator() + for iter.Next() { + layer, connections := iter.Current() + rangeResults[layer] = connections + } + + 
iterateResults := make(map[uint8][]uint64) + c.IterateOnLayers(func(layer uint8, conns []uint64) { + iterateResults[layer] = conns + }) + + assert.Equal(t, len(iterateResults), len(rangeResults)) + for layer := uint8(0); layer < c.Layers(); layer++ { + assert.ElementsMatch(t, iterateResults[layer], rangeResults[layer]) + assert.ElementsMatch(t, testData[layer], rangeResults[layer]) + } +} + +func TestConnections_LayerRangeStress(t *testing.T) { + layers := uint8(20) + c, err := NewWithMaxLayer(layers) + require.Nil(t, err) + + testSlices := make([][]uint64, layers+1) + for i := uint8(0); i <= layers; i++ { + testSlices[i] = randomArray(50) + c.ReplaceLayer(i, testSlices[i]) + } + + layerCount := 0 + iter := c.Iterator() + for iter.Next() { + layer, connections := iter.Current() + assert.Equal(t, uint8(layerCount), layer) + assert.ElementsMatch(t, testSlices[layerCount], connections) + layerCount++ + } + + assert.Equal(t, int(layers)+1, layerCount) +} + +func TestConnections_LayerRangeEmptyConnections(t *testing.T) { + c, err := NewWithMaxLayer(5) + require.Nil(t, err) + + layerCount := 0 + iter := c.Iterator() + for iter.Next() { + layer, connections := iter.Current() + assert.Equal(t, uint8(layerCount), layer) + assert.Len(t, connections, 0) + layerCount++ + } + assert.Equal(t, 6, layerCount) +} + +func TestConnections_ElementRange(t *testing.T) { + c, err := NewWithMaxLayer(2) + require.Nil(t, err) + + elementCount := 0 + iter := c.ElementIterator(0) + for iter.Next() { + elementCount++ + } + assert.Equal(t, 0, elementCount) + + c.ReplaceLayer(0, connsSlice1) + c.ReplaceLayer(1, connsSlice2) + c.ReplaceLayer(2, connsSlice3) + + elementCount = 0 + actualElements := make([]uint64, 0) + iter = c.ElementIterator(0) + for iter.Next() { + index, value := iter.Current() + assert.Equal(t, elementCount, index) + actualElements = append(actualElements, value) + elementCount++ + } + assert.Equal(t, len(connsSlice1), elementCount) + assert.ElementsMatch(t, connsSlice1, 
actualElements) // Use ElementsMatch instead of Equal + + elementCount = 0 + actualElements = make([]uint64, 0) + iter = c.ElementIterator(1) + for iter.Next() { + index, value := iter.Current() + assert.Equal(t, elementCount, index) + actualElements = append(actualElements, value) + elementCount++ + } + assert.Equal(t, len(connsSlice2), elementCount) + assert.ElementsMatch(t, connsSlice2, actualElements) // Use ElementsMatch instead of Equal + + elementCount = 0 + actualElements = make([]uint64, 0) + iter = c.ElementIterator(2) + for iter.Next() { + index, value := iter.Current() + assert.Equal(t, elementCount, index) + actualElements = append(actualElements, value) + elementCount++ + } + assert.Equal(t, len(connsSlice3), elementCount) + assert.ElementsMatch(t, connsSlice3, actualElements) // Use ElementsMatch instead of Equal +} + +func TestConnections_ElementRangeWithSingleLayer(t *testing.T) { + c, err := NewWithMaxLayer(0) + require.Nil(t, err) + + c.ReplaceLayer(0, connsSlice1) + + elementCount := 0 + actualElements := make([]uint64, 0) + iter := c.ElementIterator(0) + for iter.Next() { + index, value := iter.Current() + assert.Equal(t, elementCount, index) + actualElements = append(actualElements, value) + elementCount++ + } + assert.Equal(t, len(connsSlice1), elementCount) + assert.ElementsMatch(t, connsSlice1, actualElements) // Use ElementsMatch instead of Equal +} + +func TestConnections_ElementRangeAfterAddingLayers(t *testing.T) { + c, err := NewWithMaxLayer(1) + require.Nil(t, err) + + c.ReplaceLayer(0, connsSlice1) + c.ReplaceLayer(1, connsSlice2) + + c.AddLayer() + c.ReplaceLayer(2, connsSlice3) + + // Test the newly added layer + elementCount := 0 + actualElements := make([]uint64, 0) + iter := c.ElementIterator(2) + for iter.Next() { + index, value := iter.Current() + assert.Equal(t, elementCount, index) + actualElements = append(actualElements, value) + elementCount++ + } + assert.Equal(t, len(connsSlice3), elementCount) + assert.ElementsMatch(t, 
connsSlice3, actualElements) // Use ElementsMatch instead of Equal +} + +func TestConnections_ElementRangeAfterGrowingLayers(t *testing.T) { + c, err := NewWithMaxLayer(1) + require.Nil(t, err) + + c.ReplaceLayer(0, connsSlice1) + c.ReplaceLayer(1, connsSlice2) + + c.GrowLayersTo(4) + c.ReplaceLayer(2, connsSlice3) + + elementCount := 0 + actualElements := make([]uint64, 0) + iter := c.ElementIterator(2) + for iter.Next() { + index, value := iter.Current() + assert.Equal(t, elementCount, index) + actualElements = append(actualElements, value) + elementCount++ + } + assert.Equal(t, len(connsSlice3), elementCount) + assert.ElementsMatch(t, connsSlice3, actualElements) // Use ElementsMatch instead of Equal + + elementCount = 0 + iter = c.ElementIterator(3) + for iter.Next() { + elementCount++ + } + assert.Equal(t, 0, elementCount) + + elementCount = 0 + iter = c.ElementIterator(4) + for iter.Next() { + elementCount++ + } + assert.Equal(t, 0, elementCount) +} + +func TestConnections_ElementRangeWithInvalidLayer(t *testing.T) { + c, err := NewWithMaxLayer(2) + require.Nil(t, err) + + c.ReplaceLayer(0, connsSlice1) + c.ReplaceLayer(1, connsSlice2) + c.ReplaceLayer(2, connsSlice3) + + elementCount := 0 + iter := c.ElementIterator(5) + for iter.Next() { + elementCount++ + } + assert.Equal(t, 0, elementCount) +} + +func TestConnections_ElementRangeCompareWithElementIterator(t *testing.T) { + c, err := NewWithMaxLayer(3) + require.Nil(t, err) + + testData := [][]uint64{ + randomArray(10), + randomArray(15), + randomArray(8), + randomArray(12), + } + + for i, data := range testData { + c.ReplaceLayer(uint8(i), data) + } + + for layer := uint8(0); layer < c.Layers(); layer++ { + rangeResults := make([]struct { + Index int + Value uint64 + }, 0) + + iter := c.ElementIterator(layer) + for iter.Next() { + index, value := iter.Current() + rangeResults = append(rangeResults, struct { + Index int + Value uint64 + }{ + Index: index, + Value: value, + }) + } + + iteratorResults := 
make([]struct { + Index int + Value uint64 + }, 0) + + iter = c.ElementIterator(layer) + for iter.Next() { + index, value := iter.Current() + iteratorResults = append(iteratorResults, struct { + Index int + Value uint64 + }{ + Index: index, + Value: value, + }) + } + + assert.Equal(t, len(iteratorResults), len(rangeResults)) + // Compare the actual results - they should be identical since we're using the same iterator + assert.Equal(t, iteratorResults, rangeResults) + } +} + +func TestConnections_ElementRangeStress(t *testing.T) { + layers := uint8(20) + c, err := NewWithMaxLayer(layers) + require.Nil(t, err) + + testSlices := make([][]uint64, layers+1) + for i := uint8(0); i <= layers; i++ { + testSlices[i] = randomArray(50) + c.ReplaceLayer(i, testSlices[i]) + } + + for layer := uint8(0); layer <= layers; layer++ { + elementCount := 0 + actualElements := make([]uint64, 0) + iter := c.ElementIterator(layer) + for iter.Next() { + index, value := iter.Current() + assert.Equal(t, elementCount, index) + actualElements = append(actualElements, value) + elementCount++ + } + + assert.Equal(t, len(testSlices[layer]), elementCount) + assert.ElementsMatch(t, testSlices[layer], actualElements) // Use ElementsMatch instead of Equal + } +} + +// Additional test to verify order preservation (insertion order should be maintained) +func TestConnections_ElementOrderPreservation(t *testing.T) { + c, err := NewWithMaxLayer(0) + require.Nil(t, err) + + // Test with a specific sequence to verify insertion order is preserved + testSequence := []uint64{100, 1, 50, 200, 25} + c.ReplaceLayer(0, testSequence) + + actualElements := make([]uint64, 0) + iter := c.ElementIterator(0) + for iter.Next() { + _, value := iter.Current() + actualElements = append(actualElements, value) + } + + // Should preserve the exact order from ReplaceLayer + assert.Equal(t, testSequence, actualElements) +} + +// Test to verify that individual insertions maintain order +func 
TestConnections_InsertionOrderPreservation(t *testing.T) { + c, err := NewWithMaxLayer(0) + require.Nil(t, err) + + // Insert values one by one + values := []uint64{100, 1, 50, 200, 25} + for _, val := range values { + c.InsertAtLayer(val, 0) + } + + actualElements := make([]uint64, 0) + iter := c.ElementIterator(0) + for iter.Next() { + _, value := iter.Current() + actualElements = append(actualElements, value) + } + + // Should preserve the insertion order + assert.Equal(t, values, actualElements) +} + +func TestConnections_ElementRangeEmptyConnections(t *testing.T) { + c, err := NewWithMaxLayer(5) + require.Nil(t, err) + + for layer := uint8(0); layer <= 5; layer++ { + elementCount := 0 + iter := c.ElementIterator(layer) + for iter.Next() { + elementCount++ + } + assert.Equal(t, 0, elementCount) + } +} + +func TestConnections_ElementRangeWithInsertions(t *testing.T) { + c, err := NewWithMaxLayer(1) + require.Nil(t, err) + + c.ReplaceLayer(0, []uint64{}) + + testElements := []uint64{100, 50, 200, 25, 150} + for _, elem := range testElements { + c.InsertAtLayer(elem, 0) + } + + elementCount := 0 + actualElements := make([]uint64, 0) + iter := c.ElementIterator(0) + for iter.Next() { + index, value := iter.Current() + assert.Equal(t, elementCount, index) + actualElements = append(actualElements, value) + elementCount++ + } + + assert.Equal(t, len(testElements), elementCount) + // Use ElementsMatch to check that all elements are present regardless of order + assert.ElementsMatch(t, testElements, actualElements) +} + +func TestConnections_ElementRangeConsistentIndexing(t *testing.T) { + c, err := NewWithMaxLayer(0) + require.Nil(t, err) + + testData := []uint64{10, 5, 20, 15, 25} + c.ReplaceLayer(0, testData) + + expectedIndex := 0 + iter := c.ElementIterator(0) + for iter.Next() { + index, _ := iter.Current() + assert.Equal(t, expectedIndex, index) + expectedIndex++ + } + assert.Equal(t, len(testData), expectedIndex) +} + +func 
TestConnections_InsertAtLayer_SchemeGrowth(t *testing.T) { + c, err := NewWithMaxLayer(0) + require.Nil(t, err) + + expectedConns := make([]uint64, 0) + + // Start with 62 values that fit in SCHEME_2BYTE to test capacity growth + for i := uint64(1); i <= 62; i++ { + expectedConns = append(expectedConns, i) + c.InsertAtLayer(i, 0) + } + assert.ElementsMatch(t, expectedConns, c.GetLayer(0)) + assert.Equal(t, 62, c.LenAtLayer(0)) + assert.Equal(t, uint8(SCHEME_2BYTE), unpackScheme(c.layers[0].packed)) + + // SCHEME_3BYTE + val2 := uint64(1 << 16) // 65536 + expectedConns = append(expectedConns, val2) + c.InsertAtLayer(val2, 0) + assert.ElementsMatch(t, expectedConns, c.GetLayer(0)) + assert.Equal(t, 63, c.LenAtLayer(0)) + assert.Equal(t, uint8(SCHEME_3BYTE), unpackScheme(c.layers[0].packed)) + + // SCHEME_4BYTE + val3 := uint64(1 << 24) // 16777216 + expectedConns = append(expectedConns, val3) + c.InsertAtLayer(val3, 0) + assert.ElementsMatch(t, expectedConns, c.GetLayer(0)) + assert.Equal(t, 64, c.LenAtLayer(0)) + assert.Equal(t, uint8(SCHEME_4BYTE), unpackScheme(c.layers[0].packed)) + + // SCHEME_5BYTE + val4 := uint64(1 << 32) + expectedConns = append(expectedConns, val4) + c.InsertAtLayer(val4, 0) + assert.ElementsMatch(t, expectedConns, c.GetLayer(0)) + assert.Equal(t, 65, c.LenAtLayer(0)) + assert.Equal(t, uint8(SCHEME_5BYTE), unpackScheme(c.layers[0].packed)) + + // SCHEME_8BYTE + val5 := uint64(1 << 40) + expectedConns = append(expectedConns, val5) + c.InsertAtLayer(val5, 0) + assert.ElementsMatch(t, expectedConns, c.GetLayer(0)) + assert.Equal(t, 66, c.LenAtLayer(0)) + assert.Equal(t, uint8(SCHEME_8BYTE), unpackScheme(c.layers[0].packed)) +} + +func BenchmarkInsertAtLayer(b *testing.B) { + layers := uint8(5) + + c := make(map[int]*Connections) + for i := 0; i < b.N; i++ { + cTemp, err := NewWithMaxLayer(layers) + require.Nil(b, err) + for i := uint8(0); i <= layers; i++ { + cTemp.ReplaceLayer(i, randomArray(32)) + } + c[i] = cTemp + } + + b.ResetTimer() + for 
i := 0; i < b.N; i++ { + for l := uint8(0); l <= layers; l++ { + newNumbers := randomArray(5) + for j := range newNumbers { + c[i].InsertAtLayer(newNumbers[j], l) + } + } + } +} + +func BenchmarkInsertAtLayerLarge(b *testing.B) { + layers := uint8(5) + + c, err := NewWithMaxLayer(layers) + require.Nil(b, err) + + for i := uint8(0); i <= layers; i++ { + c.ReplaceLayer(i, randomArray(32)) + } + + newNumbers := randomArray(1000) + b.ResetTimer() + for i := 0; i < b.N; i++ { + for l := uint8(0); l <= layers; l++ { + for j := range newNumbers { + c.InsertAtLayer(newNumbers[j], l) + } + } + + b.StopTimer() + for i := uint8(0); i <= layers; i++ { + c.ReplaceLayer(i, randomArray(32)) + } + b.StartTimer() + } +} + +func BenchmarkReplaceLayer(b *testing.B) { + layers := uint8(5) + + c, err := NewWithMaxLayer(layers) + require.Nil(b, err) + + newNumbers := randomArray(1000) + b.ResetTimer() + for i := 0; i < b.N; i++ { + for i := uint8(0); i <= layers; i++ { + c.ReplaceLayer(i, newNumbers) + } + } +} + +func TestNewWithData(t *testing.T) { + c, err := NewWithMaxLayer(0) + require.Nil(t, err) + + c.appendToLayer(1, 0) + c.appendToLayer(9, 0) + c.appendToLayer(5, 0) + + require.ElementsMatch(t, []uint64{1, 9, 5}, c.GetLayer(0)) + + copied := NewWithData(c.Data()) + require.ElementsMatch(t, c.GetLayer(0), copied.GetLayer(0)) +} + +func TestConnections_DataSerialization_SingleLayer(t *testing.T) { + // Test with small values that fit in 2-byte scheme + c, err := NewWithMaxLayer(0) + require.Nil(t, err) + + smallValues := []uint64{1, 100, 1000, 65535} + c.ReplaceLayer(0, smallValues) + + data := c.Data() + require.NotNil(t, data) + require.Greater(t, len(data), 0) + + // Deserialize and verify + copied := NewWithData(data) + require.NotNil(t, copied) + assert.Equal(t, c.Layers(), copied.Layers()) + assert.ElementsMatch(t, c.GetLayer(0), copied.GetLayer(0)) + assert.Equal(t, c.LenAtLayer(0), copied.LenAtLayer(0)) +} + +func TestConnections_DataSerialization_MultipleLayers(t 
*testing.T) { + c, err := NewWithMaxLayer(2) + require.Nil(t, err) + + // Layer 0: small values (2-byte scheme) + smallValues := []uint64{1, 100, 1000, 65535} + c.ReplaceLayer(0, smallValues) + + // Layer 1: medium values (3-byte scheme) + mediumValues := []uint64{100000, 500000, 16777215} + c.ReplaceLayer(1, mediumValues) + + // Layer 2: large values (4-byte scheme) + largeValues := []uint64{10000000, 50000000, 4294967295} + c.ReplaceLayer(2, largeValues) + + data := c.Data() + require.NotNil(t, data) + require.Greater(t, len(data), 0) + + // Deserialize and verify + copied := NewWithData(data) + require.NotNil(t, copied) + assert.Equal(t, c.Layers(), copied.Layers()) + + // Verify each layer + assert.ElementsMatch(t, c.GetLayer(0), copied.GetLayer(0)) + assert.ElementsMatch(t, c.GetLayer(1), copied.GetLayer(1)) + assert.ElementsMatch(t, c.GetLayer(2), copied.GetLayer(2)) + + assert.Equal(t, c.LenAtLayer(0), copied.LenAtLayer(0)) + assert.Equal(t, c.LenAtLayer(1), copied.LenAtLayer(1)) + assert.Equal(t, c.LenAtLayer(2), copied.LenAtLayer(2)) +} + +func TestConnections_DataSerialization_AllSchemes(t *testing.T) { + c, err := NewWithMaxLayer(4) + require.Nil(t, err) + + // Test all encoding schemes with boundary values + testCases := []struct { + layer uint8 + values []uint64 + scheme uint8 + desc string + }{ + {0, []uint64{1, 100, 65535}, SCHEME_2BYTE, "2-byte scheme"}, + {1, []uint64{65536, 100000, 16777215}, SCHEME_3BYTE, "3-byte scheme"}, + {2, []uint64{16777216, 100000000, 4294967295}, SCHEME_4BYTE, "4-byte scheme"}, + {3, []uint64{4294967296, 1000000000000, 1099511627775}, SCHEME_5BYTE, "5-byte scheme"}, + {4, []uint64{1099511627776, 1000000000000000, 18446744073709551615}, SCHEME_8BYTE, "8-byte scheme"}, + } + + for _, tc := range testCases { + c.ReplaceLayer(tc.layer, tc.values) + } + + data := c.Data() + require.NotNil(t, data) + require.Greater(t, len(data), 0) + + // Deserialize and verify + copied := NewWithData(data) + require.NotNil(t, copied) + 
assert.Equal(t, c.Layers(), copied.Layers()) + + // Verify each layer + for _, tc := range testCases { + assert.ElementsMatch(t, c.GetLayer(tc.layer), copied.GetLayer(tc.layer), + "Layer %d (%s) mismatch", tc.layer, tc.desc) + assert.Equal(t, c.LenAtLayer(tc.layer), copied.LenAtLayer(tc.layer), + "Layer %d (%s) length mismatch", tc.layer, tc.desc) + } +} + +func TestConnections_DataSerialization_EmptyLayers(t *testing.T) { + c, err := NewWithMaxLayer(3) + require.Nil(t, err) + + // Only populate layer 1, leave others empty + values := []uint64{1, 2, 3, 4, 5} + c.ReplaceLayer(1, values) + + data := c.Data() + require.NotNil(t, data) + require.Greater(t, len(data), 0) + + // Deserialize and verify + copied := NewWithData(data) + require.NotNil(t, copied) + assert.Equal(t, c.Layers(), copied.Layers()) + + // Verify empty layers remain empty + assert.Len(t, copied.GetLayer(0), 0) + assert.Len(t, copied.GetLayer(2), 0) + assert.Len(t, copied.GetLayer(3), 0) + + // Verify populated layer + assert.ElementsMatch(t, values, copied.GetLayer(1)) + assert.Equal(t, len(values), copied.LenAtLayer(1)) +} + +func TestConnections_DataSerialization_EmptyConnections(t *testing.T) { + c, err := NewWithMaxLayer(0) + require.Nil(t, err) + + data := c.Data() + require.NotNil(t, data) + // NewWithMaxLayer(0) creates layerCount=1, so we expect 1 layer with empty data + // Format: [layerCount=1][packed=0][dataLen=0][dataLen=0][dataLen=0][dataLen=0] + expectedData := []byte{1, 0, 0, 0, 0, 0, 0} + assert.Equal(t, expectedData, data) + + // Deserialize and verify + copied := NewWithData(data) + require.NotNil(t, copied) + assert.Equal(t, uint8(1), copied.Layers()) + assert.Len(t, copied.GetLayer(0), 0) +} + +func TestConnections_DataSerialization_InsertAtLayer(t *testing.T) { + c, err := NewWithMaxLayer(1) + require.Nil(t, err) + + // Use InsertAtLayer to build up data + expectedValues := []uint64{1, 100, 1000, 100000, 10000000} + for _, val := range expectedValues { + c.InsertAtLayer(val, 0) 
+ } + + data := c.Data() + require.NotNil(t, data) + require.Greater(t, len(data), 0) + + // Deserialize and verify + copied := NewWithData(data) + require.NotNil(t, copied) + assert.Equal(t, c.Layers(), copied.Layers()) + assert.ElementsMatch(t, expectedValues, copied.GetLayer(0)) + assert.Equal(t, len(expectedValues), copied.LenAtLayer(0)) +} + +func TestConnections_DataSerialization_BulkInsert(t *testing.T) { + c, err := NewWithMaxLayer(1) + require.Nil(t, err) + + // Start with some values + initialValues := []uint64{1, 2, 3} + c.ReplaceLayer(0, initialValues) + + // Add more values using BulkInsertAtLayer + additionalValues := []uint64{100, 200, 300, 1000000} + c.BulkInsertAtLayer(additionalValues, 0) + + expectedValues := append(initialValues, additionalValues...) + + data := c.Data() + require.NotNil(t, data) + require.Greater(t, len(data), 0) + + // Deserialize and verify + copied := NewWithData(data) + require.NotNil(t, copied) + assert.Equal(t, c.Layers(), copied.Layers()) + assert.ElementsMatch(t, expectedValues, copied.GetLayer(0)) + assert.Equal(t, len(expectedValues), copied.LenAtLayer(0)) +} + +func TestConnections_DataSerialization_LargeValues(t *testing.T) { + c, err := NewWithMaxLayer(0) + require.Nil(t, err) + + // Test with very large values that require 8-byte encoding + largeValues := []uint64{ + 18446744073709551615, // max uint64 + 1000000000000000000, + 5000000000000000000, + 9999999999999999999, + } + + c.ReplaceLayer(0, largeValues) + + data := c.Data() + require.NotNil(t, data) + require.Greater(t, len(data), 0) + + // Deserialize and verify + copied := NewWithData(data) + require.NotNil(t, copied) + assert.Equal(t, c.Layers(), copied.Layers()) + assert.ElementsMatch(t, largeValues, copied.GetLayer(0)) + assert.Equal(t, len(largeValues), copied.LenAtLayer(0)) +} + +func TestConnections_DataSerialization_MixedSchemes(t *testing.T) { + c, err := NewWithMaxLayer(2) + require.Nil(t, err) + + // Layer 0: Start with small values, then add 
large ones to trigger scheme upgrade + c.InsertAtLayer(1, 0) + c.InsertAtLayer(100, 0) + c.InsertAtLayer(18446744073709551615, 0) // This should trigger 8-byte scheme + + // Layer 1: Start with medium values, then add larger ones + c.InsertAtLayer(100000, 1) + c.InsertAtLayer(500000, 1) + c.InsertAtLayer(4294967296, 1) // This should trigger 5-byte scheme + + data := c.Data() + require.NotNil(t, data) + require.Greater(t, len(data), 0) + + // Deserialize and verify + copied := NewWithData(data) + require.NotNil(t, copied) + assert.Equal(t, c.Layers(), copied.Layers()) + + // Verify both layers + assert.ElementsMatch(t, c.GetLayer(0), copied.GetLayer(0)) + assert.ElementsMatch(t, c.GetLayer(1), copied.GetLayer(1)) + assert.Equal(t, c.LenAtLayer(0), copied.LenAtLayer(0)) + assert.Equal(t, c.LenAtLayer(1), copied.LenAtLayer(1)) +} + +func TestConnections_DataSerialization_Stress(t *testing.T) { + layers := uint8(5) + c, err := NewWithMaxLayer(layers) + require.Nil(t, err) + + // Populate all layers with different sized values + for i := uint8(0); i <= layers; i++ { + values := make([]uint64, 10) + for j := 0; j < 10; j++ { + // Mix different value ranges to test different schemes + switch j % 5 { + case 0: + values[j] = uint64(j + 1) // Small values + case 1: + values[j] = uint64(100000 + j) // Medium values + case 2: + values[j] = uint64(1000000000 + j) // Large values + case 3: + values[j] = uint64(1000000000000 + j) // Very large values + case 4: + values[j] = uint64(1000000000000000000 + j) // Extremely large values + } + } + c.ReplaceLayer(i, values) + } + + data := c.Data() + require.NotNil(t, data) + require.Greater(t, len(data), 0) + + // Deserialize and verify + copied := NewWithData(data) + require.NotNil(t, copied) + assert.Equal(t, c.Layers(), copied.Layers()) + + // Verify all layers + for i := uint8(0); i <= layers; i++ { + assert.ElementsMatch(t, c.GetLayer(i), copied.GetLayer(i), + "Layer %d mismatch", i) + assert.Equal(t, c.LenAtLayer(i), 
copied.LenAtLayer(i), + "Layer %d length mismatch", i) + } +} + +func TestConnections_DataSerialization_EmptyData(t *testing.T) { + // Test with empty data + copied := NewWithData([]byte{}) + require.NotNil(t, copied) + assert.Equal(t, uint8(0), copied.Layers()) + + // Test with nil data + copied = NewWithData(nil) + require.NotNil(t, copied) + assert.Equal(t, uint8(0), copied.Layers()) +} + +func TestConnections_DataSerialization_MalformedData(t *testing.T) { + // Test with malformed data (too short) + copied := NewWithData([]byte{1}) // Layer count 1 but no data + require.NotNil(t, copied) + assert.Equal(t, uint8(1), copied.Layers()) + assert.Len(t, copied.GetLayer(0), 0) + + // Test with incomplete layer data + malformedData := []byte{2, 0, 0, 0, 0, 0, 0} // Layer count 2, incomplete first layer + copied = NewWithData(malformedData) + require.NotNil(t, copied) + assert.Equal(t, uint8(2), copied.Layers()) + assert.Len(t, copied.GetLayer(0), 0) + assert.Len(t, copied.GetLayer(1), 0) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/periodic_tombstone_removal_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/periodic_tombstone_removal_test.go new file mode 100644 index 0000000000000000000000000000000000000000..24fe2a9262be0055cd5fb6c4bd58926233ca4b39 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/periodic_tombstone_removal_test.go @@ -0,0 +1,92 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package hnsw + +import ( + "context" + "testing" + "time" + + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer" + "github.com/weaviate/weaviate/adapters/repos/db/vector/testinghelpers" + "github.com/weaviate/weaviate/entities/cyclemanager" + ent "github.com/weaviate/weaviate/entities/vectorindex/hnsw" + testhelper "github.com/weaviate/weaviate/test/helper" +) + +func TestPeriodicTombstoneRemoval(t *testing.T) { + ctx := context.Background() + logger, _ := test.NewNullLogger() + cleanupIntervalSeconds := 1 + tombstoneCallbacks := cyclemanager.NewCallbackGroup("tombstone", logger, 1) + tombstoneCleanupCycle := cyclemanager.NewManager( + cyclemanager.NewFixedTicker(time.Duration(cleanupIntervalSeconds)*time.Second), + tombstoneCallbacks.CycleCallback, logger) + tombstoneCleanupCycle.Start() + + index, err := New(Config{ + RootPath: "doesnt-matter-as-committlogger-is-mocked-out", + ID: "automatic-tombstone-removal", + MakeCommitLoggerThunk: MakeNoopCommitLogger, + DistanceProvider: distancer.NewCosineDistanceProvider(), + VectorForIDThunk: testVectorForID, + }, ent.UserConfig{ + CleanupIntervalSeconds: cleanupIntervalSeconds, + MaxConnections: 30, + EFConstruction: 128, + }, tombstoneCallbacks, testinghelpers.NewDummyStore(t)) + index.PostStartup() + + require.Nil(t, err) + + for i, vec := range testVectors { + err := index.Add(ctx, uint64(i), vec) + require.Nil(t, err) + } + + t.Run("delete an entry and verify there is a tombstone", func(t *testing.T) { + for i := range testVectors { + if i%2 != 0 { + continue + } + + err := index.Delete(uint64(i)) + require.Nil(t, err) + } + }) + + t.Run("verify there are now tombstones", func(t *testing.T) { + index.tombstoneLock.RLock() + ts := len(index.tombstones) + index.tombstoneLock.RUnlock() + assert.True(t, ts > 0) + }) + + t.Run("wait for 
tombstones to disappear", func(t *testing.T) { + testhelper.AssertEventuallyEqual(t, true, func() interface{} { + index.tombstoneLock.RLock() + ts := len(index.tombstones) + index.tombstoneLock.RUnlock() + return ts == 0 + }, "wait until tombstones have been cleaned up") + }) + + if err := index.Shutdown(context.Background()); err != nil { + t.Fatal(err) + } + if err := tombstoneCleanupCycle.StopAndWait(context.Background()); err != nil { + t.Fatal(err) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/persistence_integration_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/persistence_integration_test.go new file mode 100644 index 0000000000000000000000000000000000000000..566277ba1ec132dd79f31ed274a76281cc07b8db --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/persistence_integration_test.go @@ -0,0 +1,472 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest +// +build integrationTest + +package hnsw + +import ( + "context" + "fmt" + "io" + "os" + "path/filepath" + "testing" + "time" + + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer" + "github.com/weaviate/weaviate/adapters/repos/db/vector/testinghelpers" + "github.com/weaviate/weaviate/entities/cyclemanager" + ent "github.com/weaviate/weaviate/entities/vectorindex/hnsw" +) + +func TestHnswPersistence(t *testing.T) { + dirName := t.TempDir() + ctx := context.Background() + indexID := "integrationtest" + + logger, _ := test.NewNullLogger() + + makeCL := func() (CommitLogger, error) { + return NewCommitLogger(dirName, indexID, logger, + cyclemanager.NewCallbackGroupNoop()) + } + + index, err := New(Config{ + RootPath: dirName, + ID: indexID, + MakeCommitLoggerThunk: makeCL, + DistanceProvider: distancer.NewCosineDistanceProvider(), + VectorForIDThunk: testVectorForID, + }, ent.UserConfig{ + MaxConnections: 30, + EFConstruction: 60, + }, cyclemanager.NewCallbackGroupNoop(), testinghelpers.NewDummyStore(t)) + require.Nil(t, err) + + for i, vec := range testVectors { + err := index.Add(ctx, uint64(i), vec) + require.Nil(t, err) + } + + require.Nil(t, index.Flush()) + + // see index_test.go for more context + expectedResults := []uint64{ + 3, 5, 4, // cluster 2 + 7, 8, 6, // cluster 3 + 2, 1, 0, // cluster 1 + } + + t.Run("verify that the results match originally", func(t *testing.T) { + position := 3 + res, _, err := index.knnSearchByVector(ctx, testVectors[position], 50, 36, nil) + require.Nil(t, err) + assert.Equal(t, expectedResults, res) + }) + + // destroy the index + index = nil + + // build a new index from the (uncondensed) commit log + secondIndex, err := New(Config{ + RootPath: dirName, + ID: indexID, + MakeCommitLoggerThunk: makeCL, + DistanceProvider: 
distancer.NewCosineDistanceProvider(), + VectorForIDThunk: testVectorForID, + }, ent.UserConfig{ + MaxConnections: 30, + EFConstruction: 60, + }, cyclemanager.NewCallbackGroupNoop(), testinghelpers.NewDummyStore(t)) + require.Nil(t, err) + + t.Run("verify that the results match after rebuilding from disk", + func(t *testing.T) { + position := 3 + res, _, err := secondIndex.knnSearchByVector(ctx, testVectors[position], 50, 36, nil) + require.Nil(t, err) + assert.Equal(t, expectedResults, res) + }) +} + +func TestHnswPersistence_CorruptWAL(t *testing.T) { + ctx := context.Background() + dirName := t.TempDir() + indexID := "integrationtest_corrupt" + + logger, _ := test.NewNullLogger() + + makeCL := func() (CommitLogger, error) { + return NewCommitLogger(dirName, indexID, logger, + cyclemanager.NewCallbackGroupNoop()) + } + + index, err := New(Config{ + RootPath: dirName, + ID: indexID, + MakeCommitLoggerThunk: makeCL, + DistanceProvider: distancer.NewCosineDistanceProvider(), + VectorForIDThunk: testVectorForID, + }, ent.UserConfig{ + MaxConnections: 30, + EFConstruction: 60, + }, cyclemanager.NewCallbackGroupNoop(), testinghelpers.NewDummyStore(t)) + require.Nil(t, err) + + for i, vec := range testVectors { + err := index.Add(ctx, uint64(i), vec) + require.Nil(t, err) + } + + require.Nil(t, index.Flush()) + + // see index_test.go for more context + expectedResults := []uint64{ + 3, 5, 4, // cluster 2 + 7, 8, 6, // cluster 3 + 2, 1, 0, // cluster 1 + } + + t.Run("verify that the results match originally", func(t *testing.T) { + position := 3 + res, _, err := index.knnSearchByVector(ctx, testVectors[position], 50, 36, nil) + require.Nil(t, err) + assert.Equal(t, expectedResults, res) + }) + + // destroy the index + index.Shutdown(context.Background()) + index = nil + indexDir := filepath.Join(dirName, "integrationtest_corrupt.hnsw.commitlog.d") + + t.Run("corrupt the commit log on purpose", func(t *testing.T) { + res, err := os.ReadDir(indexDir) + require.Nil(t, err) 
+ require.Len(t, res, 1) + fName := filepath.Join(indexDir, res[0].Name()) + newFName := filepath.Join(indexDir, fmt.Sprintf("%d", time.Now().Unix())) + + orig, err := os.Open(fName) + require.Nil(t, err) + + correctLog, err := io.ReadAll(orig) + require.Nil(t, err) + err = orig.Close() + require.Nil(t, err) + + os.Remove(fName) + + corruptLog := correctLog[:len(correctLog)-6] + corrupt, err := os.Create(newFName) + require.Nil(t, err) + + _, err = corrupt.Write(corruptLog) + require.Nil(t, err) + + err = corrupt.Close() + require.Nil(t, err) + + // double check that we only have one file left (the corrupted one) + res, err = os.ReadDir(indexDir) + require.Nil(t, err) + require.Len(t, res, 1) + }) + + // build a new index from the (uncondensed, corrupted) commit log + secondIndex, err := New(Config{ + RootPath: dirName, + ID: indexID, + MakeCommitLoggerThunk: makeCL, + DistanceProvider: distancer.NewCosineDistanceProvider(), + VectorForIDThunk: testVectorForID, + }, ent.UserConfig{ + MaxConnections: 30, + EFConstruction: 60, + }, cyclemanager.NewCallbackGroupNoop(), testinghelpers.NewDummyStore(t)) + require.Nil(t, err) + + // the minor corruption (just one missing link) will most likely not render + // the index unusable, so we should still expect to retrieve results as + // normal + t.Run("verify that the results match after rebuilding from disk", + func(t *testing.T) { + position := 3 + res, _, err := secondIndex.knnSearchByVector(ctx, testVectors[position], 50, 36, nil) + require.Nil(t, err) + assert.Equal(t, expectedResults, res) + }) +} + +func TestHnswPersistence_WithDeletion_WithoutTombstoneCleanup(t *testing.T) { + ctx := context.Background() + dirName := t.TempDir() + indexID := "integrationtest_deletion" + logger, _ := test.NewNullLogger() + + makeCL := func() (CommitLogger, error) { + return NewCommitLogger(dirName, indexID, logger, + cyclemanager.NewCallbackGroupNoop()) + } + + index, err := New(Config{ + RootPath: dirName, + ID: indexID, + 
MakeCommitLoggerThunk: makeCL, + DistanceProvider: distancer.NewCosineDistanceProvider(), + VectorForIDThunk: testVectorForID, + }, ent.UserConfig{ + MaxConnections: 30, + EFConstruction: 60, + }, cyclemanager.NewCallbackGroupNoop(), testinghelpers.NewDummyStore(t)) + require.Nil(t, err) + + for i, vec := range testVectors { + err := index.Add(ctx, uint64(i), vec) + require.Nil(t, err) + } + + t.Run("delete some elements", func(t *testing.T) { + err := index.Delete(6) + require.Nil(t, err) + err = index.Delete(8) + require.Nil(t, err) + }) + + // see index_test.go for more context + expectedResults := []uint64{ + 3, 5, 4, // cluster 2 + 7, // cluster 3 with element 6 and 8 deleted + 2, 1, 0, // cluster 1 + } + + require.Nil(t, index.Flush()) + + t.Run("verify that the results match originally", func(t *testing.T) { + position := 3 + res, _, err := index.knnSearchByVector(ctx, testVectors[position], 50, 36, nil) + require.Nil(t, err) + assert.Equal(t, expectedResults, res) + }) + + dumpIndex(index, "without_cleanup_original_index_before_storage") + + // destroy the index + index = nil + + // build a new index from the (uncondensed) commit log + secondIndex, err := New(Config{ + RootPath: dirName, + ID: indexID, + MakeCommitLoggerThunk: makeCL, + DistanceProvider: distancer.NewCosineDistanceProvider(), + VectorForIDThunk: testVectorForID, + }, ent.UserConfig{ + MaxConnections: 30, + EFConstruction: 60, + }, cyclemanager.NewCallbackGroupNoop(), testinghelpers.NewDummyStore(t)) + require.Nil(t, err) + + dumpIndex(secondIndex, "without_cleanup_after_rebuild") + t.Run("verify that the results match after rebuilding from disk", + func(t *testing.T) { + position := 3 + res, _, err := secondIndex.knnSearchByVector(ctx, testVectors[position], 50, 36, nil) + require.Nil(t, err) + assert.Equal(t, expectedResults, res) + }) +} + +func TestHnswPersistence_WithDeletion_WithTombstoneCleanup(t *testing.T) { + ctx := context.Background() + dirName := t.TempDir() + indexID := 
"integrationtest_tombstonecleanup" + + logger, _ := test.NewNullLogger() + + makeCL := func() (CommitLogger, error) { + return NewCommitLogger(dirName, indexID, logger, + cyclemanager.NewCallbackGroupNoop()) + } + + index, err := New(Config{ + RootPath: dirName, + ID: indexID, + MakeCommitLoggerThunk: makeCL, + DistanceProvider: distancer.NewCosineDistanceProvider(), + VectorForIDThunk: testVectorForID, + }, ent.UserConfig{ + MaxConnections: 30, + EFConstruction: 60, + }, cyclemanager.NewCallbackGroupNoop(), testinghelpers.NewDummyStore(t)) + require.Nil(t, err) + + for i, vec := range testVectors { + err := index.Add(ctx, uint64(i), vec) + require.Nil(t, err) + } + dumpIndex(index, "with cleanup after import") + require.Nil(t, index.Flush()) + + t.Run("delete some elements and permanently delete tombstoned elements", + func(t *testing.T) { + err := index.Delete(6) + require.Nil(t, err) + err = index.Delete(8) + require.Nil(t, err) + + err = index.CleanUpTombstonedNodes(neverStop) + require.Nil(t, err) + }) + + dumpIndex(index, "with cleanup after delete") + + require.Nil(t, index.Flush()) + + // see index_test.go for more context + expectedResults := []uint64{ + 3, 5, 4, // cluster 2 + 7, // cluster 3 with element 6 and 8 deleted + 2, 1, 0, // cluster 1 + } + + t.Run("verify that the results match originally", func(t *testing.T) { + position := 3 + res, _, err := index.knnSearchByVector(ctx, testVectors[position], 50, 36, nil) + require.Nil(t, err) + assert.Equal(t, expectedResults, res) + }) + + // destroy the index + index.Shutdown(context.Background()) + index = nil + + // build a new index from the (uncondensed) commit log + secondIndex, err := New(Config{ + RootPath: dirName, + ID: indexID, + MakeCommitLoggerThunk: makeCL, + DistanceProvider: distancer.NewCosineDistanceProvider(), + VectorForIDThunk: testVectorForID, + }, ent.UserConfig{ + MaxConnections: 30, + EFConstruction: 60, + }, cyclemanager.NewCallbackGroupNoop(), testinghelpers.NewDummyStore(t)) + 
require.Nil(t, err) + dumpIndex(secondIndex, "with cleanup second index") + + t.Run("verify that the results match after rebuilding from disk", + func(t *testing.T) { + position := 3 + res, _, err := secondIndex.knnSearchByVector(ctx, testVectors[position], 50, 36, nil) + require.Nil(t, err) + assert.Equal(t, expectedResults, res) + }) + + t.Run("further deleting all elements and reimporting one", func(t *testing.T) { + toDelete := []uint64{0, 1, 2, 3, 4, 5, 7} + + for _, id := range toDelete { + err := secondIndex.Delete(id) + require.Nil(t, err) + } + + err = secondIndex.CleanUpTombstonedNodes(neverStop) + require.Nil(t, err) + + err := secondIndex.Add(ctx, 3, testVectors[3]) + require.Nil(t, err) + }) + + require.Nil(t, secondIndex.Flush()) + + dumpIndex(secondIndex) + + secondIndex.Shutdown(context.Background()) + secondIndex = nil + + // build a new index from the (uncondensed) commit log + thirdIndex, err := New(Config{ + RootPath: dirName, + ID: indexID, + MakeCommitLoggerThunk: makeCL, + DistanceProvider: distancer.NewCosineDistanceProvider(), + VectorForIDThunk: testVectorForID, + }, ent.UserConfig{ + MaxConnections: 30, + EFConstruction: 60, + }, cyclemanager.NewCallbackGroupNoop(), testinghelpers.NewDummyStore(t)) + require.Nil(t, err) + + dumpIndex(thirdIndex) + + t.Run("verify that the results match after rebuilding from disk", + func(t *testing.T) { + position := 3 + res, _, err := thirdIndex.knnSearchByVector(ctx, testVectors[position], 50, 36, nil) + require.Nil(t, err) + assert.Equal(t, []uint64{3}, res) + }) + + t.Run("delete all elements so the commitlog ends with an empty graph", func(t *testing.T) { + toDelete := []uint64{3} + + for _, id := range toDelete { + err := thirdIndex.Delete(id) + require.Nil(t, err) + } + + err = thirdIndex.CleanUpTombstonedNodes(neverStop) + require.Nil(t, err) + }) + + require.Nil(t, thirdIndex.Flush()) + + thirdIndex.Shutdown(context.Background()) + thirdIndex = nil + // build a new index from the (uncondensed) 
commit log + fourthIndex, err := New(Config{ + RootPath: dirName, + ID: indexID, + MakeCommitLoggerThunk: makeCL, + DistanceProvider: distancer.NewCosineDistanceProvider(), + VectorForIDThunk: testVectorForID, + }, ent.UserConfig{ + MaxConnections: 30, + EFConstruction: 60, + }, cyclemanager.NewCallbackGroupNoop(), testinghelpers.NewDummyStore(t)) + require.Nil(t, err) + + t.Run("load from disk and try to insert again", func(t *testing.T) { + for i, vec := range testVectors { + err := fourthIndex.Add(ctx, uint64(i), vec) + require.Nil(t, err) + } + }) + + t.Run("verify that searching works normally", func(t *testing.T) { + expectedResults := []uint64{ + 3, 5, 4, // cluster 2 + 7, 8, 6, // cluster 3 with element 6 and 8 deleted + 2, 1, 0, // cluster 1 + } + position := 3 + res, _, err := fourthIndex.knnSearchByVector(ctx, testVectors[position], 50, 36, nil) + require.Nil(t, err) + assert.Equal(t, expectedResults, res) + }) + + fourthIndex.Shutdown(context.Background()) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/pools.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/pools.go new file mode 100644 index 0000000000000000000000000000000000000000..ed10d450fb866f20751e63bd75bd5632f450013c --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/pools.go @@ -0,0 +1,109 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package hnsw + +import ( + "sync" + + "github.com/weaviate/weaviate/adapters/repos/db/priorityqueue" + "github.com/weaviate/weaviate/adapters/repos/db/vector/cache" + "github.com/weaviate/weaviate/adapters/repos/db/vector/common" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/visited" +) + +type pools struct { + visitedLists *visited.Pool + visitedListsLock *sync.RWMutex + + pqItemSlice *sync.Pool + pqHeuristic *pqMinWithIndexPool + pqResults *common.PqMaxPool + pqCandidates *pqMinPool + + tempVectors *common.TempVectorsPool + tempVectorsUint64 *common.TempVectorUint64Pool +} + +func newPools(maxConnectionsLayerZero int, initialVisitedListPoolSize int) *pools { + return &pools{ + visitedLists: visited.NewPool(1, cache.InitialSize+500, initialVisitedListPoolSize), + visitedListsLock: &sync.RWMutex{}, + pqItemSlice: &sync.Pool{ + New: func() interface{} { + return make([]priorityqueue.Item[uint64], 0, maxConnectionsLayerZero) + }, + }, + pqHeuristic: newPqMinWithIndexPool(maxConnectionsLayerZero), + pqResults: common.NewPqMaxPool(maxConnectionsLayerZero), + pqCandidates: newPqMinPool(maxConnectionsLayerZero), + tempVectors: common.NewTempVectorsPool(), + tempVectorsUint64: common.NewTempUint64VectorsPool(), + } +} + +type pqMinPool struct { + pool *sync.Pool +} + +func newPqMinPool(defaultCap int) *pqMinPool { + return &pqMinPool{ + pool: &sync.Pool{ + New: func() interface{} { + return priorityqueue.NewMin[any](defaultCap) + }, + }, + } +} + +func (pqh *pqMinPool) GetMin(capacity int) *priorityqueue.Queue[any] { + pq := pqh.pool.Get().(*priorityqueue.Queue[any]) + if pq.Cap() < capacity { + pq.ResetCap(capacity) + } else { + pq.Reset() + } + + return pq +} + +func (pqh *pqMinPool) Put(pq *priorityqueue.Queue[any]) { + pqh.pool.Put(pq) +} + +type pqMinWithIndexPool struct { + pool *sync.Pool +} + +func newPqMinWithIndexPool(defaultCap int) *pqMinWithIndexPool { + return &pqMinWithIndexPool{ + pool: &sync.Pool{ 
+ New: func() interface{} { + return priorityqueue.NewMin[uint64](defaultCap) + }, + }, + } +} + +func (pqh *pqMinWithIndexPool) GetMin(capacity int) *priorityqueue.Queue[uint64] { + pq := pqh.pool.Get().(*priorityqueue.Queue[uint64]) + if pq.Cap() < capacity { + pq.ResetCap(capacity) + } else { + pq.Reset() + } + + return pq +} + +func (pqh *pqMinWithIndexPool) Put(pq *priorityqueue.Queue[uint64]) { + pqh.pool.Put(pq) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/recall_geo_spatial_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/recall_geo_spatial_test.go new file mode 100644 index 0000000000000000000000000000000000000000..15ce99b7bafcca8e143d91fbe24891f7f54d7e82 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/recall_geo_spatial_test.go @@ -0,0 +1,276 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTestSlow && !race +// +build integrationTestSlow,!race + +package hnsw + +import ( + "context" + "fmt" + "math/rand" + "runtime" + "sort" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer" + "github.com/weaviate/weaviate/adapters/repos/db/vector/testinghelpers" + "github.com/weaviate/weaviate/entities/cyclemanager" + ent "github.com/weaviate/weaviate/entities/vectorindex/hnsw" +) + +func TestRecallGeo(t *testing.T) { + ctx := context.Background() + size := 10000 + queries := 100 + efConstruction := 128 + maxNeighbors := 64 + + vectors := make([][]float32, size) + queryVectors := make([][]float32, queries) + var vectorIndex *hnsw + + t.Run("generate random vectors", func(t *testing.T) { + fmt.Printf("generating %d vectors", size) + for i := 0; i < size; i++ { + lat, lon := randLatLon() + vectors[i] = []float32{lat, lon} + } + fmt.Printf("done\n") + + fmt.Printf("generating %d search queries", queries) + for i := 0; i < queries; i++ { + lat, lon := randLatLon() + queryVectors[i] = []float32{lat, lon} + } + fmt.Printf("done\n") + }) + + t.Run("importing into hnsw", func(t *testing.T) { + fmt.Printf("importing into hnsw\n") + index, err := New(Config{ + RootPath: "doesnt-matter-as-committlogger-is-mocked-out", + ID: "recallbenchmark", + MakeCommitLoggerThunk: MakeNoopCommitLogger, + DistanceProvider: distancer.NewGeoProvider(), + VectorForIDThunk: func(ctx context.Context, id uint64) ([]float32, error) { + return vectors[int(id)], nil + }, + }, ent.UserConfig{ + MaxConnections: maxNeighbors, + EFConstruction: efConstruction, + }, cyclemanager.NewCallbackGroupNoop(), testinghelpers.NewDummyStore(t)) + + require.Nil(t, err) + vectorIndex = index + + workerCount := runtime.GOMAXPROCS(0) + jobsForWorker := make([][][]float32, workerCount) + + for i, vec := range vectors { + 
workerID := i % workerCount + jobsForWorker[workerID] = append(jobsForWorker[workerID], vec) + } + + beforeImport := time.Now() + wg := &sync.WaitGroup{} + for workerID, jobs := range jobsForWorker { + wg.Add(1) + go func(workerID int, myJobs [][]float32) { + defer wg.Done() + for i, vec := range myJobs { + originalIndex := (i * workerCount) + workerID + err := vectorIndex.Add(ctx, uint64(originalIndex), vec) + require.Nil(t, err) + } + }(workerID, jobs) + } + + wg.Wait() + fmt.Printf("import took %s\n", time.Since(beforeImport)) + }) + + t.Run("with k=10", func(t *testing.T) { + k := 10 + + var relevant int + var retrieved int + + var times time.Duration + + for i := 0; i < queries; i++ { + controlList := bruteForce(vectors, queryVectors[i], k) + before := time.Now() + results, _, err := vectorIndex.knnSearchByVector(ctx, queryVectors[i], k, 800, nil) + times += time.Since(before) + + require.Nil(t, err) + + retrieved += k + relevant += matchesInLists(controlList, results) + } + + recall := float32(relevant) / float32(retrieved) + fmt.Printf("recall is %f\n", recall) + fmt.Printf("avg search time for k=%d is %s\n", k, times/time.Duration(queries)) + assert.True(t, recall >= 0.99) + }) + + t.Run("with max dist set", func(t *testing.T) { + distances := []float32{ + 0.1, + 1, + 10, + 100, + 1000, + 2000, + 5000, + 7500, + 10000, + 12500, + 15000, + 20000, + 35000, + 100000, // larger than the circumference of the earth, should contain all + } + + for _, maxDist := range distances { + t.Run(fmt.Sprintf("with maxDist=%f", maxDist), func(t *testing.T) { + var relevant int + var retrieved int + + var times time.Duration + + for i := 0; i < queries; i++ { + controlList := bruteForceMaxDist(vectors, queryVectors[i], maxDist) + before := time.Now() + results, err := vectorIndex.KnnSearchByVectorMaxDist(ctx, queryVectors[i], maxDist, 800, nil) + times += time.Since(before) + require.Nil(t, err) + + retrieved += len(results) + relevant += matchesInLists(controlList, results) 
+ } + + if relevant == 0 { + // skip, as we risk dividing by zero, if both relevant and retrieved + // are zero, however, we want to fail with a divide-by-zero if only + // retrieved is 0 and relevant was more than 0 + return + } + recall := float32(relevant) / float32(retrieved) + fmt.Printf("recall is %f\n", recall) + fmt.Printf("avg search time for maxDist=%f is %s\n", maxDist, times/time.Duration(queries)) + assert.True(t, recall >= 0.99) + }) + } + }) +} + +func matchesInLists(control []uint64, results []uint64) int { + desired := map[uint64]struct{}{} + for _, relevant := range control { + desired[relevant] = struct{}{} + } + + var matches int + for _, candidate := range results { + _, ok := desired[candidate] + if ok { + matches++ + } + } + + return matches +} + +func bruteForce(vectors [][]float32, query []float32, k int) []uint64 { + type distanceAndIndex struct { + distance float32 + index uint64 + } + + distances := make([]distanceAndIndex, len(vectors)) + + distancer := distancer.NewGeoProvider().New(query) + for i, vec := range vectors { + dist, _ := distancer.Distance(vec) + distances[i] = distanceAndIndex{ + index: uint64(i), + distance: dist, + } + } + + sort.Slice(distances, func(a, b int) bool { + return distances[a].distance < distances[b].distance + }) + + if len(distances) < k { + k = len(distances) + } + + out := make([]uint64, k) + for i := 0; i < k; i++ { + out[i] = distances[i].index + } + + return out +} + +func bruteForceMaxDist(vectors [][]float32, query []float32, maxDist float32) []uint64 { + type distanceAndIndex struct { + distance float32 + index uint64 + } + + distances := make([]distanceAndIndex, len(vectors)) + + distancer := distancer.NewGeoProvider().New(query) + for i, vec := range vectors { + dist, _ := distancer.Distance(vec) + distances[i] = distanceAndIndex{ + index: uint64(i), + distance: dist, + } + } + + sort.Slice(distances, func(a, b int) bool { + return distances[a].distance < distances[b].distance + }) + + out := 
make([]uint64, len(distances)) + i := 0 + for _, elem := range distances { + if elem.distance > maxDist { + break + } + out[i] = distances[i].index + i++ + } + + return out[:i] +} + +func randLatLon() (float32, float32) { + maxLat := float32(90.0) + minLat := float32(-90.0) + maxLon := float32(180) + minLon := float32(-180) + + lat := minLat + (maxLat-minLat)*rand.Float32() + lon := minLon + (maxLon-minLon)*rand.Float32() + return lat, lon +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/recall_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/recall_test.go new file mode 100644 index 0000000000000000000000000000000000000000..15f64b2795a6d396ce7be8aebc41d45418b89a13 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/recall_test.go @@ -0,0 +1,170 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +//go:build benchmarkRecall +// +build benchmarkRecall + +package hnsw + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "runtime" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer" + "github.com/weaviate/weaviate/adapters/repos/db/vector/testinghelpers" +) + +func TestRecall(t *testing.T) { + efConstruction := 256 + ef := 256 + maxNeighbors := 64 + + var vectors [][]float32 + var queries [][]float32 + var truths [][]uint64 + var vectorIndex *hnsw + + t.Run("generate random vectors", func(t *testing.T) { + vectorsJSON, err := ioutil.ReadFile("recall_vectors.json") + require.Nil(t, err) + err = json.Unmarshal(vectorsJSON, &vectors) + require.Nil(t, err) + + queriesJSON, err := ioutil.ReadFile("recall_queries.json") + require.Nil(t, err) + err = json.Unmarshal(queriesJSON, &queries) + require.Nil(t, err) + + truthsJSON, err := ioutil.ReadFile("recall_truths.json") + require.Nil(t, err) + err = json.Unmarshal(truthsJSON, &truths) + require.Nil(t, err) + }) + + t.Run("importing into hnsw", func(t *testing.T) { + fmt.Printf("importing into hnsw\n") + + index, err := New(Config{ + RootPath: "doesnt-matter-as-committlogger-is-mocked-out", + ID: "recallbenchmark", + MakeCommitLoggerThunk: MakeNoopCommitLogger, + DistanceProvider: distancer.NewCosineDistanceProvider(), + VectorForIDThunk: func(ctx context.Context, id uint64) ([]float32, error) { + return vectors[int(id)], nil + }, + }, UserConfig{ + MaxConnections: maxNeighbors, + EFConstruction: efConstruction, + EF: ef, + }, testinghelpers.NewDummyStore(t)) + require.Nil(t, err) + vectorIndex = index + + workerCount := runtime.GOMAXPROCS(0) + jobsForWorker := make([][][]float32, workerCount) + + before := time.Now() + for i, vec := range vectors { + workerID := i % workerCount + jobsForWorker[workerID] = append(jobsForWorker[workerID], 
vec) + } + + wg := &sync.WaitGroup{} + for workerID, jobs := range jobsForWorker { + wg.Add(1) + go func(workerID int, myJobs [][]float32) { + defer wg.Done() + for i, vec := range myJobs { + originalIndex := (i * workerCount) + workerID + err := vectorIndex.Add(uint64(originalIndex), vec) + require.Nil(t, err) + } + }(workerID, jobs) + } + + wg.Wait() + fmt.Printf("importing took %s\n", time.Since(before)) + }) + + t.Run("inspect a query", func(t *testing.T) { + k := 20 + + hasDuplicates := 0 + + for _, vec := range queries { + results, _, err := vectorIndex.SearchByVector(vec, k, nil) + require.Nil(t, err) + if containsDuplicates(results) { + hasDuplicates++ + panic("stop") + } + } + + fmt.Printf("%d out of %d searches contained duplicates", hasDuplicates, len(queries)) + }) + + t.Run("with k=10", func(t *testing.T) { + k := 10 + + var relevant int + var retrieved int + + for i := 0; i < len(queries); i++ { + results, _, err := vectorIndex.SearchByVector(queries[i], k, nil) + require.Nil(t, err) + + retrieved += k + relevant += matchesInLists(truths[i], results) + } + + recall := float32(relevant) / float32(retrieved) + fmt.Printf("recall is %f\n", recall) + assert.True(t, recall >= 0.99) + }) +} + +func matchesInLists(control []uint64, results []uint64) int { + desired := map[uint64]struct{}{} + for _, relevant := range control { + desired[relevant] = struct{}{} + } + + var matches int + for _, candidate := range results { + _, ok := desired[candidate] + if ok { + matches++ + } + } + + return matches +} + +func containsDuplicates(in []uint64) bool { + seen := map[uint64]struct{}{} + + for _, value := range in { + if _, ok := seen[value]; ok { + return true + } + seen[value] = struct{}{} + } + + return false +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/restore_integration_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/restore_integration_test.go new file mode 100644 index 
0000000000000000000000000000000000000000..d60ef0dae478af1acea97ed0ba85203cd032df6d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/restore_integration_test.go @@ -0,0 +1,108 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package hnsw_test + +import ( + "context" + "path/filepath" + "testing" + + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/repos/db/vector/common" + "github.com/weaviate/weaviate/adapters/repos/db/vector/compressionhelpers" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer" + "github.com/weaviate/weaviate/adapters/repos/db/vector/testinghelpers" + "github.com/weaviate/weaviate/entities/cyclemanager" + "github.com/weaviate/weaviate/entities/storobj" + hnswent "github.com/weaviate/weaviate/entities/vectorindex/hnsw" + "go.etcd.io/bbolt" +) + +func TempVectorForIDThunk(vectors [][]float32) func(context.Context, uint64, *common.VectorSlice) ([]float32, error) { + return func(ctx context.Context, id uint64, container *common.VectorSlice) ([]float32, error) { + copy(container.Slice, vectors[int(id)]) + return vectors[int(id)], nil + } +} + +func TestRestorBQ_Integration(t *testing.T) { + ctx := context.Background() + dimensions := 20 + vectors_size := 3_000 + queries_size := 100 + k := 10 + + vectors, queries := testinghelpers.RandomVecs(vectors_size, queries_size, dimensions) + distancer := distancer.NewL2SquaredProvider() + logger, _ := test.NewNullLogger() + + dirName := t.TempDir() + indexID := "restore-bq-integration-test" + noopCallback := cyclemanager.NewCallbackGroupNoop() 
+ uc := hnswent.UserConfig{} + uc.SetDefaults() + uc.MaxConnections = 30 + uc.EFConstruction = 64 + uc.EF = 32 + uc.VectorCacheMaxObjects = 1_000_000 + uc.BQ = hnswent.BQConfig{ + Enabled: true, + } + + tmpDir := t.TempDir() + dbPath := filepath.Join(tmpDir, "index.db") + db, err := bbolt.Open(dbPath, 0o666, nil) + require.NoError(t, err) + t.Cleanup(func() { + db.Close() + }) + + config := hnsw.Config{ + RootPath: dirName, + ID: indexID, + Logger: logger, + DistanceProvider: distancer, + MakeCommitLoggerThunk: func() (hnsw.CommitLogger, error) { + return hnsw.NewCommitLogger(dirName, indexID, logger, noopCallback) + }, + VectorForIDThunk: func(ctx context.Context, id uint64) ([]float32, error) { + vec := vectors[int(id)] + if vec == nil { + return nil, storobj.NewErrNotFoundf(id, "nil vec") + } + return vec, nil + }, + TempVectorForIDThunk: TempVectorForIDThunk(vectors), + } + + idx, err := hnsw.New(config, uc, cyclemanager.NewCallbackGroupNoop(), testinghelpers.NewDummyStore(t)) + require.Nil(t, err) + idx.PostStartup() + + compressionhelpers.Concurrently(logger, uint64(vectors_size), func(i uint64) { + idx.Add(ctx, i, vectors[i]) + }) + + assert.Nil(t, idx.Flush()) + assert.Nil(t, idx.Shutdown(context.Background())) + + idx, err = hnsw.New(config, uc, cyclemanager.NewCallbackGroupNoop(), testinghelpers.NewDummyStore(t)) + require.Nil(t, err) + idx.PostStartup() + + for i := range queries { + idx.SearchByVector(ctx, queries[i], k, nil) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/restore_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/restore_test.go new file mode 100644 index 0000000000000000000000000000000000000000..015191d29af8ab17892f2967981cea6d1670439f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/restore_test.go @@ -0,0 +1,147 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V 
/| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package hnsw + +import ( + "context" + "io" + "os" + "path" + "testing" + + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/repos/db/vector/common" + "github.com/weaviate/weaviate/adapters/repos/db/vector/compressionhelpers" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer" + "github.com/weaviate/weaviate/adapters/repos/db/vector/testinghelpers" + "github.com/weaviate/weaviate/entities/cyclemanager" + "github.com/weaviate/weaviate/entities/storobj" + ent "github.com/weaviate/weaviate/entities/vectorindex/hnsw" +) + +var logger, _ = test.NewNullLogger() + +func Test_RestartFromZeroSegments(t *testing.T) { + testPath := t.TempDir() + src := path.Join(".", "compression_tests", "fixtures", "restart-from-zero-segments", "1234567") + source, err := os.Open(src) + assert.Nil(t, err) + dstPath := path.Join(testPath, "main.hnsw.commitlog.d") + assert.Nil(t, os.Mkdir(dstPath, 0o777)) + destination, err := os.Create(path.Join(dstPath, "1234567")) + assert.Nil(t, err) + _, err = io.Copy(destination, source) + assert.Nil(t, err) + source.Close() + destination.Close() + + efConstruction := 64 + ef := 32 + maxNeighbors := 32 + dimensions := 20 + vectors_size := 1000 + queries_size := 1 + vectors, _ := testinghelpers.RandomVecs(vectors_size, queries_size, dimensions) + distancer := distancer.NewL2SquaredProvider() + uc := ent.UserConfig{} + uc.MaxConnections = maxNeighbors + uc.EFConstruction = efConstruction + uc.EF = ef + uc.VectorCacheMaxObjects = 10e12 + uc.PQ = ent.PQConfig{Enabled: true, Encoder: ent.PQEncoder{Type: ent.PQEncoderTypeKMeans, Distribution: ent.PQEncoderDistributionNormal}} + config := Config{ + RootPath: testPath, + ID: "main", + MakeCommitLoggerThunk: 
MakeNoopCommitLogger, + DistanceProvider: distancer, + VectorForIDThunk: func(ctx context.Context, id uint64) ([]float32, error) { + return vectors[int(id)], nil + }, + TempVectorForIDThunk: func(ctx context.Context, id uint64, container *common.VectorSlice) ([]float32, error) { + copy(container.Slice, vectors[int(id)]) + return container.Slice, nil + }, + } + + _, err = New( + config, uc, + cyclemanager.NewCallbackGroupNoop(), testinghelpers.NewDummyStore(t)) + + assert.Nil(t, err) +} + +func TestBackup_IntegrationHnsw(t *testing.T) { + ctx := context.Background() + dimensions := 20 + vectors_size := 1_000 + queries_size := 100 + k := 10 + + vectors, queries := testinghelpers.RandomVecs(vectors_size, queries_size, dimensions) + truths := make([][]uint64, queries_size) + distancer := distancer.NewL2SquaredProvider() + compressionhelpers.Concurrently(logger, uint64(len(queries)), func(i uint64) { + truths[i], _ = testinghelpers.BruteForce(logger, vectors, queries[i], k, testinghelpers.DistanceWrapper(distancer)) + }) + + dirName := t.TempDir() + indexID := "restore-integration-test" + noopCallback := cyclemanager.NewCallbackGroupNoop() + hnswuc := ent.UserConfig{ + MaxConnections: 30, + EFConstruction: 64, + EF: 32, + VectorCacheMaxObjects: 1_000_000, + } + + config := Config{ + RootPath: dirName, + ID: indexID, + Logger: logger, + DistanceProvider: distancer, + MakeCommitLoggerThunk: func() (CommitLogger, error) { + return NewCommitLogger(dirName, indexID, logger, noopCallback) + }, + VectorForIDThunk: func(ctx context.Context, id uint64) ([]float32, error) { + vec := vectors[int(id)] + if vec == nil { + return nil, storobj.NewErrNotFoundf(id, "nil vec") + } + return vec, nil + }, + TempVectorForIDThunk: TempVectorForIDThunk(vectors), + } + + store := testinghelpers.NewDummyStore(t) + + idx, err := New(config, hnswuc, cyclemanager.NewCallbackGroupNoop(), store) + require.Nil(t, err) + idx.PostStartup() + + compressionhelpers.Concurrently(logger, 
uint64(vectors_size), func(i uint64) { + idx.Add(ctx, i, vectors[i]) + }) + recall1, _ := testinghelpers.RecallAndLatency(ctx, queries, k, idx, truths) + assert.True(t, recall1 > 0.9) + + assert.Nil(t, idx.Flush()) + assert.Nil(t, idx.Shutdown(context.Background())) + + idx, err = New(config, hnswuc, cyclemanager.NewCallbackGroupNoop(), store) + require.Nil(t, err) + idx.PostStartup() + + recall2, _ := testinghelpers.RecallAndLatency(ctx, queries, k, idx, truths) + assert.Equal(t, recall1, recall2) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/search.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/search.go new file mode 100644 index 0000000000000000000000000000000000000000..aaf09809c16b8a01ed51c01ad5f062995663331d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/search.go @@ -0,0 +1,1216 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package hnsw + +import ( + "context" + "fmt" + "math" + "sync" + "sync/atomic" + "time" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/adapters/repos/db/priorityqueue" + "github.com/weaviate/weaviate/adapters/repos/db/vector/common" + "github.com/weaviate/weaviate/adapters/repos/db/vector/compressionhelpers" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/visited" + "github.com/weaviate/weaviate/entities/dto" + enterrors "github.com/weaviate/weaviate/entities/errors" + "github.com/weaviate/weaviate/entities/storobj" + "github.com/weaviate/weaviate/usecases/floatcomp" +) + +type FilterStrategy int + +const ( + SWEEPING FilterStrategy = iota + ACORN + RRE +) + +func (h *hnsw) searchTimeEF(k int) int { + // load atomically, so we can get away with concurrent updates of the + // userconfig without having to set a lock each time we try to read - which + // can be so common that it would cause considerable overhead + ef := int(atomic.LoadInt64(&h.ef)) + if ef < 1 { + return h.autoEfFromK(k) + } + + if ef < k { + ef = k + } + + return ef +} + +func (h *hnsw) autoEfFromK(k int) int { + factor := int(atomic.LoadInt64(&h.efFactor)) + min := int(atomic.LoadInt64(&h.efMin)) + max := int(atomic.LoadInt64(&h.efMax)) + + ef := k * factor + if ef > max { + ef = max + } else if ef < min { + ef = min + } + if k > ef { + ef = k // otherwise results will get cut off early + } + + return ef +} + +func (h *hnsw) SearchByVector(ctx context.Context, vector []float32, + k int, allowList helpers.AllowList, +) ([]uint64, []float32, error) { + h.compressActionLock.RLock() + defer h.compressActionLock.RUnlock() + + vector = h.normalizeVec(vector) + flatSearchCutoff := int(atomic.LoadInt64(&h.flatSearchCutoff)) + if allowList != nil && !h.forbidFlat && 
allowList.Len() < flatSearchCutoff { + helpers.AnnotateSlowQueryLog(ctx, "hnsw_flat_search", true) + return h.flatSearch(ctx, vector, k, h.searchTimeEF(k), allowList) + } + helpers.AnnotateSlowQueryLog(ctx, "hnsw_flat_search", false) + return h.knnSearchByVector(ctx, vector, k, h.searchTimeEF(k), allowList) +} + +func (h *hnsw) SearchByMultiVector(ctx context.Context, vectors [][]float32, k int, allowList helpers.AllowList) ([]uint64, []float32, error) { + if !h.multivector.Load() { + return nil, nil, errors.New("multivector search is not enabled") + } + + if h.muvera.Load() { + muvera_query := h.muveraEncoder.EncodeQuery(vectors) + overfetch := 2 + docIDs, _, err := h.SearchByVector(ctx, muvera_query, overfetch*k, allowList) + if err != nil { + return nil, nil, err + } + candidateSet := make(map[uint64]struct{}) + for _, docID := range docIDs { + candidateSet[docID] = struct{}{} + } + return h.computeLateInteraction(vectors, k, candidateSet) + } + + h.compressActionLock.RLock() + defer h.compressActionLock.RUnlock() + + vectors = h.normalizeVecs(vectors) + flatSearchCutoff := int(atomic.LoadInt64(&h.flatSearchCutoff)) + if allowList != nil && !h.forbidFlat && allowList.Len() < flatSearchCutoff { + helpers.AnnotateSlowQueryLog(ctx, "hnsw_flat_search", true) + return h.flatMultiSearch(ctx, vectors, k, allowList) + } + helpers.AnnotateSlowQueryLog(ctx, "hnsw_flat_search", false) + return h.knnSearchByMultiVector(ctx, vectors, k, allowList) +} + +// SearchByVectorDistance wraps SearchByVector, and calls it recursively until +// the search results contain all vector within the threshold specified by the +// target distance. +// +// The maxLimit param will place an upper bound on the number of search results +// returned. This is used in situations where the results of the method are all +// eventually turned into objects, for example, a Get query. 
If the caller just +// needs ids for sake of something like aggregation, a maxLimit of -1 can be +// passed in to truly obtain all results from the vector index. +func (h *hnsw) SearchByVectorDistance(ctx context.Context, vector []float32, + targetDistance float32, maxLimit int64, + allowList helpers.AllowList, +) ([]uint64, []float32, error) { + return searchByVectorDistance(ctx, vector, targetDistance, maxLimit, allowList, + h.SearchByVector, h.logger) +} + +// SearchByMultiVectorDistance wraps SearchByMultiVector, and calls it recursively until +// the search results contain all vector within the threshold specified by the +// target distance. +// +// The maxLimit param will place an upper bound on the number of search results +// returned. This is used in situations where the results of the method are all +// eventually turned into objects, for example, a Get query. If the caller just +// needs ids for sake of something like aggregation, a maxLimit of -1 can be +// passed in to truly obtain all results from the vector index. 
+func (h *hnsw) SearchByMultiVectorDistance(ctx context.Context, vector [][]float32, + targetDistance float32, maxLimit int64, + allowList helpers.AllowList, +) ([]uint64, []float32, error) { + return searchByVectorDistance(ctx, vector, targetDistance, maxLimit, allowList, + h.SearchByMultiVector, h.logger) +} + +func (h *hnsw) shouldRescore() bool { + if h.compressed.Load() { + if (h.sqConfig.Enabled && h.sqConfig.RescoreLimit == 0) || (h.rqConfig.Enabled && h.rqConfig.RescoreLimit == 0) { + return false + } + } + return h.compressed.Load() && !h.doNotRescore +} + +func (h *hnsw) cacheSize() int64 { + var size int64 + if h.compressed.Load() { + size = h.compressor.CountVectors() + } else { + size = h.cache.CountVectors() + } + return size +} + +func (h *hnsw) acornEnabled(allowList helpers.AllowList) bool { + if allowList == nil || !h.acornSearch.Load() { + return false + } + + cacheSize := h.cacheSize() + allowListSize := allowList.Len() + if cacheSize != 0 && float32(allowListSize)/float32(cacheSize) > float32(h.acornFilterRatio) { + return false + } + + return true +} + +func (h *hnsw) searchLayerByVectorWithDistancer(ctx context.Context, + queryVector []float32, + entrypoints *priorityqueue.Queue[any], ef int, level int, + allowList helpers.AllowList, compressorDistancer compressionhelpers.CompressorDistancer, +) (*priorityqueue.Queue[any], error, +) { + if h.acornEnabled(allowList) { + return h.searchLayerByVectorWithDistancerWithStrategy(ctx, queryVector, entrypoints, ef, level, allowList, compressorDistancer, ACORN) + } + return h.searchLayerByVectorWithDistancerWithStrategy(ctx, queryVector, entrypoints, ef, level, allowList, compressorDistancer, SWEEPING) +} + +func (h *hnsw) searchLayerByVectorWithDistancerWithStrategy(ctx context.Context, + queryVector []float32, + entrypoints *priorityqueue.Queue[any], ef int, level int, + allowList helpers.AllowList, compressorDistancer compressionhelpers.CompressorDistancer, + strategy FilterStrategy) 
(*priorityqueue.Queue[any], error, +) { + start := time.Now() + defer func() { + took := time.Since(start) + helpers.AnnotateSlowQueryLog(ctx, fmt.Sprintf("knn_search_layer_%d_took", level), took) + }() + h.pools.visitedListsLock.RLock() + visited := h.pools.visitedLists.Borrow() + visitedExp := h.pools.visitedLists.Borrow() + h.pools.visitedListsLock.RUnlock() + + candidates := h.pools.pqCandidates.GetMin(ef) + results := h.pools.pqResults.GetMax(ef) + var floatDistancer distancer.Distancer + if h.compressed.Load() { + if compressorDistancer == nil { + var returnFn compressionhelpers.ReturnDistancerFn + compressorDistancer, returnFn = h.compressor.NewDistancer(queryVector) + defer returnFn() + } + } else { + floatDistancer = h.distancerProvider.New(queryVector) + } + + h.insertViableEntrypointsAsCandidatesAndResults(entrypoints, candidates, + results, level, visited, allowList) + + isMultivec := h.multivector.Load() && !h.muvera.Load() + var worstResultDistance float32 + var err error + if h.compressed.Load() { + worstResultDistance, err = h.currentWorstResultDistanceToByte(results, compressorDistancer) + } else { + worstResultDistance, err = h.currentWorstResultDistanceToFloat(results, floatDistancer) + } + if err != nil { + return nil, errors.Wrapf(err, "calculate distance of current last result") + } + var connectionsReusable []uint64 + var sliceConnectionsReusable *common.VectorUint64Slice + var slicePendingNextRound *common.VectorUint64Slice + var slicePendingThisRound *common.VectorUint64Slice + + if allowList == nil { + strategy = SWEEPING + } + if strategy == ACORN { + sliceConnectionsReusable = h.pools.tempVectorsUint64.Get(8 * h.maximumConnectionsLayerZero) + slicePendingNextRound = h.pools.tempVectorsUint64.Get(h.maximumConnectionsLayerZero) + slicePendingThisRound = h.pools.tempVectorsUint64.Get(h.maximumConnectionsLayerZero) + } else { + connectionsReusable = make([]uint64, h.maximumConnectionsLayerZero) + } + + for candidates.Len() > 0 { + if err := 
ctx.Err(); err != nil { + h.pools.visitedListsLock.RLock() + h.pools.visitedLists.Return(visited) + h.pools.visitedListsLock.RUnlock() + + helpers.AnnotateSlowQueryLog(ctx, "context_error", "knn_search_layer") + return nil, err + } + var dist float32 + candidate := candidates.Pop() + dist = candidate.Dist + + if dist > worstResultDistance && results.Len() >= ef { + break + } + + h.shardedNodeLocks.RLock(candidate.ID) + candidateNode := h.nodes[candidate.ID] + h.shardedNodeLocks.RUnlock(candidate.ID) + + if candidateNode == nil { + // could have been a node that already had a tombstone attached and was + // just cleaned up while we were waiting for a read lock + continue + } + + candidateNode.Lock() + if candidateNode.level < level { + // a node level could have been downgraded as part of a delete-reassign, + // but the connections pointing to it not yet cleaned up. In this case + // the node doesn't have any outgoing connections at this level and we + // must discard it. + candidateNode.Unlock() + continue + } + + if strategy != ACORN { + if candidateNode.connections.LenAtLayer(uint8(level)) > h.maximumConnectionsLayerZero { + // How is it possible that we could ever have more connections than the + // allowed maximum? It is not anymore, but there was a bug that allowed + // this to happen in versions prior to v1.12.0: + // https://github.com/weaviate/weaviate/issues/1868 + // + // As a result the length of this slice is entirely unpredictable and we + // can no longer retrieve it from the pool. Instead we need to fallback + // to allocating a new slice. 
+ // + // This was discovered as part of + // https://github.com/weaviate/weaviate/issues/1897 + connectionsReusable = make([]uint64, candidateNode.connections.LenAtLayer(uint8(level))) + } else { + connectionsReusable = connectionsReusable[:candidateNode.connections.LenAtLayer(uint8(level))] + } + connectionsReusable = candidateNode.connections.CopyLayer(connectionsReusable, uint8(level)) + } else { + connectionsReusable = sliceConnectionsReusable.Slice + pendingNextRound := slicePendingNextRound.Slice + pendingThisRound := slicePendingThisRound.Slice + + realLen := 0 + index := 0 + + pendingNextRound = pendingNextRound[:candidateNode.connections.LenAtLayer(uint8(level))] + pendingNextRound = candidateNode.connections.CopyLayer(pendingNextRound, uint8(level)) + hop := 1 + maxHops := 2 + for hop <= maxHops && realLen < 8*h.maximumConnectionsLayerZero && len(pendingNextRound) > 0 { + if cap(pendingThisRound) >= len(pendingNextRound) { + pendingThisRound = pendingThisRound[:len(pendingNextRound)] + } else { + pendingThisRound = make([]uint64, len(pendingNextRound)) + slicePendingThisRound.Slice = pendingThisRound + } + copy(pendingThisRound, pendingNextRound) + pendingNextRound = pendingNextRound[:0] + for index < len(pendingThisRound) && realLen < 8*h.maximumConnectionsLayerZero { + nodeId := pendingThisRound[index] + index++ + if ok := visited.Visited(nodeId); ok { + // skip if we've already visited this neighbor + continue + } + if !visitedExp.Visited(nodeId) { + if !isMultivec { + if allowList.Contains(nodeId) { + connectionsReusable[realLen] = nodeId + realLen++ + visitedExp.Visit(nodeId) + continue + } + } else { + var docID uint64 + if h.compressed.Load() { + docID, _ = h.compressor.GetKeys(nodeId) + } else { + docID, _ = h.cache.GetKeys(nodeId) + } + if allowList.Contains(docID) { + connectionsReusable[realLen] = nodeId + realLen++ + visitedExp.Visit(nodeId) + continue + } + } + } else { + continue + } + visitedExp.Visit(nodeId) + + h.RLock() + 
h.shardedNodeLocks.RLock(nodeId) + node := h.nodes[nodeId] + h.shardedNodeLocks.RUnlock(nodeId) + h.RUnlock() + if node == nil { + continue + } + iterator := node.connections.ElementIterator(uint8(level)) + for iterator.Next() { + _, expId := iterator.Current() + if visitedExp.Visited(expId) { + continue + } + if visited.Visited(expId) { + continue + } + + if realLen >= 8*h.maximumConnectionsLayerZero { + break + } + + if !isMultivec { + if allowList.Contains(expId) { + visitedExp.Visit(expId) + connectionsReusable[realLen] = expId + realLen++ + } else if hop < maxHops { + visitedExp.Visit(expId) + pendingNextRound = append(pendingNextRound, expId) + } + } else { + var docID uint64 + if h.compressed.Load() { + docID, _ = h.compressor.GetKeys(expId) + } else { + docID, _ = h.cache.GetKeys(expId) + } + if allowList.Contains(docID) { + visitedExp.Visit(expId) + connectionsReusable[realLen] = expId + realLen++ + } else if hop < maxHops { + visitedExp.Visit(expId) + pendingNextRound = append(pendingNextRound, expId) + } + } + } + } + hop++ + } + slicePendingNextRound.Slice = pendingNextRound + connectionsReusable = connectionsReusable[:realLen] + } + candidateNode.Unlock() + + for _, neighborID := range connectionsReusable { + if ok := visited.Visited(neighborID); ok { + // skip if we've already visited this neighbor + continue + } + + // make sure we never visit this neighbor again + visited.Visit(neighborID) + + if strategy == RRE && level == 0 { + if isMultivec { + var docID uint64 + if h.compressed.Load() { + docID, _ = h.compressor.GetKeys(neighborID) + } else { + docID, _ = h.cache.GetKeys(neighborID) + } + if !allowList.Contains(docID) { + continue + } + } else if !allowList.Contains(neighborID) { + continue + } + } + var distance float32 + var err error + if h.compressed.Load() { + distance, err = compressorDistancer.DistanceToNode(neighborID) + } else { + distance, err = h.distanceToFloatNode(floatDistancer, neighborID) + } + if err != nil { + var e 
storobj.ErrNotFound + if errors.As(err, &e) { + h.handleDeletedNode(e.DocID, "searchLayerByVectorWithDistancer") + continue + } else { + h.pools.visitedListsLock.RLock() + h.pools.visitedLists.Return(visited) + h.pools.visitedLists.Return(visitedExp) + h.pools.visitedListsLock.RUnlock() + return nil, errors.Wrap(err, "calculate distance between candidate and query") + } + } + + if distance < worstResultDistance || results.Len() < ef { + candidates.Insert(neighborID, distance) + if strategy == SWEEPING && level == 0 && allowList != nil { + // we are on the lowest level containing the actual candidates and we + // have an allow list (i.e. the user has probably set some sort of a + // filter restricting this search further. As a result we have to + // ignore items not on the list + if isMultivec { + var docID uint64 + if h.compressed.Load() { + docID, _ = h.compressor.GetKeys(neighborID) + } else { + docID, _ = h.cache.GetKeys(neighborID) + } + if !allowList.Contains(docID) { + continue + } + } else if !allowList.Contains(neighborID) { + continue + } + } + + if h.hasTombstone(neighborID) { + continue + } + + results.Insert(neighborID, distance) + + if h.compressed.Load() { + h.compressor.Prefetch(candidates.Top().ID) + } else { + h.cache.Prefetch(candidates.Top().ID) + } + + // +1 because we have added one node size calculating the len + if results.Len() > ef { + results.Pop() + } + + if results.Len() > 0 { + worstResultDistance = results.Top().Dist + } + } + } + } + + if strategy == ACORN { + h.pools.tempVectorsUint64.Put(sliceConnectionsReusable) + h.pools.tempVectorsUint64.Put(slicePendingNextRound) + h.pools.tempVectorsUint64.Put(slicePendingThisRound) + } + + h.pools.pqCandidates.Put(candidates) + + h.pools.visitedListsLock.RLock() + h.pools.visitedLists.Return(visited) + h.pools.visitedLists.Return(visitedExp) + h.pools.visitedListsLock.RUnlock() + + return results, nil +} + +func (h *hnsw) insertViableEntrypointsAsCandidatesAndResults( + entrypoints, 
candidates, results *priorityqueue.Queue[any], level int, + visitedList visited.ListSet, allowList helpers.AllowList, +) { + isMultivec := h.multivector.Load() && !h.muvera.Load() + for entrypoints.Len() > 0 { + ep := entrypoints.Pop() + visitedList.Visit(ep.ID) + candidates.Insert(ep.ID, ep.Dist) + if level == 0 && allowList != nil { + // we are on the lowest level containing the actual candidates and we + // have an allow list (i.e. the user has probably set some sort of a + // filter restricting this search further. As a result we have to + // ignore items not on the list + if isMultivec { + var docID uint64 + if h.compressed.Load() { + docID, _ = h.compressor.GetKeys(ep.ID) + } else { + docID, _ = h.cache.GetKeys(ep.ID) + } + if !allowList.Contains(docID) { + continue + } + } else if !allowList.Contains(ep.ID) { + continue + } + } + + if h.hasTombstone(ep.ID) { + continue + } + + results.Insert(ep.ID, ep.Dist) + } +} + +func (h *hnsw) currentWorstResultDistanceToFloat(results *priorityqueue.Queue[any], + distancer distancer.Distancer, +) (float32, error) { + if results.Len() > 0 { + id := results.Top().ID + + d, err := h.distanceToFloatNode(distancer, id) + if err != nil { + var e storobj.ErrNotFound + if errors.As(err, &e) { + h.handleDeletedNode(e.DocID, "currentWorstResultDistanceToFloat") + return math.MaxFloat32, nil + } + return 0, errors.Wrap(err, "calculated distance between worst result and query") + } + + return d, nil + } else { + // if the entrypoint (which we received from a higher layer doesn't match + // the allow List the result list is empty. 
In this case we can just set + // the worstDistance to an arbitrarily large number, so that any + // (allowed) candidate will have a lower distance in comparison + return math.MaxFloat32, nil + } +} + +func (h *hnsw) currentWorstResultDistanceToByte(results *priorityqueue.Queue[any], + distancer compressionhelpers.CompressorDistancer, +) (float32, error) { + if results.Len() > 0 { + item := results.Top() + if item.Dist != 0 { + return item.Dist, nil + } + id := item.ID + d, err := distancer.DistanceToNode(id) + if err != nil { + var e storobj.ErrNotFound + if errors.As(err, &e) { + h.handleDeletedNode(e.DocID, "currentWorstResultDistanceToByte") + return math.MaxFloat32, nil + } + return 0, errors.Wrap(err, + "calculated distance between worst result and query") + } + + return d, nil + } else { + // if the entrypoint (which we received from a higher layer doesn't match + // the allow List the result list is empty. In this case we can just set + // the worstDistance to an arbitrarily large number, so that any + // (allowed) candidate will have a lower distance in comparison + return math.MaxFloat32, nil + } +} + +func (h *hnsw) distanceFromBytesToFloatNode(concreteDistancer compressionhelpers.CompressorDistancer, nodeID uint64) (float32, error) { + slice := h.pools.tempVectors.Get(int(h.dims)) + defer h.pools.tempVectors.Put(slice) + var vec []float32 + var err error + if h.muvera.Load() || !h.multivector.Load() { + vec, err = h.TempVectorForIDThunk(context.Background(), nodeID, slice) + } else { + docID, relativeID := h.cache.GetKeys(nodeID) + vecs, err := h.TempMultiVectorForIDThunk(context.Background(), docID, slice) + if err != nil { + return 0, err + } else if len(vecs) <= int(relativeID) { + return 0, errors.Errorf("relativeID %d is out of bounds for docID %d", relativeID, docID) + } + vec = vecs[relativeID] + } + if err != nil { + var e storobj.ErrNotFound + if errors.As(err, &e) { + h.handleDeletedNode(e.DocID, "distanceFromBytesToFloatNode") + return 0, err 
+ } + // not a typed error, we can recover from, return with err + return 0, errors.Wrapf(err, "get vector of docID %d", nodeID) + } + vec = h.normalizeVec(vec) + return concreteDistancer.DistanceToFloat(vec) +} + +func (h *hnsw) distanceToFloatNode(distancer distancer.Distancer, nodeID uint64) (float32, error) { + candidateVec, err := h.vectorForID(context.Background(), nodeID) + if err != nil { + return 0, err + } + + dist, err := distancer.Distance(candidateVec) + if err != nil { + return 0, errors.Wrap(err, "calculate distance between candidate and query") + } + + return dist, nil +} + +// the underlying object seems to have been deleted, to recover from +// this situation let's add a tombstone to the deleted object, so it +// will be cleaned up and skip this candidate in the current search +func (h *hnsw) handleDeletedNode(docID uint64, operation string) { + if h.hasTombstone(docID) { + // nothing to do, this node already has a tombstone, it will be cleaned up + // in the next deletion cycle + return + } + + h.addTombstone(docID) + h.metrics.AddUnexpectedTombstone(operation) + h.logger.WithField("action", "attach_tombstone_to_deleted_node"). + WithField("node_id", docID). 
+ Debugf("found a deleted node (%d) without a tombstone, "+ + "tombstone was added", docID) +} + +func (h *hnsw) knnSearchByVector(ctx context.Context, searchVec []float32, k int, + ef int, allowList helpers.AllowList, +) ([]uint64, []float32, error) { + if h.isEmpty() { + return nil, nil, nil + } + + if k < 0 { + return nil, nil, fmt.Errorf("k must be greater than zero") + } + + h.RLock() + entryPointID := h.entryPointID + maxLayer := h.currentMaximumLayer + h.RUnlock() + + var compressorDistancer compressionhelpers.CompressorDistancer + if h.compressed.Load() { + var returnFn compressionhelpers.ReturnDistancerFn + compressorDistancer, returnFn = h.compressor.NewDistancer(searchVec) + defer returnFn() + } + entryPointDistance, err := h.distToNode(compressorDistancer, entryPointID, searchVec) + var e storobj.ErrNotFound + if err != nil && errors.As(err, &e) { + h.handleDeletedNode(e.DocID, "knnSearchByVector") + return nil, nil, fmt.Errorf("entrypoint was deleted in the object store, " + + "it has been flagged for cleanup and should be fixed in the next cleanup cycle") + } + if err != nil { + return nil, nil, errors.Wrap(err, "knn search: distance between entrypoint and query node") + } + + // stop at layer 1, not 0! + for level := maxLayer; level >= 1; level-- { + eps := priorityqueue.NewMin[any](10) + eps.Insert(entryPointID, entryPointDistance) + + res, err := h.searchLayerByVectorWithDistancer(ctx, searchVec, eps, 1, level, nil, compressorDistancer) + if err != nil { + return nil, nil, errors.Wrapf(err, "knn search: search layer at level %d", level) + } + + // There might be situations where we did not find a better entrypoint at + // that particular level, so instead we're keeping whatever entrypoint we + // had before (i.e. 
either from a previous level or even the main + // entrypoint) + // + // If we do, however, have results, any candidate that's not nil (not + // deleted), and not under maintenance is a viable candidate + for res.Len() > 0 { + cand := res.Pop() + n := h.nodeByID(cand.ID) + if n == nil { + // we have found a node in results that is nil. This means it was + // deleted, but not cleaned up properly. Make sure to add a tombstone to + // this node, so it can be cleaned up in the next cycle. + if err := h.addTombstone(cand.ID); err != nil { + return nil, nil, err + } + + // skip the nil node, as it does not make a valid entrypoint + continue + } + + if !n.isUnderMaintenance() { + entryPointID = cand.ID + entryPointDistance = cand.Dist + break + } + + // if we managed to go through the loop without finding a single + // suitable node, we simply stick with the original, i.e. the global + // entrypoint + } + + h.pools.pqResults.Put(res) + } + + eps := priorityqueue.NewMin[any](10) + eps.Insert(entryPointID, entryPointDistance) + var strategy FilterStrategy + h.shardedNodeLocks.RLock(entryPointID) + entryPointNode := h.nodes[entryPointID] + h.shardedNodeLocks.RUnlock(entryPointID) + useAcorn := h.acornEnabled(allowList) + isMultivec := h.multivector.Load() && !h.muvera.Load() + if useAcorn { + if entryPointNode == nil { + strategy = RRE + } else { + counter := float32(0) + entryPointNode.Lock() + if entryPointNode.connections.Layers() < 1 { + strategy = ACORN + } else { + iterator := entryPointNode.connections.ElementIterator(0) + for iterator.Next() { + _, value := iterator.Current() + if isMultivec { + if h.compressed.Load() { + value, _ = h.compressor.GetKeys(value) + } else { + value, _ = h.cache.GetKeys(value) + } + } + if allowList.Contains(value) { + counter++ + } + } + entryPointNode.Unlock() + if counter/float32(h.nodes[entryPointID].connections.LenAtLayer(0)) > float32(h.acornFilterRatio) { + strategy = RRE + } else { + strategy = ACORN + } + } + } + } else { + 
strategy = SWEEPING + } + + if allowList != nil && useAcorn { + it := allowList.Iterator() + idx, ok := it.Next() + h.shardedNodeLocks.RLockAll() + if !isMultivec { + for ok && h.nodes[idx] == nil && h.hasTombstone(idx) { + idx, ok = it.Next() + } + } else { + _, exists := h.docIDVectors[idx] + for ok && !exists { + idx, ok = it.Next() + _, exists = h.docIDVectors[idx] + } + } + h.shardedNodeLocks.RUnlockAll() + + entryPointDistance, _ := h.distToNode(compressorDistancer, idx, searchVec) + eps.Insert(idx, entryPointDistance) + } + res, err := h.searchLayerByVectorWithDistancerWithStrategy(ctx, searchVec, eps, ef, 0, allowList, compressorDistancer, strategy) + if err != nil { + return nil, nil, errors.Wrapf(err, "knn search: search layer at level %d", 0) + } + + beforeRescore := time.Now() + if h.shouldRescore() && !h.multivector.Load() { + if err := h.rescore(ctx, res, k, compressorDistancer); err != nil { + helpers.AnnotateSlowQueryLog(ctx, "context_error", "knn_search_rescore") + took := time.Since(beforeRescore) + helpers.AnnotateSlowQueryLog(ctx, "knn_search_rescore_took", took) + return nil, nil, fmt.Errorf("knn search: %w", err) + } + took := time.Since(beforeRescore) + helpers.AnnotateSlowQueryLog(ctx, "knn_search_rescore_took", took) + } + + if !h.multivector.Load() { + for res.Len() > k { + res.Pop() + } + } + ids := make([]uint64, res.Len()) + dists := make([]float32, res.Len()) + + // results is ordered in reverse, we need to flip the order before presenting + // to the user! 
+ i := len(ids) - 1 + for res.Len() > 0 { + res := res.Pop() + ids[i] = res.ID + dists[i] = res.Dist + i-- + } + h.pools.pqResults.Put(res) + return ids, dists, nil +} + +func (h *hnsw) knnSearchByMultiVector(ctx context.Context, queryVectors [][]float32, k int, allowList helpers.AllowList) ([]uint64, []float32, error) { + kPrime := k + candidateSet := make(map[uint64]struct{}) + for _, vec := range queryVectors { + ids, _, err := h.knnSearchByVector(ctx, vec, kPrime, h.searchTimeEF(kPrime), allowList) + if err != nil { + return nil, nil, err + } + for _, id := range ids { + var docId uint64 + if !h.compressed.Load() { + docId, _ = h.cache.GetKeys(id) + } else { + docId, _ = h.compressor.GetKeys(id) + } + candidateSet[docId] = struct{}{} + } + } + return h.computeLateInteraction(queryVectors, k, candidateSet) +} + +func (h *hnsw) computeLateInteraction(queryVectors [][]float32, k int, candidateSet map[uint64]struct{}) ([]uint64, []float32, error) { + resultsQueue := priorityqueue.NewMax[any](1) + for docID := range candidateSet { + sim, err := h.computeScore(queryVectors, docID) + if err != nil { + return nil, nil, err + } + resultsQueue.Insert(docID, sim) + if resultsQueue.Len() > k { + resultsQueue.Pop() + } + } + + distances := make([]float32, resultsQueue.Len()) + ids := make([]uint64, resultsQueue.Len()) + + i := len(ids) - 1 + for resultsQueue.Len() > 0 { + element := resultsQueue.Pop() + ids[i] = element.ID + distances[i] = element.Dist + i-- + } + + return ids, distances, nil +} + +func (h *hnsw) computeScore(searchVecs [][]float32, docID uint64) (float32, error) { + h.RLock() + vecIDs := h.docIDVectors[docID] + h.RUnlock() + var docVecs [][]float32 + if h.compressed.Load() { + slice := h.pools.tempVectors.Get(int(h.dims)) + var err error + docVecs, err = h.TempMultiVectorForIDThunk(context.Background(), docID, slice) + if err != nil { + return 0.0, errors.Wrap(err, "get vector for docID") + } + h.pools.tempVectors.Put(slice) + } else { + if 
!h.muvera.Load() { + var errs []error + docVecs, errs = h.multiVectorForID(context.Background(), vecIDs) + for _, err := range errs { + if err != nil { + return 0.0, errors.Wrap(err, "get vector for docID") + } + } + } else { + var err error + docVecs, err = h.cache.GetDoc(context.Background(), docID) + if err != nil { + return 0.0, errors.Wrap(err, "get muvera vector for docID") + } + } + } + + similarity := float32(0.0) + + var distancer distancer.Distancer + for _, searchVec := range searchVecs { + maxSim := float32(math.MaxFloat32) + distancer = h.multiDistancerProvider.New(searchVec) + + for _, docVec := range docVecs { + dist, err := distancer.Distance(docVec) + if err != nil { + return 0.0, errors.Wrap(err, "calculate distance between candidate and query") + } + if dist < maxSim { + maxSim = dist + } + } + + similarity += maxSim + } + + return similarity, nil +} + +func (h *hnsw) QueryVectorDistancer(queryVector []float32) common.QueryVectorDistancer { + queryVector = h.normalizeVec(queryVector) + if h.compressed.Load() { + dist, returnFn := h.compressor.NewDistancer(queryVector) + f := func(nodeID uint64) (float32, error) { + if int(nodeID) > len(h.nodes) { + return -1, fmt.Errorf("node %v is larger than the cache size %v", nodeID, len(h.nodes)) + } + + return dist.DistanceToNode(nodeID) + } + return common.QueryVectorDistancer{DistanceFunc: f, CloseFunc: returnFn} + + } else { + distancer := h.distancerProvider.New(queryVector) + f := func(nodeID uint64) (float32, error) { + if int(nodeID) > len(h.nodes) { + return -1, fmt.Errorf("node %v is larger than the cache size %v", nodeID, len(h.nodes)) + } + return h.distanceToFloatNode(distancer, nodeID) + } + return common.QueryVectorDistancer{DistanceFunc: f} + } +} + +func (h *hnsw) QueryMultiVectorDistancer(queryVector [][]float32) common.QueryVectorDistancer { + queryVector = h.normalizeVecs(queryVector) + f := func(docID uint64) (float32, error) { + h.RLock() + _, ok := h.docIDVectors[docID] + h.RUnlock() + 
// rescore recomputes exact (uncompressed) distances for the approximate
// results in res and rebuilds the queue with the k best of them. It first
// optionally trims the candidate list to the configured SQ/RQ rescore limit,
// drains res into a plain id list, then fans the exact distance computation
// out over h.rescoreConcurrency workers. Nodes that fail to rescore are logged
// and dropped rather than failing the whole search; a cancelled ctx aborts.
func (h *hnsw) rescore(ctx context.Context, res *priorityqueue.Queue[any], k int, compressorDistancer compressionhelpers.CompressorDistancer) error {
	// trim to the scalar-quantization rescore limit, if configured and >= k
	if h.sqConfig.Enabled && h.sqConfig.RescoreLimit >= k {
		for res.Len() > h.sqConfig.RescoreLimit {
			res.Pop()
		}
	}
	// same for rotational quantization
	if h.rqConfig.Enabled && h.rqConfig.RescoreLimit >= k {
		for res.Len() > h.rqConfig.RescoreLimit {
			res.Pop()
		}
	}
	// drain the queue into ids (back-to-front so ids ends up front-to-back);
	// the inner res shadows the queue variable for the popped item only
	ids := make([]uint64, res.Len())
	i := len(ids) - 1
	for res.Len() > 0 {
		res := res.Pop()
		ids[i] = res.ID
		i--
	}
	// res is now empty and will be refilled with exact distances below
	res.Reset()

	mu := sync.Mutex{} // protect res
	// addID inserts a rescored candidate and keeps the queue bounded at k
	addID := func(id uint64, dist float32) {
		mu.Lock()
		defer mu.Unlock()

		res.Insert(id, dist)
		if res.Len() > k {
			res.Pop()
		}
	}

	eg := enterrors.NewErrorGroupWrapper(h.logger)
	for workerID := 0; workerID < h.rescoreConcurrency; workerID++ {
		workerID := workerID // capture per-iteration copy for the closure

		eg.Go(func() error {
			// strided partition: worker w handles ids[w], ids[w+C], ...
			for idPos := workerID; idPos < len(ids); idPos += h.rescoreConcurrency {
				if err := ctx.Err(); err != nil {
					return fmt.Errorf("rescore: %w", err)
				}

				id := ids[idPos]
				dist, err := h.distanceFromBytesToFloatNode(compressorDistancer, id)
				if err == nil {
					addID(id, dist)
				} else {
					// best-effort: a node that cannot be rescored is dropped
					// from the results, not a fatal error
					h.logger.
						WithField("action", "rescore").
						WithError(err).
						Warnf("could not rescore node %d", id)
				}
			}
			return nil
		}, h.logger)
	}

	if err := eg.Wait(); err != nil {
		return err
	}

	return nil
}
// newSearchByDistParams returns the paging state for an iteratively widening
// search-by-distance, starting at offset 0 with the default initial limit.
// maxLimit caps the total window size; a negative value means unlimited.
func newSearchByDistParams(maxLimit int64) *searchByDistParams {
	const (
		startOffset = 0
		startLimit  = DefaultSearchByDistInitialLimit
	)

	return &searchByDistParams{
		offset:             startOffset,
		limit:              startLimit,
		totalLimit:         startOffset + startLimit,
		maximumSearchLimit: maxLimit,
	}
}

const (
	// DefaultSearchByDistInitialLimit :
	// the initial limit of 100 here is an
	// arbitrary decision, and can be tuned
	// as needed
	DefaultSearchByDistInitialLimit = 100

	// DefaultSearchByDistLimitMultiplier :
	// the decision to increase the limit in
	// multiples of 10 here is an arbitrary
	// decision, and can be tuned as needed
	DefaultSearchByDistLimitMultiplier = 10
)

// searchByDistParams tracks the window [offset, totalLimit) of the current
// search iteration plus the overall cap on how far the window may grow.
type searchByDistParams struct {
	offset             int
	limit              int
	totalLimit         int
	maximumSearchLimit int64
}

// offsetCapacity clamps the window's start index to the number of available
// results so slicing can never go out of range.
func (params *searchByDistParams) offsetCapacity(ids []uint64) int {
	if params.offset >= len(ids) {
		return len(ids)
	}
	return params.offset
}

// totalLimitCapacity clamps the window's end index to the number of available
// results so slicing can never go out of range.
func (params *searchByDistParams) totalLimitCapacity(ids []uint64) int {
	if params.totalLimit >= len(ids) {
		return len(ids)
	}
	return params.totalLimit
}

// iterate advances the window: the new offset starts where the previous
// window ended and the limit grows by the configured multiplier.
func (params *searchByDistParams) iterate() {
	params.offset = params.totalLimit
	params.limit *= DefaultSearchByDistLimitMultiplier
	params.totalLimit = params.offset + params.limit
}

// maxLimitReached reports whether the widened window exceeds the configured
// maximum; a negative maximum disables the cap entirely.
func (params *searchByDistParams) maxLimitReached() bool {
	if params.maximumSearchLimit < 0 {
		return false
	}
	return int64(params.totalLimit) > params.maximumSearchLimit
}
helpers.AllowList) ([]uint64, []float32, error), + logger logrus.FieldLogger, +) ([]uint64, []float32, error) { + var ( + searchParams = newSearchByDistParams(maxLimit) + + resultIDs []uint64 + resultDist []float32 + ) + + recursiveSearch := func() (bool, error) { + shouldContinue := false + + ids, dist, err := searchByVector(ctx, vector, searchParams.totalLimit, allowList) + if err != nil { + return false, errors.Wrap(err, "vector search") + } + + // ensures the indexers aren't out of range + offsetCap := searchParams.offsetCapacity(ids) + totalLimitCap := searchParams.totalLimitCapacity(ids) + + ids, dist = ids[offsetCap:totalLimitCap], dist[offsetCap:totalLimitCap] + + if len(ids) == 0 { + return false, nil + } + + lastFound := dist[len(dist)-1] + shouldContinue = lastFound <= targetDistance + + for i := range ids { + if aboveThresh := dist[i] <= targetDistance; aboveThresh || + floatcomp.InDelta(float64(dist[i]), float64(targetDistance), 1e-6) { + resultIDs = append(resultIDs, ids[i]) + resultDist = append(resultDist, dist[i]) + } else { + // as soon as we encounter a certainty which + // is below threshold, we can stop searching + break + } + } + + return shouldContinue, nil + } + + shouldContinue, err := recursiveSearch() + if err != nil { + return nil, nil, err + } + + for shouldContinue { + searchParams.iterate() + if searchParams.maxLimitReached() { + logger. + WithField("action", "unlimited_vector_search"). 
// TestSearchByDistParams verifies the initial paging window of the
// search-by-distance parameters and that iterate() advances the offset to the
// previous window's end while multiplying the limit by the configured factor.
func TestSearchByDistParams(t *testing.T) {
	t.Run("param iteration", func(t *testing.T) {
		params := newSearchByDistParams(100)
		// fresh params: offset 0, limit 100 => window end at 100
		assert.Equal(t, 0, params.offset)
		assert.Equal(t, DefaultSearchByDistInitialLimit, params.limit)
		assert.Equal(t, 100, params.totalLimit)

		params.iterate()
		// one iteration: offset jumps to the old totalLimit, limit grows x10
		assert.Equal(t, 100, params.offset)
		assert.Equal(t, 1000, params.limit)
		assert.Equal(t, 1100, params.totalLimit)
	})
}
// prevents a regression of
// https://github.com/weaviate/weaviate/issues/2155
//
// The test hand-crafts a graph whose node 1 is nil (deleted but not cleaned
// up) and verifies that a search both survives the nil node and flags it with
// a tombstone for the next cleanup cycle.
func TestNilCheckOnPartiallyCleanedNode(t *testing.T) {
	ctx := context.Background()
	vectors := [][]float32{
		{100, 100}, // first to import makes this the EP, it is far from any query which means it will be replaced.
		{2, 2},     // a good potential entrypoint, but we will corrupt it later on
		{1, 1},     // the perfect search result
	}

	var vectorIndex *hnsw

	t.Run("import", func(*testing.T) {
		index, err := New(Config{
			RootPath:              "doesnt-matter-as-committlogger-is-mocked-out",
			ID:                    "bug-2155",
			MakeCommitLoggerThunk: MakeNoopCommitLogger,
			DistanceProvider:      distancer.NewL2SquaredProvider(),
			VectorForIDThunk: func(ctx context.Context, id uint64) ([]float32, error) {
				return vectors[int(id)], nil
			},
		}, ent.UserConfig{
			MaxConnections: 30,
			EFConstruction: 128,

			// The actual size does not matter for this test, but if it defaults to
			// zero it will constantly think it's full and needs to be deleted - even
			// after just being deleted, so make sure to use a positive number here.
			VectorCacheMaxObjects: 100000,
		}, cyclemanager.NewCallbackGroupNoop(), testinghelpers.NewDummyStore(t))
		require.Nil(t, err)
		vectorIndex = index
	})

	t.Run("manually add the nodes", func(t *testing.T) {
		// bypass Add() entirely and wire the graph by hand so we can place a
		// nil entry at position 1
		vectorIndex.entryPointID = 0
		vectorIndex.currentMaximumLayer = 1
		conns1, _ := packedconn.NewWithElements([][]uint64{
			{1, 2},
			{1},
		})
		conns2, _ := packedconn.NewWithElements([][]uint64{
			{0, 1, 2},
		})
		vectorIndex.nodes = []*vertex{
			{
				// must be on a non-zero layer for this bug to occur
				level:       1,
				connections: conns1,
			},
			nil, // corrupt node
			{
				level:       0,
				connections: conns2,
			},
		}
	})

	t.Run("run a search that would typically find the new ep", func(t *testing.T) {
		res, _, err := vectorIndex.SearchByVector(ctx, []float32{1.7, 1.7}, 20, nil)
		require.Nil(t, err)
		assert.Equal(t, []uint64{2, 0}, res, "right results are found")
	})

	t.Run("the corrupt node is now marked deleted", func(t *testing.T) {
		// the search must have attached a tombstone to the nil node
		_, ok := vectorIndex.tombstones[1]
		assert.True(t, ok)
	})
}
// TestQueryVectorDistancer checks that the single-vector query distancer
// returns the exact distance for an existing node and errors for node ids
// beyond the index size.
func TestQueryVectorDistancer(t *testing.T) {
	vectors := [][]float32{
		{100, 100}, // first to import makes this the EP, it is far from any query which means it will be replaced.
		{2, 2},     // a good potential entrypoint, but we will corrupt it later on
		{1, 1},     // the perfect search result
	}

	index, err := New(Config{
		RootPath:              "doesnt-matter-as-committlogger-is-mocked-out",
		ID:                    "bug-2155",
		MakeCommitLoggerThunk: MakeNoopCommitLogger,
		DistanceProvider:      distancer.NewL2SquaredProvider(),
		VectorForIDThunk: func(ctx context.Context, id uint64) ([]float32, error) {
			return vectors[int(id)], nil
		},
	}, ent.UserConfig{
		MaxConnections: 30,
		EFConstruction: 128,

		// The actual size does not matter for this test, but if it defaults to
		// zero it will constantly think it's full and needs to be deleted - even
		// after just being deleted, so make sure to use a positive number here.
		VectorCacheMaxObjects: 100000,
	}, cyclemanager.NewCallbackGroupNoop(), testinghelpers.NewDummyStore(t))
	require.Nil(t, err)

	index.Add(context.TODO(), uint64(0), []float32{-1, 0})

	dist := index.QueryVectorDistancer([]float32{0, 0})
	require.NotNil(t, dist)
	// L2-squared between {0,0} and {-1,0} is 1
	distance, err := dist.DistanceToNode(0)
	require.Nil(t, err)
	require.Equal(t, distance, float32(1.))

	// get distance for non-existing node above default cache size
	_, err = dist.DistanceToNode(1001)
	require.NotNil(t, err)
}

// TestQueryMultiVectorDistancer checks the late-interaction distancer: the
// score is the sum over query tokens of the minimum (dot-product) distance to
// any document token, and unknown doc ids return an error.
func TestQueryMultiVectorDistancer(t *testing.T) {
	vectors := [][][]float32{
		{{0.3, 0.1}, {1, 0}},
	}

	index, err := New(Config{
		RootPath:              "doesnt-matter-as-committlogger-is-mocked-out",
		ID:                    "bug-2155",
		MakeCommitLoggerThunk: MakeNoopCommitLogger,
		DistanceProvider:      distancer.NewDotProductProvider(),
		VectorForIDThunk: func(ctx context.Context, id uint64) ([]float32, error) {
			return vectors[0][int(id)], nil
		},
		MultiVectorForIDThunk: func(ctx context.Context, id uint64) ([][]float32, error) {
			return vectors[int(id)], nil
		},
	}, ent.UserConfig{
		MaxConnections:        30,
		EFConstruction:        128,
		VectorCacheMaxObjects: 100000,
		Multivector: ent.MultivectorConfig{
			Enabled: true,
		},
	}, cyclemanager.NewCallbackGroupNoop(), testinghelpers.NewDummyStore(t))
	require.Nil(t, err)

	index.AddMulti(context.TODO(), uint64(0), vectors[0])

	dist := index.QueryMultiVectorDistancer([][]float32{{0.2, 0}, {1, 0}})
	require.NotNil(t, dist)
	// dot-product distances are negated dot products; the per-token minima
	// sum to -0.2 + -1.0 = -1.2 for doc 0
	distance, err := dist.DistanceToNode(0)
	require.Nil(t, err)
	require.Equal(t, float32(-1.2), distance)

	// get distance for non-existing node
	_, err = dist.DistanceToNode(1032)
	require.NotNil(t, err)
}

// TestAcornPercentage verifies that ACORN is only used when the feature flag
// is on AND the allow list covers a small enough fraction of the index
// (relative to the configured AcornFilterRatio of 0.4).
func TestAcornPercentage(t *testing.T) {
	vectors, _ := testinghelpers.RandomVecs(10, 1, 3)
	var vectorIndex *hnsw

	store := testinghelpers.NewDummyStore(t)
	defer store.Shutdown(context.Background())

	t.Run("import test vectors", func(t *testing.T) {
		index, err := New(Config{
			RootPath:              "doesnt-matter-as-committlogger-is-mocked-out",
			ID:                    "delete-test",
			MakeCommitLoggerThunk: MakeNoopCommitLogger,
			DistanceProvider:      distancer.NewCosineDistanceProvider(),
			VectorForIDThunk: func(ctx context.Context, id uint64) ([]float32, error) {
				return vectors[int(id)], nil
			},
			TempVectorForIDThunk: TempVectorForIDThunk(vectors),
			AcornFilterRatio:     0.4,
		}, ent.UserConfig{
			MaxConnections:        16,
			EFConstruction:        16,
			VectorCacheMaxObjects: 1000,
		}, cyclemanager.NewCallbackGroupNoop(), store)
		require.Nil(t, err)
		vectorIndex = index

		for i, vec := range vectors {
			err := vectorIndex.Add(context.TODO(), uint64(i), vec)
			require.Nil(t, err)
		}
	})

	t.Run("check acorn params on different filter percentags", func(t *testing.T) {
		// flag off: never use ACORN, regardless of the allow list
		vectorIndex.acornSearch.Store(false)
		allowList := helpers.NewAllowList(1, 2, 3)
		useAcorn := vectorIndex.acornEnabled(allowList)
		assert.False(t, useAcorn)

		// flag on + 30% coverage (3 of 10): below the 0.4 ratio -> ACORN
		vectorIndex.acornSearch.Store(true)

		useAcorn = vectorIndex.acornEnabled(allowList)
		assert.True(t, useAcorn)

		vectorIndex.acornSearch.Store(true)

		largerAllowList := helpers.NewAllowList(1, 2, 3, 4, 5)
		useAcorn = vectorIndex.acornEnabled(largerAllowList)
		// should be false as allow list percentage is 50%
		assert.False(t, useAcorn)
	})
}
// TestRescore seeds a result queue with random (wrong) distances and checks
// that rescore() restores the exact brute-force top-k ordering across various
// concurrency/limit combinations, and that a cancelled context aborts with
// context.Canceled.
func TestRescore(t *testing.T) {
	for _, contextCancelled := range []bool{false, true} {
		type test struct {
			name        string
			concurrency int
			k           int
			objects     int
		}

		tests := []test{
			{
				name:        "single-threaded, limit < objects",
				concurrency: 1,
				k:           10,
				objects:     100,
			},
			{
				name:        "two threads, limit < objects",
				concurrency: 2,
				k:           10,
				objects:     50,
			},
			{
				name:        "more threads than objects",
				concurrency: 60,
				k:           10,
				objects:     50,
			},
			{
				name:        "result limit above objects with no concurrency",
				concurrency: 1,
				k:           60,
				objects:     50,
			},
			{
				name:        "result limit above objects with low concurrency",
				concurrency: 4,
				k:           60,
				objects:     50,
			},
			{
				name:        "result limit above objects with high concurrency",
				concurrency: 100,
				k:           60,
				objects:     50,
			},
		}

		logger := logrus.New()

		for _, test := range tests {
			name := fmt.Sprintf("%s, context cancelled: %v", test.name, contextCancelled)
			t.Run(name, func(t *testing.T) {
				vectors, queries := testinghelpers.RandomVecs(test.objects, 1, 128)
				ctx, cancel := context.WithCancel(context.Background())
				defer cancel()

				d := distancer.NewDotProductProvider()
				distFn := func(a, b []float32) float32 {
					dist, _ := d.SingleDist(a, b)
					return dist
				}
				// ground truth: exact top-k by brute force
				ids, _ := testinghelpers.BruteForce(logger, vectors, queries[0], test.k, distFn)

				res := priorityqueue.NewMax[any](test.k)
				// insert with random distances, so the result can't possibly be correct
				// without re-ranking
				for i := 0; i < test.objects; i++ {
					res.Insert(uint64(i), rand.Float32())
				}

				// minimal hnsw: only the fields rescore() actually touches
				h := &hnsw{
					rescoreConcurrency: test.concurrency,
					logger:             logger,
					TempVectorForIDThunk: func(
						ctx context.Context, id uint64, container *common.VectorSlice,
					) ([]float32, error) {
						return vectors[id], nil
					},
					pools:             newPools(32, 1),
					distancerProvider: d,
				}

				compDistancer := newFakeCompressionDistancer(queries[0], distFn)
				if contextCancelled {
					// cancel before rescoring so the context check trips
					cancel()
				}
				err := h.rescore(ctx, res, test.k, compDistancer)

				if contextCancelled {
					assert.True(t, errors.Is(err, context.Canceled))
				} else {
					// drain the max-heap back-to-front: res.Len() after Pop is
					// exactly the target index for ascending order
					resultIDs := make([]uint64, res.Len())
					for res.Len() > 0 {
						item := res.Pop()
						resultIDs[res.Len()] = item.ID
					}

					assert.Equal(t, ids, resultIDs)
				}
			})
		}
	}
}

// fakeCompressionDistancer stands in for a real compressor distancer in tests:
// DistanceToFloat applies distFn against the fixed query vector, and
// DistanceToNode is unused by rescore (which always goes through float
// vectors), hence the panic.
type fakeCompressionDistancer struct {
	queryVec []float32
	distFn   func(a, b []float32) float32
}

func newFakeCompressionDistancer(queryVec []float32, distFn func(a, b []float32) float32) *fakeCompressionDistancer {
	return &fakeCompressionDistancer{
		distFn:   distFn,
		queryVec: queryVec,
	}
}

func (f *fakeCompressionDistancer) DistanceToNode(id uint64) (float32, error) {
	panic("not implemented")
}

func (f *fakeCompressionDistancer) DistanceToFloat(vec []float32) (float32, error) {
	return f.distFn(f.queryVec, vec), nil
}
+// +// CONTACT: hello@weaviate.io +// + +package hnsw + +import ( + "context" + "fmt" + + "github.com/pkg/errors" + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/adapters/repos/db/priorityqueue" + "github.com/weaviate/weaviate/adapters/repos/db/vector/compressionhelpers" + "github.com/weaviate/weaviate/entities/storobj" +) + +func (h *hnsw) KnnSearchByVectorMaxDist(ctx context.Context, searchVec []float32, + dist float32, ef int, allowList helpers.AllowList, +) ([]uint64, error) { + entryPointID := h.entryPointID + var compressorDistancer compressionhelpers.CompressorDistancer + if h.compressed.Load() { + var returnFn compressionhelpers.ReturnDistancerFn + compressorDistancer, returnFn = h.compressor.NewDistancer(searchVec) + defer returnFn() + } + entryPointDistance, err := h.distToNode(compressorDistancer, entryPointID, searchVec) + var e storobj.ErrNotFound + if err != nil && errors.As(err, &e) { + h.handleDeletedNode(e.DocID, "KnnSearchByVectorMaxDist") + return nil, fmt.Errorf("entrypoint was deleted in the object store, " + + "it has been flagged for cleanup and should be fixed in the next cleanup cycle") + } + if err != nil { + return nil, errors.Wrap(err, "knn search: distance between entrypoint and query node") + } + + // stop at layer 1, not 0! 
+ for level := h.currentMaximumLayer; level >= 1; level-- { + eps := priorityqueue.NewMin[any](1) + eps.Insert(entryPointID, entryPointDistance) + // ignore allowList on layers > 0 + res, err := h.searchLayerByVectorWithDistancer(ctx, searchVec, eps, 1, level, nil, compressorDistancer) + if err != nil { + return nil, errors.Wrapf(err, "knn search: search layer at level %d", level) + } + if res.Len() > 0 { + best := res.Pop() + entryPointID = best.ID + entryPointDistance = best.Dist + } + + h.pools.pqResults.Put(res) + } + + eps := priorityqueue.NewMin[any](1) + eps.Insert(entryPointID, entryPointDistance) + res, err := h.searchLayerByVectorWithDistancer(ctx, searchVec, eps, ef, 0, allowList, compressorDistancer) + if err != nil { + return nil, errors.Wrapf(err, "knn search: search layer at level %d", 0) + } + + all := make([]priorityqueue.Item[any], res.Len()) + i := res.Len() - 1 + for res.Len() > 0 { + all[i] = res.Pop() + i-- + } + + out := make([]uint64, len(all)) + i = 0 + for _, elem := range all { + if elem.Dist > dist { + break + } + out[i] = elem.ID + i++ + } + + h.pools.pqResults.Put(res) + return out[:i], nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/startup.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/startup.go new file mode 100644 index 0000000000000000000000000000000000000000..f71d86c22410084e60dbe4fc6e6e813f7d4fa133 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/startup.go @@ -0,0 +1,481 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
// init prepares the index for use after construction: it allocates the shared
// pools, creates the commit logger for future writes, and replays the
// persisted state (snapshot + commit log) from disk.
func (h *hnsw) init(cfg Config) error {
	h.pools = newPools(h.maximumConnectionsLayerZero, h.visitedListPoolMaxSize)

	// init commit logger for future writes
	cl, err := cfg.MakeCommitLoggerThunk()
	if err != nil {
		return errors.Wrap(err, "create commit logger")
	}

	if err := h.restoreFromDisk(cl); err != nil {
		return errors.Wrapf(err, "restore hnsw index %q", cfg.ID)
	}
	h.commitLog = cl

	// report the vector_index_size at server startup.
	// otherwise on server restart, prometheus reports
	// a vector_index_size of 0 until more vectors are
	// added.
	h.metrics.SetSize(len(h.nodes))

	return nil
}

// if a commit log is already present it will be read into memory, if not we
// start with an empty model
//
// restoreFromDisk loads the most recent snapshot (unless snapshots are
// disabled), replays any newer commit log entries on top of it, installs the
// resulting graph/tombstones into the index, re-initializes the configured
// compressor (PQ/SQ/RQ/BRQ) from the persisted compression state, and sizes
// the caches and visited-list pool to the restored node count. Snapshot read
// failures are non-fatal: the index falls back to replaying the full log.
func (h *hnsw) restoreFromDisk(cl CommitLogger) error {
	beforeAll := time.Now()
	defer h.metrics.TrackStartupTotal(beforeAll)
	defer func() {
		h.logger.WithField("action", "restore_from_disk").
			WithField("duration", time.Since(beforeAll).String()).
			Info("restored data from disk")
	}()

	var state *DeserializationResult
	var stateTimestamp int64
	var err error

	if !h.disableSnapshots {
		if h.snapshotOnStartup {
			// This will opportunistically create a snapshot if it does not exist yet,
			// as we are loading state from disk. Otherwise, it simply loads
			// the last snapshot.
			state, stateTimestamp, err = cl.CreateAndLoadSnapshot()
		} else {
			state, stateTimestamp, err = cl.LoadSnapshot()
		}

		if err != nil {
			// errors reading snapshots are not fatal
			// we can still read the commit log from the beginning
			h.logger.
				WithError(err).
				WithField("action", "restore_from_disk").
				Error("failed to read last snapshot, loading from commit log")

			state = nil
			stateTimestamp = 0
		} else if state == nil {
			h.logger.
				WithField("action", "restore_from_disk").
				Info("no snapshot found, loading from commit log")
		}
	} else {
		h.logger.
			WithField("action", "restore_from_disk").
			Info("snapshots disabled, loading from commit log")
	}

	// only commit log files newer than the snapshot timestamp need replaying
	fileNames, err := getCommitFileNames(h.rootPath, h.id, stateTimestamp)
	if err != nil {
		return err
	}

	state, err = loadCommitLoggerState(h.logger, fileNames, state, h.metrics)
	if err != nil {
		return errors.Wrap(err, "load commit logger state")
	}

	if state == nil {
		// nothing to do
		return nil
	}

	// install the restored graph under both the index and the node locks
	h.Lock()
	h.shardedNodeLocks.LockAll()
	h.nodes = state.Nodes
	h.shardedNodeLocks.UnlockAll()

	h.currentMaximumLayer = int(state.Level)
	h.entryPointID = state.Entrypoint
	h.Unlock()

	h.tombstoneLock.Lock()
	h.tombstones = state.Tombstones
	h.tombstoneLock.Unlock()

	if h.multivector.Load() {
		if !h.muvera.Load() {
			// plain multivector mode: rebuild the docID -> vector-ids mapping
			if err := h.restoreDocMappings(); err != nil {
				return errors.Wrapf(err, "restore doc mappings %q", h.id)
			}
		} else if state.MuveraEnabled {
			h.trackMuveraOnce.Do(func() {
				h.muveraEncoder.LoadMuveraConfig(*state.EncoderMuvera)
			})
			h.muvera.Store(true)
		}
	}
	if state.Compressed {
		h.compressed.Store(state.Compressed)
		// the uncompressed vector cache is no longer needed
		h.cache.Drop()
		if state.CompressionPQData != nil {
			data := state.CompressionPQData
			h.dims = int32(data.Dimensions)

			// NOTE(review): if data.Encoders is empty, h.compressor stays nil
			// and the GrowCache call below would panic — presumably persisted
			// PQ state always carries encoders; confirm.
			if len(data.Encoders) > 0 {
				// 0 means it was created using the default value. The user did not set the value, we calculated for him/her
				if h.pqConfig.Segments == 0 {
					h.pqConfig.Segments = int(data.Dimensions)
				}
				if !h.multivector.Load() || h.muvera.Load() {
					h.compressor, err = compressionhelpers.RestoreHNSWPQCompressor(
						h.pqConfig,
						h.distancerProvider,
						int(data.Dimensions),
						// ToDo: we need to read this value from somewhere
						1e12,
						h.logger,
						data.Encoders,
						h.store,
						h.allocChecker,
					)
				} else {
					h.compressor, err = compressionhelpers.RestoreHNSWPQMultiCompressor(
						h.pqConfig,
						h.distancerProvider,
						int(data.Dimensions),
						1e12,
						h.logger,
						data.Encoders,
						h.store,
						h.allocChecker,
					)
				}
				if err != nil {
					return errors.Wrap(err, "Restoring compressed data.")
				}
			}
		} else if state.CompressionSQData != nil {
			data := state.CompressionSQData
			h.dims = int32(data.Dimensions)
			if !h.multivector.Load() || h.muvera.Load() {
				h.compressor, err = compressionhelpers.RestoreHNSWSQCompressor(
					h.distancerProvider,
					1e12,
					h.logger,
					data.A,
					data.B,
					data.Dimensions,
					h.store,
					h.allocChecker,
				)
			} else {
				h.compressor, err = compressionhelpers.RestoreHNSWSQMultiCompressor(
					h.distancerProvider,
					1e12,
					h.logger,
					data.A,
					data.B,
					data.Dimensions,
					h.store,
					h.allocChecker,
				)
			}
			if err != nil {
				return errors.Wrap(err, "Restoring compressed data.")
			}
		} else if state.CompressionRQData != nil {
			if err := h.restoreRotationalQuantization(state.CompressionRQData); err != nil {
				return errors.Wrap(err, "Restoring compressed data.")
			}
		} else if state.CompressionBRQData != nil {
			if err := h.restoreBinaryRotationalQuantization(state.CompressionBRQData); err != nil {
				return errors.Wrap(err, "Restoring compressed data.")
			}
		} else {
			return errors.New("unsupported type while loading compression data")
		}
		// make sure the compressed cache fits the current size
		h.compressor.GrowCache(uint64(len(h.nodes)))
	} else if !h.compressed.Load() {
		// make sure the cache fits the current size
		h.cache.Grow(uint64(len(h.nodes)))

		if h.multivector.Load() && !h.muvera.Load() {
			h.populateKeys()
		}
		// derive the dimensionality from the entrypoint's vector, best-effort
		if len(h.nodes) > 0 {
			if vec, err := h.vectorForID(context.Background(), h.entryPointID); err == nil {
				h.dims = int32(len(vec))
			}
		}
	} else {
		// compression was enabled before this restore: just resize its cache
		h.compressor.GrowCache(uint64(len(h.nodes)))
	}

	if h.compressed.Load() && h.multivector.Load() && !h.muvera.Load() {
		h.compressor.GrowCache(uint64(len(h.nodes)))
		h.populateKeys()
	}

	h.resetTombstoneMetric()

	// make sure the visited list pool fits the current size
	h.pools.visitedLists.Destroy()
	h.pools.visitedLists = nil
	h.pools.visitedLists = visited.NewPool(1, len(h.nodes)+512, h.visitedListPoolMaxSize)

	return nil
}

// restoreRotationalQuantization re-creates the RQ compressor from persisted
// rotation data. The trackRQOnce guard ensures the compressor is only built
// once even if restore runs again; err is captured from inside the Once
// closure and returned to the caller.
func (h *hnsw) restoreRotationalQuantization(data *compressionhelpers.RQData) error {
	var err error
	if !h.multivector.Load() || h.muvera.Load() {
		h.trackRQOnce.Do(func() {
			h.compressor, err = compressionhelpers.RestoreRQCompressor(
				h.distancerProvider,
				1e12,
				h.logger,
				int(data.InputDim),
				int(data.Bits),
				int(data.Rotation.OutputDim),
				int(data.Rotation.Rounds),
				data.Rotation.Swaps,
				data.Rotation.Signs,
				nil,
				h.store,
				h.allocChecker,
			)
		})
	} else {
		h.trackRQOnce.Do(func() {
			h.compressor, err = compressionhelpers.RestoreRQMultiCompressor(
				h.distancerProvider,
				1e12,
				h.logger,
				int(data.InputDim),
				int(data.Bits),
				int(data.Rotation.OutputDim),
				int(data.Rotation.Rounds),
				data.Rotation.Swaps,
				data.Rotation.Signs,
				nil,
				h.store,
				h.allocChecker,
			)
		})
	}

	return err
}
+ data.Rounding, + h.store, + h.allocChecker, + ) + }) + } else { + h.trackRQOnce.Do(func() { + h.compressor, err = compressionhelpers.RestoreRQMultiCompressor( + h.distancerProvider, + 1e12, + h.logger, + int(data.InputDim), + 1, + int(data.Rotation.OutputDim), + int(data.Rotation.Rounds), + data.Rotation.Swaps, + data.Rotation.Signs, + data.Rounding, + h.store, + h.allocChecker, + ) + }) + } + return err +} + +func (h *hnsw) restoreDocMappings() error { + prevDocID := uint64(0) + relativeID := uint64(0) + maxNodeID := uint64(0) + maxDocID := uint64(0) + buf := make([]byte, 8) + for _, node := range h.nodes { + if node == nil { + continue + } + binary.BigEndian.PutUint64(buf, node.id) + docIDBytes, err := h.store.Bucket(h.id + "_mv_mappings").Get(buf) + if err != nil { + return errors.Wrap(err, fmt.Sprintf("failed to get %s_mv_mappings from the bucket", h.id)) + } + docID := binary.BigEndian.Uint64(docIDBytes) + if docID != prevDocID { + relativeID = 0 + prevDocID = docID + } + h.Lock() + h.docIDVectors[docID] = append(h.docIDVectors[docID], node.id) + h.Unlock() + relativeID++ + if node.id > maxNodeID { + maxNodeID = node.id + } + if docID > maxDocID { + maxDocID = docID + } + } + h.Lock() + h.vecIDcounter = maxNodeID + 1 + h.maxDocID = maxDocID + h.Unlock() + return nil +} + +func (h *hnsw) populateKeys() { + for docID, nodeIDs := range h.docIDVectors { + for relativeID, nodeID := range nodeIDs { + if h.compressed.Load() { + h.compressor.SetKeys(nodeID, docID, uint64(relativeID)) + } else { + h.cache.SetKeys(nodeID, docID, uint64(relativeID)) + } + } + } +} + +func (h *hnsw) tombstoneCleanup(shouldAbort cyclemanager.ShouldAbortCallback) bool { + if h.allocChecker != nil { + // allocChecker is optional, we can only check if it was actually set + + // It's hard to estimate how much memory we'd need to do a successful + // hnsw delete cleanup. The value below is probalby vastly overstated. 
+ // However, without a doubt, delete cleanup could lead to temporary + // memory increases, either because it loads vectors into cache or + // because it rewrites connections in a way that they could need more + // memory than before. Either way, it's probably a good idea not to + // start a cleanup cycle if we are already this close to running out of + // memory. + memoryNeeded := int64(100 * 1024 * 1024) + + if err := h.allocChecker.CheckAlloc(memoryNeeded); err != nil { + h.logger.WithFields(logrus.Fields{ + "action": "hnsw_tombstone_cleanup", + "event": "cleanup_skipped_oom", + "class": h.className, + }).WithError(err). + Warnf("skipping hnsw cleanup due to memory pressure") + return false + } + } + executed, err := h.cleanUpTombstonedNodes(shouldAbort) + if err != nil { + h.logger.WithField("action", "hnsw_tombstone_cleanup"). + WithError(err).Error("tombstone cleanup errord") + } + return executed +} + +// The vector_index_tombstones metric is represented as a counter so on +// restart we need to reset it to the current number of tombstones read from +// the commit log. +func (h *hnsw) resetTombstoneMetric() { + h.tombstoneLock.Lock() + defer h.tombstoneLock.Unlock() + if len(h.tombstones) > 0 { + h.metrics.SetTombstone(len(h.tombstones)) + } +} + +// PostStartup triggers routines that should happen after startup. The startup +// process is triggered during the creation which in turn happens as part of +// the shard creation. Some post-startup routines, such as prefilling the +// vector cache, however, depend on the shard being ready as they will call +// getVectorForID. 
+func (h *hnsw) PostStartup() { + h.commitLog.InitMaintenance() + h.prefillCache() +} + +func (h *hnsw) prefillCache() { + limit := 0 + if h.compressed.Load() { + limit = int(h.compressor.GetCacheMaxSize()) + } else { + limit = int(h.cache.CopyMaxSize()) + } + + f := func() { + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Minute) + defer cancel() + + h.logger.WithFields(logrus.Fields{ + "action": "prefill_cache", + "duration": 60 * time.Minute, + }).Debug("context.WithTimeout") + + var err error + if h.compressed.Load() { + if !h.multivector.Load() || h.muvera.Load() { + h.compressor.PrefillCache() + } else { + h.compressor.PrefillMultiCache(h.docIDVectors) + } + } else { + err = newVectorCachePrefiller(h.cache, h, h.logger).Prefill(ctx, limit) + } + + if err != nil { + h.logger.WithError(err).Error("prefill vector cache") + } + } + + if h.waitForCachePrefill { + h.logger.WithFields(logrus.Fields{ + "action": "hnsw_prefill_cache_sync", + "wait_for_cache_prefill": true, + }).Info("waiting for vector cache prefill to complete") + f() + } else { + h.logger.WithFields(logrus.Fields{ + "action": "hnsw_prefill_cache_async", + "wait_for_cache_prefill": false, + }).Info("not waiting for vector cache prefill, running in background") + enterrors.GoWrapper(f, h.logger) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/test_recall_hnswlib.py b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/test_recall_hnswlib.py new file mode 100644 index 0000000000000000000000000000000000000000..1e6309a99c0ed428820d4430ff890995773faa88 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/test_recall_hnswlib.py @@ -0,0 +1,50 @@ +import hnswlib +import numpy as np +import time +import json + +data=None +queries=None +truths=None + +with open("recall_vectors.json", 'r') as f: + data = json.load(f) + +with open("recall_queries.json", 'r') as f: + queries = json.load(f) + +with 
open("recall_truths.json", 'r') as f: + truths = json.load(f) + +num_elements = len(data) +dim = len(data[0]) +data_labels = np.arange(num_elements) + +# Declaring index +p = hnswlib.Index(space = 'cosine', dim = dim) # possible options are l2, cosine or ip + +# Initializing index - the maximum number of elements should be known beforehand +p.init_index(max_elements = num_elements, ef_construction = 2000, M = 100) + +before = time.time() +# Element insertion (can be called several times): +p.add_items(data, data_labels) +print("import took {}".format(time.time() - before)) + +# Controlling the recall by setting ef: +p.set_ef(100) # ef should always be > k + +# Query dataset, k - number of closest elements (returns 2 numpy arrays) +results, distances = p.knn_query(queries, k = 1) + +relevant=0 +retrieved=0 + +for i, res in enumerate(results): + retrieved+=1 + + # take elem 0 because k==1 + if res[0] == truths[i][0]: + relevant+=1 + +print("Recall: {}".format(relevant/retrieved)) diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/unreachability_integration_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/unreachability_integration_test.go new file mode 100644 index 0000000000000000000000000000000000000000..1d3ea4a78a5316304b4b237cf0fa2d04e44c0870 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/unreachability_integration_test.go @@ -0,0 +1,175 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest +// +build integrationTest + +package hnsw + +import ( + "context" + "fmt" + "runtime" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/packedconn" + "github.com/weaviate/weaviate/entities/cyclemanager" + ent "github.com/weaviate/weaviate/entities/vectorindex/hnsw" +) + +func TestUnreachablePoints(t *testing.T) { + ctx := context.Background() + var vectors [][]float32 + var vectorIndex *hnsw + + t.Run("generate vectors", func(t *testing.T) { + vectors = [][]float32{ + {0, 0, 0}, + {1, 1, 1}, + {-1, -1, -1}, + {-5, -5, -5}, + {5, 5, 5}, + } + }) + + t.Run("importing into hnsw", func(t *testing.T) { + fmt.Printf("importing into hnsw\n") + + index, err := New(Config{ + RootPath: "doesnt-matter-as-committlogger-is-mocked-out", + ID: "recallbenchmark", + MakeCommitLoggerThunk: MakeNoopCommitLogger, + DistanceProvider: distancer.NewCosineDistanceProvider(), + VectorForIDThunk: func(ctx context.Context, id uint64) ([]float32, error) { + return vectors[int(id)], nil + }, + }, ent.UserConfig{ + MaxConnections: 0, + EFConstruction: 0, + EF: 0, + }, cyclemanager.NewCallbackGroupNoop(), nil) + require.Nil(t, err) + vectorIndex = index + groundtruth := [][]uint64{{}, {2}, {}, {1, 2, 3, 4}, {}} + + workerCount := runtime.GOMAXPROCS(0) + jobsForWorker := make([][][]float32, workerCount) + + before := time.Now() + for i, vec := range vectors { + workerID := i % workerCount + jobsForWorker[workerID] = append(jobsForWorker[workerID], vec) + } + + wg := &sync.WaitGroup{} + for workerID, jobs := range jobsForWorker { + wg.Add(1) + go func(workerID int, myJobs [][]float32) { + defer wg.Done() + for i, vec := range myJobs { + originalIndex := (i * workerCount) + workerID + err := vectorIndex.Add(ctx, uint64(originalIndex), 
vec) + require.Nil(t, err) + } + }(workerID, jobs) + } + + wg.Wait() + fmt.Printf("importing took %s\n", time.Since(before)) + + for i := 1; i <= 5; i++ { + vectorIndex.generateGraphConnections(i) + res := vectorIndex.calculateUnreachablePoints() + assert.Equal(t, groundtruth[i-1], res) + vectorIndex.cleanConnections() + } + }) +} + +func (h *hnsw) generateGraphConnections(testCase int) { + switch testCase { + case 1: + h.entryPointID = 0 + h.currentMaximumLayer = 1 + // Node 0 + h.nodes[0].upgradeToLevelNoLock(1) + h.nodes[0].setConnectionsAtLevel(1, []uint64{1, 2}) + // Node 1 + h.nodes[1].upgradeToLevelNoLock(1) + h.nodes[1].setConnectionsAtLevel(1, []uint64{3}) + // Node 2 + h.nodes[2].upgradeToLevelNoLock(1) + h.nodes[2].setConnectionsAtLevel(1, []uint64{4}) + // Node 3 + h.nodes[3].upgradeToLevelNoLock(1) + // Node 4 + h.nodes[4].upgradeToLevelNoLock(1) + case 2: + h.entryPointID = 0 + h.currentMaximumLayer = 1 + // Node 0 + h.nodes[0].upgradeToLevelNoLock(1) + h.nodes[0].setConnectionsAtLevel(0, []uint64{1}) + // Node 1 + h.nodes[1].upgradeToLevelNoLock(1) + h.nodes[1].setConnectionsAtLevel(0, []uint64{3}) + h.nodes[1].setConnectionsAtLevel(1, []uint64{2}) + // Node 2 + h.nodes[2].upgradeToLevelNoLock(1) + // Node 3 + h.nodes[3].setConnectionsAtLevel(0, []uint64{4}) + case 3: + h.entryPointID = 0 + h.currentMaximumLayer = 1 + // Node 0 + h.nodes[0].upgradeToLevelNoLock(1) + h.nodes[0].setConnectionsAtLevel(0, []uint64{1}) + // Node 1 + h.nodes[1].setConnectionsAtLevel(0, []uint64{2}) + // Node 2 + h.nodes[2].setConnectionsAtLevel(0, []uint64{3}) + // Node 3 + h.nodes[3].setConnectionsAtLevel(0, []uint64{4}) + case 4: + h.entryPointID = 0 + h.currentMaximumLayer = 2 + // Node 0 + h.nodes[0].upgradeToLevelNoLock(2) + case 5: + h.entryPointID = 0 + h.currentMaximumLayer = 1 + // Node 0 + h.nodes[0].upgradeToLevelNoLock(1) + h.nodes[0].setConnectionsAtLevel(1, []uint64{1, 2}) + // Node 1 + h.nodes[1].upgradeToLevelNoLock(1) + h.nodes[1].setConnectionsAtLevel(0, 
[]uint64{3}) + // Node 2 + h.nodes[2].upgradeToLevelNoLock(1) + // Node 3 + h.nodes[3].setConnectionsAtLevel(0, []uint64{4}) + } +} + +func (h *hnsw) cleanConnections() { + for i := 0; i < len(h.nodes); i++ { + if h.nodes[i] == nil { + continue + } + h.nodes[i].connections, _ = packedconn.NewWithMaxLayer(1) + h.nodes[i].level = 0 + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/vector_cache_prefiller.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/vector_cache_prefiller.go new file mode 100644 index 0000000000000000000000000000000000000000..095d37f35350313af135f055f331ad6272ecd74e --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/vector_cache_prefiller.go @@ -0,0 +1,131 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package hnsw + +import ( + "context" + "time" + + "github.com/sirupsen/logrus" + "github.com/weaviate/weaviate/adapters/repos/db/vector/cache" +) + +type vectorCachePrefiller[T any] struct { + cache cache.Cache[T] + index *hnsw + logger logrus.FieldLogger +} + +func newVectorCachePrefiller[T any](cache cache.Cache[T], index *hnsw, + logger logrus.FieldLogger, +) *vectorCachePrefiller[T] { + return &vectorCachePrefiller[T]{ + cache: cache, + index: index, + logger: logger, + } +} + +func (pf *vectorCachePrefiller[T]) Prefill(ctx context.Context, limit int) error { + before := time.Now() + for level := pf.maxLevel(); level >= 0; level-- { + ok, err := pf.prefillLevel(ctx, level, limit) + if err != nil { + return err + } + + if !ok { + break + } + } + + pf.logTotal(int(pf.cache.Len()), limit, before) + return nil +} + +// returns false if the max has been reached, true otherwise +func (pf *vectorCachePrefiller[T]) prefillLevel(ctx context.Context, + level, limit int, +) (bool, error) { + before := time.Now() + layerCount := 0 + + pf.index.Lock() + nodesLen := len(pf.index.nodes) + pf.index.Unlock() + + for i := 0; i < nodesLen; i++ { + if int(pf.cache.Len()) >= limit { + break + } + + if err := ctx.Err(); err != nil { + return false, err + } + + pf.index.shardedNodeLocks.RLock(uint64(i)) + node := pf.index.nodes[i] + pf.index.shardedNodeLocks.RUnlock(uint64(i)) + + if node == nil { + continue + } + + if levelOfNode(node) != level { + continue + } + + // we are not really interested in the result, we just want to populate the + // cache + pf.index.Lock() + pf.cache.Get(ctx, uint64(i)) + layerCount++ + pf.index.Unlock() + } + + pf.logLevel(level, layerCount, before) + return true, nil +} + +func (pf *vectorCachePrefiller[T]) logLevel(level, count int, before time.Time) { + pf.logger.WithFields(logrus.Fields{ + "action": "hnsw_vector_cache_prefill_level", + "hnsw_level": level, + "count": count, + "took": time.Since(before), + 
"index_id": pf.index.id, + }).Debug("prefilled level in vector cache") +} + +func (pf *vectorCachePrefiller[T]) logTotal(count, limit int, before time.Time) { + pf.logger.WithFields(logrus.Fields{ + "action": "hnsw_vector_cache_prefill", + "limit": limit, + "count": count, + "took": time.Since(before), + "index_id": pf.index.id, + }).Info("prefilled vector cache") +} + +func levelOfNode(node *vertex) int { + node.Lock() + defer node.Unlock() + + return node.level +} + +func (pf *vectorCachePrefiller[T]) maxLevel() int { + pf.index.Lock() + defer pf.index.Unlock() + + return pf.index.currentMaximumLayer +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/vector_cache_prefiller_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/vector_cache_prefiller_test.go new file mode 100644 index 0000000000000000000000000000000000000000..999b839be808eb148edabf4776a13e5a84631289 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/vector_cache_prefiller_test.go @@ -0,0 +1,219 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package hnsw + +import ( + "context" + "testing" + + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/weaviate/weaviate/adapters/repos/db/vector/common" +) + +func TestVectorCachePrefilling(t *testing.T) { + cache := newFakeCache() + index := &hnsw{ + nodes: generateDummyVertices(100), + currentMaximumLayer: 3, + shardedNodeLocks: common.NewDefaultShardedRWLocks(), + } + + logger, _ := test.NewNullLogger() + + pf := newVectorCachePrefiller[float32](cache, index, logger) + + t.Run("prefill with limit >= graph size", func(t *testing.T) { + cache.Reset() + pf.Prefill(context.Background(), 100) + assert.Equal(t, allNumbersUpTo(100), cache.store) + }) + + t.Run("prefill with small limit so only the upper layer fits", func(t *testing.T) { + cache.Reset() + pf.Prefill(context.Background(), 7) + assert.Equal(t, map[uint64]struct{}{ + 0: {}, + 15: {}, + 30: {}, + 45: {}, + 60: {}, + 75: {}, + 90: {}, + }, cache.store) + }) + + t.Run("limit where a layer partially fits", func(t *testing.T) { + cache.Reset() + pf.Prefill(context.Background(), 10) + assert.Equal(t, map[uint64]struct{}{ + // layer 3 + 0: {}, + 15: {}, + 30: {}, + 45: {}, + 60: {}, + 75: {}, + 90: {}, + + // additional layer 2 + 5: {}, + 10: {}, + 20: {}, + }, cache.store) + }) +} + +func newFakeCache() *fakeCache { + return &fakeCache{ + store: map[uint64]struct{}{}, + } +} + +type fakeCache struct { + store map[uint64]struct{} +} + +func (f *fakeCache) MultiGet(ctx context.Context, id []uint64) ([][]float32, []error) { + panic("not implemented") +} + +func (f *fakeCache) GetAllInCurrentLock(ctx context.Context, id uint64, out [][]float32, errs []error) ([][]float32, []error, uint64, uint64) { + panic("not implemented") +} + +func (f *fakeCache) PageSize() uint64 { + return 1 +} + +func (f *fakeCache) Get(ctx context.Context, id uint64) ([]float32, error) { + f.store[id] = struct{}{} + return nil, nil +} + +func (f *fakeCache) 
Delete(ctx context.Context, id uint64) { + panic("not implemented") +} + +func (f *fakeCache) Preload(id uint64, vec []float32) { + panic("not implemented") +} + +func (f *fakeCache) PreloadNoLock(id uint64, vec []float32) { + panic("not implemented") +} + +func (f *fakeCache) Prefetch(id uint64) { + panic("not implemented") +} + +func (f *fakeCache) Grow(id uint64) { + panic("not implemented") +} + +func (f *fakeCache) SetSizeAndGrowNoLock(id uint64) { + panic("not implemented") +} + +func (f *fakeCache) LockAll() { panic("not implemented") } + +func (f *fakeCache) UnlockAll() { panic("not implemented") } + +func (f *fakeCache) UpdateMaxSize(size int64) { + panic("not implemented") +} + +func (f *fakeCache) All() [][]float32 { + panic("not implemented") +} + +func (f *fakeCache) Drop() { + panic("not implemented") +} + +func (f *fakeCache) CopyMaxSize() int64 { + return 1e6 +} + +func (f *fakeCache) Reset() { + f.store = map[uint64]struct{}{} +} + +func (f *fakeCache) Len() int32 { + return int32(len(f.store)) +} + +func (f *fakeCache) CountVectors() int64 { + panic("not implemented") +} + +func (f *fakeCache) GetKeys(id uint64) (uint64, uint64) { + panic("not implemented") +} + +func (f *fakeCache) SetKeys(id uint64, docID uint64, relativeID uint64) { + panic("not implemented") +} + +func (f *fakeCache) PreloadMulti(docID uint64, ids []uint64, vecs [][]float32) { + panic("not implemented") +} + +func (f *fakeCache) PreloadPassage(id uint64, docID uint64, relativeID uint64, vec []float32) { + panic("not implemented") +} + +func (f *fakeCache) GetDoc(ctx context.Context, docID uint64) ([][]float32, error) { + panic("not implemented") +} + +func generateDummyVertices(amount int) []*vertex { + out := make([]*vertex, amount) + for i := range out { + out[i] = &vertex{ + id: uint64(i), + level: levelForDummyVertex(i), + } + } + + return out +} + +// maximum of 3 layers +// if id % 15 == 0 -> layer 3 +// if id % 5 == 0 -> layer 2 +// if id % 3 == 0 -> layer 1 +// 
remainder -> layer 0 +func levelForDummyVertex(id int) int { + if id%15 == 0 { + return 3 + } + + if id%5 == 0 { + return 2 + } + + if id%3 == 0 { + return 1 + } + + return 0 +} + +func allNumbersUpTo(size int) map[uint64]struct{} { + out := map[uint64]struct{}{} + for i := 0; i < size; i++ { + out[uint64(i)] = struct{}{} + } + + return out +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/vectors_for_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/vectors_for_test.go new file mode 100644 index 0000000000000000000000000000000000000000..cd8b54618cc4a703f1656a5de52a8400f0eac2a8 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/vectors_for_test.go @@ -0,0 +1,85 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package hnsw + +import "context" + +// roughly grouped into three clusters of three +var testVectors = [][]float32{ + {0.1, 0.9}, + {0.15, 0.8}, + {0.13, 0.65}, + + {0.6, 0.1}, + {0.63, 0.2}, + {0.65, 0.08}, + + {0.8, 0.8}, + {0.9, 0.75}, + {0.8, 0.7}, +} + +func testVectorForID(ctx context.Context, id uint64) ([]float32, error) { + return testVectors[int(id)], nil +} + +var testMultiVectors = [][][]float32{ + { + {0.1, 0.9}, + {0.15, 0.8}, + {0.13, 0.65}, + }, + { + {0.6, 0.1}, + {0.63, 0.2}, + {0.65, 0.08}, + }, + { + {0.8, 0.8}, + {0.9, 0.75}, + {0.8, 0.7}, + }, + { + {0.25, 0.45}, + {0.28, 0.42}, + {0.22, 0.48}, + }, + { + {0.35, 0.35}, + {0.38, 0.32}, + {0.33, 0.37}, + }, + { + {0.55, 0.55}, + {0.58, 0.52}, + {0.53, 0.57}, + }, + { + {0.7, 0.3}, + {0.73, 0.28}, + {0.68, 0.32}, + }, + { + {0.4, 0.85}, + {0.43, 0.82}, + {0.38, 0.87}, + }, + { + {0.95, 0.15}, + {0.92, 0.18}, + {0.97, 0.12}, + }, +} + +func testMultiVectorForID(ctx context.Context, id uint64) ([][]float32, error) { + return testMultiVectors[int(id)], nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/vertex.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/vertex.go new file mode 100644 index 0000000000000000000000000000000000000000..8d37333354cafc882f72b97c33f0bc761ba97991 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/vertex.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package hnsw + +import ( + "sync" + + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/packedconn" +) + +type vertex struct { + id uint64 + sync.Mutex + connections *packedconn.Connections + level int + maintenance bool +} + +func (v *vertex) markAsMaintenance() { + v.Lock() + v.maintenance = true + v.Unlock() +} + +func (v *vertex) unmarkAsMaintenance() { + v.Lock() + v.maintenance = false + v.Unlock() +} + +func (v *vertex) isUnderMaintenance() bool { + v.Lock() + m := v.maintenance + v.Unlock() + return m +} + +func (v *vertex) connectionsAtLevelNoLock(level int) []uint64 { + return v.connections.GetLayer(uint8(level)) +} + +func (v *vertex) upgradeToLevelNoLock(level int) { + v.level = level + v.connections.GrowLayersTo(uint8(level)) +} + +func (v *vertex) setConnectionsAtLevel(level int, connections []uint64) { + v.Lock() + defer v.Unlock() + v.connections.ReplaceLayer(uint8(level), connections) +} + +func (v *vertex) appendConnectionAtLevelNoLock(level int, connection uint64, maxConns int) { + v.connections.InsertAtLayer(connection, uint8(level)) +} + +func (v *vertex) appendConnectionsAtLevelNoLock(level int, connections []uint64, maxConns int) { + v.connections.BulkInsertAtLayer(connections, uint8(level)) +} + +func (v *vertex) resetConnectionsAtLevelNoLock(level int) { + v.connections.ReplaceLayer(uint8(level), []uint64{}) +} + +func (v *vertex) connectionsAtLowerLevelsNoLock(level int, visitedNodes map[nodeLevel]bool) []nodeLevel { + connections := make([]nodeLevel, 0) + for i := level; i >= 0; i-- { + for _, nodeId := range v.connections.GetLayer(uint8(i)) { + if !visitedNodes[nodeLevel{nodeId, i}] { + connections = append(connections, nodeLevel{nodeId, i}) + } + } + } + return connections +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/vertex_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/vertex_test.go new file mode 100644 index 
0000000000000000000000000000000000000000..8c4924be9cd7de05a61f8fac5faa030b1b71ec92 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/vertex_test.go @@ -0,0 +1,212 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package hnsw + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/packedconn" +) + +func TestVertex_SetConnections(t *testing.T) { + type test struct { + name string + initial []uint64 + updated []uint64 + expectedCap int + } + + tests := []test{ + { + name: "no connections set before", + initial: nil, + updated: makeConnections(7, 7), + expectedCap: 7, + }, + { + name: "connections had a slightly higher cap before", + initial: makeConnections(24, 24), + updated: makeConnections(22, 22), + // we don't expect any downsizing, since it's a small diff + expectedCap: 24, + }, + { + name: "connections had a considerably higher cap before", + initial: makeConnections(24, 24), + updated: makeConnections(10, 10), + // large diff, we expect downsizing + expectedCap: 10, + }, + { + name: "connections had a lower cap before", + initial: makeConnections(10, 10), + updated: makeConnections(24, 24), + expectedCap: 24, + }, + { + name: "connections had the same length and cap", + initial: makeConnections(13, 13), + updated: makeConnections(13, 13), + expectedCap: 13, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + connections, _ := packedconn.NewWithMaxLayer(0) + v := &vertex{ + connections: connections, + } + v.connections.ReplaceLayer(0, tc.initial) + + v.setConnectionsAtLevel(0, tc.updated) + + assert.Equal(t, tc.updated, v.connections.GetLayer(0)) + }) + } +} + +func 
TestVertex_AppendConnection(t *testing.T) { + type test struct { + name string + initial []uint64 + expectedCap int + } + + tests := []test{ + { + name: "no connections set before, expect 1/4 of max", + initial: nil, + }, + { + name: "less than 1/4, expect 1/4 of max", + initial: makeConnections(15, 15), + }, + { + name: "less than 1/2, expect 1/2 of max", + initial: makeConnections(31, 31), + }, + { + name: "less than 3/4, expect 3/4 of max", + initial: makeConnections(42, 42), + }, + { + name: "more than 3/4, expect full size", + initial: makeConnections(53, 53), + expectedCap: 64, + }, + { + name: "enough capacity to not require growing", + initial: makeConnections(17, 53), + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + connections, _ := packedconn.NewWithMaxLayer(0) + v := &vertex{ + connections: connections, + } + v.connections.ReplaceLayer(0, tc.initial) + + v.appendConnectionAtLevelNoLock(0, 18, 64) + + newConns := make([]uint64, len(tc.initial)+1) + copy(newConns, tc.initial) + newConns[len(newConns)-1] = 18 + + assert.ElementsMatch(t, newConns, v.connectionsAtLevelNoLock(0)) + }) + } +} + +func TestVertex_AppendConnection_NotCleanlyDivisible(t *testing.T) { + type test struct { + name string + initial []uint64 + } + + tests := []test{ + { + name: "no connections set before, expect 1/4 of max", + initial: nil, + }, + { + name: "less than 1/4, expect 1/4 of max", + initial: makeConnections(15, 15), + }, + { + name: "less than 1/2, expect 1/2 of max", + initial: makeConnections(31, 31), + }, + { + name: "less than 3/4, expect 3/4 of max", + initial: makeConnections(42, 42), + }, + { + name: "more than 3/4, expect full size", + initial: makeConnections(53, 53), + }, + { + name: "enough capacity to not require growing", + initial: makeConnections(17, 53), + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + connections, _ := packedconn.NewWithMaxLayer(1) + v := &vertex{ + connections: connections, + } 
+ v.connections.ReplaceLayer(0, tc.initial) + + v.appendConnectionAtLevelNoLock(0, 18, 63) + + newConns := make([]uint64, len(tc.initial)+1) + copy(newConns, tc.initial) + newConns[len(newConns)-1] = 18 + + assert.ElementsMatch(t, newConns, v.connectionsAtLevelNoLock(0)) + }) + } +} + +func TestVertex_ResetConnections(t *testing.T) { + connections, _ := packedconn.NewWithMaxLayer(1) + v := &vertex{ + connections: connections, + } + v.connections.ReplaceLayer(0, makeConnections(4, 4)) + + v.resetConnectionsAtLevelNoLock(0) + assert.Equal(t, 0, len(v.connections.GetLayer(0))) +} + +func makeConnections(length, capacity int) []uint64 { + out := make([]uint64, length, capacity) + for i := 0; i < length; i++ { + out[i] = uint64(i) + } + return out +} + +func TestVertex_Maintenance(t *testing.T) { + v := &vertex{} + + assert.False(t, v.isUnderMaintenance()) + v.markAsMaintenance() + assert.True(t, v.isUnderMaintenance()) + v.unmarkAsMaintenance() + assert.False(t, v.isUnderMaintenance()) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/visited/list_set.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/visited/list_set.go new file mode 100644 index 0000000000000000000000000000000000000000..5447dbc3db67a6db2f2514ffd3d26e2308923b81 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/visited/list_set.go @@ -0,0 +1,93 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package visited + +// ListSet is a reusable list with very efficient resets. Inspired by the C++ +// implementation in hnswlib it can be reset with zero memory writes in the +// array by moving the match target instead of altering the list. 
// Only after a version overflow do we need to actually zero the array.
//
// Layout: the slice's first element (set[0]) is reserved for the marker
// (version), so element i lives at set[i+1]. Keeping the marker inline lets
// ListSet be used as a value (no pointer required). An element counts as
// visited exactly when it equals the current marker, which makes a reset a
// single marker increment in the common case.
type ListSet struct {
	set []uint8 // set[0] holds the marker; element i lives at set[i+1]
}

// Len reports how many elements the list can hold (marker slot excluded).
func (l ListSet) Len() int { return len(l.set) - 1 }

// free drops the backing slice. The list must not be reused after this call.
func (l *ListSet) free() { l.set = nil }

// NewList allocates a list able to hold size elements plus the marker slot.
func NewList(size int) ListSet {
	backing := make([]uint8, size+1)
	// The marker starts at 1: after a full reset every element is 0, so no
	// element can accidentally equal a fresh marker.
	backing[0] = 1
	return ListSet{set: backing}
}

// Visit marks node as visited, growing the list first if node is out of range.
func (l *ListSet) Visit(node uint64) {
	if int(node) >= l.Len() {
		// Grow with generous headroom so visiting slightly larger ids in a
		// row does not trigger a reallocation each time.
		grown := make([]uint8, growth(len(l.set), int(node)+1024))
		copy(grown, l.set)
		l.set = grown
	}
	l.set[node+1] = l.set[0]
}

// Visited reports whether node was visited since the last reset. Out-of-range
// nodes are simply not visited.
func (l *ListSet) Visited(node uint64) bool {
	return int(node) < l.Len() && l.set[node+1] == l.set[0]
}

// Reset invalidates all entries by bumping the marker. Only when the marker
// overflows do we pay for actually zeroing the array.
func (l *ListSet) Reset() {
	l.set[0]++
	if l.set[0] != 0 {
		return
	}
	// Marker overflowed: stale entries could now collide with future marker
	// values, so wipe everything and restart counting at 1.
	for i := range l.set {
		l.set[i] = 0
	}
	l.set[0] = 1
}

// threshold is the size below which a growing list simply doubles.
const threshold = 2048

// growth returns the new size for a list of oldsize that must hold at least
// size elements, growing smoothly.
//
// Inspired by the Go standard library's append growth strategy.
func growth(oldsize, size int) int {
	doubled := oldsize << 1
	switch {
	case size > doubled:
		return size // requested more than 2x: jump straight to the request
	case oldsize < threshold:
		return doubled // small slices grow by 2x
	}
	// Large slices grow by roughly 1.25x per step until big enough. The
	// newsize > 0 guard detects signed overflow and prevents an infinite
	// loop.
	newsize := oldsize
	for newsize > 0 && newsize < size {
		newsize += (newsize + threshold) / 4
	}
	if newsize <= 0 {
		// Overflowed: fall back to the exact requested size.
		newsize = size
	}
	return newsize
}
+// +// CONTACT: hello@weaviate.io +// + +package visited + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestVisitedList(t *testing.T) { + t.Run("creating a new list, filling it and checking against it", func(t *testing.T) { + l := NewList(1000) + + l.Visit(7) + l.Visit(38) + l.Visit(999) + + assert.True(t, l.Visited(7), "visited node should be marked visited") + assert.True(t, l.Visited(38), "visited node should be marked visited") + assert.True(t, l.Visited(999), "visited node should be marked visited") + assert.False(t, l.Visited(6), "unvisited node should NOT be marked visited") + assert.False(t, l.Visited(37), "unvisited node should NOT be marked visited") + assert.False(t, l.Visited(998), "unvisited node should NOT be marked visited") + }) + + t.Run("reusing a list it is not affected by past entries", func(t *testing.T) { + l := NewList(1000) + + l.Visit(7) + l.Visit(38) + l.Visit(999) + + l.Reset() + + l.Visit(6) + l.Visit(37) + l.Visit(998) + + assert.False(t, l.Visited(7), "an entry before the reset has no influence") + assert.False(t, l.Visited(38), "an entry before the reset has no influence") + assert.False(t, l.Visited(999), "an entry before the reset has no influence") + assert.False(t, l.Visited(20), "an entry never visited is not visited") + assert.True(t, l.Visited(6), "a node visited in this round is marked as such") + assert.True(t, l.Visited(37), "a node visited in this round is marked as such") + assert.True(t, l.Visited(998), "a node visited in this round is marked as such") + }) + + t.Run("it creates no false positives after a version overflow (v=1)", func(t *testing.T) { + l := NewList(1000) + + for i := 0; i < 255; i++ { + l.Reset() + } + + // verify the test is correct and we are indeed at the version we think we are + assert.Equal(t, uint8(1), l.set[0]) + + // verify there are zero visited nodes + for i := uint64(0); i < 1000; i++ { + assert.False(t, l.Visited(i), "node should not be visited") + } + }) + + 
t.Run("it creates no false positives after a version overflow (v=1)", func(t *testing.T) { + l := NewList(1000) + + // mark every node as visited in version==1 + for i := uint64(0); i < 1000; i++ { + l.Visit(i) + } + + // v==0 does not exist, so we only need 255 runs to be at version==1 again + for i := 0; i < 255; i++ { + l.Reset() + } + + // verify the test is correct and we are indeed at the version we think we are + assert.Equal(t, l.set[0], uint8(1)) + + // verify there are zero visited nodes + for i := uint64(0); i < 1000; i++ { + assert.False(t, l.Visited(i), "node should not be visited") + } + }) +} + +func TestListSetResize(t *testing.T) { + l := NewList(2) + assert.Equal(t, []uint8{1, 0, 0}, l.set) + assert.Equal(t, l.Len(), 2) + l.Visit(1) + assert.Equal(t, []uint8{1, 0, 1}, l.set) + assert.Equal(t, l.Len(), 2) + l.Reset() + assert.Equal(t, []uint8{2, 0, 1}, l.set) + assert.Equal(t, l.Len(), 2) + l.Visit(1) + assert.Equal(t, []uint8{2, 0, 2}, l.set) + assert.Equal(t, l.Len(), (2)) + l.Visit(3) + assert.Equal(t, []uint8{2, 0, 2, 0, 2}, l.set[0:5]) + assert.Equal(t, (2 + 1024), l.Len()) + l.free() + assert.Equal(t, []uint8(nil), l.set) +} + +func TestGrowth(t *testing.T) { + MaxInt := 1<<63 - 1 // math.MaxInt needs go >= 1.17 + tests := []struct { + old int + new int + want int + }{ + {512, 1000, 1024}, + {1024, 1048, 2048}, + {2000, 3500, 4000}, + {3500, 4500, 4887}, + {threshold, threshold + 32, threshold + threshold/2}, + {2097152, 4194304, 5122952}, + {threshold, MaxInt, MaxInt}, + {MaxInt / 2, MaxInt - 1, MaxInt - 1}, + } + for _, tc := range tests { + got := growth(tc.old, tc.new) + if got != tc.want { + t.Errorf("growth(%d,%d) got:%d want:%d", tc.old, tc.new, got, tc.want) + } + } +} + +func insertItems() { + list := NewList(1024) + for i := uint64(1); i < 8000000; i++ { + list.Visit(i) + } +} + +func BenchmarkListInsert(b *testing.B) { + for i := 0; i < b.N; i++ { + insertItems() + } +} diff --git 
a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/visited/pool.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/visited/pool.go new file mode 100644 index 0000000000000000000000000000000000000000..6bd972e2dc8876b1ac54c57148dcd8acc378b77b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/visited/pool.go @@ -0,0 +1,106 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package visited + +import ( + "math" + "sync" +) + +type Pool struct { + sync.Mutex + listSetSize int + listSets []ListSet + maxStorage int +} + +// NewPool creates a new pool with specified size. +// listSetSize specifies the size of a list at creation time point +// maxStorage specifies the maximum number of lists that can be stored in the +// pool, the pool can still generate infinite lists, but if more than +// maxStorage are returned to the pool, some lists will be thrown away. +func NewPool(initialSize int, listSetSize int, maxStorage int) *Pool { + if maxStorage < 1 { + maxStorage = math.MaxInt + } + + if initialSize > maxStorage { + maxStorage = initialSize + } + + p := &Pool{ + listSetSize: listSetSize, + listSets: make([]ListSet, initialSize), // make enough room + maxStorage: maxStorage, + } + + for i := 0; i < initialSize; i++ { + p.listSets[i] = NewList(listSetSize) + } + + return p +} + +// Borrow a list from the pool. If the pool is empty, a new list is craeted. If +// an old list is used, it is guaranteed to be reset – as that was performed on +// return. 
+func (p *Pool) Borrow() ListSet { + p.Lock() + + if n := len(p.listSets); n > 0 { + l := p.listSets[n-1] + p.listSets = p.listSets[:n-1] + p.Unlock() + + return l + } + p.Unlock() + return NewList(p.listSetSize) +} + +// Return list l to the pool +// The list l might be thrown if l.Len() > listSetSize*1.10 +// or if the pool is full. +func (p *Pool) Return(l ListSet) { + n := l.Len() + if n < p.listSetSize || n > p.listSetSize*11/10 { // 11/10 could be tuned + return + } + l.Reset() + + p.Lock() + defer p.Unlock() + + if len(p.listSets) >= p.maxStorage { + return + } + + p.listSets = append(p.listSets, l) +} + +// Destroy and empty pool +func (p *Pool) Destroy() { + p.Lock() + defer p.Unlock() + for i := range p.listSets { + p.listSets[i].free() + } + + p.listSets = nil +} + +// Len returns the number of lists currently in the pool +func (p *Pool) Len() int { + p.Lock() + defer p.Unlock() + return len(p.listSets) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/visited/pool_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/visited/pool_test.go new file mode 100644 index 0000000000000000000000000000000000000000..8570ffb787159c08aab09d6594845a1025187ac2 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/hnsw/visited/pool_test.go @@ -0,0 +1,121 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package visited + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestPoolWithoutLimit(t *testing.T) { + // pool with two liss + pool := NewPool(2, 2, -1) + assert.Equal(t, 2, pool.Len()) + + // get first list + l1 := pool.Borrow() + assert.Equal(t, 2, l1.Len()) + assert.Equal(t, 1, pool.Len()) + + // get second list + l2 := pool.Borrow() + assert.Equal(t, 0, pool.Len()) + + // get third list from empty pool and return it back + l3 := pool.Borrow() + assert.Equal(t, 0, pool.Len()) + pool.Return(l3) + assert.Equal(t, 1, pool.Len()) + + // get same list again and modify its size + // so that it is not accepted when returned to the pool + l3 = pool.Borrow() + l3.Visit(2) + pool.Return(l3) + assert.Equal(t, 0, pool.Len()) + + // add two list and destroy the pool + pool.Return(l1) + pool.Return(l2) + assert.Equal(t, 2, pool.Len()) + pool.Destroy() + assert.Equal(t, 0, pool.Len()) +} + +func TestPoolWithLimit(t *testing.T) { + type test struct { + initialSize int + maxStorage int + borrowCount int + expected int + } + + tests := []test{ + // limited + {initialSize: 2, maxStorage: 10, borrowCount: 30, expected: 10}, + {initialSize: 10, maxStorage: 10, borrowCount: 10, expected: 10}, + {initialSize: 10, maxStorage: 10, borrowCount: 50, expected: 10}, + {initialSize: 10, maxStorage: 15, borrowCount: 50, expected: 15}, + + // unlimited + {initialSize: 2, maxStorage: -1, borrowCount: 30, expected: 30}, + {initialSize: 100, maxStorage: -1, borrowCount: 30, expected: 100}, + } + + for _, tt := range tests { + name := fmt.Sprintf("initialSize=%d maxStorage=%d borrowCount=%d expected=%d", + tt.initialSize, tt.maxStorage, tt.borrowCount, tt.expected) + t.Run(name, func(t *testing.T) { + // Test a journey where the pool starts with a small initial size and a + // fairly generous limit. Then validate that when all lists are returned, the + // pool does not hold more than the limit. 
+ + pool := NewPool(tt.initialSize, 100, tt.maxStorage) + + if tt.maxStorage == -1 { + tt.maxStorage = tt.borrowCount + } + + lists := make([]ListSet, 0, 30) + + // Borrow all lists up to the limit + for i := 0; i < tt.maxStorage; i++ { + lists = append(lists, pool.Borrow()) + } + + // pool should now be max(initialSize-tt.maxStorage, 0) + expected := max(0, tt.initialSize-tt.maxStorage) + assert.Equal(t, expected, pool.Len()) + + // Borrow the remaining lists + for i := 0; i < tt.borrowCount-tt.maxStorage; i++ { + lists = append(lists, pool.Borrow()) + } + + // pool should now be max(initialSize-tt.borrowCount, 0) + expected = max(0, tt.initialSize-tt.borrowCount) + assert.Equal(t, expected, pool.Len()) + + // we should now have borrowCount lists + assert.Len(t, lists, tt.borrowCount) + + // try to return all lists + for _, l := range lists { + pool.Return(l) + } + + assert.Equal(t, tt.expected, pool.Len()) + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/kmeans/kmeans.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/kmeans/kmeans.go new file mode 100644 index 0000000000000000000000000000000000000000..040aeac71f2e92d89dc3f6a88a3a994d3fa54cb2 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/kmeans/kmeans.go @@ -0,0 +1,505 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package kmeans + +import ( + "cmp" + "errors" + "math" + "math/rand/v2" + "slices" + + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer" +) + +type IndexAndDistance struct { + index uint32 + distance float32 +} + +type temporaryData struct { + centroids [][]float64 // Higher-precision intermediate storage for computing centroids. 
+ sizes []uint32 // Number of points in each cluster. + assignment []uint32 // For each data point the index of the cluster that it is assigned to. + centerNeighbors [][]IndexAndDistance // Lists of nearest centers to each center, ordered by distance. + rng *rand.Rand // RNG for initialization and tie breaking. +} + +func (tmp *temporaryData) init(n int, d int, k int, seed uint64, strategy AssignmentStrategy) { + tmp.centroids = make([][]float64, 0, k) + for range k { + tmp.centroids = append(tmp.centroids, make([]float64, d)) + } + tmp.sizes = make([]uint32, k) + tmp.assignment = make([]uint32, n) + if strategy == GraphPruning { + tmp.centerNeighbors = make([][]IndexAndDistance, k) + for c := range k { + tmp.centerNeighbors[c] = make([]IndexAndDistance, 0, k-1) + } + } + tmp.rng = rand.New(rand.NewPCG(seed, 0x385ab5285169b1ac)) +} + +func (tmp *temporaryData) free() { + tmp.centroids = nil + tmp.sizes = nil + tmp.assignment = nil + tmp.centerNeighbors = nil + tmp.rng = nil +} + +type TerminationCondition string + +const ( + MaxIterations TerminationCondition = "MaxIterations" + ClusterStability TerminationCondition = "ClusterStability" +) + +// The first entry in the slices contain statistics for the initialization. +// By convention we set the first number of changes equal to the size of the dataset. +type Metrics struct { + Iterations int // Number of iterations before terminating. + Termination TerminationCondition // The condition that causes k-means to terminate. + Changes []int // For each iteration the number of points that moved to a new cluster. + Computations []int // Total number of distance computations. + WCSS []float64 // Within-cluster sum of squares per iteration (k-means objective). 
+} + +func (m *Metrics) TotalComputations() int { + var sum int + for _, c := range m.Computations { + sum += c + } + return sum +} + +func (m *Metrics) TotalChanges() int { + var sum int + for _, c := range m.Changes { + sum += c + } + return sum +} + +func (m *Metrics) update(metrics IterationMetrics) { + m.Computations = append(m.Computations, metrics.computations) + m.WCSS = append(m.WCSS, metrics.wcss) + m.Changes = append(m.Changes, metrics.changes) + m.Iterations++ +} + +type InitializationStrategy string + +const ( + PlusPlusInitialization InitializationStrategy = "PlusPlus" + RandomInitialization InitializationStrategy = "Random" +) + +type AssignmentStrategy string + +const ( + GraphPruning AssignmentStrategy = "GraphPruning" + BruteForce AssignmentStrategy = "BruteForce" +) + +type KMeans struct { + K int // How many centroids. + IterationThreshold int // Used to stop fitting after a certain amount of iterations. + DeltaThreshold float32 // Used to stop fitting if the fraction of points that change clusters is at or below threshold. + Initialization InitializationStrategy // Algorithm used to initialize cluster centers. + Assignment AssignmentStrategy // Whether to use inter-cluster distances to prune away distance computations. + Seed uint64 // The seed for the RNG using during fitting. + Metrics Metrics // Metrics for observability of the clustering algorithm. + Centers [][]float32 // The centers computed by Fit() + distance distancer.Provider // The clustering algorithm is intended to work with L2 squared. + dimensions int // Dimensions of the data. + segment int // Segment where it operates. + tmp temporaryData // Temporary heap-allocated data used during fitting. +} + +func New(k int, dimensions int, segment int) *KMeans { + // Reasoning behind default settings + // ================================= + // Experiments show that GraphPruning speeds up k-means compared to BruteForce + // when d <= 16 on random data. 
The speedup is about 5x for d = 4, 2x for d = 8, + // and insignificant at d = 16. We use GraphPruning by default as there is + // almost no additional cost for high-dimensional data at typical problem sizes + // (n = 100_000, k = 256), and a clear benefit for low-dimensional data. + // + // The IterationThreshold is what typically determines the number of iterations. + // It is set to balance running time and quality. The DeltaThreshold only comes + // into effect after e.g. 50-100 iterations on random data. + // + // k-means++ initialization seems to provide slightly better WCSS on random data + // compared to random initialization. It does come at at a slight increase in + // cost, something like 3-5% of the total k-means running time, but the + // robustness and increased quality seems worth it. + // + // Notes + // ===== + // There exists other heuristics to speed up k-means, see e.g. the paper + // "Making k-means even faster" by Greg Hamerly. We tried implementing this + // method, but it turned out that it mostly saves distance computations when the + // clusters are nearly stable, which only happens after e.g. 50+ iterations. + // Therefore the current heuristic (I don't know if it is published somewhere) + // seems to provide better speedups in the setting that is relewvant for + // product quantization (d = 4 or d = 8, and few iterations). 
+ kMeans := &KMeans{ + K: k, + DeltaThreshold: 0.01, + IterationThreshold: 10, + Initialization: PlusPlusInitialization, + Assignment: GraphPruning, + Seed: rand.Uint64(), + distance: distancer.NewL2SquaredProvider(), + dimensions: dimensions, + segment: segment, + } + return kMeans +} + +func (m *KMeans) seg(x []float32) []float32 { + return x[m.segment*m.dimensions : (m.segment+1)*m.dimensions] +} + +func (m *KMeans) l2squared(x []float32, y []float32) float32 { + dist, _ := m.distance.SingleDist(x, y) + return dist +} + +func weightedSample(weights []float32, r *rand.Rand) int { + var s float32 + for _, w := range weights { + s += w + } + + var v float32 + target := s * r.Float32() + for i, w := range weights { + v += w + if target < v { + return i + } + } + // Fallback, should not happen. + return r.IntN(len(weights)) +} + +// initialize performs k-means++ initialization and also performs one iteration +// of Lloyd's algorithm (assigns points to centers and updates centroids) since +// we get the cluster assignment "for free" from the initialization. 
+func (m *KMeans) initializePlusPlus(data [][]float32) { + n := len(data) + copy(m.Centers[0], m.seg(data[m.tmp.rng.IntN(n)])) + distances := make([]float32, n) + for i := range distances { + distances[i] = math.MaxFloat32 + } + + for c := range m.K { + for i, x := range data { + if dist := m.l2squared(m.seg(x), m.Centers[c]); dist < distances[i] { + distances[i] = dist + m.tmp.assignment[i] = uint32(c) + } + } + if c < m.K-1 { + idx := weightedSample(distances, m.tmp.rng) + copy(m.Centers[c+1], m.seg(data[idx])) + } + } + + if m.IterationThreshold == 0 { + return + } + + m.updateCenters(data) + var metrics IterationMetrics + metrics.changes = n + metrics.computations = n * m.K + for _, dist := range distances { + metrics.wcss += float64(dist) + } + m.Metrics.update(metrics) +} + +type IndexOrder struct{} + +func randomSubset(n int, k int, r *rand.Rand) []int { + if k > n/2 { + return r.Perm(n)[:k] + } + + // We sample random integers and insert them in a map to create the random + // subset. To produce a deterministic ordering we associate a random float + // with each index and use it to order the subset. + m := make(map[int]float64, k) + for len(m) < k { + m[r.IntN(n)] = r.Float64() + } + i := 0 + + type IndexOrder struct { + Index int + Rank float64 + } + + subset := make([]IndexOrder, k) + for key, value := range m { + subset[i].Index = key + subset[i].Rank = value + i++ + } + + slices.SortFunc(subset, func(a, b IndexOrder) int { + return cmp.Compare(a.Rank, b.Rank) + }) + + indices := make([]int, k) + for i := range k { + indices[i] = subset[i].Index + } + + return indices +} + +// initializeRandom picks k random data points as centers. We also run a single +// iteration of LLoyd's algorithm in order to prepare the data structure for +// Fit() in a similar manner to initializePlusPlus(). 
+func (m *KMeans) initializeRandom(data [][]float32) { + subset := randomSubset(len(data), m.K, m.tmp.rng) + for c := range m.K { + copy(m.Centers[c], m.seg(data[subset[c]])) + } + + if m.IterationThreshold == 0 { + return + } + + var metrics IterationMetrics + for i, x := range data { + nearest := m.nearestBruteForce(x) + m.tmp.assignment[i] = nearest.index + metrics.wcss += float64(nearest.distance) + metrics.computations += nearest.computations + metrics.changes++ + } + m.Metrics.update(metrics) + m.updateCenters(data) +} + +// updateCenters computes new centroids according to the current assignment. +func (m *KMeans) updateCenters(data [][]float32) { + // We perform intermediate computations of the centroids using float64 + // for improved precision. The overhead of doing this seems to be negligible + // (< 1% of running time) as measured by BenchmarkKMeansFit(). + clear(m.tmp.sizes) + for c := range m.K { + clear(m.tmp.centroids[c]) + } + + for i, x := range data { + c := m.tmp.assignment[i] + m.tmp.sizes[c]++ + for j, z := range m.seg(x) { + m.tmp.centroids[c][j] += float64(z) + } + } + + for c := range m.K { + if m.tmp.sizes[c] == 0 { + // This is not supposed to happen under normal circumstances. + // If it happens it is likely due to duplicate data, but + // k-means++ initialization should never pick duplicates except + // as a fallback measure. We could pick another random center, + // but it is unlikely to improve the situtation. + continue // Keep the current center. + } + for j := range m.dimensions { + m.Centers[c][j] = float32(m.tmp.centroids[c][j] / float64(m.tmp.sizes[c])) + } + } +} + +type NearestCenter struct { + index uint32 // Index of nearest center. + distance float32 // L2 squared distance to nearest center. + computations int // Number of distance computations used to determine nearest center. +} + +// nearestWithPruning returns the index of the nearest center to a point. 
It +// leverages the following information in order to prune unnecessary distance +// computations: +// +// 1) The center that the point was assigned to in the previous iteration, and +// +// 2) A list of the nearest centers to the previously assigned center, ordered +// by distance. +// +// We use the observation that a point is likely to remain in the same cluster, +// so we take the previous center as the starting point and compute the distance +// centerDist between it and the point. We can then prune away distance +// calculations to every center that lie at a distance greater than 2*centerDist +// from the previous center. +func (m *KMeans) nearestWithPruning(point []float32, prevCenterIdx uint32) NearestCenter { + pointSegment := m.seg(point) + minDist := m.l2squared(m.Centers[prevCenterIdx], pointSegment) + centerDist := float32(math.Sqrt(float64(minDist))) + idx := prevCenterIdx + neighbors := m.tmp.centerNeighbors[prevCenterIdx] + var computations int + for _, v := range neighbors { + if v.distance >= 2*centerDist { + break // Remaining centers can be skipped. 
+ } + if dist := m.l2squared(m.seg(point), m.Centers[v.index]); dist < minDist { + minDist = dist + idx = v.index + } + } + return NearestCenter{index: idx, distance: minDist, computations: computations} +} + +func (m *KMeans) nearestBruteForce(point []float32) NearestCenter { + var minDist float32 = math.MaxFloat32 + var idx uint32 + for i := range m.Centers { + if dist := m.l2squared(m.seg(point), m.Centers[i]); dist < minDist { + minDist = dist + idx = uint32(i) + } + } + return NearestCenter{index: idx, distance: minDist, computations: m.K} +} + +func (m *KMeans) nearest(point []float32, prevCenterIdx uint32) NearestCenter { + var nearest NearestCenter + switch m.Assignment { + case GraphPruning: + nearest = m.nearestWithPruning(point, prevCenterIdx) + case BruteForce: + nearest = m.nearestBruteForce(point) + } + return nearest +} + +// updateCenterNeighbors computes for each center the list of nearest centers in +// ascending order by Euclidean distance. +func (m *KMeans) updateCenterNeighbors() { + for c := range m.K { + m.tmp.centerNeighbors[c] = m.tmp.centerNeighbors[c][:0] + } + + for c1 := range m.K { + for c2 := c1 + 1; c2 < m.K; c2++ { + dist := m.l2squared(m.Centers[c1], m.Centers[c2]) + distEuclidean := float32(math.Sqrt(float64(dist))) + m.tmp.centerNeighbors[c1] = append(m.tmp.centerNeighbors[c1], IndexAndDistance{index: uint32(c2), distance: distEuclidean}) + m.tmp.centerNeighbors[c2] = append(m.tmp.centerNeighbors[c2], IndexAndDistance{index: uint32(c1), distance: distEuclidean}) + } + } + + for _, neighbors := range m.tmp.centerNeighbors { + slices.SortFunc(neighbors, func(a, b IndexAndDistance) int { + return cmp.Compare(a.distance, b.distance) + }) + } +} + +type IterationMetrics struct { + changes int + computations int + wcss float64 +} + +func (m *KMeans) initMemory(n int) { + m.tmp.init(n, m.dimensions, m.K, m.Seed, m.Assignment) + m.Centers = make([][]float32, m.K) + for c := range m.K { + m.Centers[c] = make([]float32, m.dimensions) + } + 
m.Metrics = Metrics{} +} + +func (m *KMeans) initializeCenters(data [][]float32) { + switch m.Initialization { + case RandomInitialization: + m.initializeRandom(data) + case PlusPlusInitialization: + m.initializePlusPlus(data) + } +} + +func (m *KMeans) cleanupMemory() { + m.tmp.free() +} + +func (m *KMeans) computeCentroid(data [][]float32) { + // We can skip Lloyd's algorithm and return the centroid directly. + // We leverage existing methods to compute it. + m.initMemory(len(data)) // Every data point is assigned to the first center. + m.updateCenters(data) // Compute the centroid according to the zero-assignment. +} + +// Fit runs k-means clustering on the data according to the settings on the +// KMeans struct. After running Fit() the resulting cluster centers can be +// accessed through Centers(). +// TODO: Consider refactoring to functions that explicitly pass around structs. +func (m *KMeans) Fit(data [][]float32) error { + if len(data) < m.K { + return errors.New("not enough data to fit k-means") + } + + if m.K == 1 { + m.computeCentroid(data) + return nil + } + + n := len(data) + m.initMemory(n) + m.initializeCenters(data) + m.Metrics.Termination = MaxIterations + for m.Metrics.Iterations < m.IterationThreshold { + var metrics IterationMetrics + if m.Assignment == GraphPruning { + m.updateCenterNeighbors() + metrics.computations += m.K * m.K / 2 + } + for i, x := range data { + prevCenterIdx := m.tmp.assignment[i] + nearest := m.nearest(x, prevCenterIdx) + if nearest.index != prevCenterIdx { + metrics.changes++ + m.tmp.assignment[i] = nearest.index + } + metrics.wcss += float64(nearest.distance) + metrics.computations += nearest.computations + } + m.Metrics.update(metrics) + m.updateCenters(data) + if float32(metrics.changes) <= m.DeltaThreshold*float32(n) { + m.Metrics.Termination = ClusterStability + break + } + } + m.cleanupMemory() + return nil +} + +func (m *KMeans) DisableDeltaThreshold() { + m.DeltaThreshold = -1 +} + +func (m *KMeans) 
DisableIterationThreshold() { + m.IterationThreshold = math.MaxInt +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/kmeans/kmeans_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/kmeans/kmeans_test.go new file mode 100644 index 0000000000000000000000000000000000000000..74bb6fa4a062f3d3f4da1d509e02e8321557865c --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/kmeans/kmeans_test.go @@ -0,0 +1,317 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package kmeans + +import ( + "fmt" + "math" + "math/rand/v2" + "slices" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer" +) + +type Distribution string + +const ( + uniform Distribution = "uniform" + normal Distribution = "normal" +) + +const ( + seed uint64 = 0x6a62976fa43149c9 +) + +func generateData(n int, d int, dist Distribution, seed uint64) [][]float32 { + r := rand.New(rand.NewPCG(seed, 8523456)) + data := make([][]float32, n) + for i := range n { + data[i] = make([]float32, d) + for j := range d { + switch dist { + case uniform: + data[i][j] = r.Float32() + case normal: + data[i][j] = float32(r.NormFloat64()) + } + } + } + return data +} + +type KMeansVariant struct { + Initialization InitializationStrategy + Assignment AssignmentStrategy +} + +var kMeansVariants = [...]KMeansVariant{ + {Initialization: RandomInitialization, Assignment: BruteForce}, + {Initialization: RandomInitialization, Assignment: GraphPruning}, + {Initialization: PlusPlusInitialization, Assignment: BruteForce}, + {Initialization: PlusPlusInitialization, Assignment: GraphPruning}, +} + +// Convenient instantiation of KMeans for testing. 
+func newKMeans(k int, d int, variant KMeansVariant) *KMeans { + kmeans := New(k, d, 0) + kmeans.Initialization = variant.Initialization + kmeans.Assignment = variant.Assignment + return kmeans +} + +// By default we seed KMeans with a call to the global random number generator. +// For some tests we want deterministic behavior for stability, so we explicitly +// set the seed. +func newDeterministicKMeans(k int, d int, variant KMeansVariant) *KMeans { + kmeans := newKMeans(k, d, variant) + kmeans.Seed = seed + return kmeans +} + +func TestIterationThreshold(t *testing.T) { + n := 100 + d := 4 + k := 8 + data := generateData(n, d, normal, seed) + maxIterations := 5 + for _, variant := range kMeansVariants { + for i := range maxIterations { + km := newKMeans(k, d, variant) + km.DisableDeltaThreshold() + km.IterationThreshold = i + km.Fit(data) + assert.Equal(t, km.Metrics.Iterations, i) + assert.Equal(t, len(km.Metrics.WCSS), i, + "The length of the per-iteration metric slices should match the number of iterations.") + assert.Equal(t, km.Metrics.Termination, MaxIterations) + } + } +} + +func TestDeltaThreshold(t *testing.T) { + n := 100 + d := 4 + k := 8 + data := generateData(n, d, normal, seed) + + for _, variant := range kMeansVariants { + km := newDeterministicKMeans(k, d, variant) + km.DisableIterationThreshold() + // With a threshold of zero we iterate until clusters are perfectly stable. + km.DeltaThreshold = 0.0 + km.Fit(data) + assert.Greater(t, km.Metrics.Iterations, 1) + assert.Less(t, km.Metrics.Iterations, 100) + assert.Equal(t, km.Metrics.Termination, ClusterStability) + assert.Equal(t, km.Metrics.Changes[len(km.Metrics.Changes)-1], 0) + } +} + +// The k-means objective functions is the Within-Cluster Sum of Squares (WCSS). +// With LLoyd's algorithm it is guaranteed to decrease with each iteration. 
+func TestDecreasingWCSS(t *testing.T) { + n := 1000 + d := 8 + k := 16 + data := generateData(n, d, normal, seed) + + for _, variant := range kMeansVariants { + km := newDeterministicKMeans(k, d, variant) + km.Fit(data) + slices.Reverse(km.Metrics.WCSS) + assert.True(t, slices.IsSorted(km.Metrics.WCSS)) + } +} + +// The GraphPruning assignment strategy only prunes away unnecessary distance +// computations. We should be producing the same result up to floating point +// errors and randomness in tie breaking. With random data ties are highly +// unlikely so any differences are likely due to a bug. Note that this test is a +// bit too tied to the implementation in that it requires exact equality of the +// centers, but it does the job for now. +func TestGraphPruningAssignment(t *testing.T) { + n := 1000 + d := 8 + k := 32 + data := generateData(n, d, normal, seed) + + for _, init := range [...]InitializationStrategy{RandomInitialization, PlusPlusInitialization} { + bf := newDeterministicKMeans(k, d, KMeansVariant{init, BruteForce}) + bf.Fit(data) + + prune := newDeterministicKMeans(k, d, KMeansVariant{init, GraphPruning}) + prune.Fit(data) + + for i := range k { + assert.True(t, slices.Equal(bf.Centers[i], prune.Centers[i])) + } + } +} + +func contains(data [][]float32, q []float32) bool { + const eps = 1e-12 + l2 := distancer.NewL2SquaredProvider() + for _, x := range data { + dist, _ := l2.SingleDist(x, q) + if dist < eps { + return true + } + } + return false +} + +func TestCorrectnessAcrossSegments(t *testing.T) { + // Create a dataset with two segments of two dimensions each. + // The first segment has clusters centered at (1,1) and (-1, -1). + // The second segment has clusters centered at (-1, 1) and (1, -1). 
+ data := [][]float32{ + {0.99, 0.99, -0.99, 0.99}, + {1.01, 1.01, -1.01, 1.01}, + {-0.99, -0.99, 0.99, -0.99}, + {-1.01, -1.01, 1.01, -1.01}, + } + + k := 2 + d := 2 + for _, variant := range kMeansVariants { + // Run k-means clustering on each segment and assert that we find the true + km := newDeterministicKMeans(k, d, variant) + km.segment = 0 + km.Fit(data) + assert.True(t, contains(km.Centers, []float32{1, 1})) + assert.True(t, contains(km.Centers, []float32{-1, -1})) + + km = newDeterministicKMeans(k, d, variant) + km.segment = 1 + km.Fit(data) + assert.True(t, contains(km.Centers, []float32{-1, 1})) + assert.True(t, contains(km.Centers, []float32{1, -1})) + } +} + +// Create a new data set that duplicates each data point k times and shuffles +// the data. Verify that with k-means++ initialization we end up with the +// original data points as centers. With random initialization we would be +// extremely unlikely to capture the original k centers. +func TestPlusPlusInitialization(t *testing.T) { + k := 100 + d := 64 + centers := generateData(k, d, uniform, seed) + + data := make([][]float32, k*k) + for i := range data { + data[i] = make([]float32, d) + copy(data[i], centers[i/k]) + } + + r := rand.New(rand.NewPCG(seed, 8523456)) + r.Shuffle(len(data), func(i, j int) { + data[i], data[j] = data[j], data[i] + }) + + km := newDeterministicKMeans(k, d, KMeansVariant{PlusPlusInitialization, BruteForce}) + km.IterationThreshold = 0 // Only perform initialization. 
+ km.Fit(data) + for _, trueCenter := range centers { + var found bool + for _, kmCenter := range km.Centers { + dist := km.l2squared(trueCenter, kmCenter) + if dist < 1e-12 { + found = true + break + } + } + assert.True(t, found, "k-means++ failed to sample all true centers.") + } +} + +func wcss(data [][]float32, centers [][]float32) float64 { + l2s := distancer.NewL2SquaredProvider() + var sum float64 + for _, v := range data { + var minDist float32 = math.MaxFloat32 + for _, c := range centers { + if dist, _ := l2s.SingleDist(v, c); dist < minDist { + minDist = dist + } + } + sum += float64(minDist) + } + return sum +} + +func TestFewDataPoints(t *testing.T) { + n := 10 + d := 8 + k := n + data := generateData(n, d, uniform, seed) + + // With k = n we all variants should select the original data points as + // cluster centers, resulting in a WCSS of zero. + for _, variant := range kMeansVariants { + km := newDeterministicKMeans(k, d, variant) + km.Fit(data) + assert.Equal(t, wcss(data, km.Centers), 0.0) + } +} + +func TestOneCenter(t *testing.T) { + data := [][]float32{ + {1, 0}, + {0, 1}, + {1, 1}, + {0, 0}, + } + for _, variant := range kMeansVariants { + km := newDeterministicKMeans(1, 2, variant) + km.Fit(data) + assert.Equal(t, len(km.Centers), 1) + assert.True(t, slices.Equal(km.Centers[0], []float32{0.5, 0.5})) + } +} + +func BenchmarkKMeansFit(b *testing.B) { + distribution := []Distribution{normal, uniform} + dimensions := []int{4, 8, 16, 32} + k := 256 + n := 100_000 + var seed uint64 + for _, dist := range distribution { + for _, d := range dimensions { + data := generateData(n, d, dist, seed) + seed++ + for _, variant := range kMeansVariants { + b.Run(fmt.Sprintf("KMeansFit-%v-d%d-%v-%v", dist, d, variant.Initialization, variant.Assignment), func(b *testing.B) { + var computations int + var WCSS float64 + var iterations int + for i := 0; i < b.N; i++ { + km := newDeterministicKMeans(k, d, variant) + km.IterationThreshold = 50 + b.StartTimer() + 
km.Fit(data) + b.StopTimer() + iterations += km.Metrics.Iterations + WCSS += wcss(data, km.Centers) + computations += km.Metrics.TotalComputations() + } + b.ReportMetric(float64(b.Elapsed().Seconds())/float64(b.N), "sec/fit") // More readable than ns. + b.ReportMetric(float64(iterations)/float64(b.N), "iter") + b.ReportMetric(WCSS/float64(b.N), "wcss") + b.ReportMetric(float64(computations), "distcomps") + }) + } + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/multivector/muvera.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/multivector/muvera.go new file mode 100644 index 0000000000000000000000000000000000000000..be5415d898c1cbff0de8c1beb3d374051457b103 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/multivector/muvera.go @@ -0,0 +1,271 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package multivector + +import ( + "encoding/binary" + "fmt" + "math" + "math/rand/v2" + + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer" + "github.com/weaviate/weaviate/entities/storobj" + ent "github.com/weaviate/weaviate/entities/vectorindex/hnsw" +) + +type MuveraConfig struct { + KSim int + NumClusters int // Number of clusters for K-means or number of bits for SimHash + Dimensions int // Dimensions of each vector + DProjections int // Number of projections for D-Projections + DFinal int // Number of projections for final projection + Repetitions int // Number of repetitions +} + +type MuveraEncoder struct { + config MuveraConfig + gaussians [][][]float32 // Random Gaussian vectors for SimHash projection + S [][][]float32 // Random projection matrix with ±1 entries + dotDistancerProvider distancer.Provider + muveraStore *lsmkv.Store +} + +const ( + DefaultMuveraSeed = uint64(0x532ca5105169b1df) +) + +func NewMuveraEncoder(config ent.MuveraConfig, muveraStore *lsmkv.Store) *MuveraEncoder { + encoder := &MuveraEncoder{ + config: MuveraConfig{ + KSim: config.KSim, + NumClusters: int(math.Pow(2, float64(config.KSim))), + DProjections: config.DProjections, + Repetitions: config.Repetitions, + }, + dotDistancerProvider: distancer.NewDotProductProvider(), + muveraStore: muveraStore, + } + + return encoder +} + +func (encoder *MuveraEncoder) InitEncoder(dimensions int) { + rng := rand.New(rand.NewPCG(DefaultMuveraSeed, 0x385ab5285169b1ac)) + encoder.config.Dimensions = dimensions + encoder.gaussians = make([][][]float32, encoder.config.Repetitions) + encoder.S = make([][][]float32, encoder.config.Repetitions) + for rep := 0; rep < encoder.config.Repetitions; rep++ { + // Initialize random Gaussian vectors + encoder.gaussians[rep] = make([][]float32, encoder.config.KSim) + for i := 0; i < encoder.config.KSim; i++ { + encoder.gaussians[rep][i] = 
make([]float32, encoder.config.Dimensions) + for j := 0; j < encoder.config.Dimensions; j++ { + u1 := rng.Float64() + u2 := rng.Float64() + encoder.gaussians[rep][i][j] = float32(math.Sqrt(-2.0*math.Log(u1)) * math.Cos(2*math.Pi*u2)) + } + } + + encoder.S[rep] = initProjectionMatrix(encoder.config.DProjections, encoder.config.Dimensions, rng) + } +} + +func initProjectionMatrix(rows int, cols int, rng *rand.Rand) [][]float32 { + matrix := make([][]float32, rows) + for i := 0; i < rows; i++ { + matrix[i] = make([]float32, cols) + for j := 0; j < cols; j++ { + matrix[i][j] = float32(rng.IntN(2)*2 - 1) + } + } + return matrix +} + +// simHash computes the SimHash of a vector using random Gaussian projections +func (e *MuveraEncoder) simHash(vec []float32, gaussians [][]float32) uint64 { + var result uint64 + distancer := e.dotDistancerProvider.New(vec) + + for i := 0; i < e.config.KSim; i++ { + dotProduct, err := distancer.Distance(gaussians[i]) + if err != nil { + return 0.0 + } + // Set bit based on sign of dot product + if dotProduct < 0 { + result |= 1 << uint(i) + } + } + return result +} + +func (e *MuveraEncoder) encode(fullVec [][]float32, isDoc bool) []float32 { + encodedVec := make([]float32, e.config.Repetitions*e.config.NumClusters*e.config.DProjections) + + // For each repetition + tmpVec := make([]float32, e.config.NumClusters*e.config.Dimensions) + for rep := 0; rep < e.config.Repetitions; rep++ { + // Get SimHash for each token + repetitionClusterCounts := make([]uint16, e.config.NumClusters) + clusterMappings := make([]uint64, len(fullVec)) + for relative, token := range fullVec { + cluster := e.simHash(token, e.gaussians[rep]) + clusterMappings[relative] = cluster + repetitionClusterCounts[cluster]++ + startIdx := cluster * uint64(e.config.Dimensions) + for i := 0; i < e.config.Dimensions; i++ { + tmpVec[startIdx+uint64(i)] += token[i] + } + } + + // doc ONLY operations + if isDoc { + for cluster, count := range repetitionClusterCounts { + startIdx 
:= uint64(cluster) * uint64(e.config.Dimensions) + for i := 0; i < e.config.Dimensions; i++ { + tmpVec[startIdx+uint64(i)] = (1 / float32(count)) * tmpVec[startIdx+uint64(i)] + } + } + for cluster := uint64(0); cluster < uint64(e.config.NumClusters); cluster++ { + if repetitionClusterCounts[cluster] == 0 { + // Find nearest non-empty cluster + minHamming := float32(math.MaxFloat32) + nearestPoint := uint64(0) + for docIdx, clusterMapped := range clusterMappings { + hamming, err := distancer.HammingBitwise([]uint64{cluster}, []uint64{clusterMapped}) + if err != nil { + return nil + } + if hamming < minHamming { + minHamming = hamming + nearestPoint = uint64(docIdx) + } + } + startIdx := cluster * uint64(e.config.Dimensions) + for i := 0; i < e.config.Dimensions; i++ { + tmpVec[startIdx+uint64(i)] = fullVec[nearestPoint][i] + } + } + } + } + // doc ONLY operations ended + + scale := 1.0 / float32(math.Sqrt(float64(e.config.DProjections))) + projOffset := rep * e.config.NumClusters * e.config.DProjections + matrix := e.S[rep] + // Process each cluster + for j := 0; j < e.config.NumClusters; j++ { + // Calculate source and destination offsets + srcStart := j * e.config.Dimensions + dstStart := projOffset + (j * e.config.DProjections) + + // Process in chunks of 4 for better cache utilization + for k := 0; k < e.config.DProjections; k++ { + var sum float32 + // Process 4 elements at a time + for l := 0; l < e.config.Dimensions; l += 4 { + end := l + 4 + if end > e.config.Dimensions { + end = e.config.Dimensions + } + // Unroll the inner loop + for m := l; m < end; m++ { + sum += matrix[k][m] * tmpVec[srcStart+m] + } + } + encodedVec[dstStart+k] = sum * scale + } + } + + // Reset tmpVec, this is needed only for query encoding + clear(tmpVec) + } + + return encodedVec +} + +// EncodeQuery encodes a query vector using Muvera +func (e *MuveraEncoder) EncodeQuery(query [][]float32) []float32 { + return e.encode(query, false) +} + +// EncodeDoc encodes a document vector using 
Muvera +func (e *MuveraEncoder) EncodeDoc(fullDoc [][]float32) []float32 { + return e.encode(fullDoc, true) +} + +func MuveraBytesFromFloat32(vec []float32) []byte { + slice := make([]byte, len(vec)*4) + for i := range vec { + binary.LittleEndian.PutUint32(slice[i*4:], math.Float32bits(vec[i])) + } + return slice +} + +func MuveraFromBytes(bytes []byte) []float32 { + vec := make([]float32, len(bytes)/4) + for i := range vec { + vec[i] = math.Float32frombits(binary.LittleEndian.Uint32(bytes[i*4 : (i+1)*4])) + } + return vec +} + +func (e *MuveraEncoder) GetMuveraVectorForID(id uint64, bucket string) ([]float32, error) { + idBytes := make([]byte, 8) + binary.BigEndian.PutUint64(idBytes, id) + muveraBytes, err := e.muveraStore.Bucket(bucket).Get(idBytes) + if err != nil { + return nil, fmt.Errorf("getting vector for id: %w", err) + } + if len(muveraBytes) == 0 { + return nil, storobj.NewErrNotFoundf(id, "GetMuveraVectorForID") + } + + return MuveraFromBytes(muveraBytes), nil +} + +type MuveraData struct { + KSim uint32 // 4 bytes + NumClusters uint32 // 4 bytes + Dimensions uint32 // 4 bytes + DProjections uint32 // 4 bytes + Repetitions uint32 // 4 bytes + Gaussians [][][]float32 // 4 bytes -> (repetitions, kSim, dimensions) + S [][][]float32 // 4 bytes -> (repetitions, dProjections, dimensions) +} + +type CommitLogger interface { + AddMuvera(MuveraData) error +} + +func (e *MuveraEncoder) PersistMuvera(logger CommitLogger) error { + return logger.AddMuvera(MuveraData{ + KSim: uint32(e.config.KSim), + NumClusters: uint32(e.config.NumClusters), + Dimensions: uint32(e.config.Dimensions), + DProjections: uint32(e.config.DProjections), + Repetitions: uint32(e.config.Repetitions), + Gaussians: e.gaussians, + S: e.S, + }) +} + +func (e *MuveraEncoder) LoadMuveraConfig(data MuveraData) { + e.config.KSim = int(data.KSim) + e.config.NumClusters = int(data.NumClusters) + e.config.Dimensions = int(data.Dimensions) + e.config.DProjections = int(data.DProjections) + 
e.config.Repetitions = int(data.Repetitions) + e.gaussians = data.Gaussians + e.S = data.S +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/multivector/simhash_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/multivector/simhash_test.go new file mode 100644 index 0000000000000000000000000000000000000000..7844a657c97a52894e0238a2ecf19ea35b4b3aa4 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/multivector/simhash_test.go @@ -0,0 +1,88 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package multivector + +import ( + "testing" + + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer" + ent "github.com/weaviate/weaviate/entities/vectorindex/hnsw" +) + +func TestSimHashTest(t *testing.T) { + // Create a default config + config := ent.MuveraConfig{ + KSim: 3, + DProjections: 8, + Repetitions: 20, + } + + encoder := NewMuveraEncoder(config, nil) + encoder.InitEncoder(64) + + // Test case 1: Similar vectors should produce similar hashes + vec1 := make([]float32, encoder.config.Dimensions) + vec2 := make([]float32, encoder.config.Dimensions) + vec3 := make([]float32, encoder.config.Dimensions) + for i := 0; i < encoder.config.Dimensions; i++ { + vec1[i] = 1.0 + vec2[i] = 0.9 // Slightly different but similar vector + vec3[i] = -1.0 // Opposite direction + } + zeroVec := make([]float32, encoder.config.Dimensions) + + for i := 0; i < encoder.config.Repetitions; i++ { + hash1 := encoder.simHash(vec1, encoder.gaussians[i]) + hash2 := encoder.simHash(vec2, encoder.gaussians[i]) + + // Calculate Hamming distance between hashes + hammingDist, err := distancer.HammingBitwise([]uint64{hash1}, []uint64{hash2}) + if err != nil { + 
t.Errorf("Error calculating Hamming distance: %v", err) + } + if hammingDist > float32(config.KSim)/2 { + t.Errorf("Similar vectors produced very different hashes. Hamming distance: %f", hammingDist) + } + + // Test case 2: Orthogonal vectors should produce different hashes + hash3 := encoder.simHash(vec3, encoder.gaussians[i]) + hammingDist, err = distancer.HammingBitwise([]uint64{hash1}, []uint64{hash3}) + if err != nil { + t.Errorf("Error calculating Hamming distance: %v", err) + } + if hammingDist < float32(config.KSim)/2 { + t.Errorf("Orthogonal vectors produced similar hashes. Hamming distance: %f", hammingDist) + } + + // Test case 3: Zero vector should produce consistent hash + hashZero := encoder.simHash(zeroVec, encoder.gaussians[i]) + if hashZero != 0 { + t.Errorf("Zero vector produced non-zero hash: %d", hashZero) + } + + // Test case 4: Same vector should produce same hash + hash1Rep1 := encoder.simHash(vec1, encoder.gaussians[i]) + hash1Rep2 := encoder.simHash(vec1, encoder.gaussians[i]) + hammingDist, err = distancer.HammingBitwise([]uint64{hash1Rep1}, []uint64{hash1Rep2}) + if err != nil { + t.Errorf("Error calculating Hamming distance: %v", err) + } + if hammingDist > 0 { + t.Error("Same vector produced different hashes") + } + + maxHash := uint64(1< maxHash { + t.Errorf("Hash value %d exceeds maximum possible value %d", hash1, maxHash) + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/noop/index.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/noop/index.go new file mode 100644 index 0000000000000000000000000000000000000000..daee4cdcb102086a1d55c7cca928f86ab9b2c6f7 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/noop/index.go @@ -0,0 +1,179 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate 
B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package noop + +import ( + "context" + "fmt" + + "github.com/pkg/errors" + + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/adapters/repos/db/vector/common" + "github.com/weaviate/weaviate/adapters/repos/db/vector/compressionhelpers" + schemaConfig "github.com/weaviate/weaviate/entities/schema/config" + hnswconf "github.com/weaviate/weaviate/entities/vectorindex/hnsw" +) + +type Index struct{} + +func NewIndex() *Index { + return &Index{} +} + +func (i *Index) AddBatch(ctx context.Context, id []uint64, vector [][]float32) error { + // silently ignore + return nil +} + +func (i *Index) AddMultiBatch(ctx context.Context, docIds []uint64, vectors [][][]float32) error { + // silently ignore + return nil +} + +func (i *Index) Add(ctx context.Context, id uint64, vector []float32) error { + // silently ignore + return nil +} + +func (i *Index) AddMulti(ctx context.Context, docId uint64, vector [][]float32) error { + // silently ignore + return nil +} + +func (i *Index) Delete(id ...uint64) error { + // silently ignore + return nil +} + +func (i *Index) DeleteMulti(id ...uint64) error { + // silently ignore + return nil +} + +func (i *Index) SearchByVector(ctx context.Context, vector []float32, k int, allow helpers.AllowList) ([]uint64, []float32, error) { + return nil, nil, errors.Errorf("cannot vector-search on a class not vector-indexed") +} + +func (i *Index) SearchByMultiVector(ctx context.Context, vector [][]float32, k int, allow helpers.AllowList) ([]uint64, []float32, error) { + return nil, nil, errors.Errorf("cannot vector-search on a class not vector-indexed") +} + +func (i *Index) SearchByVectorDistance(ctx context.Context, vector []float32, dist float32, maxLimit int64, allow helpers.AllowList) ([]uint64, []float32, error) { + return nil, nil, errors.Errorf("cannot vector-search on a class not vector-indexed") +} + +func (i *Index) 
SearchByMultiVectorDistance(ctx context.Context, vector [][]float32, dist float32, maxLimit int64, allow helpers.AllowList) ([]uint64, []float32, error) { + return nil, nil, errors.Errorf("cannot multi-vector-search on a class not vector-indexed") +} + +func (i *Index) UpdateUserConfig(updated schemaConfig.VectorIndexConfig, callback func()) error { + callback() + switch t := updated.(type) { + case hnswconf.UserConfig: + // the fact that we are in the noop index means that 'skip' must have been + // set to true before, so changing it now is not possible. But if it + // stays, we don't mind. + if t.Skip { + return nil + } + return errors.Errorf("cannot update vector index config on a non-indexed class. Delete and re-create without skip property") + + default: + return fmt.Errorf("unrecognized vector index config: %T", updated) + + } +} + +func (i *Index) Drop(context.Context) error { + // silently ignore + return nil +} + +func (i *Index) Flush() error { + return nil +} + +func (i *Index) Shutdown(context.Context) error { + return nil +} + +func (i *Index) SwitchCommitLogs(context.Context) error { + return nil +} + +func (i *Index) ListFiles(context.Context, string) ([]string, error) { + return nil, nil +} + +func (i *Index) ValidateBeforeInsert(vector []float32) error { + return nil +} + +func (i *Index) ValidateMultiBeforeInsert(vector [][]float32) error { + return nil +} + +func (i *Index) PostStartup() { +} + +func (i *Index) ContainsDoc(docID uint64) bool { + return false +} + +func (i *Index) Iterate(fn func(id uint64) bool) {} + +func (i *Index) ShouldCompress() (bool, int) { + return false, 0 +} + +func (i *Index) ShouldCompressFromConfig(config schemaConfig.VectorIndexConfig) (bool, int) { + return false, 0 +} + +func (i *Index) Compressed() bool { + return false +} + +func (i *Index) Multivector() bool { + return false +} + +func (i *Index) AlreadyIndexed() uint64 { + return 0 +} + +func (i *Index) TurnOnCompression(callback func()) error { + return nil 
+} + +func (i *Index) QueryVectorDistancer(queryVector []float32) common.QueryVectorDistancer { + return common.QueryVectorDistancer{} +} + +func (i *Index) QueryMultiVectorDistancer(queryVector [][]float32) common.QueryVectorDistancer { + return common.QueryVectorDistancer{} +} + +func (i *Index) Type() common.IndexType { + return common.IndexTypeNoop +} + +func (i *Index) VectorStorageSize(_ context.Context) int64 { + // Noop index doesn't store vectors in memory, so return 0 + return 0 +} + +func (i *Index) CompressionStats() compressionhelpers.CompressionStats { + // Noop index doesn't compress vectors + return compressionhelpers.UncompressedStats{} +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/noop/index_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/noop/index_test.go new file mode 100644 index 0000000000000000000000000000000000000000..37d797cc32bac2ed48aeb25d9d892a13762c4d81 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/noop/index_test.go @@ -0,0 +1,51 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package noop + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/vectorindex/hnsw" +) + +func Test_UpdateConfig(t *testing.T) { + t.Run("hnsw: with skip==true", func(t *testing.T) { + // the param we care about was not changed, do not error + + ind := NewIndex() + err := ind.UpdateUserConfig(hnsw.UserConfig{ + Skip: true, + }, func() {}) + + assert.Nil(t, err) + }) + + t.Run("hnsw: with skip==false", func(t *testing.T) { + ind := NewIndex() + err := ind.UpdateUserConfig(hnsw.UserConfig{ + Skip: false, + }, func() {}) + + require.NotNil(t, err) + assert.Contains(t, err.Error(), "Delete and re-create") + }) + + t.Run("with unrecognized vector index config", func(t *testing.T) { + ind := NewIndex() + err := ind.UpdateUserConfig(nil, func() {}) + + require.NotNil(t, err) + assert.Contains(t, err.Error(), "unrecognized vector index config") + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/testinghelpers/helpers.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/testinghelpers/helpers.go new file mode 100644 index 0000000000000000000000000000000000000000..3410c23ecbd279d4861610408cdc0bfe4595c334 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector/testinghelpers/helpers.go @@ -0,0 +1,339 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package testinghelpers + +import ( + "context" + "encoding/binary" + "encoding/gob" + "fmt" + "io" + "math" + "math/rand" + "os" + "sort" + "sync" + "testing" + "time" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/require" + + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv" + "github.com/weaviate/weaviate/adapters/repos/db/vector/compressionhelpers" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer" + "github.com/weaviate/weaviate/entities/cyclemanager" +) + +type DistanceFunction func([]float32, []float32) float32 + +func getRandomSeed() *rand.Rand { + return rand.New(rand.NewSource(time.Now().UnixNano())) +} + +func getFixedSeed() *rand.Rand { + seed := int64(425812) + return rand.New(rand.NewSource(seed)) +} + +func int32FromBytes(bytes []byte) int { + return int(binary.LittleEndian.Uint32(bytes)) +} + +func float32FromBytes(bytes []byte) float32 { + bits := binary.LittleEndian.Uint32(bytes) + float := math.Float32frombits(bits) + return float +} + +func readSiftFloat(file string, maxObjects int, vectorLengthFloat int) [][]float32 { + f, err := os.Open(file) + if err != nil { + panic(errors.Wrap(err, "Could not open SIFT file")) + } + defer f.Close() + + fi, err := f.Stat() + if err != nil { + panic(errors.Wrap(err, "Could not get SIFT file properties")) + } + fileSize := fi.Size() + if fileSize < 1000000 { + panic("The file is only " + fmt.Sprint(fileSize) + " bytes long. Did you forgot to install git lfs?") + } + + // The sift data is a binary file containing floating point vectors + // For each entry, the first 4 bytes is the length of the vector (in number of floats, not in bytes) + // which is followed by the vector data with vector length * 4 bytes. 
+ // |-length-vec1 (4bytes)-|-Vec1-data-(4*length-vector-1 bytes)-|-length-vec2 (4bytes)-|-Vec2-data-(4*length-vector-2 bytes)-| + // The vector length needs to be converted from bytes to int + // The vector data needs to be converted from bytes to float + // Note that the vector entries are of type float but are integer numbers eg 2.0 + bytesPerF := 4 + objects := make([][]float32, maxObjects) + vectorBytes := make([]byte, bytesPerF+vectorLengthFloat*bytesPerF) + for i := 0; i >= 0; i++ { + _, err = f.Read(vectorBytes) + if errors.Is(err, io.EOF) { + break + } else if err != nil { + panic(err) + } + if int32FromBytes(vectorBytes[0:bytesPerF]) != vectorLengthFloat { + panic("Each vector must have 128 entries.") + } + vectorFloat := []float32{} + for j := 0; j < vectorLengthFloat; j++ { + start := (j + 1) * bytesPerF // first bytesPerF are length of vector + vectorFloat = append(vectorFloat, float32FromBytes(vectorBytes[start:start+bytesPerF])) + } + objects[i] = vectorFloat + + if i >= maxObjects-1 { + break + } + } + + return objects +} + +func ReadSiftVecsFrom(path string, size int, dimensions int) [][]float32 { + fmt.Printf("generating %d vectors...", size) + vectors := readSiftFloat(path, size, dimensions) + fmt.Printf(" done\n") + return vectors +} + +func RandomVecs(size int, queriesSize int, dimensions int) ([][]float32, [][]float32) { + fmt.Printf("generating %d vectors...\n", size+queriesSize) + r := getRandomSeed() + vectors := make([][]float32, 0, size) + queries := make([][]float32, 0, queriesSize) + for i := 0; i < size; i++ { + vectors = append(vectors, genVector(r, dimensions)) + } + for i := 0; i < queriesSize; i++ { + queries = append(queries, genVector(r, dimensions)) + } + return vectors, queries +} + +func RandomVecsFixedSeed(size int, queriesSize int, dimensions int) ([][]float32, [][]float32) { + fmt.Printf("generating %d vectors...\n", size+queriesSize) + r := getFixedSeed() + vectors := make([][]float32, 0, size) + queries := 
make([][]float32, 0, queriesSize) + for i := 0; i < size; i++ { + vectors = append(vectors, genVector(r, dimensions)) + } + for i := 0; i < queriesSize; i++ { + queries = append(queries, genVector(r, dimensions)) + } + return vectors, queries +} + +func genVector(r *rand.Rand, dimensions int) []float32 { + vector := make([]float32, 0, dimensions) + for i := 0; i < dimensions; i++ { + // Some distances like dot could produce negative values when the vectors have negative values + // This change will not affect anything when using a distance like l2, but will cover some bugs + // when using distances like dot + vector = append(vector, r.Float32()*2-1) + } + return vector +} + +func Normalize(vectors [][]float32) { + for i := range vectors { + vectors[i] = distancer.Normalize(vectors[i]) + } +} + +func ReadVecs(size int, queriesSize int, dimensions int, db string, path ...string) ([][]float32, [][]float32) { + fmt.Printf("generating %d vectors...", size+queriesSize) + uri := db + if len(path) > 0 { + uri = fmt.Sprintf("%s/%s", path[0], uri) + } + vectors := readSiftFloat(fmt.Sprintf("%s/%s_base.fvecs", uri, db), size, dimensions) + queries := readSiftFloat(fmt.Sprintf("%s/%s_query.fvecs", uri, db), queriesSize, dimensions) + fmt.Printf(" done\n") + return vectors, queries +} + +func ReadQueries(queriesSize int) [][]float32 { + fmt.Printf("generating %d vectors...", queriesSize) + queries := readSiftFloat("sift/sift_query.fvecs", queriesSize, 128) + fmt.Printf(" done\n") + return queries +} + +// nil vectors are ignored, this allows for deleting vectors or supplying +// sparse sets where not every id has a vec +func BruteForce(logger logrus.FieldLogger, vectors [][]float32, query []float32, k int, distance DistanceFunction) ([]uint64, []float32) { + type distanceAndIndex struct { + distance float32 + index uint64 + deleted bool + } + + distances := make([]distanceAndIndex, len(vectors)) + + compressionhelpers.Concurrently(logger, uint64(len(vectors)), func(i uint64) { 
+ if vectors[i] == nil { + distances[i] = distanceAndIndex{deleted: true} + return + } + + dist := distance(query, vectors[i]) + distances[i] = distanceAndIndex{ + index: uint64(i), + distance: dist, + } + }) + + withoutDeletes := make([]distanceAndIndex, 0, len(distances)) + for _, d := range distances { + if !d.deleted { + withoutDeletes = append(withoutDeletes, d) + } + } + distances = withoutDeletes + + sort.Slice(distances, func(a, b int) bool { + return distances[a].distance < distances[b].distance + }) + + if len(distances) < k { + k = len(distances) + } + + out := make([]uint64, k) + dists := make([]float32, k) + for i := 0; i < k; i++ { + out[i] = distances[i].index + dists[i] = distances[i].distance + } + + return out, dists +} + +func BuildTruths(logger logrus.FieldLogger, queriesSize int, vectorsSize int, queries [][]float32, vectors [][]float32, k int, distance DistanceFunction, path ...string) [][]uint64 { + uri := "sift/sift_truths%d.%d.gob" + if len(path) > 0 { + uri = fmt.Sprintf("%s/%s", path[0], uri) + } + fileName := fmt.Sprintf(uri, k, vectorsSize) + truths := make([][]uint64, queriesSize) + + if _, err := os.Stat(fileName); err == nil { + return loadTruths(fileName, queriesSize, k) + } + + compressionhelpers.Concurrently(logger, uint64(len(queries)), func(i uint64) { + truths[i], _ = BruteForce(logger, vectors, queries[i], k, distance) + }) + + f, err := os.Create(fileName) + if err != nil { + panic(errors.Wrap(err, "Could not open file")) + } + + defer f.Close() + enc := gob.NewEncoder(f) + err = enc.Encode(truths) + if err != nil { + panic(errors.Wrap(err, "Could not encode truths")) + } + return truths +} + +func loadTruths(fileName string, queriesSize int, k int) [][]uint64 { + f, err := os.Open(fileName) + if err != nil { + panic(errors.Wrap(err, "Could not open truths file")) + } + defer f.Close() + + truths := make([][]uint64, queriesSize) + cDec := gob.NewDecoder(f) + err = cDec.Decode(&truths) + if err != nil { + 
panic(errors.Wrap(err, "Could not decode truths")) + } + return truths +} + +func MatchesInLists(control []uint64, results []uint64) uint64 { + desired := map[uint64]struct{}{} + for _, relevant := range control { + desired[relevant] = struct{}{} + } + + var matches uint64 + for _, candidate := range results { + _, ok := desired[candidate] + if ok { + matches++ + } + } + + return matches +} + +func NewDummyStore(t testing.TB) *lsmkv.Store { + logger, _ := test.NewNullLogger() + storeDir := t.TempDir() + store, err := lsmkv.New(storeDir, storeDir, logger, nil, + cyclemanager.NewCallbackGroupNoop(), + cyclemanager.NewCallbackGroupNoop(), + cyclemanager.NewCallbackGroupNoop()) + require.Nil(t, err) + return store +} + +type VectorIndex interface { + SearchByVector(ctx context.Context, vector []float32, k int, allow helpers.AllowList) ([]uint64, []float32, error) +} + +func RecallAndLatency(ctx context.Context, queries [][]float32, k int, index VectorIndex, truths [][]uint64) (float32, float32) { + var relevant uint64 + retrieved := k * len(queries) + + var querying time.Duration = 0 + mutex := &sync.Mutex{} + logger, _ := test.NewNullLogger() + compressionhelpers.Concurrently(logger, uint64(len(queries)), func(i uint64) { + before := time.Now() + results, _, _ := index.SearchByVector(ctx, queries[i], k, nil) + ellapsed := time.Since(before) + hits := MatchesInLists(truths[i], results) + mutex.Lock() + querying += ellapsed + relevant += hits + mutex.Unlock() + }) + + recall := float32(relevant) / float32(retrieved) + latency := float32(querying.Microseconds()) / float32(len(queries)) + return recall, latency +} + +func DistanceWrapper(provider distancer.Provider) func(x, y []float32) float32 { + return func(x, y []float32) float32 { + dist, _ := provider.SingleDist(x, y) + return dist + } +} diff --git a/platform/dbops/binaries/weaviate-src/client/authz/add_permissions_parameters.go b/platform/dbops/binaries/weaviate-src/client/authz/add_permissions_parameters.go new 
file mode 100644 index 0000000000000000000000000000000000000000..faf975644e572239a66ba97f1013fed58cf060dd --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/authz/add_permissions_parameters.go @@ -0,0 +1,179 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewAddPermissionsParams creates a new AddPermissionsParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewAddPermissionsParams() *AddPermissionsParams { + return &AddPermissionsParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewAddPermissionsParamsWithTimeout creates a new AddPermissionsParams object +// with the ability to set a timeout on a request. +func NewAddPermissionsParamsWithTimeout(timeout time.Duration) *AddPermissionsParams { + return &AddPermissionsParams{ + timeout: timeout, + } +} + +// NewAddPermissionsParamsWithContext creates a new AddPermissionsParams object +// with the ability to set a context for a request. 
+func NewAddPermissionsParamsWithContext(ctx context.Context) *AddPermissionsParams { + return &AddPermissionsParams{ + Context: ctx, + } +} + +// NewAddPermissionsParamsWithHTTPClient creates a new AddPermissionsParams object +// with the ability to set a custom HTTPClient for a request. +func NewAddPermissionsParamsWithHTTPClient(client *http.Client) *AddPermissionsParams { + return &AddPermissionsParams{ + HTTPClient: client, + } +} + +/* +AddPermissionsParams contains all the parameters to send to the API endpoint + + for the add permissions operation. + + Typically these are written to a http.Request. +*/ +type AddPermissionsParams struct { + + // Body. + Body AddPermissionsBody + + /* ID. + + role name + */ + ID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the add permissions params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *AddPermissionsParams) WithDefaults() *AddPermissionsParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the add permissions params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *AddPermissionsParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the add permissions params +func (o *AddPermissionsParams) WithTimeout(timeout time.Duration) *AddPermissionsParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the add permissions params +func (o *AddPermissionsParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the add permissions params +func (o *AddPermissionsParams) WithContext(ctx context.Context) *AddPermissionsParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the add permissions params +func (o *AddPermissionsParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the add permissions params +func (o *AddPermissionsParams) WithHTTPClient(client *http.Client) *AddPermissionsParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the add permissions params +func (o *AddPermissionsParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the add permissions params +func (o *AddPermissionsParams) WithBody(body AddPermissionsBody) *AddPermissionsParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the add permissions params +func (o *AddPermissionsParams) SetBody(body AddPermissionsBody) { + o.Body = body +} + +// WithID adds the id to the add permissions params +func (o *AddPermissionsParams) WithID(id string) *AddPermissionsParams { + o.SetID(id) + return o +} + +// SetID adds the id to the add permissions params +func (o *AddPermissionsParams) SetID(id string) { + o.ID = id +} + +// WriteToRequest writes these params to a swagger request +func (o *AddPermissionsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var 
res []error + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + + // path param id + if err := r.SetPathParam("id", o.ID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/authz/add_permissions_responses.go b/platform/dbops/binaries/weaviate-src/client/authz/add_permissions_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..cc59a19a50156dae690c6e17f5e4becda777c921 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/authz/add_permissions_responses.go @@ -0,0 +1,631 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "fmt" + "io" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" + + "github.com/weaviate/weaviate/entities/models" +) + +// AddPermissionsReader is a Reader for the AddPermissions structure. +type AddPermissionsReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *AddPermissionsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewAddPermissionsOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewAddPermissionsBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 401: + result := NewAddPermissionsUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewAddPermissionsForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 404: + result := NewAddPermissionsNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewAddPermissionsUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewAddPermissionsInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewAddPermissionsOK creates a AddPermissionsOK with default headers values +func NewAddPermissionsOK() *AddPermissionsOK { + return &AddPermissionsOK{} +} + +/* +AddPermissionsOK describes a response with status code 200, with default header values. 
+ +Permissions added successfully +*/ +type AddPermissionsOK struct { +} + +// IsSuccess returns true when this add permissions o k response has a 2xx status code +func (o *AddPermissionsOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this add permissions o k response has a 3xx status code +func (o *AddPermissionsOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this add permissions o k response has a 4xx status code +func (o *AddPermissionsOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this add permissions o k response has a 5xx status code +func (o *AddPermissionsOK) IsServerError() bool { + return false +} + +// IsCode returns true when this add permissions o k response a status code equal to that given +func (o *AddPermissionsOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the add permissions o k response +func (o *AddPermissionsOK) Code() int { + return 200 +} + +func (o *AddPermissionsOK) Error() string { + return fmt.Sprintf("[POST /authz/roles/{id}/add-permissions][%d] addPermissionsOK ", 200) +} + +func (o *AddPermissionsOK) String() string { + return fmt.Sprintf("[POST /authz/roles/{id}/add-permissions][%d] addPermissionsOK ", 200) +} + +func (o *AddPermissionsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewAddPermissionsBadRequest creates a AddPermissionsBadRequest with default headers values +func NewAddPermissionsBadRequest() *AddPermissionsBadRequest { + return &AddPermissionsBadRequest{} +} + +/* +AddPermissionsBadRequest describes a response with status code 400, with default header values. + +Malformed request. 
+*/ +type AddPermissionsBadRequest struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this add permissions bad request response has a 2xx status code +func (o *AddPermissionsBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this add permissions bad request response has a 3xx status code +func (o *AddPermissionsBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this add permissions bad request response has a 4xx status code +func (o *AddPermissionsBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this add permissions bad request response has a 5xx status code +func (o *AddPermissionsBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this add permissions bad request response a status code equal to that given +func (o *AddPermissionsBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the add permissions bad request response +func (o *AddPermissionsBadRequest) Code() int { + return 400 +} + +func (o *AddPermissionsBadRequest) Error() string { + return fmt.Sprintf("[POST /authz/roles/{id}/add-permissions][%d] addPermissionsBadRequest %+v", 400, o.Payload) +} + +func (o *AddPermissionsBadRequest) String() string { + return fmt.Sprintf("[POST /authz/roles/{id}/add-permissions][%d] addPermissionsBadRequest %+v", 400, o.Payload) +} + +func (o *AddPermissionsBadRequest) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *AddPermissionsBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewAddPermissionsUnauthorized creates a AddPermissionsUnauthorized with default headers values +func 
NewAddPermissionsUnauthorized() *AddPermissionsUnauthorized { + return &AddPermissionsUnauthorized{} +} + +/* +AddPermissionsUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. +*/ +type AddPermissionsUnauthorized struct { +} + +// IsSuccess returns true when this add permissions unauthorized response has a 2xx status code +func (o *AddPermissionsUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this add permissions unauthorized response has a 3xx status code +func (o *AddPermissionsUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this add permissions unauthorized response has a 4xx status code +func (o *AddPermissionsUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this add permissions unauthorized response has a 5xx status code +func (o *AddPermissionsUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this add permissions unauthorized response a status code equal to that given +func (o *AddPermissionsUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the add permissions unauthorized response +func (o *AddPermissionsUnauthorized) Code() int { + return 401 +} + +func (o *AddPermissionsUnauthorized) Error() string { + return fmt.Sprintf("[POST /authz/roles/{id}/add-permissions][%d] addPermissionsUnauthorized ", 401) +} + +func (o *AddPermissionsUnauthorized) String() string { + return fmt.Sprintf("[POST /authz/roles/{id}/add-permissions][%d] addPermissionsUnauthorized ", 401) +} + +func (o *AddPermissionsUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewAddPermissionsForbidden creates a AddPermissionsForbidden with default headers values +func NewAddPermissionsForbidden() *AddPermissionsForbidden { + 
return &AddPermissionsForbidden{} +} + +/* +AddPermissionsForbidden describes a response with status code 403, with default header values. + +Forbidden +*/ +type AddPermissionsForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this add permissions forbidden response has a 2xx status code +func (o *AddPermissionsForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this add permissions forbidden response has a 3xx status code +func (o *AddPermissionsForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this add permissions forbidden response has a 4xx status code +func (o *AddPermissionsForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this add permissions forbidden response has a 5xx status code +func (o *AddPermissionsForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this add permissions forbidden response a status code equal to that given +func (o *AddPermissionsForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the add permissions forbidden response +func (o *AddPermissionsForbidden) Code() int { + return 403 +} + +func (o *AddPermissionsForbidden) Error() string { + return fmt.Sprintf("[POST /authz/roles/{id}/add-permissions][%d] addPermissionsForbidden %+v", 403, o.Payload) +} + +func (o *AddPermissionsForbidden) String() string { + return fmt.Sprintf("[POST /authz/roles/{id}/add-permissions][%d] addPermissionsForbidden %+v", 403, o.Payload) +} + +func (o *AddPermissionsForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *AddPermissionsForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + 
+ return nil +} + +// NewAddPermissionsNotFound creates a AddPermissionsNotFound with default headers values +func NewAddPermissionsNotFound() *AddPermissionsNotFound { + return &AddPermissionsNotFound{} +} + +/* +AddPermissionsNotFound describes a response with status code 404, with default header values. + +no role found +*/ +type AddPermissionsNotFound struct { +} + +// IsSuccess returns true when this add permissions not found response has a 2xx status code +func (o *AddPermissionsNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this add permissions not found response has a 3xx status code +func (o *AddPermissionsNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this add permissions not found response has a 4xx status code +func (o *AddPermissionsNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this add permissions not found response has a 5xx status code +func (o *AddPermissionsNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this add permissions not found response a status code equal to that given +func (o *AddPermissionsNotFound) IsCode(code int) bool { + return code == 404 +} + +// Code gets the status code for the add permissions not found response +func (o *AddPermissionsNotFound) Code() int { + return 404 +} + +func (o *AddPermissionsNotFound) Error() string { + return fmt.Sprintf("[POST /authz/roles/{id}/add-permissions][%d] addPermissionsNotFound ", 404) +} + +func (o *AddPermissionsNotFound) String() string { + return fmt.Sprintf("[POST /authz/roles/{id}/add-permissions][%d] addPermissionsNotFound ", 404) +} + +func (o *AddPermissionsNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewAddPermissionsUnprocessableEntity creates a AddPermissionsUnprocessableEntity with default headers values +func 
NewAddPermissionsUnprocessableEntity() *AddPermissionsUnprocessableEntity { + return &AddPermissionsUnprocessableEntity{} +} + +/* +AddPermissionsUnprocessableEntity describes a response with status code 422, with default header values. + +Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file? +*/ +type AddPermissionsUnprocessableEntity struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this add permissions unprocessable entity response has a 2xx status code +func (o *AddPermissionsUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this add permissions unprocessable entity response has a 3xx status code +func (o *AddPermissionsUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this add permissions unprocessable entity response has a 4xx status code +func (o *AddPermissionsUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this add permissions unprocessable entity response has a 5xx status code +func (o *AddPermissionsUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this add permissions unprocessable entity response a status code equal to that given +func (o *AddPermissionsUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + +// Code gets the status code for the add permissions unprocessable entity response +func (o *AddPermissionsUnprocessableEntity) Code() int { + return 422 +} + +func (o *AddPermissionsUnprocessableEntity) Error() string { + return fmt.Sprintf("[POST /authz/roles/{id}/add-permissions][%d] addPermissionsUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *AddPermissionsUnprocessableEntity) String() string { + return fmt.Sprintf("[POST /authz/roles/{id}/add-permissions][%d] addPermissionsUnprocessableEntity %+v", 422, o.Payload) +} 
+ +func (o *AddPermissionsUnprocessableEntity) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *AddPermissionsUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewAddPermissionsInternalServerError creates a AddPermissionsInternalServerError with default headers values +func NewAddPermissionsInternalServerError() *AddPermissionsInternalServerError { + return &AddPermissionsInternalServerError{} +} + +/* +AddPermissionsInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. +*/ +type AddPermissionsInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this add permissions internal server error response has a 2xx status code +func (o *AddPermissionsInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this add permissions internal server error response has a 3xx status code +func (o *AddPermissionsInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this add permissions internal server error response has a 4xx status code +func (o *AddPermissionsInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this add permissions internal server error response has a 5xx status code +func (o *AddPermissionsInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this add permissions internal server error response a status code equal to that given +func (o *AddPermissionsInternalServerError) IsCode(code int) bool { + 
return code == 500 +} + +// Code gets the status code for the add permissions internal server error response +func (o *AddPermissionsInternalServerError) Code() int { + return 500 +} + +func (o *AddPermissionsInternalServerError) Error() string { + return fmt.Sprintf("[POST /authz/roles/{id}/add-permissions][%d] addPermissionsInternalServerError %+v", 500, o.Payload) +} + +func (o *AddPermissionsInternalServerError) String() string { + return fmt.Sprintf("[POST /authz/roles/{id}/add-permissions][%d] addPermissionsInternalServerError %+v", 500, o.Payload) +} + +func (o *AddPermissionsInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *AddPermissionsInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +/* +AddPermissionsBody add permissions body +swagger:model AddPermissionsBody +*/ +type AddPermissionsBody struct { + + // permissions to be added to the role + // Required: true + Permissions []*models.Permission `json:"permissions"` +} + +// Validate validates this add permissions body +func (o *AddPermissionsBody) Validate(formats strfmt.Registry) error { + var res []error + + if err := o.validatePermissions(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (o *AddPermissionsBody) validatePermissions(formats strfmt.Registry) error { + + if err := validate.Required("body"+"."+"permissions", "body", o.Permissions); err != nil { + return err + } + + for i := 0; i < len(o.Permissions); i++ { + if swag.IsZero(o.Permissions[i]) { // not required + continue + } + + if o.Permissions[i] != nil { + if err := o.Permissions[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("body" + "." + "permissions" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("body" + "." + "permissions" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// ContextValidate validate this add permissions body based on the context it is used +func (o *AddPermissionsBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := o.contextValidatePermissions(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (o *AddPermissionsBody) contextValidatePermissions(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(o.Permissions); i++ { + + if o.Permissions[i] != nil { + if err := o.Permissions[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("body" + "." + "permissions" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("body" + "." + "permissions" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (o *AddPermissionsBody) MarshalBinary() ([]byte, error) { + if o == nil { + return nil, nil + } + return swag.WriteJSON(o) +} + +// UnmarshalBinary interface implementation +func (o *AddPermissionsBody) UnmarshalBinary(b []byte) error { + var res AddPermissionsBody + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *o = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/authz/assign_role_to_group_parameters.go b/platform/dbops/binaries/weaviate-src/client/authz/assign_role_to_group_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..192164731f9d1378f6cd30b1b6863cf4b9f2efb3 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/authz/assign_role_to_group_parameters.go @@ -0,0 +1,179 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewAssignRoleToGroupParams creates a new AssignRoleToGroupParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. 
+func NewAssignRoleToGroupParams() *AssignRoleToGroupParams { + return &AssignRoleToGroupParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewAssignRoleToGroupParamsWithTimeout creates a new AssignRoleToGroupParams object +// with the ability to set a timeout on a request. +func NewAssignRoleToGroupParamsWithTimeout(timeout time.Duration) *AssignRoleToGroupParams { + return &AssignRoleToGroupParams{ + timeout: timeout, + } +} + +// NewAssignRoleToGroupParamsWithContext creates a new AssignRoleToGroupParams object +// with the ability to set a context for a request. +func NewAssignRoleToGroupParamsWithContext(ctx context.Context) *AssignRoleToGroupParams { + return &AssignRoleToGroupParams{ + Context: ctx, + } +} + +// NewAssignRoleToGroupParamsWithHTTPClient creates a new AssignRoleToGroupParams object +// with the ability to set a custom HTTPClient for a request. +func NewAssignRoleToGroupParamsWithHTTPClient(client *http.Client) *AssignRoleToGroupParams { + return &AssignRoleToGroupParams{ + HTTPClient: client, + } +} + +/* +AssignRoleToGroupParams contains all the parameters to send to the API endpoint + + for the assign role to group operation. + + Typically these are written to a http.Request. +*/ +type AssignRoleToGroupParams struct { + + // Body. + Body AssignRoleToGroupBody + + /* ID. + + group name + */ + ID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the assign role to group params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *AssignRoleToGroupParams) WithDefaults() *AssignRoleToGroupParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the assign role to group params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *AssignRoleToGroupParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the assign role to group params +func (o *AssignRoleToGroupParams) WithTimeout(timeout time.Duration) *AssignRoleToGroupParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the assign role to group params +func (o *AssignRoleToGroupParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the assign role to group params +func (o *AssignRoleToGroupParams) WithContext(ctx context.Context) *AssignRoleToGroupParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the assign role to group params +func (o *AssignRoleToGroupParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the assign role to group params +func (o *AssignRoleToGroupParams) WithHTTPClient(client *http.Client) *AssignRoleToGroupParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the assign role to group params +func (o *AssignRoleToGroupParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the assign role to group params +func (o *AssignRoleToGroupParams) WithBody(body AssignRoleToGroupBody) *AssignRoleToGroupParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the assign role to group params +func (o *AssignRoleToGroupParams) SetBody(body AssignRoleToGroupBody) { + o.Body = body +} + +// WithID adds the id to the assign role to group params +func (o *AssignRoleToGroupParams) WithID(id string) *AssignRoleToGroupParams { + o.SetID(id) + return o +} + +// SetID adds the id to the assign role to group params +func (o *AssignRoleToGroupParams) SetID(id string) { + o.ID = id +} + +// WriteToRequest writes these params to a swagger request +func (o *AssignRoleToGroupParams) WriteToRequest(r 
runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + + // path param id + if err := r.SetPathParam("id", o.ID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/authz/assign_role_to_group_responses.go b/platform/dbops/binaries/weaviate-src/client/authz/assign_role_to_group_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..f9c315356f2bf275bd324fab00b39fc1d68858bb --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/authz/assign_role_to_group_responses.go @@ -0,0 +1,541 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "fmt" + "io" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + + "github.com/weaviate/weaviate/entities/models" +) + +// AssignRoleToGroupReader is a Reader for the AssignRoleToGroup structure. +type AssignRoleToGroupReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *AssignRoleToGroupReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewAssignRoleToGroupOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewAssignRoleToGroupBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 401: + result := NewAssignRoleToGroupUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewAssignRoleToGroupForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 404: + result := NewAssignRoleToGroupNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewAssignRoleToGroupInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewAssignRoleToGroupOK creates a AssignRoleToGroupOK with default headers values +func NewAssignRoleToGroupOK() *AssignRoleToGroupOK { + return &AssignRoleToGroupOK{} +} + +/* +AssignRoleToGroupOK describes a response with status code 200, with default header values. 
+ +Role assigned successfully +*/ +type AssignRoleToGroupOK struct { +} + +// IsSuccess returns true when this assign role to group o k response has a 2xx status code +func (o *AssignRoleToGroupOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this assign role to group o k response has a 3xx status code +func (o *AssignRoleToGroupOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this assign role to group o k response has a 4xx status code +func (o *AssignRoleToGroupOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this assign role to group o k response has a 5xx status code +func (o *AssignRoleToGroupOK) IsServerError() bool { + return false +} + +// IsCode returns true when this assign role to group o k response a status code equal to that given +func (o *AssignRoleToGroupOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the assign role to group o k response +func (o *AssignRoleToGroupOK) Code() int { + return 200 +} + +func (o *AssignRoleToGroupOK) Error() string { + return fmt.Sprintf("[POST /authz/groups/{id}/assign][%d] assignRoleToGroupOK ", 200) +} + +func (o *AssignRoleToGroupOK) String() string { + return fmt.Sprintf("[POST /authz/groups/{id}/assign][%d] assignRoleToGroupOK ", 200) +} + +func (o *AssignRoleToGroupOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewAssignRoleToGroupBadRequest creates a AssignRoleToGroupBadRequest with default headers values +func NewAssignRoleToGroupBadRequest() *AssignRoleToGroupBadRequest { + return &AssignRoleToGroupBadRequest{} +} + +/* +AssignRoleToGroupBadRequest describes a response with status code 400, with default header values. 
+ +Bad request +*/ +type AssignRoleToGroupBadRequest struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this assign role to group bad request response has a 2xx status code +func (o *AssignRoleToGroupBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this assign role to group bad request response has a 3xx status code +func (o *AssignRoleToGroupBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this assign role to group bad request response has a 4xx status code +func (o *AssignRoleToGroupBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this assign role to group bad request response has a 5xx status code +func (o *AssignRoleToGroupBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this assign role to group bad request response a status code equal to that given +func (o *AssignRoleToGroupBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the assign role to group bad request response +func (o *AssignRoleToGroupBadRequest) Code() int { + return 400 +} + +func (o *AssignRoleToGroupBadRequest) Error() string { + return fmt.Sprintf("[POST /authz/groups/{id}/assign][%d] assignRoleToGroupBadRequest %+v", 400, o.Payload) +} + +func (o *AssignRoleToGroupBadRequest) String() string { + return fmt.Sprintf("[POST /authz/groups/{id}/assign][%d] assignRoleToGroupBadRequest %+v", 400, o.Payload) +} + +func (o *AssignRoleToGroupBadRequest) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *AssignRoleToGroupBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewAssignRoleToGroupUnauthorized 
creates a AssignRoleToGroupUnauthorized with default headers values +func NewAssignRoleToGroupUnauthorized() *AssignRoleToGroupUnauthorized { + return &AssignRoleToGroupUnauthorized{} +} + +/* +AssignRoleToGroupUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. +*/ +type AssignRoleToGroupUnauthorized struct { +} + +// IsSuccess returns true when this assign role to group unauthorized response has a 2xx status code +func (o *AssignRoleToGroupUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this assign role to group unauthorized response has a 3xx status code +func (o *AssignRoleToGroupUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this assign role to group unauthorized response has a 4xx status code +func (o *AssignRoleToGroupUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this assign role to group unauthorized response has a 5xx status code +func (o *AssignRoleToGroupUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this assign role to group unauthorized response a status code equal to that given +func (o *AssignRoleToGroupUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the assign role to group unauthorized response +func (o *AssignRoleToGroupUnauthorized) Code() int { + return 401 +} + +func (o *AssignRoleToGroupUnauthorized) Error() string { + return fmt.Sprintf("[POST /authz/groups/{id}/assign][%d] assignRoleToGroupUnauthorized ", 401) +} + +func (o *AssignRoleToGroupUnauthorized) String() string { + return fmt.Sprintf("[POST /authz/groups/{id}/assign][%d] assignRoleToGroupUnauthorized ", 401) +} + +func (o *AssignRoleToGroupUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// 
NewAssignRoleToGroupForbidden creates a AssignRoleToGroupForbidden with default headers values +func NewAssignRoleToGroupForbidden() *AssignRoleToGroupForbidden { + return &AssignRoleToGroupForbidden{} +} + +/* +AssignRoleToGroupForbidden describes a response with status code 403, with default header values. + +Forbidden +*/ +type AssignRoleToGroupForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this assign role to group forbidden response has a 2xx status code +func (o *AssignRoleToGroupForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this assign role to group forbidden response has a 3xx status code +func (o *AssignRoleToGroupForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this assign role to group forbidden response has a 4xx status code +func (o *AssignRoleToGroupForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this assign role to group forbidden response has a 5xx status code +func (o *AssignRoleToGroupForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this assign role to group forbidden response a status code equal to that given +func (o *AssignRoleToGroupForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the assign role to group forbidden response +func (o *AssignRoleToGroupForbidden) Code() int { + return 403 +} + +func (o *AssignRoleToGroupForbidden) Error() string { + return fmt.Sprintf("[POST /authz/groups/{id}/assign][%d] assignRoleToGroupForbidden %+v", 403, o.Payload) +} + +func (o *AssignRoleToGroupForbidden) String() string { + return fmt.Sprintf("[POST /authz/groups/{id}/assign][%d] assignRoleToGroupForbidden %+v", 403, o.Payload) +} + +func (o *AssignRoleToGroupForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *AssignRoleToGroupForbidden) readResponse(response runtime.ClientResponse, consumer 
runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewAssignRoleToGroupNotFound creates a AssignRoleToGroupNotFound with default headers values +func NewAssignRoleToGroupNotFound() *AssignRoleToGroupNotFound { + return &AssignRoleToGroupNotFound{} +} + +/* +AssignRoleToGroupNotFound describes a response with status code 404, with default header values. + +role or group is not found. +*/ +type AssignRoleToGroupNotFound struct { +} + +// IsSuccess returns true when this assign role to group not found response has a 2xx status code +func (o *AssignRoleToGroupNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this assign role to group not found response has a 3xx status code +func (o *AssignRoleToGroupNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this assign role to group not found response has a 4xx status code +func (o *AssignRoleToGroupNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this assign role to group not found response has a 5xx status code +func (o *AssignRoleToGroupNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this assign role to group not found response a status code equal to that given +func (o *AssignRoleToGroupNotFound) IsCode(code int) bool { + return code == 404 +} + +// Code gets the status code for the assign role to group not found response +func (o *AssignRoleToGroupNotFound) Code() int { + return 404 +} + +func (o *AssignRoleToGroupNotFound) Error() string { + return fmt.Sprintf("[POST /authz/groups/{id}/assign][%d] assignRoleToGroupNotFound ", 404) +} + +func (o *AssignRoleToGroupNotFound) String() string { + return fmt.Sprintf("[POST /authz/groups/{id}/assign][%d] assignRoleToGroupNotFound ", 404) +} + +func 
(o *AssignRoleToGroupNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewAssignRoleToGroupInternalServerError creates a AssignRoleToGroupInternalServerError with default headers values +func NewAssignRoleToGroupInternalServerError() *AssignRoleToGroupInternalServerError { + return &AssignRoleToGroupInternalServerError{} +} + +/* +AssignRoleToGroupInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. +*/ +type AssignRoleToGroupInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this assign role to group internal server error response has a 2xx status code +func (o *AssignRoleToGroupInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this assign role to group internal server error response has a 3xx status code +func (o *AssignRoleToGroupInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this assign role to group internal server error response has a 4xx status code +func (o *AssignRoleToGroupInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this assign role to group internal server error response has a 5xx status code +func (o *AssignRoleToGroupInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this assign role to group internal server error response a status code equal to that given +func (o *AssignRoleToGroupInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the assign role to group internal server error response +func (o *AssignRoleToGroupInternalServerError) Code() int { + return 500 +} + +func (o 
*AssignRoleToGroupInternalServerError) Error() string { + return fmt.Sprintf("[POST /authz/groups/{id}/assign][%d] assignRoleToGroupInternalServerError %+v", 500, o.Payload) +} + +func (o *AssignRoleToGroupInternalServerError) String() string { + return fmt.Sprintf("[POST /authz/groups/{id}/assign][%d] assignRoleToGroupInternalServerError %+v", 500, o.Payload) +} + +func (o *AssignRoleToGroupInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *AssignRoleToGroupInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +/* +AssignRoleToGroupBody assign role to group body +swagger:model AssignRoleToGroupBody +*/ +type AssignRoleToGroupBody struct { + + // group type + GroupType models.GroupType `json:"groupType,omitempty"` + + // the roles that assigned to group + Roles []string `json:"roles"` +} + +// Validate validates this assign role to group body +func (o *AssignRoleToGroupBody) Validate(formats strfmt.Registry) error { + var res []error + + if err := o.validateGroupType(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (o *AssignRoleToGroupBody) validateGroupType(formats strfmt.Registry) error { + if swag.IsZero(o.GroupType) { // not required + return nil + } + + if err := o.GroupType.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("body" + "." + "groupType") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("body" + "." 
+ "groupType") + } + return err + } + + return nil +} + +// ContextValidate validate this assign role to group body based on the context it is used +func (o *AssignRoleToGroupBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := o.contextValidateGroupType(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (o *AssignRoleToGroupBody) contextValidateGroupType(ctx context.Context, formats strfmt.Registry) error { + + if err := o.GroupType.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("body" + "." + "groupType") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("body" + "." + "groupType") + } + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (o *AssignRoleToGroupBody) MarshalBinary() ([]byte, error) { + if o == nil { + return nil, nil + } + return swag.WriteJSON(o) +} + +// UnmarshalBinary interface implementation +func (o *AssignRoleToGroupBody) UnmarshalBinary(b []byte) error { + var res AssignRoleToGroupBody + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *o = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/authz/assign_role_to_user_parameters.go b/platform/dbops/binaries/weaviate-src/client/authz/assign_role_to_user_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..c9b9152686fab5d48b6f1d2257d37444db9e8e34 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/authz/assign_role_to_user_parameters.go @@ -0,0 +1,179 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewAssignRoleToUserParams creates a new AssignRoleToUserParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewAssignRoleToUserParams() *AssignRoleToUserParams { + return &AssignRoleToUserParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewAssignRoleToUserParamsWithTimeout creates a new AssignRoleToUserParams object +// with the ability to set a timeout on a request. +func NewAssignRoleToUserParamsWithTimeout(timeout time.Duration) *AssignRoleToUserParams { + return &AssignRoleToUserParams{ + timeout: timeout, + } +} + +// NewAssignRoleToUserParamsWithContext creates a new AssignRoleToUserParams object +// with the ability to set a context for a request. +func NewAssignRoleToUserParamsWithContext(ctx context.Context) *AssignRoleToUserParams { + return &AssignRoleToUserParams{ + Context: ctx, + } +} + +// NewAssignRoleToUserParamsWithHTTPClient creates a new AssignRoleToUserParams object +// with the ability to set a custom HTTPClient for a request. +func NewAssignRoleToUserParamsWithHTTPClient(client *http.Client) *AssignRoleToUserParams { + return &AssignRoleToUserParams{ + HTTPClient: client, + } +} + +/* +AssignRoleToUserParams contains all the parameters to send to the API endpoint + + for the assign role to user operation. + + Typically these are written to a http.Request. +*/ +type AssignRoleToUserParams struct { + + // Body. 
+ Body AssignRoleToUserBody + + /* ID. + + user name + */ + ID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the assign role to user params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *AssignRoleToUserParams) WithDefaults() *AssignRoleToUserParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the assign role to user params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *AssignRoleToUserParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the assign role to user params +func (o *AssignRoleToUserParams) WithTimeout(timeout time.Duration) *AssignRoleToUserParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the assign role to user params +func (o *AssignRoleToUserParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the assign role to user params +func (o *AssignRoleToUserParams) WithContext(ctx context.Context) *AssignRoleToUserParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the assign role to user params +func (o *AssignRoleToUserParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the assign role to user params +func (o *AssignRoleToUserParams) WithHTTPClient(client *http.Client) *AssignRoleToUserParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the assign role to user params +func (o *AssignRoleToUserParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the assign role to user params +func (o *AssignRoleToUserParams) WithBody(body AssignRoleToUserBody) *AssignRoleToUserParams { + o.SetBody(body) + return o +} + +// SetBody 
adds the body to the assign role to user params +func (o *AssignRoleToUserParams) SetBody(body AssignRoleToUserBody) { + o.Body = body +} + +// WithID adds the id to the assign role to user params +func (o *AssignRoleToUserParams) WithID(id string) *AssignRoleToUserParams { + o.SetID(id) + return o +} + +// SetID adds the id to the assign role to user params +func (o *AssignRoleToUserParams) SetID(id string) { + o.ID = id +} + +// WriteToRequest writes these params to a swagger request +func (o *AssignRoleToUserParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + + // path param id + if err := r.SetPathParam("id", o.ID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/authz/assign_role_to_user_responses.go b/platform/dbops/binaries/weaviate-src/client/authz/assign_role_to_user_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..5e141568abc6a5bcb57858a9788e2a6fe8f87ac9 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/authz/assign_role_to_user_responses.go @@ -0,0 +1,553 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "fmt" + "io" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + + "github.com/weaviate/weaviate/entities/models" +) + +// AssignRoleToUserReader is a Reader for the AssignRoleToUser structure. +type AssignRoleToUserReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *AssignRoleToUserReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewAssignRoleToUserOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewAssignRoleToUserBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 401: + result := NewAssignRoleToUserUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewAssignRoleToUserForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 404: + result := NewAssignRoleToUserNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewAssignRoleToUserInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewAssignRoleToUserOK creates a AssignRoleToUserOK with default headers 
values +func NewAssignRoleToUserOK() *AssignRoleToUserOK { + return &AssignRoleToUserOK{} +} + +/* +AssignRoleToUserOK describes a response with status code 200, with default header values. + +Role assigned successfully +*/ +type AssignRoleToUserOK struct { +} + +// IsSuccess returns true when this assign role to user o k response has a 2xx status code +func (o *AssignRoleToUserOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this assign role to user o k response has a 3xx status code +func (o *AssignRoleToUserOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this assign role to user o k response has a 4xx status code +func (o *AssignRoleToUserOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this assign role to user o k response has a 5xx status code +func (o *AssignRoleToUserOK) IsServerError() bool { + return false +} + +// IsCode returns true when this assign role to user o k response a status code equal to that given +func (o *AssignRoleToUserOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the assign role to user o k response +func (o *AssignRoleToUserOK) Code() int { + return 200 +} + +func (o *AssignRoleToUserOK) Error() string { + return fmt.Sprintf("[POST /authz/users/{id}/assign][%d] assignRoleToUserOK ", 200) +} + +func (o *AssignRoleToUserOK) String() string { + return fmt.Sprintf("[POST /authz/users/{id}/assign][%d] assignRoleToUserOK ", 200) +} + +func (o *AssignRoleToUserOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewAssignRoleToUserBadRequest creates a AssignRoleToUserBadRequest with default headers values +func NewAssignRoleToUserBadRequest() *AssignRoleToUserBadRequest { + return &AssignRoleToUserBadRequest{} +} + +/* +AssignRoleToUserBadRequest describes a response with status code 400, with default header values. 
+ +Bad request +*/ +type AssignRoleToUserBadRequest struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this assign role to user bad request response has a 2xx status code +func (o *AssignRoleToUserBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this assign role to user bad request response has a 3xx status code +func (o *AssignRoleToUserBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this assign role to user bad request response has a 4xx status code +func (o *AssignRoleToUserBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this assign role to user bad request response has a 5xx status code +func (o *AssignRoleToUserBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this assign role to user bad request response a status code equal to that given +func (o *AssignRoleToUserBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the assign role to user bad request response +func (o *AssignRoleToUserBadRequest) Code() int { + return 400 +} + +func (o *AssignRoleToUserBadRequest) Error() string { + return fmt.Sprintf("[POST /authz/users/{id}/assign][%d] assignRoleToUserBadRequest %+v", 400, o.Payload) +} + +func (o *AssignRoleToUserBadRequest) String() string { + return fmt.Sprintf("[POST /authz/users/{id}/assign][%d] assignRoleToUserBadRequest %+v", 400, o.Payload) +} + +func (o *AssignRoleToUserBadRequest) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *AssignRoleToUserBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewAssignRoleToUserUnauthorized creates a 
AssignRoleToUserUnauthorized with default headers values +func NewAssignRoleToUserUnauthorized() *AssignRoleToUserUnauthorized { + return &AssignRoleToUserUnauthorized{} +} + +/* +AssignRoleToUserUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. +*/ +type AssignRoleToUserUnauthorized struct { +} + +// IsSuccess returns true when this assign role to user unauthorized response has a 2xx status code +func (o *AssignRoleToUserUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this assign role to user unauthorized response has a 3xx status code +func (o *AssignRoleToUserUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this assign role to user unauthorized response has a 4xx status code +func (o *AssignRoleToUserUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this assign role to user unauthorized response has a 5xx status code +func (o *AssignRoleToUserUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this assign role to user unauthorized response a status code equal to that given +func (o *AssignRoleToUserUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the assign role to user unauthorized response +func (o *AssignRoleToUserUnauthorized) Code() int { + return 401 +} + +func (o *AssignRoleToUserUnauthorized) Error() string { + return fmt.Sprintf("[POST /authz/users/{id}/assign][%d] assignRoleToUserUnauthorized ", 401) +} + +func (o *AssignRoleToUserUnauthorized) String() string { + return fmt.Sprintf("[POST /authz/users/{id}/assign][%d] assignRoleToUserUnauthorized ", 401) +} + +func (o *AssignRoleToUserUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewAssignRoleToUserForbidden creates a 
AssignRoleToUserForbidden with default headers values
+func NewAssignRoleToUserForbidden() *AssignRoleToUserForbidden {
+	return &AssignRoleToUserForbidden{}
+}
+
+/*
+AssignRoleToUserForbidden describes a response with status code 403, with default header values.
+
+Forbidden
+*/
+type AssignRoleToUserForbidden struct {
+	// Payload holds the decoded error body for the 403 response (populated by readResponse).
+	Payload *models.ErrorResponse
+}
+
+// IsSuccess returns true when this assign role to user forbidden response has a 2xx status code
+func (o *AssignRoleToUserForbidden) IsSuccess() bool {
+	return false
+}
+
+// IsRedirect returns true when this assign role to user forbidden response has a 3xx status code
+func (o *AssignRoleToUserForbidden) IsRedirect() bool {
+	return false
+}
+
+// IsClientError returns true when this assign role to user forbidden response has a 4xx status code
+func (o *AssignRoleToUserForbidden) IsClientError() bool {
+	return true
+}
+
+// IsServerError returns true when this assign role to user forbidden response has a 5xx status code
+func (o *AssignRoleToUserForbidden) IsServerError() bool {
+	return false
+}
+
+// IsCode returns true when this assign role to user forbidden response a status code equal to that given
+func (o *AssignRoleToUserForbidden) IsCode(code int) bool {
+	return code == 403
+}
+
+// Code gets the status code for the assign role to user forbidden response
+func (o *AssignRoleToUserForbidden) Code() int {
+	return 403
+}
+
+func (o *AssignRoleToUserForbidden) Error() string {
+	return fmt.Sprintf("[POST /authz/users/{id}/assign][%d] assignRoleToUserForbidden %+v", 403, o.Payload)
+}
+
+func (o *AssignRoleToUserForbidden) String() string {
+	return fmt.Sprintf("[POST /authz/users/{id}/assign][%d] assignRoleToUserForbidden %+v", 403, o.Payload)
+}
+
+// GetPayload returns the decoded error payload; nil until readResponse has run.
+func (o *AssignRoleToUserForbidden) GetPayload() *models.ErrorResponse {
+	return o.Payload
+}
+
+// readResponse decodes the error body into Payload; io.EOF is tolerated for empty bodies.
+func (o *AssignRoleToUserForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	o.Payload = 
new(models.ErrorResponse)
+
+	// response payload
+	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+// NewAssignRoleToUserNotFound creates a AssignRoleToUserNotFound with default headers values
+func NewAssignRoleToUserNotFound() *AssignRoleToUserNotFound {
+	return &AssignRoleToUserNotFound{}
+}
+
+/*
+AssignRoleToUserNotFound describes a response with status code 404, with default header values.
+
+role or user is not found.
+*/
+type AssignRoleToUserNotFound struct {
+	// Payload holds the decoded error body for the 404 response (populated by readResponse).
+	Payload *models.ErrorResponse
+}
+
+// IsSuccess returns true when this assign role to user not found response has a 2xx status code
+func (o *AssignRoleToUserNotFound) IsSuccess() bool {
+	return false
+}
+
+// IsRedirect returns true when this assign role to user not found response has a 3xx status code
+func (o *AssignRoleToUserNotFound) IsRedirect() bool {
+	return false
+}
+
+// IsClientError returns true when this assign role to user not found response has a 4xx status code
+func (o *AssignRoleToUserNotFound) IsClientError() bool {
+	return true
+}
+
+// IsServerError returns true when this assign role to user not found response has a 5xx status code
+func (o *AssignRoleToUserNotFound) IsServerError() bool {
+	return false
+}
+
+// IsCode returns true when this assign role to user not found response a status code equal to that given
+func (o *AssignRoleToUserNotFound) IsCode(code int) bool {
+	return code == 404
+}
+
+// Code gets the status code for the assign role to user not found response
+func (o *AssignRoleToUserNotFound) Code() int {
+	return 404
+}
+
+func (o *AssignRoleToUserNotFound) Error() string {
+	return fmt.Sprintf("[POST /authz/users/{id}/assign][%d] assignRoleToUserNotFound %+v", 404, o.Payload)
+}
+
+func (o *AssignRoleToUserNotFound) String() string {
+	return fmt.Sprintf("[POST /authz/users/{id}/assign][%d] assignRoleToUserNotFound %+v", 404, o.Payload)
+}
+
+func (o *AssignRoleToUserNotFound) 
GetPayload() *models.ErrorResponse {
+	return o.Payload
+}
+
+// readResponse decodes the error body into Payload; io.EOF is tolerated for empty bodies.
+func (o *AssignRoleToUserNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	o.Payload = new(models.ErrorResponse)
+
+	// response payload
+	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+// NewAssignRoleToUserInternalServerError creates a AssignRoleToUserInternalServerError with default headers values
+func NewAssignRoleToUserInternalServerError() *AssignRoleToUserInternalServerError {
+	return &AssignRoleToUserInternalServerError{}
+}
+
+/*
+AssignRoleToUserInternalServerError describes a response with status code 500, with default header values.
+
+An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.
+*/
+type AssignRoleToUserInternalServerError struct {
+	// Payload holds the decoded error body for the 500 response (populated by readResponse).
+	Payload *models.ErrorResponse
+}
+
+// IsSuccess returns true when this assign role to user internal server error response has a 2xx status code
+func (o *AssignRoleToUserInternalServerError) IsSuccess() bool {
+	return false
+}
+
+// IsRedirect returns true when this assign role to user internal server error response has a 3xx status code
+func (o *AssignRoleToUserInternalServerError) IsRedirect() bool {
+	return false
+}
+
+// IsClientError returns true when this assign role to user internal server error response has a 4xx status code
+func (o *AssignRoleToUserInternalServerError) IsClientError() bool {
+	return false
+}
+
+// IsServerError returns true when this assign role to user internal server error response has a 5xx status code
+func (o *AssignRoleToUserInternalServerError) IsServerError() bool {
+	return true
+}
+
+// IsCode returns true when this assign role to user internal server error response a status code equal to that given
+func (o *AssignRoleToUserInternalServerError) IsCode(code int) bool {
+	return code 
== 500
+}
+
+// Code gets the status code for the assign role to user internal server error response
+func (o *AssignRoleToUserInternalServerError) Code() int {
+	return 500
+}
+
+func (o *AssignRoleToUserInternalServerError) Error() string {
+	return fmt.Sprintf("[POST /authz/users/{id}/assign][%d] assignRoleToUserInternalServerError %+v", 500, o.Payload)
+}
+
+func (o *AssignRoleToUserInternalServerError) String() string {
+	return fmt.Sprintf("[POST /authz/users/{id}/assign][%d] assignRoleToUserInternalServerError %+v", 500, o.Payload)
+}
+
+// GetPayload returns the decoded error payload; nil until readResponse has run.
+func (o *AssignRoleToUserInternalServerError) GetPayload() *models.ErrorResponse {
+	return o.Payload
+}
+
+// readResponse decodes the error body into Payload; io.EOF is tolerated for empty bodies.
+func (o *AssignRoleToUserInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	o.Payload = new(models.ErrorResponse)
+
+	// response payload
+	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+/*
+AssignRoleToUserBody assign role to user body
+swagger:model AssignRoleToUserBody
+*/
+type AssignRoleToUserBody struct {
+
+	// the roles that assigned to user
+	Roles []string `json:"roles"`
+
+	// user type
+	UserType models.UserTypeInput `json:"userType,omitempty"`
+}
+
+// Validate validates this assign role to user body
+func (o *AssignRoleToUserBody) Validate(formats strfmt.Registry) error {
+	var res []error
+
+	if err := o.validateUserType(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
+
+// validateUserType validates the optional UserType enum; the zero value is skipped
+// because the field is not required, and errors are namespaced under "body.userType".
+func (o *AssignRoleToUserBody) validateUserType(formats strfmt.Registry) error {
+	if swag.IsZero(o.UserType) { // not required
+		return nil
+	}
+
+	if err := o.UserType.Validate(formats); err != nil {
+		if ve, ok := err.(*errors.Validation); ok {
+			return ve.ValidateName("body" + "." 
+ "userType")
+		} else if ce, ok := err.(*errors.CompositeError); ok {
+			return ce.ValidateName("body" + "." + "userType")
+		}
+		return err
+	}
+
+	return nil
+}
+
+// ContextValidate validate this assign role to user body based on the context it is used
+func (o *AssignRoleToUserBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+	var res []error
+
+	if err := o.contextValidateUserType(ctx, formats); err != nil {
+		res = append(res, err)
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
+
+// contextValidateUserType delegates context-based validation to the UserType enum,
+// namespacing any validation error under "body.userType".
+func (o *AssignRoleToUserBody) contextValidateUserType(ctx context.Context, formats strfmt.Registry) error {
+
+	if err := o.UserType.ContextValidate(ctx, formats); err != nil {
+		if ve, ok := err.(*errors.Validation); ok {
+			return ve.ValidateName("body" + "." + "userType")
+		} else if ce, ok := err.(*errors.CompositeError); ok {
+			return ce.ValidateName("body" + "." + "userType")
+		}
+		return err
+	}
+
+	return nil
+}
+
+// MarshalBinary interface implementation (JSON-encodes the body via swag).
+func (o *AssignRoleToUserBody) MarshalBinary() ([]byte, error) {
+	if o == nil {
+		return nil, nil
+	}
+	return swag.WriteJSON(o)
+}
+
+// UnmarshalBinary interface implementation (JSON-decodes into a fresh value, then replaces *o).
+func (o *AssignRoleToUserBody) UnmarshalBinary(b []byte) error {
+	var res AssignRoleToUserBody
+	if err := swag.ReadJSON(b, &res); err != nil {
+		return err
+	}
+	*o = res
+	return nil
+}
diff --git a/platform/dbops/binaries/weaviate-src/client/authz/authz_client.go b/platform/dbops/binaries/weaviate-src/client/authz/authz_client.go
new file mode 100644
index 0000000000000000000000000000000000000000..d09a78c4dfc8c1d59a882bffa104eb3bc6d28dae
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/client/authz/authz_client.go
@@ -0,0 +1,794 @@
+//                           _       _
+// __      _____  __ ___   ___  __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+//  \ V  V /  __/ (_| |\ V /| | (_| | ||  __/
+//   \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. 
All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" +) + +// New creates a new authz API client. +func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService { + return &Client{transport: transport, formats: formats} +} + +/* +Client for authz API +*/ +type Client struct { + transport runtime.ClientTransport + formats strfmt.Registry +} + +// ClientOption is the option for Client methods +type ClientOption func(*runtime.ClientOperation) + +// ClientService is the interface for Client methods +type ClientService interface { + AddPermissions(params *AddPermissionsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*AddPermissionsOK, error) + + AssignRoleToGroup(params *AssignRoleToGroupParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*AssignRoleToGroupOK, error) + + AssignRoleToUser(params *AssignRoleToUserParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*AssignRoleToUserOK, error) + + CreateRole(params *CreateRoleParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*CreateRoleCreated, error) + + DeleteRole(params *DeleteRoleParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*DeleteRoleNoContent, error) + + GetGroups(params *GetGroupsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetGroupsOK, error) + + GetGroupsForRole(params *GetGroupsForRoleParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetGroupsForRoleOK, error) + + GetRole(params *GetRoleParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetRoleOK, error) + + GetRoles(params *GetRolesParams, authInfo runtime.ClientAuthInfoWriter, opts 
...ClientOption) (*GetRolesOK, error) + + GetRolesForGroup(params *GetRolesForGroupParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetRolesForGroupOK, error) + + GetRolesForUser(params *GetRolesForUserParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetRolesForUserOK, error) + + GetRolesForUserDeprecated(params *GetRolesForUserDeprecatedParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetRolesForUserDeprecatedOK, error) + + GetUsersForRole(params *GetUsersForRoleParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetUsersForRoleOK, error) + + GetUsersForRoleDeprecated(params *GetUsersForRoleDeprecatedParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetUsersForRoleDeprecatedOK, error) + + HasPermission(params *HasPermissionParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*HasPermissionOK, error) + + RemovePermissions(params *RemovePermissionsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*RemovePermissionsOK, error) + + RevokeRoleFromGroup(params *RevokeRoleFromGroupParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*RevokeRoleFromGroupOK, error) + + RevokeRoleFromUser(params *RevokeRoleFromUserParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*RevokeRoleFromUserOK, error) + + SetTransport(transport runtime.ClientTransport) +} + +/* +AddPermissions adds permission to a given role +*/ +func (a *Client) AddPermissions(params *AddPermissionsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*AddPermissionsOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewAddPermissionsParams() + } + op := &runtime.ClientOperation{ + ID: "addPermissions", + Method: "POST", + PathPattern: "/authz/roles/{id}/add-permissions", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", 
"application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &AddPermissionsReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*AddPermissionsOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for addPermissions: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +AssignRoleToGroup assigns a role to a group +*/ +func (a *Client) AssignRoleToGroup(params *AssignRoleToGroupParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*AssignRoleToGroupOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewAssignRoleToGroupParams() + } + op := &runtime.ClientOperation{ + ID: "assignRoleToGroup", + Method: "POST", + PathPattern: "/authz/groups/{id}/assign", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &AssignRoleToGroupReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*AssignRoleToGroupOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for assignRoleToGroup: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +AssignRoleToUser assigns a role to a user +*/ +func (a *Client) AssignRoleToUser(params *AssignRoleToUserParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*AssignRoleToUserOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewAssignRoleToUserParams() + } + op := &runtime.ClientOperation{ + ID: "assignRoleToUser", + Method: "POST", + PathPattern: "/authz/users/{id}/assign", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &AssignRoleToUserReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*AssignRoleToUserOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for assignRoleToUser: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +CreateRole creates new role +*/ +func (a *Client) CreateRole(params *CreateRoleParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*CreateRoleCreated, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCreateRoleParams() + } + op := &runtime.ClientOperation{ + ID: "createRole", + Method: "POST", + PathPattern: "/authz/roles", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &CreateRoleReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*CreateRoleCreated) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for createRole: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +DeleteRole deletes role +*/ +func (a *Client) DeleteRole(params *DeleteRoleParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*DeleteRoleNoContent, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewDeleteRoleParams() + } + op := &runtime.ClientOperation{ + ID: "deleteRole", + Method: "DELETE", + PathPattern: "/authz/roles/{id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &DeleteRoleReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*DeleteRoleNoContent) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for deleteRole: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +GetGroups lists all groups of a specific type + +Retrieves a list of all available group names for a specified group type (`oidc` or `db`). 
+*/ +func (a *Client) GetGroups(params *GetGroupsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetGroupsOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewGetGroupsParams() + } + op := &runtime.ClientOperation{ + ID: "getGroups", + Method: "GET", + PathPattern: "/authz/groups/{groupType}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &GetGroupsReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*GetGroupsOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for getGroups: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +GetGroupsForRole gets groups that have a specific role assigned + +Retrieves a list of all groups that have been assigned a specific role, identified by its name. 
+*/ +func (a *Client) GetGroupsForRole(params *GetGroupsForRoleParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetGroupsForRoleOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewGetGroupsForRoleParams() + } + op := &runtime.ClientOperation{ + ID: "getGroupsForRole", + Method: "GET", + PathPattern: "/authz/roles/{id}/group-assignments", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &GetGroupsForRoleReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*GetGroupsForRoleOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for getGroupsForRole: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +GetRole gets a role +*/ +func (a *Client) GetRole(params *GetRoleParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetRoleOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewGetRoleParams() + } + op := &runtime.ClientOperation{ + ID: "getRole", + Method: "GET", + PathPattern: "/authz/roles/{id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &GetRoleReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*GetRoleOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for getRole: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +GetRoles gets all roles +*/ +func (a *Client) GetRoles(params *GetRolesParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetRolesOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewGetRolesParams() + } + op := &runtime.ClientOperation{ + ID: "getRoles", + Method: "GET", + PathPattern: "/authz/roles", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &GetRolesReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*GetRolesOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for getRoles: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +GetRolesForGroup gets roles assigned to a specific group + +Retrieves a list of all roles assigned to a specific group. The group must be identified by both its name (`id`) and its type (`db` or `oidc`). 
+*/ +func (a *Client) GetRolesForGroup(params *GetRolesForGroupParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetRolesForGroupOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewGetRolesForGroupParams() + } + op := &runtime.ClientOperation{ + ID: "getRolesForGroup", + Method: "GET", + PathPattern: "/authz/groups/{id}/roles/{groupType}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &GetRolesForGroupReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*GetRolesForGroupOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for getRolesForGroup: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +GetRolesForUser gets roles assigned to user +*/ +func (a *Client) GetRolesForUser(params *GetRolesForUserParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetRolesForUserOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewGetRolesForUserParams() + } + op := &runtime.ClientOperation{ + ID: "getRolesForUser", + Method: "GET", + PathPattern: "/authz/users/{id}/roles/{userType}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &GetRolesForUserReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*GetRolesForUserOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for getRolesForUser: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +GetRolesForUserDeprecated gets roles assigned to user d b o ID c deprecated will be removed when 1 29 is not supported anymore +*/ +func (a *Client) GetRolesForUserDeprecated(params *GetRolesForUserDeprecatedParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetRolesForUserDeprecatedOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewGetRolesForUserDeprecatedParams() + } + op := &runtime.ClientOperation{ + ID: "getRolesForUserDeprecated", + Method: "GET", + PathPattern: "/authz/users/{id}/roles", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &GetRolesForUserDeprecatedReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*GetRolesForUserDeprecatedOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for getRolesForUserDeprecated: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +GetUsersForRole gets users assigned to role +*/ +func (a *Client) GetUsersForRole(params *GetUsersForRoleParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetUsersForRoleOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewGetUsersForRoleParams() + } + op := &runtime.ClientOperation{ + ID: "getUsersForRole", + Method: "GET", + PathPattern: "/authz/roles/{id}/user-assignments", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &GetUsersForRoleReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*GetUsersForRoleOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for getUsersForRole: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +GetUsersForRoleDeprecated gets users db o ID c assigned to role deprecated will be removed when 1 29 is not supported anymore +*/ +func (a *Client) GetUsersForRoleDeprecated(params *GetUsersForRoleDeprecatedParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetUsersForRoleDeprecatedOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewGetUsersForRoleDeprecatedParams() + } + op := &runtime.ClientOperation{ + ID: "getUsersForRoleDeprecated", + Method: "GET", + PathPattern: "/authz/roles/{id}/users", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &GetUsersForRoleDeprecatedReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*GetUsersForRoleDeprecatedOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for getUsersForRoleDeprecated: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +HasPermission checks whether role possesses this permission +*/ +func (a *Client) HasPermission(params *HasPermissionParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*HasPermissionOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewHasPermissionParams() + } + op := &runtime.ClientOperation{ + ID: "hasPermission", + Method: "POST", + PathPattern: "/authz/roles/{id}/has-permission", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &HasPermissionReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*HasPermissionOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for hasPermission: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +RemovePermissions removes permissions from a role if this results in an empty role the role will be deleted +*/ +func (a *Client) RemovePermissions(params *RemovePermissionsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*RemovePermissionsOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewRemovePermissionsParams() + } + op := &runtime.ClientOperation{ + ID: "removePermissions", + Method: "POST", + PathPattern: "/authz/roles/{id}/remove-permissions", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &RemovePermissionsReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*RemovePermissionsOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for removePermissions: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +RevokeRoleFromGroup revokes a role from a group +*/ +func (a *Client) RevokeRoleFromGroup(params *RevokeRoleFromGroupParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*RevokeRoleFromGroupOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewRevokeRoleFromGroupParams() + } + op := &runtime.ClientOperation{ + ID: "revokeRoleFromGroup", + Method: "POST", + PathPattern: "/authz/groups/{id}/revoke", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &RevokeRoleFromGroupReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*RevokeRoleFromGroupOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for revokeRoleFromGroup: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +RevokeRoleFromUser revokes a role from a user +*/ +func (a *Client) RevokeRoleFromUser(params *RevokeRoleFromUserParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*RevokeRoleFromUserOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewRevokeRoleFromUserParams() + } + op := &runtime.ClientOperation{ + ID: "revokeRoleFromUser", + Method: "POST", + PathPattern: "/authz/users/{id}/revoke", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &RevokeRoleFromUserReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*RevokeRoleFromUserOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for revokeRoleFromUser: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + panic(msg) +} + +// SetTransport changes the transport on the client +func (a *Client) SetTransport(transport runtime.ClientTransport) { + a.transport = transport +} diff --git a/platform/dbops/binaries/weaviate-src/client/authz/create_role_parameters.go b/platform/dbops/binaries/weaviate-src/client/authz/create_role_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..ba2885838bd89e603b5dcc0fb1ae95c272b31527 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/authz/create_role_parameters.go @@ -0,0 +1,161 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// NewCreateRoleParams creates a new CreateRoleParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewCreateRoleParams() *CreateRoleParams { + return &CreateRoleParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewCreateRoleParamsWithTimeout creates a new CreateRoleParams object +// with the ability to set a timeout on a request. 
+func NewCreateRoleParamsWithTimeout(timeout time.Duration) *CreateRoleParams { + return &CreateRoleParams{ + timeout: timeout, + } +} + +// NewCreateRoleParamsWithContext creates a new CreateRoleParams object +// with the ability to set a context for a request. +func NewCreateRoleParamsWithContext(ctx context.Context) *CreateRoleParams { + return &CreateRoleParams{ + Context: ctx, + } +} + +// NewCreateRoleParamsWithHTTPClient creates a new CreateRoleParams object +// with the ability to set a custom HTTPClient for a request. +func NewCreateRoleParamsWithHTTPClient(client *http.Client) *CreateRoleParams { + return &CreateRoleParams{ + HTTPClient: client, + } +} + +/* +CreateRoleParams contains all the parameters to send to the API endpoint + + for the create role operation. + + Typically these are written to a http.Request. +*/ +type CreateRoleParams struct { + + // Body. + Body *models.Role + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the create role params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *CreateRoleParams) WithDefaults() *CreateRoleParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the create role params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *CreateRoleParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the create role params +func (o *CreateRoleParams) WithTimeout(timeout time.Duration) *CreateRoleParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the create role params +func (o *CreateRoleParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the create role params +func (o *CreateRoleParams) WithContext(ctx context.Context) *CreateRoleParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the create role params +func (o *CreateRoleParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the create role params +func (o *CreateRoleParams) WithHTTPClient(client *http.Client) *CreateRoleParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the create role params +func (o *CreateRoleParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the create role params +func (o *CreateRoleParams) WithBody(body *models.Role) *CreateRoleParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the create role params +func (o *CreateRoleParams) SetBody(body *models.Role) { + o.Body = body +} + +// WriteToRequest writes these params to a swagger request +func (o *CreateRoleParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/authz/create_role_responses.go b/platform/dbops/binaries/weaviate-src/client/authz/create_role_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..a6fb704cc479de04a962e2004728829571e70163 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/authz/create_role_responses.go @@ -0,0 +1,534 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// CreateRoleReader is a Reader for the CreateRole structure. +type CreateRoleReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *CreateRoleReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 201: + result := NewCreateRoleCreated() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewCreateRoleBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 401: + result := NewCreateRoleUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewCreateRoleForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 409: + result := NewCreateRoleConflict() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewCreateRoleUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewCreateRoleInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewCreateRoleCreated creates a CreateRoleCreated with default headers values +func NewCreateRoleCreated() *CreateRoleCreated { + return &CreateRoleCreated{} +} + +/* +CreateRoleCreated describes a response with status code 201, with default header values. 
+ +Role created successfully +*/ +type CreateRoleCreated struct { +} + +// IsSuccess returns true when this create role created response has a 2xx status code +func (o *CreateRoleCreated) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this create role created response has a 3xx status code +func (o *CreateRoleCreated) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create role created response has a 4xx status code +func (o *CreateRoleCreated) IsClientError() bool { + return false +} + +// IsServerError returns true when this create role created response has a 5xx status code +func (o *CreateRoleCreated) IsServerError() bool { + return false +} + +// IsCode returns true when this create role created response a status code equal to that given +func (o *CreateRoleCreated) IsCode(code int) bool { + return code == 201 +} + +// Code gets the status code for the create role created response +func (o *CreateRoleCreated) Code() int { + return 201 +} + +func (o *CreateRoleCreated) Error() string { + return fmt.Sprintf("[POST /authz/roles][%d] createRoleCreated ", 201) +} + +func (o *CreateRoleCreated) String() string { + return fmt.Sprintf("[POST /authz/roles][%d] createRoleCreated ", 201) +} + +func (o *CreateRoleCreated) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewCreateRoleBadRequest creates a CreateRoleBadRequest with default headers values +func NewCreateRoleBadRequest() *CreateRoleBadRequest { + return &CreateRoleBadRequest{} +} + +/* +CreateRoleBadRequest describes a response with status code 400, with default header values. + +Malformed request. 
+*/ +type CreateRoleBadRequest struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this create role bad request response has a 2xx status code +func (o *CreateRoleBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create role bad request response has a 3xx status code +func (o *CreateRoleBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create role bad request response has a 4xx status code +func (o *CreateRoleBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this create role bad request response has a 5xx status code +func (o *CreateRoleBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this create role bad request response a status code equal to that given +func (o *CreateRoleBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the create role bad request response +func (o *CreateRoleBadRequest) Code() int { + return 400 +} + +func (o *CreateRoleBadRequest) Error() string { + return fmt.Sprintf("[POST /authz/roles][%d] createRoleBadRequest %+v", 400, o.Payload) +} + +func (o *CreateRoleBadRequest) String() string { + return fmt.Sprintf("[POST /authz/roles][%d] createRoleBadRequest %+v", 400, o.Payload) +} + +func (o *CreateRoleBadRequest) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *CreateRoleBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCreateRoleUnauthorized creates a CreateRoleUnauthorized with default headers values +func NewCreateRoleUnauthorized() *CreateRoleUnauthorized { + return &CreateRoleUnauthorized{} +} + +/* +CreateRoleUnauthorized 
describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. +*/ +type CreateRoleUnauthorized struct { +} + +// IsSuccess returns true when this create role unauthorized response has a 2xx status code +func (o *CreateRoleUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create role unauthorized response has a 3xx status code +func (o *CreateRoleUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create role unauthorized response has a 4xx status code +func (o *CreateRoleUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this create role unauthorized response has a 5xx status code +func (o *CreateRoleUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this create role unauthorized response a status code equal to that given +func (o *CreateRoleUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the create role unauthorized response +func (o *CreateRoleUnauthorized) Code() int { + return 401 +} + +func (o *CreateRoleUnauthorized) Error() string { + return fmt.Sprintf("[POST /authz/roles][%d] createRoleUnauthorized ", 401) +} + +func (o *CreateRoleUnauthorized) String() string { + return fmt.Sprintf("[POST /authz/roles][%d] createRoleUnauthorized ", 401) +} + +func (o *CreateRoleUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewCreateRoleForbidden creates a CreateRoleForbidden with default headers values +func NewCreateRoleForbidden() *CreateRoleForbidden { + return &CreateRoleForbidden{} +} + +/* +CreateRoleForbidden describes a response with status code 403, with default header values. 
+ +Forbidden +*/ +type CreateRoleForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this create role forbidden response has a 2xx status code +func (o *CreateRoleForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create role forbidden response has a 3xx status code +func (o *CreateRoleForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create role forbidden response has a 4xx status code +func (o *CreateRoleForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this create role forbidden response has a 5xx status code +func (o *CreateRoleForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this create role forbidden response a status code equal to that given +func (o *CreateRoleForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the create role forbidden response +func (o *CreateRoleForbidden) Code() int { + return 403 +} + +func (o *CreateRoleForbidden) Error() string { + return fmt.Sprintf("[POST /authz/roles][%d] createRoleForbidden %+v", 403, o.Payload) +} + +func (o *CreateRoleForbidden) String() string { + return fmt.Sprintf("[POST /authz/roles][%d] createRoleForbidden %+v", 403, o.Payload) +} + +func (o *CreateRoleForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *CreateRoleForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCreateRoleConflict creates a CreateRoleConflict with default headers values +func NewCreateRoleConflict() *CreateRoleConflict { + return &CreateRoleConflict{} +} + +/* +CreateRoleConflict describes a response with status code 409, 
with default header values. + +Role already exists +*/ +type CreateRoleConflict struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this create role conflict response has a 2xx status code +func (o *CreateRoleConflict) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create role conflict response has a 3xx status code +func (o *CreateRoleConflict) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create role conflict response has a 4xx status code +func (o *CreateRoleConflict) IsClientError() bool { + return true +} + +// IsServerError returns true when this create role conflict response has a 5xx status code +func (o *CreateRoleConflict) IsServerError() bool { + return false +} + +// IsCode returns true when this create role conflict response a status code equal to that given +func (o *CreateRoleConflict) IsCode(code int) bool { + return code == 409 +} + +// Code gets the status code for the create role conflict response +func (o *CreateRoleConflict) Code() int { + return 409 +} + +func (o *CreateRoleConflict) Error() string { + return fmt.Sprintf("[POST /authz/roles][%d] createRoleConflict %+v", 409, o.Payload) +} + +func (o *CreateRoleConflict) String() string { + return fmt.Sprintf("[POST /authz/roles][%d] createRoleConflict %+v", 409, o.Payload) +} + +func (o *CreateRoleConflict) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *CreateRoleConflict) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCreateRoleUnprocessableEntity creates a CreateRoleUnprocessableEntity with default headers values +func NewCreateRoleUnprocessableEntity() *CreateRoleUnprocessableEntity { + return &CreateRoleUnprocessableEntity{} 
+} + +/* +CreateRoleUnprocessableEntity describes a response with status code 422, with default header values. + +Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file? +*/ +type CreateRoleUnprocessableEntity struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this create role unprocessable entity response has a 2xx status code +func (o *CreateRoleUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create role unprocessable entity response has a 3xx status code +func (o *CreateRoleUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create role unprocessable entity response has a 4xx status code +func (o *CreateRoleUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this create role unprocessable entity response has a 5xx status code +func (o *CreateRoleUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this create role unprocessable entity response a status code equal to that given +func (o *CreateRoleUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + +// Code gets the status code for the create role unprocessable entity response +func (o *CreateRoleUnprocessableEntity) Code() int { + return 422 +} + +func (o *CreateRoleUnprocessableEntity) Error() string { + return fmt.Sprintf("[POST /authz/roles][%d] createRoleUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *CreateRoleUnprocessableEntity) String() string { + return fmt.Sprintf("[POST /authz/roles][%d] createRoleUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *CreateRoleUnprocessableEntity) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *CreateRoleUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats 
strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCreateRoleInternalServerError creates a CreateRoleInternalServerError with default headers values +func NewCreateRoleInternalServerError() *CreateRoleInternalServerError { + return &CreateRoleInternalServerError{} +} + +/* +CreateRoleInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. +*/ +type CreateRoleInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this create role internal server error response has a 2xx status code +func (o *CreateRoleInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create role internal server error response has a 3xx status code +func (o *CreateRoleInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create role internal server error response has a 4xx status code +func (o *CreateRoleInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this create role internal server error response has a 5xx status code +func (o *CreateRoleInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this create role internal server error response a status code equal to that given +func (o *CreateRoleInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the create role internal server error response +func (o *CreateRoleInternalServerError) Code() int { + return 500 +} + +func (o *CreateRoleInternalServerError) Error() string { + return fmt.Sprintf("[POST /authz/roles][%d] 
createRoleInternalServerError %+v", 500, o.Payload) +} + +func (o *CreateRoleInternalServerError) String() string { + return fmt.Sprintf("[POST /authz/roles][%d] createRoleInternalServerError %+v", 500, o.Payload) +} + +func (o *CreateRoleInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *CreateRoleInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/authz/delete_role_parameters.go b/platform/dbops/binaries/weaviate-src/client/authz/delete_role_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..e43d8b5ed6128c562d67afbe583eea79f1d488df --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/authz/delete_role_parameters.go @@ -0,0 +1,162 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewDeleteRoleParams creates a new DeleteRoleParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. 
+// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewDeleteRoleParams() *DeleteRoleParams { + return &DeleteRoleParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewDeleteRoleParamsWithTimeout creates a new DeleteRoleParams object +// with the ability to set a timeout on a request. +func NewDeleteRoleParamsWithTimeout(timeout time.Duration) *DeleteRoleParams { + return &DeleteRoleParams{ + timeout: timeout, + } +} + +// NewDeleteRoleParamsWithContext creates a new DeleteRoleParams object +// with the ability to set a context for a request. +func NewDeleteRoleParamsWithContext(ctx context.Context) *DeleteRoleParams { + return &DeleteRoleParams{ + Context: ctx, + } +} + +// NewDeleteRoleParamsWithHTTPClient creates a new DeleteRoleParams object +// with the ability to set a custom HTTPClient for a request. +func NewDeleteRoleParamsWithHTTPClient(client *http.Client) *DeleteRoleParams { + return &DeleteRoleParams{ + HTTPClient: client, + } +} + +/* +DeleteRoleParams contains all the parameters to send to the API endpoint + + for the delete role operation. + + Typically these are written to a http.Request. +*/ +type DeleteRoleParams struct { + + /* ID. + + role name + */ + ID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the delete role params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *DeleteRoleParams) WithDefaults() *DeleteRoleParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the delete role params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *DeleteRoleParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the delete role params +func (o *DeleteRoleParams) WithTimeout(timeout time.Duration) *DeleteRoleParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the delete role params +func (o *DeleteRoleParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the delete role params +func (o *DeleteRoleParams) WithContext(ctx context.Context) *DeleteRoleParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the delete role params +func (o *DeleteRoleParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the delete role params +func (o *DeleteRoleParams) WithHTTPClient(client *http.Client) *DeleteRoleParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the delete role params +func (o *DeleteRoleParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithID adds the id to the delete role params +func (o *DeleteRoleParams) WithID(id string) *DeleteRoleParams { + o.SetID(id) + return o +} + +// SetID adds the id to the delete role params +func (o *DeleteRoleParams) SetID(id string) { + o.ID = id +} + +// WriteToRequest writes these params to a swagger request +func (o *DeleteRoleParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param id + if err := r.SetPathParam("id", o.ID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/authz/delete_role_responses.go b/platform/dbops/binaries/weaviate-src/client/authz/delete_role_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..8c95692863b70affad29fee59072dade8d68c1bc --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/authz/delete_role_responses.go @@ -0,0 +1,386 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// DeleteRoleReader is a Reader for the DeleteRole structure. +type DeleteRoleReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *DeleteRoleReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 204: + result := NewDeleteRoleNoContent() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewDeleteRoleBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 401: + result := NewDeleteRoleUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewDeleteRoleForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewDeleteRoleInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewDeleteRoleNoContent creates a DeleteRoleNoContent with default headers values +func NewDeleteRoleNoContent() *DeleteRoleNoContent { + return &DeleteRoleNoContent{} +} + +/* +DeleteRoleNoContent describes a response with status code 204, with default header values. + +Successfully deleted. 
+*/ +type DeleteRoleNoContent struct { +} + +// IsSuccess returns true when this delete role no content response has a 2xx status code +func (o *DeleteRoleNoContent) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this delete role no content response has a 3xx status code +func (o *DeleteRoleNoContent) IsRedirect() bool { + return false +} + +// IsClientError returns true when this delete role no content response has a 4xx status code +func (o *DeleteRoleNoContent) IsClientError() bool { + return false +} + +// IsServerError returns true when this delete role no content response has a 5xx status code +func (o *DeleteRoleNoContent) IsServerError() bool { + return false +} + +// IsCode returns true when this delete role no content response a status code equal to that given +func (o *DeleteRoleNoContent) IsCode(code int) bool { + return code == 204 +} + +// Code gets the status code for the delete role no content response +func (o *DeleteRoleNoContent) Code() int { + return 204 +} + +func (o *DeleteRoleNoContent) Error() string { + return fmt.Sprintf("[DELETE /authz/roles/{id}][%d] deleteRoleNoContent ", 204) +} + +func (o *DeleteRoleNoContent) String() string { + return fmt.Sprintf("[DELETE /authz/roles/{id}][%d] deleteRoleNoContent ", 204) +} + +func (o *DeleteRoleNoContent) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewDeleteRoleBadRequest creates a DeleteRoleBadRequest with default headers values +func NewDeleteRoleBadRequest() *DeleteRoleBadRequest { + return &DeleteRoleBadRequest{} +} + +/* +DeleteRoleBadRequest describes a response with status code 400, with default header values. 
+ +Bad request +*/ +type DeleteRoleBadRequest struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this delete role bad request response has a 2xx status code +func (o *DeleteRoleBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this delete role bad request response has a 3xx status code +func (o *DeleteRoleBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this delete role bad request response has a 4xx status code +func (o *DeleteRoleBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this delete role bad request response has a 5xx status code +func (o *DeleteRoleBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this delete role bad request response a status code equal to that given +func (o *DeleteRoleBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the delete role bad request response +func (o *DeleteRoleBadRequest) Code() int { + return 400 +} + +func (o *DeleteRoleBadRequest) Error() string { + return fmt.Sprintf("[DELETE /authz/roles/{id}][%d] deleteRoleBadRequest %+v", 400, o.Payload) +} + +func (o *DeleteRoleBadRequest) String() string { + return fmt.Sprintf("[DELETE /authz/roles/{id}][%d] deleteRoleBadRequest %+v", 400, o.Payload) +} + +func (o *DeleteRoleBadRequest) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *DeleteRoleBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewDeleteRoleUnauthorized creates a DeleteRoleUnauthorized with default headers values +func NewDeleteRoleUnauthorized() *DeleteRoleUnauthorized { + return &DeleteRoleUnauthorized{} +} + +/* 
+DeleteRoleUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. +*/ +type DeleteRoleUnauthorized struct { +} + +// IsSuccess returns true when this delete role unauthorized response has a 2xx status code +func (o *DeleteRoleUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this delete role unauthorized response has a 3xx status code +func (o *DeleteRoleUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this delete role unauthorized response has a 4xx status code +func (o *DeleteRoleUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this delete role unauthorized response has a 5xx status code +func (o *DeleteRoleUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this delete role unauthorized response a status code equal to that given +func (o *DeleteRoleUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the delete role unauthorized response +func (o *DeleteRoleUnauthorized) Code() int { + return 401 +} + +func (o *DeleteRoleUnauthorized) Error() string { + return fmt.Sprintf("[DELETE /authz/roles/{id}][%d] deleteRoleUnauthorized ", 401) +} + +func (o *DeleteRoleUnauthorized) String() string { + return fmt.Sprintf("[DELETE /authz/roles/{id}][%d] deleteRoleUnauthorized ", 401) +} + +func (o *DeleteRoleUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewDeleteRoleForbidden creates a DeleteRoleForbidden with default headers values +func NewDeleteRoleForbidden() *DeleteRoleForbidden { + return &DeleteRoleForbidden{} +} + +/* +DeleteRoleForbidden describes a response with status code 403, with default header values. 
+ +Forbidden +*/ +type DeleteRoleForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this delete role forbidden response has a 2xx status code +func (o *DeleteRoleForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this delete role forbidden response has a 3xx status code +func (o *DeleteRoleForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this delete role forbidden response has a 4xx status code +func (o *DeleteRoleForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this delete role forbidden response has a 5xx status code +func (o *DeleteRoleForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this delete role forbidden response a status code equal to that given +func (o *DeleteRoleForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the delete role forbidden response +func (o *DeleteRoleForbidden) Code() int { + return 403 +} + +func (o *DeleteRoleForbidden) Error() string { + return fmt.Sprintf("[DELETE /authz/roles/{id}][%d] deleteRoleForbidden %+v", 403, o.Payload) +} + +func (o *DeleteRoleForbidden) String() string { + return fmt.Sprintf("[DELETE /authz/roles/{id}][%d] deleteRoleForbidden %+v", 403, o.Payload) +} + +func (o *DeleteRoleForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *DeleteRoleForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewDeleteRoleInternalServerError creates a DeleteRoleInternalServerError with default headers values +func NewDeleteRoleInternalServerError() *DeleteRoleInternalServerError { + return &DeleteRoleInternalServerError{} +} + 
+/* +DeleteRoleInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. +*/ +type DeleteRoleInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this delete role internal server error response has a 2xx status code +func (o *DeleteRoleInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this delete role internal server error response has a 3xx status code +func (o *DeleteRoleInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this delete role internal server error response has a 4xx status code +func (o *DeleteRoleInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this delete role internal server error response has a 5xx status code +func (o *DeleteRoleInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this delete role internal server error response a status code equal to that given +func (o *DeleteRoleInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the delete role internal server error response +func (o *DeleteRoleInternalServerError) Code() int { + return 500 +} + +func (o *DeleteRoleInternalServerError) Error() string { + return fmt.Sprintf("[DELETE /authz/roles/{id}][%d] deleteRoleInternalServerError %+v", 500, o.Payload) +} + +func (o *DeleteRoleInternalServerError) String() string { + return fmt.Sprintf("[DELETE /authz/roles/{id}][%d] deleteRoleInternalServerError %+v", 500, o.Payload) +} + +func (o *DeleteRoleInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *DeleteRoleInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats 
strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/authz/get_groups_for_role_parameters.go b/platform/dbops/binaries/weaviate-src/client/authz/get_groups_for_role_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..96ffec687feddd831a336ee35e07f423c7a6c317 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/authz/get_groups_for_role_parameters.go @@ -0,0 +1,162 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewGetGroupsForRoleParams creates a new GetGroupsForRoleParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewGetGroupsForRoleParams() *GetGroupsForRoleParams { + return &GetGroupsForRoleParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewGetGroupsForRoleParamsWithTimeout creates a new GetGroupsForRoleParams object +// with the ability to set a timeout on a request. 
+func NewGetGroupsForRoleParamsWithTimeout(timeout time.Duration) *GetGroupsForRoleParams { + return &GetGroupsForRoleParams{ + timeout: timeout, + } +} + +// NewGetGroupsForRoleParamsWithContext creates a new GetGroupsForRoleParams object +// with the ability to set a context for a request. +func NewGetGroupsForRoleParamsWithContext(ctx context.Context) *GetGroupsForRoleParams { + return &GetGroupsForRoleParams{ + Context: ctx, + } +} + +// NewGetGroupsForRoleParamsWithHTTPClient creates a new GetGroupsForRoleParams object +// with the ability to set a custom HTTPClient for a request. +func NewGetGroupsForRoleParamsWithHTTPClient(client *http.Client) *GetGroupsForRoleParams { + return &GetGroupsForRoleParams{ + HTTPClient: client, + } +} + +/* +GetGroupsForRoleParams contains all the parameters to send to the API endpoint + + for the get groups for role operation. + + Typically these are written to a http.Request. +*/ +type GetGroupsForRoleParams struct { + + /* ID. + + The unique name of the role. + */ + ID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the get groups for role params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *GetGroupsForRoleParams) WithDefaults() *GetGroupsForRoleParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the get groups for role params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *GetGroupsForRoleParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the get groups for role params +func (o *GetGroupsForRoleParams) WithTimeout(timeout time.Duration) *GetGroupsForRoleParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the get groups for role params +func (o *GetGroupsForRoleParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the get groups for role params +func (o *GetGroupsForRoleParams) WithContext(ctx context.Context) *GetGroupsForRoleParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the get groups for role params +func (o *GetGroupsForRoleParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the get groups for role params +func (o *GetGroupsForRoleParams) WithHTTPClient(client *http.Client) *GetGroupsForRoleParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the get groups for role params +func (o *GetGroupsForRoleParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithID adds the id to the get groups for role params +func (o *GetGroupsForRoleParams) WithID(id string) *GetGroupsForRoleParams { + o.SetID(id) + return o +} + +// SetID adds the id to the get groups for role params +func (o *GetGroupsForRoleParams) SetID(id string) { + o.ID = id +} + +// WriteToRequest writes these params to a swagger request +func (o *GetGroupsForRoleParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param id + if err := r.SetPathParam("id", o.ID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/authz/get_groups_for_role_responses.go b/platform/dbops/binaries/weaviate-src/client/authz/get_groups_for_role_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..64ca0a7d0a6abcf65c43afe628827281c7d64d1d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/authz/get_groups_for_role_responses.go @@ -0,0 +1,562 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "fmt" + "io" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" + + "github.com/weaviate/weaviate/entities/models" +) + +// GetGroupsForRoleReader is a Reader for the GetGroupsForRole structure. +type GetGroupsForRoleReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *GetGroupsForRoleReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewGetGroupsForRoleOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewGetGroupsForRoleBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 401: + result := NewGetGroupsForRoleUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewGetGroupsForRoleForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 404: + result := NewGetGroupsForRoleNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewGetGroupsForRoleInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewGetGroupsForRoleOK creates a GetGroupsForRoleOK with default headers values +func NewGetGroupsForRoleOK() *GetGroupsForRoleOK { + return &GetGroupsForRoleOK{} +} + +/* +GetGroupsForRoleOK describes a response with status code 200, with default header values. + +Successfully retrieved the list of groups that have the role assigned. 
+*/ +type GetGroupsForRoleOK struct { + Payload []*GetGroupsForRoleOKBodyItems0 +} + +// IsSuccess returns true when this get groups for role o k response has a 2xx status code +func (o *GetGroupsForRoleOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this get groups for role o k response has a 3xx status code +func (o *GetGroupsForRoleOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get groups for role o k response has a 4xx status code +func (o *GetGroupsForRoleOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this get groups for role o k response has a 5xx status code +func (o *GetGroupsForRoleOK) IsServerError() bool { + return false +} + +// IsCode returns true when this get groups for role o k response a status code equal to that given +func (o *GetGroupsForRoleOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the get groups for role o k response +func (o *GetGroupsForRoleOK) Code() int { + return 200 +} + +func (o *GetGroupsForRoleOK) Error() string { + return fmt.Sprintf("[GET /authz/roles/{id}/group-assignments][%d] getGroupsForRoleOK %+v", 200, o.Payload) +} + +func (o *GetGroupsForRoleOK) String() string { + return fmt.Sprintf("[GET /authz/roles/{id}/group-assignments][%d] getGroupsForRoleOK %+v", 200, o.Payload) +} + +func (o *GetGroupsForRoleOK) GetPayload() []*GetGroupsForRoleOKBodyItems0 { + return o.Payload +} + +func (o *GetGroupsForRoleOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetGroupsForRoleBadRequest creates a GetGroupsForRoleBadRequest with default headers values +func NewGetGroupsForRoleBadRequest() *GetGroupsForRoleBadRequest { + return &GetGroupsForRoleBadRequest{} +} + +/* 
+GetGroupsForRoleBadRequest describes a response with status code 400, with default header values. + +Bad request +*/ +type GetGroupsForRoleBadRequest struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this get groups for role bad request response has a 2xx status code +func (o *GetGroupsForRoleBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get groups for role bad request response has a 3xx status code +func (o *GetGroupsForRoleBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get groups for role bad request response has a 4xx status code +func (o *GetGroupsForRoleBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this get groups for role bad request response has a 5xx status code +func (o *GetGroupsForRoleBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this get groups for role bad request response a status code equal to that given +func (o *GetGroupsForRoleBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the get groups for role bad request response +func (o *GetGroupsForRoleBadRequest) Code() int { + return 400 +} + +func (o *GetGroupsForRoleBadRequest) Error() string { + return fmt.Sprintf("[GET /authz/roles/{id}/group-assignments][%d] getGroupsForRoleBadRequest %+v", 400, o.Payload) +} + +func (o *GetGroupsForRoleBadRequest) String() string { + return fmt.Sprintf("[GET /authz/roles/{id}/group-assignments][%d] getGroupsForRoleBadRequest %+v", 400, o.Payload) +} + +func (o *GetGroupsForRoleBadRequest) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *GetGroupsForRoleBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil 
&& err != io.EOF { + return err + } + + return nil +} + +// NewGetGroupsForRoleUnauthorized creates a GetGroupsForRoleUnauthorized with default headers values +func NewGetGroupsForRoleUnauthorized() *GetGroupsForRoleUnauthorized { + return &GetGroupsForRoleUnauthorized{} +} + +/* +GetGroupsForRoleUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. +*/ +type GetGroupsForRoleUnauthorized struct { +} + +// IsSuccess returns true when this get groups for role unauthorized response has a 2xx status code +func (o *GetGroupsForRoleUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get groups for role unauthorized response has a 3xx status code +func (o *GetGroupsForRoleUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get groups for role unauthorized response has a 4xx status code +func (o *GetGroupsForRoleUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this get groups for role unauthorized response has a 5xx status code +func (o *GetGroupsForRoleUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this get groups for role unauthorized response a status code equal to that given +func (o *GetGroupsForRoleUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the get groups for role unauthorized response +func (o *GetGroupsForRoleUnauthorized) Code() int { + return 401 +} + +func (o *GetGroupsForRoleUnauthorized) Error() string { + return fmt.Sprintf("[GET /authz/roles/{id}/group-assignments][%d] getGroupsForRoleUnauthorized ", 401) +} + +func (o *GetGroupsForRoleUnauthorized) String() string { + return fmt.Sprintf("[GET /authz/roles/{id}/group-assignments][%d] getGroupsForRoleUnauthorized ", 401) +} + +func (o *GetGroupsForRoleUnauthorized) readResponse(response runtime.ClientResponse, consumer 
runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewGetGroupsForRoleForbidden creates a GetGroupsForRoleForbidden with default headers values +func NewGetGroupsForRoleForbidden() *GetGroupsForRoleForbidden { + return &GetGroupsForRoleForbidden{} +} + +/* +GetGroupsForRoleForbidden describes a response with status code 403, with default header values. + +Forbidden +*/ +type GetGroupsForRoleForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this get groups for role forbidden response has a 2xx status code +func (o *GetGroupsForRoleForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get groups for role forbidden response has a 3xx status code +func (o *GetGroupsForRoleForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get groups for role forbidden response has a 4xx status code +func (o *GetGroupsForRoleForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this get groups for role forbidden response has a 5xx status code +func (o *GetGroupsForRoleForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this get groups for role forbidden response a status code equal to that given +func (o *GetGroupsForRoleForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the get groups for role forbidden response +func (o *GetGroupsForRoleForbidden) Code() int { + return 403 +} + +func (o *GetGroupsForRoleForbidden) Error() string { + return fmt.Sprintf("[GET /authz/roles/{id}/group-assignments][%d] getGroupsForRoleForbidden %+v", 403, o.Payload) +} + +func (o *GetGroupsForRoleForbidden) String() string { + return fmt.Sprintf("[GET /authz/roles/{id}/group-assignments][%d] getGroupsForRoleForbidden %+v", 403, o.Payload) +} + +func (o *GetGroupsForRoleForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o 
*GetGroupsForRoleForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetGroupsForRoleNotFound creates a GetGroupsForRoleNotFound with default headers values +func NewGetGroupsForRoleNotFound() *GetGroupsForRoleNotFound { + return &GetGroupsForRoleNotFound{} +} + +/* +GetGroupsForRoleNotFound describes a response with status code 404, with default header values. + +The specified role was not found. +*/ +type GetGroupsForRoleNotFound struct { +} + +// IsSuccess returns true when this get groups for role not found response has a 2xx status code +func (o *GetGroupsForRoleNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get groups for role not found response has a 3xx status code +func (o *GetGroupsForRoleNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get groups for role not found response has a 4xx status code +func (o *GetGroupsForRoleNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this get groups for role not found response has a 5xx status code +func (o *GetGroupsForRoleNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this get groups for role not found response a status code equal to that given +func (o *GetGroupsForRoleNotFound) IsCode(code int) bool { + return code == 404 +} + +// Code gets the status code for the get groups for role not found response +func (o *GetGroupsForRoleNotFound) Code() int { + return 404 +} + +func (o *GetGroupsForRoleNotFound) Error() string { + return fmt.Sprintf("[GET /authz/roles/{id}/group-assignments][%d] getGroupsForRoleNotFound ", 404) +} + +func (o *GetGroupsForRoleNotFound) String() string { + return fmt.Sprintf("[GET 
/authz/roles/{id}/group-assignments][%d] getGroupsForRoleNotFound ", 404) +} + +func (o *GetGroupsForRoleNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewGetGroupsForRoleInternalServerError creates a GetGroupsForRoleInternalServerError with default headers values +func NewGetGroupsForRoleInternalServerError() *GetGroupsForRoleInternalServerError { + return &GetGroupsForRoleInternalServerError{} +} + +/* +GetGroupsForRoleInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. +*/ +type GetGroupsForRoleInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this get groups for role internal server error response has a 2xx status code +func (o *GetGroupsForRoleInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get groups for role internal server error response has a 3xx status code +func (o *GetGroupsForRoleInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get groups for role internal server error response has a 4xx status code +func (o *GetGroupsForRoleInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this get groups for role internal server error response has a 5xx status code +func (o *GetGroupsForRoleInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this get groups for role internal server error response a status code equal to that given +func (o *GetGroupsForRoleInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the get groups for role internal server error response +func (o *GetGroupsForRoleInternalServerError) Code() 
int { + return 500 +} + +func (o *GetGroupsForRoleInternalServerError) Error() string { + return fmt.Sprintf("[GET /authz/roles/{id}/group-assignments][%d] getGroupsForRoleInternalServerError %+v", 500, o.Payload) +} + +func (o *GetGroupsForRoleInternalServerError) String() string { + return fmt.Sprintf("[GET /authz/roles/{id}/group-assignments][%d] getGroupsForRoleInternalServerError %+v", 500, o.Payload) +} + +func (o *GetGroupsForRoleInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *GetGroupsForRoleInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +/* +GetGroupsForRoleOKBodyItems0 get groups for role o k body items0 +swagger:model GetGroupsForRoleOKBodyItems0 +*/ +type GetGroupsForRoleOKBodyItems0 struct { + + // group Id + GroupID string `json:"groupId,omitempty"` + + // group type + // Required: true + GroupType *models.GroupType `json:"groupType"` +} + +// Validate validates this get groups for role o k body items0 +func (o *GetGroupsForRoleOKBodyItems0) Validate(formats strfmt.Registry) error { + var res []error + + if err := o.validateGroupType(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (o *GetGroupsForRoleOKBodyItems0) validateGroupType(formats strfmt.Registry) error { + + if err := validate.Required("groupType", "body", o.GroupType); err != nil { + return err + } + + if err := validate.Required("groupType", "body", o.GroupType); err != nil { + return err + } + + if o.GroupType != nil { + if err := o.GroupType.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("groupType") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("groupType") + } + return err + } + } + + return nil +} + +// ContextValidate validate this get groups for role o k body items0 based on the context it is used +func (o *GetGroupsForRoleOKBodyItems0) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := o.contextValidateGroupType(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (o *GetGroupsForRoleOKBodyItems0) contextValidateGroupType(ctx context.Context, formats strfmt.Registry) error { + + if o.GroupType != nil { + if err := o.GroupType.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("groupType") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("groupType") + } + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (o *GetGroupsForRoleOKBodyItems0) MarshalBinary() ([]byte, error) { + if o == nil { + return nil, nil + } + return swag.WriteJSON(o) +} + +// UnmarshalBinary interface implementation +func (o *GetGroupsForRoleOKBodyItems0) UnmarshalBinary(b []byte) error { + var res GetGroupsForRoleOKBodyItems0 + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *o = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/authz/get_groups_parameters.go b/platform/dbops/binaries/weaviate-src/client/authz/get_groups_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..7075c2620b34cacdc2a967aa57a743c44746ca4b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/authz/get_groups_parameters.go @@ -0,0 +1,162 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewGetGroupsParams creates a new GetGroupsParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewGetGroupsParams() *GetGroupsParams { + return &GetGroupsParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewGetGroupsParamsWithTimeout creates a new GetGroupsParams object +// with the ability to set a timeout on a request. +func NewGetGroupsParamsWithTimeout(timeout time.Duration) *GetGroupsParams { + return &GetGroupsParams{ + timeout: timeout, + } +} + +// NewGetGroupsParamsWithContext creates a new GetGroupsParams object +// with the ability to set a context for a request. +func NewGetGroupsParamsWithContext(ctx context.Context) *GetGroupsParams { + return &GetGroupsParams{ + Context: ctx, + } +} + +// NewGetGroupsParamsWithHTTPClient creates a new GetGroupsParams object +// with the ability to set a custom HTTPClient for a request. +func NewGetGroupsParamsWithHTTPClient(client *http.Client) *GetGroupsParams { + return &GetGroupsParams{ + HTTPClient: client, + } +} + +/* +GetGroupsParams contains all the parameters to send to the API endpoint + + for the get groups operation. + + Typically these are written to a http.Request. +*/ +type GetGroupsParams struct { + + /* GroupType. + + The type of group to retrieve. + */ + GroupType string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the get groups params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *GetGroupsParams) WithDefaults() *GetGroupsParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the get groups params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *GetGroupsParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the get groups params +func (o *GetGroupsParams) WithTimeout(timeout time.Duration) *GetGroupsParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the get groups params +func (o *GetGroupsParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the get groups params +func (o *GetGroupsParams) WithContext(ctx context.Context) *GetGroupsParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the get groups params +func (o *GetGroupsParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the get groups params +func (o *GetGroupsParams) WithHTTPClient(client *http.Client) *GetGroupsParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the get groups params +func (o *GetGroupsParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithGroupType adds the groupType to the get groups params +func (o *GetGroupsParams) WithGroupType(groupType string) *GetGroupsParams { + o.SetGroupType(groupType) + return o +} + +// SetGroupType adds the groupType to the get groups params +func (o *GetGroupsParams) SetGroupType(groupType string) { + o.GroupType = groupType +} + +// WriteToRequest writes these params to a swagger request +func (o *GetGroupsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param groupType + if err := r.SetPathParam("groupType", o.GroupType); err != 
nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/authz/get_groups_responses.go b/platform/dbops/binaries/weaviate-src/client/authz/get_groups_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..b946ea65e393cacbf184d25e9d1e2ac903391e46 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/authz/get_groups_responses.go @@ -0,0 +1,470 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// GetGroupsReader is a Reader for the GetGroups structure. +type GetGroupsReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *GetGroupsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewGetGroupsOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewGetGroupsBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 401: + result := NewGetGroupsUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewGetGroupsForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewGetGroupsUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewGetGroupsInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewGetGroupsOK creates a GetGroupsOK with default headers values +func NewGetGroupsOK() *GetGroupsOK { + return &GetGroupsOK{} +} + +/* +GetGroupsOK describes a response with status code 200, with default header values. + +A list of group names for the specified type. 
+*/ +type GetGroupsOK struct { + Payload []string +} + +// IsSuccess returns true when this get groups o k response has a 2xx status code +func (o *GetGroupsOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this get groups o k response has a 3xx status code +func (o *GetGroupsOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get groups o k response has a 4xx status code +func (o *GetGroupsOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this get groups o k response has a 5xx status code +func (o *GetGroupsOK) IsServerError() bool { + return false +} + +// IsCode returns true when this get groups o k response a status code equal to that given +func (o *GetGroupsOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the get groups o k response +func (o *GetGroupsOK) Code() int { + return 200 +} + +func (o *GetGroupsOK) Error() string { + return fmt.Sprintf("[GET /authz/groups/{groupType}][%d] getGroupsOK %+v", 200, o.Payload) +} + +func (o *GetGroupsOK) String() string { + return fmt.Sprintf("[GET /authz/groups/{groupType}][%d] getGroupsOK %+v", 200, o.Payload) +} + +func (o *GetGroupsOK) GetPayload() []string { + return o.Payload +} + +func (o *GetGroupsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetGroupsBadRequest creates a GetGroupsBadRequest with default headers values +func NewGetGroupsBadRequest() *GetGroupsBadRequest { + return &GetGroupsBadRequest{} +} + +/* +GetGroupsBadRequest describes a response with status code 400, with default header values. 
+ +Bad request +*/ +type GetGroupsBadRequest struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this get groups bad request response has a 2xx status code +func (o *GetGroupsBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get groups bad request response has a 3xx status code +func (o *GetGroupsBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get groups bad request response has a 4xx status code +func (o *GetGroupsBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this get groups bad request response has a 5xx status code +func (o *GetGroupsBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this get groups bad request response a status code equal to that given +func (o *GetGroupsBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the get groups bad request response +func (o *GetGroupsBadRequest) Code() int { + return 400 +} + +func (o *GetGroupsBadRequest) Error() string { + return fmt.Sprintf("[GET /authz/groups/{groupType}][%d] getGroupsBadRequest %+v", 400, o.Payload) +} + +func (o *GetGroupsBadRequest) String() string { + return fmt.Sprintf("[GET /authz/groups/{groupType}][%d] getGroupsBadRequest %+v", 400, o.Payload) +} + +func (o *GetGroupsBadRequest) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *GetGroupsBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetGroupsUnauthorized creates a GetGroupsUnauthorized with default headers values +func NewGetGroupsUnauthorized() *GetGroupsUnauthorized { + return &GetGroupsUnauthorized{} +} + +/* 
+GetGroupsUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. +*/ +type GetGroupsUnauthorized struct { +} + +// IsSuccess returns true when this get groups unauthorized response has a 2xx status code +func (o *GetGroupsUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get groups unauthorized response has a 3xx status code +func (o *GetGroupsUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get groups unauthorized response has a 4xx status code +func (o *GetGroupsUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this get groups unauthorized response has a 5xx status code +func (o *GetGroupsUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this get groups unauthorized response a status code equal to that given +func (o *GetGroupsUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the get groups unauthorized response +func (o *GetGroupsUnauthorized) Code() int { + return 401 +} + +func (o *GetGroupsUnauthorized) Error() string { + return fmt.Sprintf("[GET /authz/groups/{groupType}][%d] getGroupsUnauthorized ", 401) +} + +func (o *GetGroupsUnauthorized) String() string { + return fmt.Sprintf("[GET /authz/groups/{groupType}][%d] getGroupsUnauthorized ", 401) +} + +func (o *GetGroupsUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewGetGroupsForbidden creates a GetGroupsForbidden with default headers values +func NewGetGroupsForbidden() *GetGroupsForbidden { + return &GetGroupsForbidden{} +} + +/* +GetGroupsForbidden describes a response with status code 403, with default header values. 
+ +Forbidden +*/ +type GetGroupsForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this get groups forbidden response has a 2xx status code +func (o *GetGroupsForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get groups forbidden response has a 3xx status code +func (o *GetGroupsForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get groups forbidden response has a 4xx status code +func (o *GetGroupsForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this get groups forbidden response has a 5xx status code +func (o *GetGroupsForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this get groups forbidden response a status code equal to that given +func (o *GetGroupsForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the get groups forbidden response +func (o *GetGroupsForbidden) Code() int { + return 403 +} + +func (o *GetGroupsForbidden) Error() string { + return fmt.Sprintf("[GET /authz/groups/{groupType}][%d] getGroupsForbidden %+v", 403, o.Payload) +} + +func (o *GetGroupsForbidden) String() string { + return fmt.Sprintf("[GET /authz/groups/{groupType}][%d] getGroupsForbidden %+v", 403, o.Payload) +} + +func (o *GetGroupsForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *GetGroupsForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetGroupsUnprocessableEntity creates a GetGroupsUnprocessableEntity with default headers values +func NewGetGroupsUnprocessableEntity() *GetGroupsUnprocessableEntity { + return &GetGroupsUnprocessableEntity{} +} + +/* 
+GetGroupsUnprocessableEntity describes a response with status code 422, with default header values. + +The request syntax is correct, but the server couldn't process it due to semantic issues. +*/ +type GetGroupsUnprocessableEntity struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this get groups unprocessable entity response has a 2xx status code +func (o *GetGroupsUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get groups unprocessable entity response has a 3xx status code +func (o *GetGroupsUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get groups unprocessable entity response has a 4xx status code +func (o *GetGroupsUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this get groups unprocessable entity response has a 5xx status code +func (o *GetGroupsUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this get groups unprocessable entity response a status code equal to that given +func (o *GetGroupsUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + +// Code gets the status code for the get groups unprocessable entity response +func (o *GetGroupsUnprocessableEntity) Code() int { + return 422 +} + +func (o *GetGroupsUnprocessableEntity) Error() string { + return fmt.Sprintf("[GET /authz/groups/{groupType}][%d] getGroupsUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *GetGroupsUnprocessableEntity) String() string { + return fmt.Sprintf("[GET /authz/groups/{groupType}][%d] getGroupsUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *GetGroupsUnprocessableEntity) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *GetGroupsUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // 
response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetGroupsInternalServerError creates a GetGroupsInternalServerError with default headers values +func NewGetGroupsInternalServerError() *GetGroupsInternalServerError { + return &GetGroupsInternalServerError{} +} + +/* +GetGroupsInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. +*/ +type GetGroupsInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this get groups internal server error response has a 2xx status code +func (o *GetGroupsInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get groups internal server error response has a 3xx status code +func (o *GetGroupsInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get groups internal server error response has a 4xx status code +func (o *GetGroupsInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this get groups internal server error response has a 5xx status code +func (o *GetGroupsInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this get groups internal server error response a status code equal to that given +func (o *GetGroupsInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the get groups internal server error response +func (o *GetGroupsInternalServerError) Code() int { + return 500 +} + +func (o *GetGroupsInternalServerError) Error() string { + return fmt.Sprintf("[GET /authz/groups/{groupType}][%d] getGroupsInternalServerError %+v", 500, o.Payload) +} + +func (o *GetGroupsInternalServerError) String() 
string { + return fmt.Sprintf("[GET /authz/groups/{groupType}][%d] getGroupsInternalServerError %+v", 500, o.Payload) +} + +func (o *GetGroupsInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *GetGroupsInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/authz/get_role_parameters.go b/platform/dbops/binaries/weaviate-src/client/authz/get_role_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..9a06375d0d261c83b919d48a6db8d8f20bf1ce38 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/authz/get_role_parameters.go @@ -0,0 +1,162 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewGetRoleParams creates a new GetRoleParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. 
+func NewGetRoleParams() *GetRoleParams { + return &GetRoleParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewGetRoleParamsWithTimeout creates a new GetRoleParams object +// with the ability to set a timeout on a request. +func NewGetRoleParamsWithTimeout(timeout time.Duration) *GetRoleParams { + return &GetRoleParams{ + timeout: timeout, + } +} + +// NewGetRoleParamsWithContext creates a new GetRoleParams object +// with the ability to set a context for a request. +func NewGetRoleParamsWithContext(ctx context.Context) *GetRoleParams { + return &GetRoleParams{ + Context: ctx, + } +} + +// NewGetRoleParamsWithHTTPClient creates a new GetRoleParams object +// with the ability to set a custom HTTPClient for a request. +func NewGetRoleParamsWithHTTPClient(client *http.Client) *GetRoleParams { + return &GetRoleParams{ + HTTPClient: client, + } +} + +/* +GetRoleParams contains all the parameters to send to the API endpoint + + for the get role operation. + + Typically these are written to a http.Request. +*/ +type GetRoleParams struct { + + /* ID. + + role name + */ + ID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the get role params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *GetRoleParams) WithDefaults() *GetRoleParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the get role params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *GetRoleParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the get role params +func (o *GetRoleParams) WithTimeout(timeout time.Duration) *GetRoleParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the get role params +func (o *GetRoleParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the get role params +func (o *GetRoleParams) WithContext(ctx context.Context) *GetRoleParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the get role params +func (o *GetRoleParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the get role params +func (o *GetRoleParams) WithHTTPClient(client *http.Client) *GetRoleParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the get role params +func (o *GetRoleParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithID adds the id to the get role params +func (o *GetRoleParams) WithID(id string) *GetRoleParams { + o.SetID(id) + return o +} + +// SetID adds the id to the get role params +func (o *GetRoleParams) SetID(id string) { + o.ID = id +} + +// WriteToRequest writes these params to a swagger request +func (o *GetRoleParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param id + if err := r.SetPathParam("id", o.ID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/authz/get_role_responses.go b/platform/dbops/binaries/weaviate-src/client/authz/get_role_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..fa73d536fd55046c746844c685cdda51c99f6b3e --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/authz/get_role_responses.go @@ -0,0 +1,460 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// GetRoleReader is a Reader for the GetRole structure. +type GetRoleReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *GetRoleReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewGetRoleOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewGetRoleBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 401: + result := NewGetRoleUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewGetRoleForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 404: + result := NewGetRoleNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewGetRoleInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewGetRoleOK creates a GetRoleOK with default headers values +func NewGetRoleOK() *GetRoleOK { + return &GetRoleOK{} +} + +/* +GetRoleOK describes a response with status code 200, with default header values. + +Successful response. 
+*/ +type GetRoleOK struct { + Payload *models.Role +} + +// IsSuccess returns true when this get role o k response has a 2xx status code +func (o *GetRoleOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this get role o k response has a 3xx status code +func (o *GetRoleOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get role o k response has a 4xx status code +func (o *GetRoleOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this get role o k response has a 5xx status code +func (o *GetRoleOK) IsServerError() bool { + return false +} + +// IsCode returns true when this get role o k response a status code equal to that given +func (o *GetRoleOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the get role o k response +func (o *GetRoleOK) Code() int { + return 200 +} + +func (o *GetRoleOK) Error() string { + return fmt.Sprintf("[GET /authz/roles/{id}][%d] getRoleOK %+v", 200, o.Payload) +} + +func (o *GetRoleOK) String() string { + return fmt.Sprintf("[GET /authz/roles/{id}][%d] getRoleOK %+v", 200, o.Payload) +} + +func (o *GetRoleOK) GetPayload() *models.Role { + return o.Payload +} + +func (o *GetRoleOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Role) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetRoleBadRequest creates a GetRoleBadRequest with default headers values +func NewGetRoleBadRequest() *GetRoleBadRequest { + return &GetRoleBadRequest{} +} + +/* +GetRoleBadRequest describes a response with status code 400, with default header values. + +Malformed request. 
+*/ +type GetRoleBadRequest struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this get role bad request response has a 2xx status code +func (o *GetRoleBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get role bad request response has a 3xx status code +func (o *GetRoleBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get role bad request response has a 4xx status code +func (o *GetRoleBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this get role bad request response has a 5xx status code +func (o *GetRoleBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this get role bad request response a status code equal to that given +func (o *GetRoleBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the get role bad request response +func (o *GetRoleBadRequest) Code() int { + return 400 +} + +func (o *GetRoleBadRequest) Error() string { + return fmt.Sprintf("[GET /authz/roles/{id}][%d] getRoleBadRequest %+v", 400, o.Payload) +} + +func (o *GetRoleBadRequest) String() string { + return fmt.Sprintf("[GET /authz/roles/{id}][%d] getRoleBadRequest %+v", 400, o.Payload) +} + +func (o *GetRoleBadRequest) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *GetRoleBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetRoleUnauthorized creates a GetRoleUnauthorized with default headers values +func NewGetRoleUnauthorized() *GetRoleUnauthorized { + return &GetRoleUnauthorized{} +} + +/* +GetRoleUnauthorized describes a response with status code 401, with default header values. 
+ +Unauthorized or invalid credentials. +*/ +type GetRoleUnauthorized struct { +} + +// IsSuccess returns true when this get role unauthorized response has a 2xx status code +func (o *GetRoleUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get role unauthorized response has a 3xx status code +func (o *GetRoleUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get role unauthorized response has a 4xx status code +func (o *GetRoleUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this get role unauthorized response has a 5xx status code +func (o *GetRoleUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this get role unauthorized response a status code equal to that given +func (o *GetRoleUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the get role unauthorized response +func (o *GetRoleUnauthorized) Code() int { + return 401 +} + +func (o *GetRoleUnauthorized) Error() string { + return fmt.Sprintf("[GET /authz/roles/{id}][%d] getRoleUnauthorized ", 401) +} + +func (o *GetRoleUnauthorized) String() string { + return fmt.Sprintf("[GET /authz/roles/{id}][%d] getRoleUnauthorized ", 401) +} + +func (o *GetRoleUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewGetRoleForbidden creates a GetRoleForbidden with default headers values +func NewGetRoleForbidden() *GetRoleForbidden { + return &GetRoleForbidden{} +} + +/* +GetRoleForbidden describes a response with status code 403, with default header values. 
+ +Forbidden +*/ +type GetRoleForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this get role forbidden response has a 2xx status code +func (o *GetRoleForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get role forbidden response has a 3xx status code +func (o *GetRoleForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get role forbidden response has a 4xx status code +func (o *GetRoleForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this get role forbidden response has a 5xx status code +func (o *GetRoleForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this get role forbidden response a status code equal to that given +func (o *GetRoleForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the get role forbidden response +func (o *GetRoleForbidden) Code() int { + return 403 +} + +func (o *GetRoleForbidden) Error() string { + return fmt.Sprintf("[GET /authz/roles/{id}][%d] getRoleForbidden %+v", 403, o.Payload) +} + +func (o *GetRoleForbidden) String() string { + return fmt.Sprintf("[GET /authz/roles/{id}][%d] getRoleForbidden %+v", 403, o.Payload) +} + +func (o *GetRoleForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *GetRoleForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetRoleNotFound creates a GetRoleNotFound with default headers values +func NewGetRoleNotFound() *GetRoleNotFound { + return &GetRoleNotFound{} +} + +/* +GetRoleNotFound describes a response with status code 404, with default header values. 
+ +no role found +*/ +type GetRoleNotFound struct { +} + +// IsSuccess returns true when this get role not found response has a 2xx status code +func (o *GetRoleNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get role not found response has a 3xx status code +func (o *GetRoleNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get role not found response has a 4xx status code +func (o *GetRoleNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this get role not found response has a 5xx status code +func (o *GetRoleNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this get role not found response a status code equal to that given +func (o *GetRoleNotFound) IsCode(code int) bool { + return code == 404 +} + +// Code gets the status code for the get role not found response +func (o *GetRoleNotFound) Code() int { + return 404 +} + +func (o *GetRoleNotFound) Error() string { + return fmt.Sprintf("[GET /authz/roles/{id}][%d] getRoleNotFound ", 404) +} + +func (o *GetRoleNotFound) String() string { + return fmt.Sprintf("[GET /authz/roles/{id}][%d] getRoleNotFound ", 404) +} + +func (o *GetRoleNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewGetRoleInternalServerError creates a GetRoleInternalServerError with default headers values +func NewGetRoleInternalServerError() *GetRoleInternalServerError { + return &GetRoleInternalServerError{} +} + +/* +GetRoleInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+*/ +type GetRoleInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this get role internal server error response has a 2xx status code +func (o *GetRoleInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get role internal server error response has a 3xx status code +func (o *GetRoleInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get role internal server error response has a 4xx status code +func (o *GetRoleInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this get role internal server error response has a 5xx status code +func (o *GetRoleInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this get role internal server error response a status code equal to that given +func (o *GetRoleInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the get role internal server error response +func (o *GetRoleInternalServerError) Code() int { + return 500 +} + +func (o *GetRoleInternalServerError) Error() string { + return fmt.Sprintf("[GET /authz/roles/{id}][%d] getRoleInternalServerError %+v", 500, o.Payload) +} + +func (o *GetRoleInternalServerError) String() string { + return fmt.Sprintf("[GET /authz/roles/{id}][%d] getRoleInternalServerError %+v", 500, o.Payload) +} + +func (o *GetRoleInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *GetRoleInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/authz/get_roles_for_group_parameters.go 
b/platform/dbops/binaries/weaviate-src/client/authz/get_roles_for_group_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..f0717717ae64e6c935973d52f0072577d9de763d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/authz/get_roles_for_group_parameters.go @@ -0,0 +1,230 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewGetRolesForGroupParams creates a new GetRolesForGroupParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewGetRolesForGroupParams() *GetRolesForGroupParams { + return &GetRolesForGroupParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewGetRolesForGroupParamsWithTimeout creates a new GetRolesForGroupParams object +// with the ability to set a timeout on a request. +func NewGetRolesForGroupParamsWithTimeout(timeout time.Duration) *GetRolesForGroupParams { + return &GetRolesForGroupParams{ + timeout: timeout, + } +} + +// NewGetRolesForGroupParamsWithContext creates a new GetRolesForGroupParams object +// with the ability to set a context for a request. 
+func NewGetRolesForGroupParamsWithContext(ctx context.Context) *GetRolesForGroupParams { + return &GetRolesForGroupParams{ + Context: ctx, + } +} + +// NewGetRolesForGroupParamsWithHTTPClient creates a new GetRolesForGroupParams object +// with the ability to set a custom HTTPClient for a request. +func NewGetRolesForGroupParamsWithHTTPClient(client *http.Client) *GetRolesForGroupParams { + return &GetRolesForGroupParams{ + HTTPClient: client, + } +} + +/* +GetRolesForGroupParams contains all the parameters to send to the API endpoint + + for the get roles for group operation. + + Typically these are written to a http.Request. +*/ +type GetRolesForGroupParams struct { + + /* GroupType. + + The type of the group. + */ + GroupType string + + /* ID. + + The unique name of the group. + */ + ID string + + /* IncludeFullRoles. + + If true, the response will include the full role definitions with all associated permissions. If false, only role names are returned. + */ + IncludeFullRoles *bool + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the get roles for group params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *GetRolesForGroupParams) WithDefaults() *GetRolesForGroupParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the get roles for group params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *GetRolesForGroupParams) SetDefaults() { + var ( + includeFullRolesDefault = bool(false) + ) + + val := GetRolesForGroupParams{ + IncludeFullRoles: &includeFullRolesDefault, + } + + val.timeout = o.timeout + val.Context = o.Context + val.HTTPClient = o.HTTPClient + *o = val +} + +// WithTimeout adds the timeout to the get roles for group params +func (o *GetRolesForGroupParams) WithTimeout(timeout time.Duration) *GetRolesForGroupParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the get roles for group params +func (o *GetRolesForGroupParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the get roles for group params +func (o *GetRolesForGroupParams) WithContext(ctx context.Context) *GetRolesForGroupParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the get roles for group params +func (o *GetRolesForGroupParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the get roles for group params +func (o *GetRolesForGroupParams) WithHTTPClient(client *http.Client) *GetRolesForGroupParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the get roles for group params +func (o *GetRolesForGroupParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithGroupType adds the groupType to the get roles for group params +func (o *GetRolesForGroupParams) WithGroupType(groupType string) *GetRolesForGroupParams { + o.SetGroupType(groupType) + return o +} + +// SetGroupType adds the groupType to the get roles for group params +func (o *GetRolesForGroupParams) SetGroupType(groupType string) { + o.GroupType = groupType +} + +// WithID adds the id to the get roles for group params +func (o *GetRolesForGroupParams) WithID(id string) *GetRolesForGroupParams { + o.SetID(id) + return o +} + +// SetID adds the id to the get roles for group params +func (o 
*GetRolesForGroupParams) SetID(id string) { + o.ID = id +} + +// WithIncludeFullRoles adds the includeFullRoles to the get roles for group params +func (o *GetRolesForGroupParams) WithIncludeFullRoles(includeFullRoles *bool) *GetRolesForGroupParams { + o.SetIncludeFullRoles(includeFullRoles) + return o +} + +// SetIncludeFullRoles adds the includeFullRoles to the get roles for group params +func (o *GetRolesForGroupParams) SetIncludeFullRoles(includeFullRoles *bool) { + o.IncludeFullRoles = includeFullRoles +} + +// WriteToRequest writes these params to a swagger request +func (o *GetRolesForGroupParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param groupType + if err := r.SetPathParam("groupType", o.GroupType); err != nil { + return err + } + + // path param id + if err := r.SetPathParam("id", o.ID); err != nil { + return err + } + + if o.IncludeFullRoles != nil { + + // query param includeFullRoles + var qrIncludeFullRoles bool + + if o.IncludeFullRoles != nil { + qrIncludeFullRoles = *o.IncludeFullRoles + } + qIncludeFullRoles := swag.FormatBool(qrIncludeFullRoles) + if qIncludeFullRoles != "" { + + if err := r.SetQueryParam("includeFullRoles", qIncludeFullRoles); err != nil { + return err + } + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/authz/get_roles_for_group_responses.go b/platform/dbops/binaries/weaviate-src/client/authz/get_roles_for_group_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..663a1678bc699f2248044853435cc5e81ef9ecc7 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/authz/get_roles_for_group_responses.go @@ -0,0 +1,532 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// GetRolesForGroupReader is a Reader for the GetRolesForGroup structure. +type GetRolesForGroupReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *GetRolesForGroupReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewGetRolesForGroupOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewGetRolesForGroupBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 401: + result := NewGetRolesForGroupUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewGetRolesForGroupForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 404: + result := NewGetRolesForGroupNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewGetRolesForGroupUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewGetRolesForGroupInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewGetRolesForGroupOK creates a GetRolesForGroupOK with default headers values +func NewGetRolesForGroupOK() *GetRolesForGroupOK { + return &GetRolesForGroupOK{} +} + +/* +GetRolesForGroupOK describes a response with status code 200, with default header values. + +A list of roles assigned to the specified group. 
+*/ +type GetRolesForGroupOK struct { + Payload models.RolesListResponse +} + +// IsSuccess returns true when this get roles for group o k response has a 2xx status code +func (o *GetRolesForGroupOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this get roles for group o k response has a 3xx status code +func (o *GetRolesForGroupOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get roles for group o k response has a 4xx status code +func (o *GetRolesForGroupOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this get roles for group o k response has a 5xx status code +func (o *GetRolesForGroupOK) IsServerError() bool { + return false +} + +// IsCode returns true when this get roles for group o k response a status code equal to that given +func (o *GetRolesForGroupOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the get roles for group o k response +func (o *GetRolesForGroupOK) Code() int { + return 200 +} + +func (o *GetRolesForGroupOK) Error() string { + return fmt.Sprintf("[GET /authz/groups/{id}/roles/{groupType}][%d] getRolesForGroupOK %+v", 200, o.Payload) +} + +func (o *GetRolesForGroupOK) String() string { + return fmt.Sprintf("[GET /authz/groups/{id}/roles/{groupType}][%d] getRolesForGroupOK %+v", 200, o.Payload) +} + +func (o *GetRolesForGroupOK) GetPayload() models.RolesListResponse { + return o.Payload +} + +func (o *GetRolesForGroupOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetRolesForGroupBadRequest creates a GetRolesForGroupBadRequest with default headers values +func NewGetRolesForGroupBadRequest() *GetRolesForGroupBadRequest { + return &GetRolesForGroupBadRequest{} +} + +/* +GetRolesForGroupBadRequest 
describes a response with status code 400, with default header values. + +Bad request +*/ +type GetRolesForGroupBadRequest struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this get roles for group bad request response has a 2xx status code +func (o *GetRolesForGroupBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get roles for group bad request response has a 3xx status code +func (o *GetRolesForGroupBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get roles for group bad request response has a 4xx status code +func (o *GetRolesForGroupBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this get roles for group bad request response has a 5xx status code +func (o *GetRolesForGroupBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this get roles for group bad request response a status code equal to that given +func (o *GetRolesForGroupBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the get roles for group bad request response +func (o *GetRolesForGroupBadRequest) Code() int { + return 400 +} + +func (o *GetRolesForGroupBadRequest) Error() string { + return fmt.Sprintf("[GET /authz/groups/{id}/roles/{groupType}][%d] getRolesForGroupBadRequest %+v", 400, o.Payload) +} + +func (o *GetRolesForGroupBadRequest) String() string { + return fmt.Sprintf("[GET /authz/groups/{id}/roles/{groupType}][%d] getRolesForGroupBadRequest %+v", 400, o.Payload) +} + +func (o *GetRolesForGroupBadRequest) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *GetRolesForGroupBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + 
return err + } + + return nil +} + +// NewGetRolesForGroupUnauthorized creates a GetRolesForGroupUnauthorized with default headers values +func NewGetRolesForGroupUnauthorized() *GetRolesForGroupUnauthorized { + return &GetRolesForGroupUnauthorized{} +} + +/* +GetRolesForGroupUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. +*/ +type GetRolesForGroupUnauthorized struct { +} + +// IsSuccess returns true when this get roles for group unauthorized response has a 2xx status code +func (o *GetRolesForGroupUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get roles for group unauthorized response has a 3xx status code +func (o *GetRolesForGroupUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get roles for group unauthorized response has a 4xx status code +func (o *GetRolesForGroupUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this get roles for group unauthorized response has a 5xx status code +func (o *GetRolesForGroupUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this get roles for group unauthorized response a status code equal to that given +func (o *GetRolesForGroupUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the get roles for group unauthorized response +func (o *GetRolesForGroupUnauthorized) Code() int { + return 401 +} + +func (o *GetRolesForGroupUnauthorized) Error() string { + return fmt.Sprintf("[GET /authz/groups/{id}/roles/{groupType}][%d] getRolesForGroupUnauthorized ", 401) +} + +func (o *GetRolesForGroupUnauthorized) String() string { + return fmt.Sprintf("[GET /authz/groups/{id}/roles/{groupType}][%d] getRolesForGroupUnauthorized ", 401) +} + +func (o *GetRolesForGroupUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats 
strfmt.Registry) error { + + return nil +} + +// NewGetRolesForGroupForbidden creates a GetRolesForGroupForbidden with default headers values +func NewGetRolesForGroupForbidden() *GetRolesForGroupForbidden { + return &GetRolesForGroupForbidden{} +} + +/* +GetRolesForGroupForbidden describes a response with status code 403, with default header values. + +Forbidden +*/ +type GetRolesForGroupForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this get roles for group forbidden response has a 2xx status code +func (o *GetRolesForGroupForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get roles for group forbidden response has a 3xx status code +func (o *GetRolesForGroupForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get roles for group forbidden response has a 4xx status code +func (o *GetRolesForGroupForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this get roles for group forbidden response has a 5xx status code +func (o *GetRolesForGroupForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this get roles for group forbidden response a status code equal to that given +func (o *GetRolesForGroupForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the get roles for group forbidden response +func (o *GetRolesForGroupForbidden) Code() int { + return 403 +} + +func (o *GetRolesForGroupForbidden) Error() string { + return fmt.Sprintf("[GET /authz/groups/{id}/roles/{groupType}][%d] getRolesForGroupForbidden %+v", 403, o.Payload) +} + +func (o *GetRolesForGroupForbidden) String() string { + return fmt.Sprintf("[GET /authz/groups/{id}/roles/{groupType}][%d] getRolesForGroupForbidden %+v", 403, o.Payload) +} + +func (o *GetRolesForGroupForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *GetRolesForGroupForbidden) 
readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetRolesForGroupNotFound creates a GetRolesForGroupNotFound with default headers values +func NewGetRolesForGroupNotFound() *GetRolesForGroupNotFound { + return &GetRolesForGroupNotFound{} +} + +/* +GetRolesForGroupNotFound describes a response with status code 404, with default header values. + +The specified group was not found. +*/ +type GetRolesForGroupNotFound struct { +} + +// IsSuccess returns true when this get roles for group not found response has a 2xx status code +func (o *GetRolesForGroupNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get roles for group not found response has a 3xx status code +func (o *GetRolesForGroupNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get roles for group not found response has a 4xx status code +func (o *GetRolesForGroupNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this get roles for group not found response has a 5xx status code +func (o *GetRolesForGroupNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this get roles for group not found response a status code equal to that given +func (o *GetRolesForGroupNotFound) IsCode(code int) bool { + return code == 404 +} + +// Code gets the status code for the get roles for group not found response +func (o *GetRolesForGroupNotFound) Code() int { + return 404 +} + +func (o *GetRolesForGroupNotFound) Error() string { + return fmt.Sprintf("[GET /authz/groups/{id}/roles/{groupType}][%d] getRolesForGroupNotFound ", 404) +} + +func (o *GetRolesForGroupNotFound) String() string { + return fmt.Sprintf("[GET 
/authz/groups/{id}/roles/{groupType}][%d] getRolesForGroupNotFound ", 404) +} + +func (o *GetRolesForGroupNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewGetRolesForGroupUnprocessableEntity creates a GetRolesForGroupUnprocessableEntity with default headers values +func NewGetRolesForGroupUnprocessableEntity() *GetRolesForGroupUnprocessableEntity { + return &GetRolesForGroupUnprocessableEntity{} +} + +/* +GetRolesForGroupUnprocessableEntity describes a response with status code 422, with default header values. + +The request syntax is correct, but the server couldn't process it due to semantic issues. +*/ +type GetRolesForGroupUnprocessableEntity struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this get roles for group unprocessable entity response has a 2xx status code +func (o *GetRolesForGroupUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get roles for group unprocessable entity response has a 3xx status code +func (o *GetRolesForGroupUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get roles for group unprocessable entity response has a 4xx status code +func (o *GetRolesForGroupUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this get roles for group unprocessable entity response has a 5xx status code +func (o *GetRolesForGroupUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this get roles for group unprocessable entity response a status code equal to that given +func (o *GetRolesForGroupUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + +// Code gets the status code for the get roles for group unprocessable entity response +func (o *GetRolesForGroupUnprocessableEntity) Code() int { + return 422 +} + +func (o 
*GetRolesForGroupUnprocessableEntity) Error() string { + return fmt.Sprintf("[GET /authz/groups/{id}/roles/{groupType}][%d] getRolesForGroupUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *GetRolesForGroupUnprocessableEntity) String() string { + return fmt.Sprintf("[GET /authz/groups/{id}/roles/{groupType}][%d] getRolesForGroupUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *GetRolesForGroupUnprocessableEntity) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *GetRolesForGroupUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetRolesForGroupInternalServerError creates a GetRolesForGroupInternalServerError with default headers values +func NewGetRolesForGroupInternalServerError() *GetRolesForGroupInternalServerError { + return &GetRolesForGroupInternalServerError{} +} + +/* +GetRolesForGroupInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+*/ +type GetRolesForGroupInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this get roles for group internal server error response has a 2xx status code +func (o *GetRolesForGroupInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get roles for group internal server error response has a 3xx status code +func (o *GetRolesForGroupInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get roles for group internal server error response has a 4xx status code +func (o *GetRolesForGroupInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this get roles for group internal server error response has a 5xx status code +func (o *GetRolesForGroupInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this get roles for group internal server error response a status code equal to that given +func (o *GetRolesForGroupInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the get roles for group internal server error response +func (o *GetRolesForGroupInternalServerError) Code() int { + return 500 +} + +func (o *GetRolesForGroupInternalServerError) Error() string { + return fmt.Sprintf("[GET /authz/groups/{id}/roles/{groupType}][%d] getRolesForGroupInternalServerError %+v", 500, o.Payload) +} + +func (o *GetRolesForGroupInternalServerError) String() string { + return fmt.Sprintf("[GET /authz/groups/{id}/roles/{groupType}][%d] getRolesForGroupInternalServerError %+v", 500, o.Payload) +} + +func (o *GetRolesForGroupInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *GetRolesForGroupInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + 
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/authz/get_roles_for_user_deprecated_parameters.go b/platform/dbops/binaries/weaviate-src/client/authz/get_roles_for_user_deprecated_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..0fad833a07e91debcf2840c18a8534ca697bbcc5 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/authz/get_roles_for_user_deprecated_parameters.go @@ -0,0 +1,162 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewGetRolesForUserDeprecatedParams creates a new GetRolesForUserDeprecatedParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewGetRolesForUserDeprecatedParams() *GetRolesForUserDeprecatedParams { + return &GetRolesForUserDeprecatedParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewGetRolesForUserDeprecatedParamsWithTimeout creates a new GetRolesForUserDeprecatedParams object +// with the ability to set a timeout on a request. 
+func NewGetRolesForUserDeprecatedParamsWithTimeout(timeout time.Duration) *GetRolesForUserDeprecatedParams { + return &GetRolesForUserDeprecatedParams{ + timeout: timeout, + } +} + +// NewGetRolesForUserDeprecatedParamsWithContext creates a new GetRolesForUserDeprecatedParams object +// with the ability to set a context for a request. +func NewGetRolesForUserDeprecatedParamsWithContext(ctx context.Context) *GetRolesForUserDeprecatedParams { + return &GetRolesForUserDeprecatedParams{ + Context: ctx, + } +} + +// NewGetRolesForUserDeprecatedParamsWithHTTPClient creates a new GetRolesForUserDeprecatedParams object +// with the ability to set a custom HTTPClient for a request. +func NewGetRolesForUserDeprecatedParamsWithHTTPClient(client *http.Client) *GetRolesForUserDeprecatedParams { + return &GetRolesForUserDeprecatedParams{ + HTTPClient: client, + } +} + +/* +GetRolesForUserDeprecatedParams contains all the parameters to send to the API endpoint + + for the get roles for user deprecated operation. + + Typically these are written to a http.Request. +*/ +type GetRolesForUserDeprecatedParams struct { + + /* ID. + + user name + */ + ID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the get roles for user deprecated params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *GetRolesForUserDeprecatedParams) WithDefaults() *GetRolesForUserDeprecatedParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the get roles for user deprecated params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *GetRolesForUserDeprecatedParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the get roles for user deprecated params +func (o *GetRolesForUserDeprecatedParams) WithTimeout(timeout time.Duration) *GetRolesForUserDeprecatedParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the get roles for user deprecated params +func (o *GetRolesForUserDeprecatedParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the get roles for user deprecated params +func (o *GetRolesForUserDeprecatedParams) WithContext(ctx context.Context) *GetRolesForUserDeprecatedParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the get roles for user deprecated params +func (o *GetRolesForUserDeprecatedParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the get roles for user deprecated params +func (o *GetRolesForUserDeprecatedParams) WithHTTPClient(client *http.Client) *GetRolesForUserDeprecatedParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the get roles for user deprecated params +func (o *GetRolesForUserDeprecatedParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithID adds the id to the get roles for user deprecated params +func (o *GetRolesForUserDeprecatedParams) WithID(id string) *GetRolesForUserDeprecatedParams { + o.SetID(id) + return o +} + +// SetID adds the id to the get roles for user deprecated params +func (o *GetRolesForUserDeprecatedParams) SetID(id string) { + o.ID = id +} + +// WriteToRequest writes these params to a swagger request +func (o *GetRolesForUserDeprecatedParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param id + if err := 
r.SetPathParam("id", o.ID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/authz/get_roles_for_user_deprecated_responses.go b/platform/dbops/binaries/weaviate-src/client/authz/get_roles_for_user_deprecated_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..30a4a21bd553c5a27430a377152c850011ea582f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/authz/get_roles_for_user_deprecated_responses.go @@ -0,0 +1,532 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// GetRolesForUserDeprecatedReader is a Reader for the GetRolesForUserDeprecated structure. +type GetRolesForUserDeprecatedReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *GetRolesForUserDeprecatedReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewGetRolesForUserDeprecatedOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewGetRolesForUserDeprecatedBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 401: + result := NewGetRolesForUserDeprecatedUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewGetRolesForUserDeprecatedForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 404: + result := NewGetRolesForUserDeprecatedNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewGetRolesForUserDeprecatedUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewGetRolesForUserDeprecatedInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewGetRolesForUserDeprecatedOK creates a GetRolesForUserDeprecatedOK with default headers values +func NewGetRolesForUserDeprecatedOK() *GetRolesForUserDeprecatedOK { + return &GetRolesForUserDeprecatedOK{} +} + +/* +GetRolesForUserDeprecatedOK describes a response with status code 200, with default header values. 
+ +Role assigned users +*/ +type GetRolesForUserDeprecatedOK struct { + Payload models.RolesListResponse +} + +// IsSuccess returns true when this get roles for user deprecated o k response has a 2xx status code +func (o *GetRolesForUserDeprecatedOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this get roles for user deprecated o k response has a 3xx status code +func (o *GetRolesForUserDeprecatedOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get roles for user deprecated o k response has a 4xx status code +func (o *GetRolesForUserDeprecatedOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this get roles for user deprecated o k response has a 5xx status code +func (o *GetRolesForUserDeprecatedOK) IsServerError() bool { + return false +} + +// IsCode returns true when this get roles for user deprecated o k response a status code equal to that given +func (o *GetRolesForUserDeprecatedOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the get roles for user deprecated o k response +func (o *GetRolesForUserDeprecatedOK) Code() int { + return 200 +} + +func (o *GetRolesForUserDeprecatedOK) Error() string { + return fmt.Sprintf("[GET /authz/users/{id}/roles][%d] getRolesForUserDeprecatedOK %+v", 200, o.Payload) +} + +func (o *GetRolesForUserDeprecatedOK) String() string { + return fmt.Sprintf("[GET /authz/users/{id}/roles][%d] getRolesForUserDeprecatedOK %+v", 200, o.Payload) +} + +func (o *GetRolesForUserDeprecatedOK) GetPayload() models.RolesListResponse { + return o.Payload +} + +func (o *GetRolesForUserDeprecatedOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetRolesForUserDeprecatedBadRequest creates a 
GetRolesForUserDeprecatedBadRequest with default headers values +func NewGetRolesForUserDeprecatedBadRequest() *GetRolesForUserDeprecatedBadRequest { + return &GetRolesForUserDeprecatedBadRequest{} +} + +/* +GetRolesForUserDeprecatedBadRequest describes a response with status code 400, with default header values. + +Bad request +*/ +type GetRolesForUserDeprecatedBadRequest struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this get roles for user deprecated bad request response has a 2xx status code +func (o *GetRolesForUserDeprecatedBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get roles for user deprecated bad request response has a 3xx status code +func (o *GetRolesForUserDeprecatedBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get roles for user deprecated bad request response has a 4xx status code +func (o *GetRolesForUserDeprecatedBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this get roles for user deprecated bad request response has a 5xx status code +func (o *GetRolesForUserDeprecatedBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this get roles for user deprecated bad request response a status code equal to that given +func (o *GetRolesForUserDeprecatedBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the get roles for user deprecated bad request response +func (o *GetRolesForUserDeprecatedBadRequest) Code() int { + return 400 +} + +func (o *GetRolesForUserDeprecatedBadRequest) Error() string { + return fmt.Sprintf("[GET /authz/users/{id}/roles][%d] getRolesForUserDeprecatedBadRequest %+v", 400, o.Payload) +} + +func (o *GetRolesForUserDeprecatedBadRequest) String() string { + return fmt.Sprintf("[GET /authz/users/{id}/roles][%d] getRolesForUserDeprecatedBadRequest %+v", 400, o.Payload) +} + +func (o 
*GetRolesForUserDeprecatedBadRequest) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *GetRolesForUserDeprecatedBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetRolesForUserDeprecatedUnauthorized creates a GetRolesForUserDeprecatedUnauthorized with default headers values +func NewGetRolesForUserDeprecatedUnauthorized() *GetRolesForUserDeprecatedUnauthorized { + return &GetRolesForUserDeprecatedUnauthorized{} +} + +/* +GetRolesForUserDeprecatedUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. +*/ +type GetRolesForUserDeprecatedUnauthorized struct { +} + +// IsSuccess returns true when this get roles for user deprecated unauthorized response has a 2xx status code +func (o *GetRolesForUserDeprecatedUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get roles for user deprecated unauthorized response has a 3xx status code +func (o *GetRolesForUserDeprecatedUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get roles for user deprecated unauthorized response has a 4xx status code +func (o *GetRolesForUserDeprecatedUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this get roles for user deprecated unauthorized response has a 5xx status code +func (o *GetRolesForUserDeprecatedUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this get roles for user deprecated unauthorized response a status code equal to that given +func (o *GetRolesForUserDeprecatedUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the get 
roles for user deprecated unauthorized response +func (o *GetRolesForUserDeprecatedUnauthorized) Code() int { + return 401 +} + +func (o *GetRolesForUserDeprecatedUnauthorized) Error() string { + return fmt.Sprintf("[GET /authz/users/{id}/roles][%d] getRolesForUserDeprecatedUnauthorized ", 401) +} + +func (o *GetRolesForUserDeprecatedUnauthorized) String() string { + return fmt.Sprintf("[GET /authz/users/{id}/roles][%d] getRolesForUserDeprecatedUnauthorized ", 401) +} + +func (o *GetRolesForUserDeprecatedUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewGetRolesForUserDeprecatedForbidden creates a GetRolesForUserDeprecatedForbidden with default headers values +func NewGetRolesForUserDeprecatedForbidden() *GetRolesForUserDeprecatedForbidden { + return &GetRolesForUserDeprecatedForbidden{} +} + +/* +GetRolesForUserDeprecatedForbidden describes a response with status code 403, with default header values. 
+ +Forbidden +*/ +type GetRolesForUserDeprecatedForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this get roles for user deprecated forbidden response has a 2xx status code +func (o *GetRolesForUserDeprecatedForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get roles for user deprecated forbidden response has a 3xx status code +func (o *GetRolesForUserDeprecatedForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get roles for user deprecated forbidden response has a 4xx status code +func (o *GetRolesForUserDeprecatedForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this get roles for user deprecated forbidden response has a 5xx status code +func (o *GetRolesForUserDeprecatedForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this get roles for user deprecated forbidden response a status code equal to that given +func (o *GetRolesForUserDeprecatedForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the get roles for user deprecated forbidden response +func (o *GetRolesForUserDeprecatedForbidden) Code() int { + return 403 +} + +func (o *GetRolesForUserDeprecatedForbidden) Error() string { + return fmt.Sprintf("[GET /authz/users/{id}/roles][%d] getRolesForUserDeprecatedForbidden %+v", 403, o.Payload) +} + +func (o *GetRolesForUserDeprecatedForbidden) String() string { + return fmt.Sprintf("[GET /authz/users/{id}/roles][%d] getRolesForUserDeprecatedForbidden %+v", 403, o.Payload) +} + +func (o *GetRolesForUserDeprecatedForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *GetRolesForUserDeprecatedForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := 
consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetRolesForUserDeprecatedNotFound creates a GetRolesForUserDeprecatedNotFound with default headers values +func NewGetRolesForUserDeprecatedNotFound() *GetRolesForUserDeprecatedNotFound { + return &GetRolesForUserDeprecatedNotFound{} +} + +/* +GetRolesForUserDeprecatedNotFound describes a response with status code 404, with default header values. + +no role found for user +*/ +type GetRolesForUserDeprecatedNotFound struct { +} + +// IsSuccess returns true when this get roles for user deprecated not found response has a 2xx status code +func (o *GetRolesForUserDeprecatedNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get roles for user deprecated not found response has a 3xx status code +func (o *GetRolesForUserDeprecatedNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get roles for user deprecated not found response has a 4xx status code +func (o *GetRolesForUserDeprecatedNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this get roles for user deprecated not found response has a 5xx status code +func (o *GetRolesForUserDeprecatedNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this get roles for user deprecated not found response a status code equal to that given +func (o *GetRolesForUserDeprecatedNotFound) IsCode(code int) bool { + return code == 404 +} + +// Code gets the status code for the get roles for user deprecated not found response +func (o *GetRolesForUserDeprecatedNotFound) Code() int { + return 404 +} + +func (o *GetRolesForUserDeprecatedNotFound) Error() string { + return fmt.Sprintf("[GET /authz/users/{id}/roles][%d] getRolesForUserDeprecatedNotFound ", 404) +} + +func (o *GetRolesForUserDeprecatedNotFound) String() string { + return fmt.Sprintf("[GET /authz/users/{id}/roles][%d] 
getRolesForUserDeprecatedNotFound ", 404) +} + +func (o *GetRolesForUserDeprecatedNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewGetRolesForUserDeprecatedUnprocessableEntity creates a GetRolesForUserDeprecatedUnprocessableEntity with default headers values +func NewGetRolesForUserDeprecatedUnprocessableEntity() *GetRolesForUserDeprecatedUnprocessableEntity { + return &GetRolesForUserDeprecatedUnprocessableEntity{} +} + +/* +GetRolesForUserDeprecatedUnprocessableEntity describes a response with status code 422, with default header values. + +Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file? +*/ +type GetRolesForUserDeprecatedUnprocessableEntity struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this get roles for user deprecated unprocessable entity response has a 2xx status code +func (o *GetRolesForUserDeprecatedUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get roles for user deprecated unprocessable entity response has a 3xx status code +func (o *GetRolesForUserDeprecatedUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get roles for user deprecated unprocessable entity response has a 4xx status code +func (o *GetRolesForUserDeprecatedUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this get roles for user deprecated unprocessable entity response has a 5xx status code +func (o *GetRolesForUserDeprecatedUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this get roles for user deprecated unprocessable entity response a status code equal to that given +func (o *GetRolesForUserDeprecatedUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + 
+// Code gets the status code for the get roles for user deprecated unprocessable entity response +func (o *GetRolesForUserDeprecatedUnprocessableEntity) Code() int { + return 422 +} + +func (o *GetRolesForUserDeprecatedUnprocessableEntity) Error() string { + return fmt.Sprintf("[GET /authz/users/{id}/roles][%d] getRolesForUserDeprecatedUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *GetRolesForUserDeprecatedUnprocessableEntity) String() string { + return fmt.Sprintf("[GET /authz/users/{id}/roles][%d] getRolesForUserDeprecatedUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *GetRolesForUserDeprecatedUnprocessableEntity) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *GetRolesForUserDeprecatedUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetRolesForUserDeprecatedInternalServerError creates a GetRolesForUserDeprecatedInternalServerError with default headers values +func NewGetRolesForUserDeprecatedInternalServerError() *GetRolesForUserDeprecatedInternalServerError { + return &GetRolesForUserDeprecatedInternalServerError{} +} + +/* +GetRolesForUserDeprecatedInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+*/ +type GetRolesForUserDeprecatedInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this get roles for user deprecated internal server error response has a 2xx status code +func (o *GetRolesForUserDeprecatedInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get roles for user deprecated internal server error response has a 3xx status code +func (o *GetRolesForUserDeprecatedInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get roles for user deprecated internal server error response has a 4xx status code +func (o *GetRolesForUserDeprecatedInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this get roles for user deprecated internal server error response has a 5xx status code +func (o *GetRolesForUserDeprecatedInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this get roles for user deprecated internal server error response a status code equal to that given +func (o *GetRolesForUserDeprecatedInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the get roles for user deprecated internal server error response +func (o *GetRolesForUserDeprecatedInternalServerError) Code() int { + return 500 +} + +func (o *GetRolesForUserDeprecatedInternalServerError) Error() string { + return fmt.Sprintf("[GET /authz/users/{id}/roles][%d] getRolesForUserDeprecatedInternalServerError %+v", 500, o.Payload) +} + +func (o *GetRolesForUserDeprecatedInternalServerError) String() string { + return fmt.Sprintf("[GET /authz/users/{id}/roles][%d] getRolesForUserDeprecatedInternalServerError %+v", 500, o.Payload) +} + +func (o *GetRolesForUserDeprecatedInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *GetRolesForUserDeprecatedInternalServerError) readResponse(response 
runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/authz/get_roles_for_user_parameters.go b/platform/dbops/binaries/weaviate-src/client/authz/get_roles_for_user_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..6e1133ff0dcda2084fa3a56917e42e2776d0bf4e --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/authz/get_roles_for_user_parameters.go @@ -0,0 +1,230 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewGetRolesForUserParams creates a new GetRolesForUserParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewGetRolesForUserParams() *GetRolesForUserParams { + return &GetRolesForUserParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewGetRolesForUserParamsWithTimeout creates a new GetRolesForUserParams object +// with the ability to set a timeout on a request. 
+func NewGetRolesForUserParamsWithTimeout(timeout time.Duration) *GetRolesForUserParams { + return &GetRolesForUserParams{ + timeout: timeout, + } +} + +// NewGetRolesForUserParamsWithContext creates a new GetRolesForUserParams object +// with the ability to set a context for a request. +func NewGetRolesForUserParamsWithContext(ctx context.Context) *GetRolesForUserParams { + return &GetRolesForUserParams{ + Context: ctx, + } +} + +// NewGetRolesForUserParamsWithHTTPClient creates a new GetRolesForUserParams object +// with the ability to set a custom HTTPClient for a request. +func NewGetRolesForUserParamsWithHTTPClient(client *http.Client) *GetRolesForUserParams { + return &GetRolesForUserParams{ + HTTPClient: client, + } +} + +/* +GetRolesForUserParams contains all the parameters to send to the API endpoint + + for the get roles for user operation. + + Typically these are written to a http.Request. +*/ +type GetRolesForUserParams struct { + + /* ID. + + user name + */ + ID string + + /* IncludeFullRoles. + + Whether to include detailed role information needed the roles permission + */ + IncludeFullRoles *bool + + /* UserType. + + The type of user + */ + UserType string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the get roles for user params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *GetRolesForUserParams) WithDefaults() *GetRolesForUserParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the get roles for user params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *GetRolesForUserParams) SetDefaults() { + var ( + includeFullRolesDefault = bool(false) + ) + + val := GetRolesForUserParams{ + IncludeFullRoles: &includeFullRolesDefault, + } + + val.timeout = o.timeout + val.Context = o.Context + val.HTTPClient = o.HTTPClient + *o = val +} + +// WithTimeout adds the timeout to the get roles for user params +func (o *GetRolesForUserParams) WithTimeout(timeout time.Duration) *GetRolesForUserParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the get roles for user params +func (o *GetRolesForUserParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the get roles for user params +func (o *GetRolesForUserParams) WithContext(ctx context.Context) *GetRolesForUserParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the get roles for user params +func (o *GetRolesForUserParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the get roles for user params +func (o *GetRolesForUserParams) WithHTTPClient(client *http.Client) *GetRolesForUserParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the get roles for user params +func (o *GetRolesForUserParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithID adds the id to the get roles for user params +func (o *GetRolesForUserParams) WithID(id string) *GetRolesForUserParams { + o.SetID(id) + return o +} + +// SetID adds the id to the get roles for user params +func (o *GetRolesForUserParams) SetID(id string) { + o.ID = id +} + +// WithIncludeFullRoles adds the includeFullRoles to the get roles for user params +func (o *GetRolesForUserParams) WithIncludeFullRoles(includeFullRoles *bool) *GetRolesForUserParams { + o.SetIncludeFullRoles(includeFullRoles) + return o +} + +// SetIncludeFullRoles adds the includeFullRoles to the get roles for user params +func (o 
*GetRolesForUserParams) SetIncludeFullRoles(includeFullRoles *bool) { + o.IncludeFullRoles = includeFullRoles +} + +// WithUserType adds the userType to the get roles for user params +func (o *GetRolesForUserParams) WithUserType(userType string) *GetRolesForUserParams { + o.SetUserType(userType) + return o +} + +// SetUserType adds the userType to the get roles for user params +func (o *GetRolesForUserParams) SetUserType(userType string) { + o.UserType = userType +} + +// WriteToRequest writes these params to a swagger request +func (o *GetRolesForUserParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param id + if err := r.SetPathParam("id", o.ID); err != nil { + return err + } + + if o.IncludeFullRoles != nil { + + // query param includeFullRoles + var qrIncludeFullRoles bool + + if o.IncludeFullRoles != nil { + qrIncludeFullRoles = *o.IncludeFullRoles + } + qIncludeFullRoles := swag.FormatBool(qrIncludeFullRoles) + if qIncludeFullRoles != "" { + + if err := r.SetQueryParam("includeFullRoles", qIncludeFullRoles); err != nil { + return err + } + } + } + + // path param userType + if err := r.SetPathParam("userType", o.UserType); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/authz/get_roles_for_user_responses.go b/platform/dbops/binaries/weaviate-src/client/authz/get_roles_for_user_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..66fc84ee7acdc4aaa15ad8a4fb987c96c91e54f8 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/authz/get_roles_for_user_responses.go @@ -0,0 +1,532 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// GetRolesForUserReader is a Reader for the GetRolesForUser structure. +type GetRolesForUserReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *GetRolesForUserReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewGetRolesForUserOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewGetRolesForUserBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 401: + result := NewGetRolesForUserUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewGetRolesForUserForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 404: + result := NewGetRolesForUserNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewGetRolesForUserUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewGetRolesForUserInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewGetRolesForUserOK creates a GetRolesForUserOK with default headers values +func NewGetRolesForUserOK() *GetRolesForUserOK { + return &GetRolesForUserOK{} +} + +/* +GetRolesForUserOK describes a response with status code 200, with default header values. 
+ +Role assigned users +*/ +type GetRolesForUserOK struct { + Payload models.RolesListResponse +} + +// IsSuccess returns true when this get roles for user o k response has a 2xx status code +func (o *GetRolesForUserOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this get roles for user o k response has a 3xx status code +func (o *GetRolesForUserOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get roles for user o k response has a 4xx status code +func (o *GetRolesForUserOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this get roles for user o k response has a 5xx status code +func (o *GetRolesForUserOK) IsServerError() bool { + return false +} + +// IsCode returns true when this get roles for user o k response a status code equal to that given +func (o *GetRolesForUserOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the get roles for user o k response +func (o *GetRolesForUserOK) Code() int { + return 200 +} + +func (o *GetRolesForUserOK) Error() string { + return fmt.Sprintf("[GET /authz/users/{id}/roles/{userType}][%d] getRolesForUserOK %+v", 200, o.Payload) +} + +func (o *GetRolesForUserOK) String() string { + return fmt.Sprintf("[GET /authz/users/{id}/roles/{userType}][%d] getRolesForUserOK %+v", 200, o.Payload) +} + +func (o *GetRolesForUserOK) GetPayload() models.RolesListResponse { + return o.Payload +} + +func (o *GetRolesForUserOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetRolesForUserBadRequest creates a GetRolesForUserBadRequest with default headers values +func NewGetRolesForUserBadRequest() *GetRolesForUserBadRequest { + return &GetRolesForUserBadRequest{} +} + +/* +GetRolesForUserBadRequest 
describes a response with status code 400, with default header values. + +Bad request +*/ +type GetRolesForUserBadRequest struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this get roles for user bad request response has a 2xx status code +func (o *GetRolesForUserBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get roles for user bad request response has a 3xx status code +func (o *GetRolesForUserBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get roles for user bad request response has a 4xx status code +func (o *GetRolesForUserBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this get roles for user bad request response has a 5xx status code +func (o *GetRolesForUserBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this get roles for user bad request response a status code equal to that given +func (o *GetRolesForUserBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the get roles for user bad request response +func (o *GetRolesForUserBadRequest) Code() int { + return 400 +} + +func (o *GetRolesForUserBadRequest) Error() string { + return fmt.Sprintf("[GET /authz/users/{id}/roles/{userType}][%d] getRolesForUserBadRequest %+v", 400, o.Payload) +} + +func (o *GetRolesForUserBadRequest) String() string { + return fmt.Sprintf("[GET /authz/users/{id}/roles/{userType}][%d] getRolesForUserBadRequest %+v", 400, o.Payload) +} + +func (o *GetRolesForUserBadRequest) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *GetRolesForUserBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return 
nil +} + +// NewGetRolesForUserUnauthorized creates a GetRolesForUserUnauthorized with default headers values +func NewGetRolesForUserUnauthorized() *GetRolesForUserUnauthorized { + return &GetRolesForUserUnauthorized{} +} + +/* +GetRolesForUserUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. +*/ +type GetRolesForUserUnauthorized struct { +} + +// IsSuccess returns true when this get roles for user unauthorized response has a 2xx status code +func (o *GetRolesForUserUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get roles for user unauthorized response has a 3xx status code +func (o *GetRolesForUserUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get roles for user unauthorized response has a 4xx status code +func (o *GetRolesForUserUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this get roles for user unauthorized response has a 5xx status code +func (o *GetRolesForUserUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this get roles for user unauthorized response a status code equal to that given +func (o *GetRolesForUserUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the get roles for user unauthorized response +func (o *GetRolesForUserUnauthorized) Code() int { + return 401 +} + +func (o *GetRolesForUserUnauthorized) Error() string { + return fmt.Sprintf("[GET /authz/users/{id}/roles/{userType}][%d] getRolesForUserUnauthorized ", 401) +} + +func (o *GetRolesForUserUnauthorized) String() string { + return fmt.Sprintf("[GET /authz/users/{id}/roles/{userType}][%d] getRolesForUserUnauthorized ", 401) +} + +func (o *GetRolesForUserUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// 
NewGetRolesForUserForbidden creates a GetRolesForUserForbidden with default headers values +func NewGetRolesForUserForbidden() *GetRolesForUserForbidden { + return &GetRolesForUserForbidden{} +} + +/* +GetRolesForUserForbidden describes a response with status code 403, with default header values. + +Forbidden +*/ +type GetRolesForUserForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this get roles for user forbidden response has a 2xx status code +func (o *GetRolesForUserForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get roles for user forbidden response has a 3xx status code +func (o *GetRolesForUserForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get roles for user forbidden response has a 4xx status code +func (o *GetRolesForUserForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this get roles for user forbidden response has a 5xx status code +func (o *GetRolesForUserForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this get roles for user forbidden response a status code equal to that given +func (o *GetRolesForUserForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the get roles for user forbidden response +func (o *GetRolesForUserForbidden) Code() int { + return 403 +} + +func (o *GetRolesForUserForbidden) Error() string { + return fmt.Sprintf("[GET /authz/users/{id}/roles/{userType}][%d] getRolesForUserForbidden %+v", 403, o.Payload) +} + +func (o *GetRolesForUserForbidden) String() string { + return fmt.Sprintf("[GET /authz/users/{id}/roles/{userType}][%d] getRolesForUserForbidden %+v", 403, o.Payload) +} + +func (o *GetRolesForUserForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *GetRolesForUserForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats 
strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetRolesForUserNotFound creates a GetRolesForUserNotFound with default headers values +func NewGetRolesForUserNotFound() *GetRolesForUserNotFound { + return &GetRolesForUserNotFound{} +} + +/* +GetRolesForUserNotFound describes a response with status code 404, with default header values. + +no role found for user +*/ +type GetRolesForUserNotFound struct { +} + +// IsSuccess returns true when this get roles for user not found response has a 2xx status code +func (o *GetRolesForUserNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get roles for user not found response has a 3xx status code +func (o *GetRolesForUserNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get roles for user not found response has a 4xx status code +func (o *GetRolesForUserNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this get roles for user not found response has a 5xx status code +func (o *GetRolesForUserNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this get roles for user not found response a status code equal to that given +func (o *GetRolesForUserNotFound) IsCode(code int) bool { + return code == 404 +} + +// Code gets the status code for the get roles for user not found response +func (o *GetRolesForUserNotFound) Code() int { + return 404 +} + +func (o *GetRolesForUserNotFound) Error() string { + return fmt.Sprintf("[GET /authz/users/{id}/roles/{userType}][%d] getRolesForUserNotFound ", 404) +} + +func (o *GetRolesForUserNotFound) String() string { + return fmt.Sprintf("[GET /authz/users/{id}/roles/{userType}][%d] getRolesForUserNotFound ", 404) +} + +func (o *GetRolesForUserNotFound) readResponse(response 
runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewGetRolesForUserUnprocessableEntity creates a GetRolesForUserUnprocessableEntity with default headers values +func NewGetRolesForUserUnprocessableEntity() *GetRolesForUserUnprocessableEntity { + return &GetRolesForUserUnprocessableEntity{} +} + +/* +GetRolesForUserUnprocessableEntity describes a response with status code 422, with default header values. + +Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file? +*/ +type GetRolesForUserUnprocessableEntity struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this get roles for user unprocessable entity response has a 2xx status code +func (o *GetRolesForUserUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get roles for user unprocessable entity response has a 3xx status code +func (o *GetRolesForUserUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get roles for user unprocessable entity response has a 4xx status code +func (o *GetRolesForUserUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this get roles for user unprocessable entity response has a 5xx status code +func (o *GetRolesForUserUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this get roles for user unprocessable entity response a status code equal to that given +func (o *GetRolesForUserUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + +// Code gets the status code for the get roles for user unprocessable entity response +func (o *GetRolesForUserUnprocessableEntity) Code() int { + return 422 +} + +func (o *GetRolesForUserUnprocessableEntity) Error() string { + return fmt.Sprintf("[GET 
/authz/users/{id}/roles/{userType}][%d] getRolesForUserUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *GetRolesForUserUnprocessableEntity) String() string { + return fmt.Sprintf("[GET /authz/users/{id}/roles/{userType}][%d] getRolesForUserUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *GetRolesForUserUnprocessableEntity) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *GetRolesForUserUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetRolesForUserInternalServerError creates a GetRolesForUserInternalServerError with default headers values +func NewGetRolesForUserInternalServerError() *GetRolesForUserInternalServerError { + return &GetRolesForUserInternalServerError{} +} + +/* +GetRolesForUserInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+*/ +type GetRolesForUserInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this get roles for user internal server error response has a 2xx status code +func (o *GetRolesForUserInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get roles for user internal server error response has a 3xx status code +func (o *GetRolesForUserInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get roles for user internal server error response has a 4xx status code +func (o *GetRolesForUserInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this get roles for user internal server error response has a 5xx status code +func (o *GetRolesForUserInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this get roles for user internal server error response a status code equal to that given +func (o *GetRolesForUserInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the get roles for user internal server error response +func (o *GetRolesForUserInternalServerError) Code() int { + return 500 +} + +func (o *GetRolesForUserInternalServerError) Error() string { + return fmt.Sprintf("[GET /authz/users/{id}/roles/{userType}][%d] getRolesForUserInternalServerError %+v", 500, o.Payload) +} + +func (o *GetRolesForUserInternalServerError) String() string { + return fmt.Sprintf("[GET /authz/users/{id}/roles/{userType}][%d] getRolesForUserInternalServerError %+v", 500, o.Payload) +} + +func (o *GetRolesForUserInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *GetRolesForUserInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := 
consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/authz/get_roles_parameters.go b/platform/dbops/binaries/weaviate-src/client/authz/get_roles_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..95d7503a9a75d6c96d42c7ac23bd13b4ddac8be8 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/authz/get_roles_parameters.go @@ -0,0 +1,139 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewGetRolesParams creates a new GetRolesParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewGetRolesParams() *GetRolesParams { + return &GetRolesParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewGetRolesParamsWithTimeout creates a new GetRolesParams object +// with the ability to set a timeout on a request. +func NewGetRolesParamsWithTimeout(timeout time.Duration) *GetRolesParams { + return &GetRolesParams{ + timeout: timeout, + } +} + +// NewGetRolesParamsWithContext creates a new GetRolesParams object +// with the ability to set a context for a request. 
+func NewGetRolesParamsWithContext(ctx context.Context) *GetRolesParams { + return &GetRolesParams{ + Context: ctx, + } +} + +// NewGetRolesParamsWithHTTPClient creates a new GetRolesParams object +// with the ability to set a custom HTTPClient for a request. +func NewGetRolesParamsWithHTTPClient(client *http.Client) *GetRolesParams { + return &GetRolesParams{ + HTTPClient: client, + } +} + +/* +GetRolesParams contains all the parameters to send to the API endpoint + + for the get roles operation. + + Typically these are written to a http.Request. +*/ +type GetRolesParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the get roles params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *GetRolesParams) WithDefaults() *GetRolesParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the get roles params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *GetRolesParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the get roles params +func (o *GetRolesParams) WithTimeout(timeout time.Duration) *GetRolesParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the get roles params +func (o *GetRolesParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the get roles params +func (o *GetRolesParams) WithContext(ctx context.Context) *GetRolesParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the get roles params +func (o *GetRolesParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the get roles params +func (o *GetRolesParams) WithHTTPClient(client *http.Client) *GetRolesParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the get roles params +func (o *GetRolesParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *GetRolesParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/authz/get_roles_responses.go b/platform/dbops/binaries/weaviate-src/client/authz/get_roles_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..0860ed7dceb969a6124c9f447b42ce181af491d4 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/authz/get_roles_responses.go @@ -0,0 +1,396 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// GetRolesReader is a Reader for the GetRoles structure. +type GetRolesReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *GetRolesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewGetRolesOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewGetRolesBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 401: + result := NewGetRolesUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewGetRolesForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewGetRolesInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewGetRolesOK creates a GetRolesOK with default headers values +func NewGetRolesOK() *GetRolesOK { + return &GetRolesOK{} +} + +/* +GetRolesOK describes a response with status code 200, with default header values. + +Successful response. 
+*/ +type GetRolesOK struct { + Payload models.RolesListResponse +} + +// IsSuccess returns true when this get roles o k response has a 2xx status code +func (o *GetRolesOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this get roles o k response has a 3xx status code +func (o *GetRolesOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get roles o k response has a 4xx status code +func (o *GetRolesOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this get roles o k response has a 5xx status code +func (o *GetRolesOK) IsServerError() bool { + return false +} + +// IsCode returns true when this get roles o k response a status code equal to that given +func (o *GetRolesOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the get roles o k response +func (o *GetRolesOK) Code() int { + return 200 +} + +func (o *GetRolesOK) Error() string { + return fmt.Sprintf("[GET /authz/roles][%d] getRolesOK %+v", 200, o.Payload) +} + +func (o *GetRolesOK) String() string { + return fmt.Sprintf("[GET /authz/roles][%d] getRolesOK %+v", 200, o.Payload) +} + +func (o *GetRolesOK) GetPayload() models.RolesListResponse { + return o.Payload +} + +func (o *GetRolesOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetRolesBadRequest creates a GetRolesBadRequest with default headers values +func NewGetRolesBadRequest() *GetRolesBadRequest { + return &GetRolesBadRequest{} +} + +/* +GetRolesBadRequest describes a response with status code 400, with default header values. + +Malformed request. 
+*/ +type GetRolesBadRequest struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this get roles bad request response has a 2xx status code +func (o *GetRolesBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get roles bad request response has a 3xx status code +func (o *GetRolesBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get roles bad request response has a 4xx status code +func (o *GetRolesBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this get roles bad request response has a 5xx status code +func (o *GetRolesBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this get roles bad request response a status code equal to that given +func (o *GetRolesBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the get roles bad request response +func (o *GetRolesBadRequest) Code() int { + return 400 +} + +func (o *GetRolesBadRequest) Error() string { + return fmt.Sprintf("[GET /authz/roles][%d] getRolesBadRequest %+v", 400, o.Payload) +} + +func (o *GetRolesBadRequest) String() string { + return fmt.Sprintf("[GET /authz/roles][%d] getRolesBadRequest %+v", 400, o.Payload) +} + +func (o *GetRolesBadRequest) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *GetRolesBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetRolesUnauthorized creates a GetRolesUnauthorized with default headers values +func NewGetRolesUnauthorized() *GetRolesUnauthorized { + return &GetRolesUnauthorized{} +} + +/* +GetRolesUnauthorized describes a response with status code 401, with default 
header values. + +Unauthorized or invalid credentials. +*/ +type GetRolesUnauthorized struct { +} + +// IsSuccess returns true when this get roles unauthorized response has a 2xx status code +func (o *GetRolesUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get roles unauthorized response has a 3xx status code +func (o *GetRolesUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get roles unauthorized response has a 4xx status code +func (o *GetRolesUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this get roles unauthorized response has a 5xx status code +func (o *GetRolesUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this get roles unauthorized response a status code equal to that given +func (o *GetRolesUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the get roles unauthorized response +func (o *GetRolesUnauthorized) Code() int { + return 401 +} + +func (o *GetRolesUnauthorized) Error() string { + return fmt.Sprintf("[GET /authz/roles][%d] getRolesUnauthorized ", 401) +} + +func (o *GetRolesUnauthorized) String() string { + return fmt.Sprintf("[GET /authz/roles][%d] getRolesUnauthorized ", 401) +} + +func (o *GetRolesUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewGetRolesForbidden creates a GetRolesForbidden with default headers values +func NewGetRolesForbidden() *GetRolesForbidden { + return &GetRolesForbidden{} +} + +/* +GetRolesForbidden describes a response with status code 403, with default header values. 
+ +Forbidden +*/ +type GetRolesForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this get roles forbidden response has a 2xx status code +func (o *GetRolesForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get roles forbidden response has a 3xx status code +func (o *GetRolesForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get roles forbidden response has a 4xx status code +func (o *GetRolesForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this get roles forbidden response has a 5xx status code +func (o *GetRolesForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this get roles forbidden response a status code equal to that given +func (o *GetRolesForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the get roles forbidden response +func (o *GetRolesForbidden) Code() int { + return 403 +} + +func (o *GetRolesForbidden) Error() string { + return fmt.Sprintf("[GET /authz/roles][%d] getRolesForbidden %+v", 403, o.Payload) +} + +func (o *GetRolesForbidden) String() string { + return fmt.Sprintf("[GET /authz/roles][%d] getRolesForbidden %+v", 403, o.Payload) +} + +func (o *GetRolesForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *GetRolesForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetRolesInternalServerError creates a GetRolesInternalServerError with default headers values +func NewGetRolesInternalServerError() *GetRolesInternalServerError { + return &GetRolesInternalServerError{} +} + +/* +GetRolesInternalServerError describes a response with 
status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. +*/ +type GetRolesInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this get roles internal server error response has a 2xx status code +func (o *GetRolesInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get roles internal server error response has a 3xx status code +func (o *GetRolesInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get roles internal server error response has a 4xx status code +func (o *GetRolesInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this get roles internal server error response has a 5xx status code +func (o *GetRolesInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this get roles internal server error response a status code equal to that given +func (o *GetRolesInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the get roles internal server error response +func (o *GetRolesInternalServerError) Code() int { + return 500 +} + +func (o *GetRolesInternalServerError) Error() string { + return fmt.Sprintf("[GET /authz/roles][%d] getRolesInternalServerError %+v", 500, o.Payload) +} + +func (o *GetRolesInternalServerError) String() string { + return fmt.Sprintf("[GET /authz/roles][%d] getRolesInternalServerError %+v", 500, o.Payload) +} + +func (o *GetRolesInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *GetRolesInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := 
consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/authz/get_users_for_role_deprecated_parameters.go b/platform/dbops/binaries/weaviate-src/client/authz/get_users_for_role_deprecated_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..55c10bfca94065ed9dc469d913f9bfa9d5d5b036 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/authz/get_users_for_role_deprecated_parameters.go @@ -0,0 +1,162 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewGetUsersForRoleDeprecatedParams creates a new GetUsersForRoleDeprecatedParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewGetUsersForRoleDeprecatedParams() *GetUsersForRoleDeprecatedParams { + return &GetUsersForRoleDeprecatedParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewGetUsersForRoleDeprecatedParamsWithTimeout creates a new GetUsersForRoleDeprecatedParams object +// with the ability to set a timeout on a request. 
+func NewGetUsersForRoleDeprecatedParamsWithTimeout(timeout time.Duration) *GetUsersForRoleDeprecatedParams { + return &GetUsersForRoleDeprecatedParams{ + timeout: timeout, + } +} + +// NewGetUsersForRoleDeprecatedParamsWithContext creates a new GetUsersForRoleDeprecatedParams object +// with the ability to set a context for a request. +func NewGetUsersForRoleDeprecatedParamsWithContext(ctx context.Context) *GetUsersForRoleDeprecatedParams { + return &GetUsersForRoleDeprecatedParams{ + Context: ctx, + } +} + +// NewGetUsersForRoleDeprecatedParamsWithHTTPClient creates a new GetUsersForRoleDeprecatedParams object +// with the ability to set a custom HTTPClient for a request. +func NewGetUsersForRoleDeprecatedParamsWithHTTPClient(client *http.Client) *GetUsersForRoleDeprecatedParams { + return &GetUsersForRoleDeprecatedParams{ + HTTPClient: client, + } +} + +/* +GetUsersForRoleDeprecatedParams contains all the parameters to send to the API endpoint + + for the get users for role deprecated operation. + + Typically these are written to a http.Request. +*/ +type GetUsersForRoleDeprecatedParams struct { + + /* ID. + + role name + */ + ID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the get users for role deprecated params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *GetUsersForRoleDeprecatedParams) WithDefaults() *GetUsersForRoleDeprecatedParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the get users for role deprecated params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *GetUsersForRoleDeprecatedParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the get users for role deprecated params +func (o *GetUsersForRoleDeprecatedParams) WithTimeout(timeout time.Duration) *GetUsersForRoleDeprecatedParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the get users for role deprecated params +func (o *GetUsersForRoleDeprecatedParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the get users for role deprecated params +func (o *GetUsersForRoleDeprecatedParams) WithContext(ctx context.Context) *GetUsersForRoleDeprecatedParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the get users for role deprecated params +func (o *GetUsersForRoleDeprecatedParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the get users for role deprecated params +func (o *GetUsersForRoleDeprecatedParams) WithHTTPClient(client *http.Client) *GetUsersForRoleDeprecatedParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the get users for role deprecated params +func (o *GetUsersForRoleDeprecatedParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithID adds the id to the get users for role deprecated params +func (o *GetUsersForRoleDeprecatedParams) WithID(id string) *GetUsersForRoleDeprecatedParams { + o.SetID(id) + return o +} + +// SetID adds the id to the get users for role deprecated params +func (o *GetUsersForRoleDeprecatedParams) SetID(id string) { + o.ID = id +} + +// WriteToRequest writes these params to a swagger request +func (o *GetUsersForRoleDeprecatedParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param id + if err := 
r.SetPathParam("id", o.ID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/authz/get_users_for_role_deprecated_responses.go b/platform/dbops/binaries/weaviate-src/client/authz/get_users_for_role_deprecated_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..6e155fac14385ba3d0ab7146fe8aa1a3abcda7b7 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/authz/get_users_for_role_deprecated_responses.go @@ -0,0 +1,458 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// GetUsersForRoleDeprecatedReader is a Reader for the GetUsersForRoleDeprecated structure. +type GetUsersForRoleDeprecatedReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *GetUsersForRoleDeprecatedReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewGetUsersForRoleDeprecatedOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewGetUsersForRoleDeprecatedBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 401: + result := NewGetUsersForRoleDeprecatedUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewGetUsersForRoleDeprecatedForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 404: + result := NewGetUsersForRoleDeprecatedNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewGetUsersForRoleDeprecatedInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewGetUsersForRoleDeprecatedOK creates a GetUsersForRoleDeprecatedOK with default headers values +func NewGetUsersForRoleDeprecatedOK() *GetUsersForRoleDeprecatedOK { + return &GetUsersForRoleDeprecatedOK{} +} + +/* +GetUsersForRoleDeprecatedOK describes a response with status code 200, with default header values. 
+ +Users assigned to this role +*/ +type GetUsersForRoleDeprecatedOK struct { + Payload []string +} + +// IsSuccess returns true when this get users for role deprecated o k response has a 2xx status code +func (o *GetUsersForRoleDeprecatedOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this get users for role deprecated o k response has a 3xx status code +func (o *GetUsersForRoleDeprecatedOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get users for role deprecated o k response has a 4xx status code +func (o *GetUsersForRoleDeprecatedOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this get users for role deprecated o k response has a 5xx status code +func (o *GetUsersForRoleDeprecatedOK) IsServerError() bool { + return false +} + +// IsCode returns true when this get users for role deprecated o k response a status code equal to that given +func (o *GetUsersForRoleDeprecatedOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the get users for role deprecated o k response +func (o *GetUsersForRoleDeprecatedOK) Code() int { + return 200 +} + +func (o *GetUsersForRoleDeprecatedOK) Error() string { + return fmt.Sprintf("[GET /authz/roles/{id}/users][%d] getUsersForRoleDeprecatedOK %+v", 200, o.Payload) +} + +func (o *GetUsersForRoleDeprecatedOK) String() string { + return fmt.Sprintf("[GET /authz/roles/{id}/users][%d] getUsersForRoleDeprecatedOK %+v", 200, o.Payload) +} + +func (o *GetUsersForRoleDeprecatedOK) GetPayload() []string { + return o.Payload +} + +func (o *GetUsersForRoleDeprecatedOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetUsersForRoleDeprecatedBadRequest creates a GetUsersForRoleDeprecatedBadRequest 
with default headers values +func NewGetUsersForRoleDeprecatedBadRequest() *GetUsersForRoleDeprecatedBadRequest { + return &GetUsersForRoleDeprecatedBadRequest{} +} + +/* +GetUsersForRoleDeprecatedBadRequest describes a response with status code 400, with default header values. + +Bad request +*/ +type GetUsersForRoleDeprecatedBadRequest struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this get users for role deprecated bad request response has a 2xx status code +func (o *GetUsersForRoleDeprecatedBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get users for role deprecated bad request response has a 3xx status code +func (o *GetUsersForRoleDeprecatedBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get users for role deprecated bad request response has a 4xx status code +func (o *GetUsersForRoleDeprecatedBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this get users for role deprecated bad request response has a 5xx status code +func (o *GetUsersForRoleDeprecatedBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this get users for role deprecated bad request response a status code equal to that given +func (o *GetUsersForRoleDeprecatedBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the get users for role deprecated bad request response +func (o *GetUsersForRoleDeprecatedBadRequest) Code() int { + return 400 +} + +func (o *GetUsersForRoleDeprecatedBadRequest) Error() string { + return fmt.Sprintf("[GET /authz/roles/{id}/users][%d] getUsersForRoleDeprecatedBadRequest %+v", 400, o.Payload) +} + +func (o *GetUsersForRoleDeprecatedBadRequest) String() string { + return fmt.Sprintf("[GET /authz/roles/{id}/users][%d] getUsersForRoleDeprecatedBadRequest %+v", 400, o.Payload) +} + +func (o *GetUsersForRoleDeprecatedBadRequest) GetPayload() 
*models.ErrorResponse { + return o.Payload +} + +func (o *GetUsersForRoleDeprecatedBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetUsersForRoleDeprecatedUnauthorized creates a GetUsersForRoleDeprecatedUnauthorized with default headers values +func NewGetUsersForRoleDeprecatedUnauthorized() *GetUsersForRoleDeprecatedUnauthorized { + return &GetUsersForRoleDeprecatedUnauthorized{} +} + +/* +GetUsersForRoleDeprecatedUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. +*/ +type GetUsersForRoleDeprecatedUnauthorized struct { +} + +// IsSuccess returns true when this get users for role deprecated unauthorized response has a 2xx status code +func (o *GetUsersForRoleDeprecatedUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get users for role deprecated unauthorized response has a 3xx status code +func (o *GetUsersForRoleDeprecatedUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get users for role deprecated unauthorized response has a 4xx status code +func (o *GetUsersForRoleDeprecatedUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this get users for role deprecated unauthorized response has a 5xx status code +func (o *GetUsersForRoleDeprecatedUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this get users for role deprecated unauthorized response a status code equal to that given +func (o *GetUsersForRoleDeprecatedUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the get users for role deprecated unauthorized response 
+func (o *GetUsersForRoleDeprecatedUnauthorized) Code() int { + return 401 +} + +func (o *GetUsersForRoleDeprecatedUnauthorized) Error() string { + return fmt.Sprintf("[GET /authz/roles/{id}/users][%d] getUsersForRoleDeprecatedUnauthorized ", 401) +} + +func (o *GetUsersForRoleDeprecatedUnauthorized) String() string { + return fmt.Sprintf("[GET /authz/roles/{id}/users][%d] getUsersForRoleDeprecatedUnauthorized ", 401) +} + +func (o *GetUsersForRoleDeprecatedUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewGetUsersForRoleDeprecatedForbidden creates a GetUsersForRoleDeprecatedForbidden with default headers values +func NewGetUsersForRoleDeprecatedForbidden() *GetUsersForRoleDeprecatedForbidden { + return &GetUsersForRoleDeprecatedForbidden{} +} + +/* +GetUsersForRoleDeprecatedForbidden describes a response with status code 403, with default header values. + +Forbidden +*/ +type GetUsersForRoleDeprecatedForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this get users for role deprecated forbidden response has a 2xx status code +func (o *GetUsersForRoleDeprecatedForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get users for role deprecated forbidden response has a 3xx status code +func (o *GetUsersForRoleDeprecatedForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get users for role deprecated forbidden response has a 4xx status code +func (o *GetUsersForRoleDeprecatedForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this get users for role deprecated forbidden response has a 5xx status code +func (o *GetUsersForRoleDeprecatedForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this get users for role deprecated forbidden response a status code equal to that given +func (o 
*GetUsersForRoleDeprecatedForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the get users for role deprecated forbidden response +func (o *GetUsersForRoleDeprecatedForbidden) Code() int { + return 403 +} + +func (o *GetUsersForRoleDeprecatedForbidden) Error() string { + return fmt.Sprintf("[GET /authz/roles/{id}/users][%d] getUsersForRoleDeprecatedForbidden %+v", 403, o.Payload) +} + +func (o *GetUsersForRoleDeprecatedForbidden) String() string { + return fmt.Sprintf("[GET /authz/roles/{id}/users][%d] getUsersForRoleDeprecatedForbidden %+v", 403, o.Payload) +} + +func (o *GetUsersForRoleDeprecatedForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *GetUsersForRoleDeprecatedForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetUsersForRoleDeprecatedNotFound creates a GetUsersForRoleDeprecatedNotFound with default headers values +func NewGetUsersForRoleDeprecatedNotFound() *GetUsersForRoleDeprecatedNotFound { + return &GetUsersForRoleDeprecatedNotFound{} +} + +/* +GetUsersForRoleDeprecatedNotFound describes a response with status code 404, with default header values. 
+ +no role found +*/ +type GetUsersForRoleDeprecatedNotFound struct { +} + +// IsSuccess returns true when this get users for role deprecated not found response has a 2xx status code +func (o *GetUsersForRoleDeprecatedNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get users for role deprecated not found response has a 3xx status code +func (o *GetUsersForRoleDeprecatedNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get users for role deprecated not found response has a 4xx status code +func (o *GetUsersForRoleDeprecatedNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this get users for role deprecated not found response has a 5xx status code +func (o *GetUsersForRoleDeprecatedNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this get users for role deprecated not found response a status code equal to that given +func (o *GetUsersForRoleDeprecatedNotFound) IsCode(code int) bool { + return code == 404 +} + +// Code gets the status code for the get users for role deprecated not found response +func (o *GetUsersForRoleDeprecatedNotFound) Code() int { + return 404 +} + +func (o *GetUsersForRoleDeprecatedNotFound) Error() string { + return fmt.Sprintf("[GET /authz/roles/{id}/users][%d] getUsersForRoleDeprecatedNotFound ", 404) +} + +func (o *GetUsersForRoleDeprecatedNotFound) String() string { + return fmt.Sprintf("[GET /authz/roles/{id}/users][%d] getUsersForRoleDeprecatedNotFound ", 404) +} + +func (o *GetUsersForRoleDeprecatedNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewGetUsersForRoleDeprecatedInternalServerError creates a GetUsersForRoleDeprecatedInternalServerError with default headers values +func NewGetUsersForRoleDeprecatedInternalServerError() *GetUsersForRoleDeprecatedInternalServerError { + return 
&GetUsersForRoleDeprecatedInternalServerError{} +} + +/* +GetUsersForRoleDeprecatedInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. +*/ +type GetUsersForRoleDeprecatedInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this get users for role deprecated internal server error response has a 2xx status code +func (o *GetUsersForRoleDeprecatedInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get users for role deprecated internal server error response has a 3xx status code +func (o *GetUsersForRoleDeprecatedInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get users for role deprecated internal server error response has a 4xx status code +func (o *GetUsersForRoleDeprecatedInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this get users for role deprecated internal server error response has a 5xx status code +func (o *GetUsersForRoleDeprecatedInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this get users for role deprecated internal server error response a status code equal to that given +func (o *GetUsersForRoleDeprecatedInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the get users for role deprecated internal server error response +func (o *GetUsersForRoleDeprecatedInternalServerError) Code() int { + return 500 +} + +func (o *GetUsersForRoleDeprecatedInternalServerError) Error() string { + return fmt.Sprintf("[GET /authz/roles/{id}/users][%d] getUsersForRoleDeprecatedInternalServerError %+v", 500, o.Payload) +} + +func (o *GetUsersForRoleDeprecatedInternalServerError) String() string { + return 
fmt.Sprintf("[GET /authz/roles/{id}/users][%d] getUsersForRoleDeprecatedInternalServerError %+v", 500, o.Payload) +} + +func (o *GetUsersForRoleDeprecatedInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *GetUsersForRoleDeprecatedInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/authz/get_users_for_role_parameters.go b/platform/dbops/binaries/weaviate-src/client/authz/get_users_for_role_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..c34e732a7e80484634a3211888670b95dde576f1 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/authz/get_users_for_role_parameters.go @@ -0,0 +1,162 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewGetUsersForRoleParams creates a new GetUsersForRoleParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. 
+func NewGetUsersForRoleParams() *GetUsersForRoleParams { + return &GetUsersForRoleParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewGetUsersForRoleParamsWithTimeout creates a new GetUsersForRoleParams object +// with the ability to set a timeout on a request. +func NewGetUsersForRoleParamsWithTimeout(timeout time.Duration) *GetUsersForRoleParams { + return &GetUsersForRoleParams{ + timeout: timeout, + } +} + +// NewGetUsersForRoleParamsWithContext creates a new GetUsersForRoleParams object +// with the ability to set a context for a request. +func NewGetUsersForRoleParamsWithContext(ctx context.Context) *GetUsersForRoleParams { + return &GetUsersForRoleParams{ + Context: ctx, + } +} + +// NewGetUsersForRoleParamsWithHTTPClient creates a new GetUsersForRoleParams object +// with the ability to set a custom HTTPClient for a request. +func NewGetUsersForRoleParamsWithHTTPClient(client *http.Client) *GetUsersForRoleParams { + return &GetUsersForRoleParams{ + HTTPClient: client, + } +} + +/* +GetUsersForRoleParams contains all the parameters to send to the API endpoint + + for the get users for role operation. + + Typically these are written to a http.Request. +*/ +type GetUsersForRoleParams struct { + + /* ID. + + role name + */ + ID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the get users for role params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *GetUsersForRoleParams) WithDefaults() *GetUsersForRoleParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the get users for role params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *GetUsersForRoleParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the get users for role params +func (o *GetUsersForRoleParams) WithTimeout(timeout time.Duration) *GetUsersForRoleParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the get users for role params +func (o *GetUsersForRoleParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the get users for role params +func (o *GetUsersForRoleParams) WithContext(ctx context.Context) *GetUsersForRoleParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the get users for role params +func (o *GetUsersForRoleParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the get users for role params +func (o *GetUsersForRoleParams) WithHTTPClient(client *http.Client) *GetUsersForRoleParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the get users for role params +func (o *GetUsersForRoleParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithID adds the id to the get users for role params +func (o *GetUsersForRoleParams) WithID(id string) *GetUsersForRoleParams { + o.SetID(id) + return o +} + +// SetID adds the id to the get users for role params +func (o *GetUsersForRoleParams) SetID(id string) { + o.ID = id +} + +// WriteToRequest writes these params to a swagger request +func (o *GetUsersForRoleParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param id + if err := r.SetPathParam("id", o.ID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/authz/get_users_for_role_responses.go b/platform/dbops/binaries/weaviate-src/client/authz/get_users_for_role_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..f4d0eddec82abae8f008845d6da3959144b949bd --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/authz/get_users_for_role_responses.go @@ -0,0 +1,562 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "fmt" + "io" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" + + "github.com/weaviate/weaviate/entities/models" +) + +// GetUsersForRoleReader is a Reader for the GetUsersForRole structure. +type GetUsersForRoleReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *GetUsersForRoleReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewGetUsersForRoleOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewGetUsersForRoleBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 401: + result := NewGetUsersForRoleUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewGetUsersForRoleForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 404: + result := NewGetUsersForRoleNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewGetUsersForRoleInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewGetUsersForRoleOK creates a GetUsersForRoleOK with default headers values +func NewGetUsersForRoleOK() *GetUsersForRoleOK { + return &GetUsersForRoleOK{} +} + +/* +GetUsersForRoleOK describes a response with status code 200, with default header values. 
+ +Users assigned to this role +*/ +type GetUsersForRoleOK struct { + Payload []*GetUsersForRoleOKBodyItems0 +} + +// IsSuccess returns true when this get users for role o k response has a 2xx status code +func (o *GetUsersForRoleOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this get users for role o k response has a 3xx status code +func (o *GetUsersForRoleOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get users for role o k response has a 4xx status code +func (o *GetUsersForRoleOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this get users for role o k response has a 5xx status code +func (o *GetUsersForRoleOK) IsServerError() bool { + return false +} + +// IsCode returns true when this get users for role o k response a status code equal to that given +func (o *GetUsersForRoleOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the get users for role o k response +func (o *GetUsersForRoleOK) Code() int { + return 200 +} + +func (o *GetUsersForRoleOK) Error() string { + return fmt.Sprintf("[GET /authz/roles/{id}/user-assignments][%d] getUsersForRoleOK %+v", 200, o.Payload) +} + +func (o *GetUsersForRoleOK) String() string { + return fmt.Sprintf("[GET /authz/roles/{id}/user-assignments][%d] getUsersForRoleOK %+v", 200, o.Payload) +} + +func (o *GetUsersForRoleOK) GetPayload() []*GetUsersForRoleOKBodyItems0 { + return o.Payload +} + +func (o *GetUsersForRoleOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetUsersForRoleBadRequest creates a GetUsersForRoleBadRequest with default headers values +func NewGetUsersForRoleBadRequest() *GetUsersForRoleBadRequest { + return &GetUsersForRoleBadRequest{} +} + +/* 
+GetUsersForRoleBadRequest describes a response with status code 400, with default header values. + +Bad request +*/ +type GetUsersForRoleBadRequest struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this get users for role bad request response has a 2xx status code +func (o *GetUsersForRoleBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get users for role bad request response has a 3xx status code +func (o *GetUsersForRoleBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get users for role bad request response has a 4xx status code +func (o *GetUsersForRoleBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this get users for role bad request response has a 5xx status code +func (o *GetUsersForRoleBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this get users for role bad request response a status code equal to that given +func (o *GetUsersForRoleBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the get users for role bad request response +func (o *GetUsersForRoleBadRequest) Code() int { + return 400 +} + +func (o *GetUsersForRoleBadRequest) Error() string { + return fmt.Sprintf("[GET /authz/roles/{id}/user-assignments][%d] getUsersForRoleBadRequest %+v", 400, o.Payload) +} + +func (o *GetUsersForRoleBadRequest) String() string { + return fmt.Sprintf("[GET /authz/roles/{id}/user-assignments][%d] getUsersForRoleBadRequest %+v", 400, o.Payload) +} + +func (o *GetUsersForRoleBadRequest) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *GetUsersForRoleBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + 
return err + } + + return nil +} + +// NewGetUsersForRoleUnauthorized creates a GetUsersForRoleUnauthorized with default headers values +func NewGetUsersForRoleUnauthorized() *GetUsersForRoleUnauthorized { + return &GetUsersForRoleUnauthorized{} +} + +/* +GetUsersForRoleUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. +*/ +type GetUsersForRoleUnauthorized struct { +} + +// IsSuccess returns true when this get users for role unauthorized response has a 2xx status code +func (o *GetUsersForRoleUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get users for role unauthorized response has a 3xx status code +func (o *GetUsersForRoleUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get users for role unauthorized response has a 4xx status code +func (o *GetUsersForRoleUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this get users for role unauthorized response has a 5xx status code +func (o *GetUsersForRoleUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this get users for role unauthorized response a status code equal to that given +func (o *GetUsersForRoleUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the get users for role unauthorized response +func (o *GetUsersForRoleUnauthorized) Code() int { + return 401 +} + +func (o *GetUsersForRoleUnauthorized) Error() string { + return fmt.Sprintf("[GET /authz/roles/{id}/user-assignments][%d] getUsersForRoleUnauthorized ", 401) +} + +func (o *GetUsersForRoleUnauthorized) String() string { + return fmt.Sprintf("[GET /authz/roles/{id}/user-assignments][%d] getUsersForRoleUnauthorized ", 401) +} + +func (o *GetUsersForRoleUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + 
+ return nil +} + +// NewGetUsersForRoleForbidden creates a GetUsersForRoleForbidden with default headers values +func NewGetUsersForRoleForbidden() *GetUsersForRoleForbidden { + return &GetUsersForRoleForbidden{} +} + +/* +GetUsersForRoleForbidden describes a response with status code 403, with default header values. + +Forbidden +*/ +type GetUsersForRoleForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this get users for role forbidden response has a 2xx status code +func (o *GetUsersForRoleForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get users for role forbidden response has a 3xx status code +func (o *GetUsersForRoleForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get users for role forbidden response has a 4xx status code +func (o *GetUsersForRoleForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this get users for role forbidden response has a 5xx status code +func (o *GetUsersForRoleForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this get users for role forbidden response a status code equal to that given +func (o *GetUsersForRoleForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the get users for role forbidden response +func (o *GetUsersForRoleForbidden) Code() int { + return 403 +} + +func (o *GetUsersForRoleForbidden) Error() string { + return fmt.Sprintf("[GET /authz/roles/{id}/user-assignments][%d] getUsersForRoleForbidden %+v", 403, o.Payload) +} + +func (o *GetUsersForRoleForbidden) String() string { + return fmt.Sprintf("[GET /authz/roles/{id}/user-assignments][%d] getUsersForRoleForbidden %+v", 403, o.Payload) +} + +func (o *GetUsersForRoleForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *GetUsersForRoleForbidden) readResponse(response runtime.ClientResponse, consumer 
runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetUsersForRoleNotFound creates a GetUsersForRoleNotFound with default headers values +func NewGetUsersForRoleNotFound() *GetUsersForRoleNotFound { + return &GetUsersForRoleNotFound{} +} + +/* +GetUsersForRoleNotFound describes a response with status code 404, with default header values. + +no role found +*/ +type GetUsersForRoleNotFound struct { +} + +// IsSuccess returns true when this get users for role not found response has a 2xx status code +func (o *GetUsersForRoleNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get users for role not found response has a 3xx status code +func (o *GetUsersForRoleNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get users for role not found response has a 4xx status code +func (o *GetUsersForRoleNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this get users for role not found response has a 5xx status code +func (o *GetUsersForRoleNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this get users for role not found response a status code equal to that given +func (o *GetUsersForRoleNotFound) IsCode(code int) bool { + return code == 404 +} + +// Code gets the status code for the get users for role not found response +func (o *GetUsersForRoleNotFound) Code() int { + return 404 +} + +func (o *GetUsersForRoleNotFound) Error() string { + return fmt.Sprintf("[GET /authz/roles/{id}/user-assignments][%d] getUsersForRoleNotFound ", 404) +} + +func (o *GetUsersForRoleNotFound) String() string { + return fmt.Sprintf("[GET /authz/roles/{id}/user-assignments][%d] getUsersForRoleNotFound ", 404) +} + +func (o *GetUsersForRoleNotFound) 
readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewGetUsersForRoleInternalServerError creates a GetUsersForRoleInternalServerError with default headers values +func NewGetUsersForRoleInternalServerError() *GetUsersForRoleInternalServerError { + return &GetUsersForRoleInternalServerError{} +} + +/* +GetUsersForRoleInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. +*/ +type GetUsersForRoleInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this get users for role internal server error response has a 2xx status code +func (o *GetUsersForRoleInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get users for role internal server error response has a 3xx status code +func (o *GetUsersForRoleInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get users for role internal server error response has a 4xx status code +func (o *GetUsersForRoleInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this get users for role internal server error response has a 5xx status code +func (o *GetUsersForRoleInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this get users for role internal server error response a status code equal to that given +func (o *GetUsersForRoleInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the get users for role internal server error response +func (o *GetUsersForRoleInternalServerError) Code() int { + return 500 +} + +func (o *GetUsersForRoleInternalServerError) Error() string { + return fmt.Sprintf("[GET 
/authz/roles/{id}/user-assignments][%d] getUsersForRoleInternalServerError %+v", 500, o.Payload) +} + +func (o *GetUsersForRoleInternalServerError) String() string { + return fmt.Sprintf("[GET /authz/roles/{id}/user-assignments][%d] getUsersForRoleInternalServerError %+v", 500, o.Payload) +} + +func (o *GetUsersForRoleInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *GetUsersForRoleInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +/* +GetUsersForRoleOKBodyItems0 get users for role o k body items0 +swagger:model GetUsersForRoleOKBodyItems0 +*/ +type GetUsersForRoleOKBodyItems0 struct { + + // user Id + UserID string `json:"userId,omitempty"` + + // user type + // Required: true + UserType *models.UserTypeOutput `json:"userType"` +} + +// Validate validates this get users for role o k body items0 +func (o *GetUsersForRoleOKBodyItems0) Validate(formats strfmt.Registry) error { + var res []error + + if err := o.validateUserType(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (o *GetUsersForRoleOKBodyItems0) validateUserType(formats strfmt.Registry) error { + + if err := validate.Required("userType", "body", o.UserType); err != nil { + return err + } + + if err := validate.Required("userType", "body", o.UserType); err != nil { + return err + } + + if o.UserType != nil { + if err := o.UserType.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("userType") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("userType") + } + return err + } + } + + return nil +} + +// ContextValidate validate this get users for role o k body items0 based on the context it is used +func (o *GetUsersForRoleOKBodyItems0) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := o.contextValidateUserType(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (o *GetUsersForRoleOKBodyItems0) contextValidateUserType(ctx context.Context, formats strfmt.Registry) error { + + if o.UserType != nil { + if err := o.UserType.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("userType") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("userType") + } + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (o *GetUsersForRoleOKBodyItems0) MarshalBinary() ([]byte, error) { + if o == nil { + return nil, nil + } + return swag.WriteJSON(o) +} + +// UnmarshalBinary interface implementation +func (o *GetUsersForRoleOKBodyItems0) UnmarshalBinary(b []byte) error { + var res GetUsersForRoleOKBodyItems0 + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *o = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/authz/has_permission_parameters.go 
b/platform/dbops/binaries/weaviate-src/client/authz/has_permission_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..a208e446aa010a0a51cabd857d3bfcbda48fcda6 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/authz/has_permission_parameters.go @@ -0,0 +1,183 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// NewHasPermissionParams creates a new HasPermissionParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewHasPermissionParams() *HasPermissionParams { + return &HasPermissionParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewHasPermissionParamsWithTimeout creates a new HasPermissionParams object +// with the ability to set a timeout on a request. +func NewHasPermissionParamsWithTimeout(timeout time.Duration) *HasPermissionParams { + return &HasPermissionParams{ + timeout: timeout, + } +} + +// NewHasPermissionParamsWithContext creates a new HasPermissionParams object +// with the ability to set a context for a request. 
+func NewHasPermissionParamsWithContext(ctx context.Context) *HasPermissionParams { + return &HasPermissionParams{ + Context: ctx, + } +} + +// NewHasPermissionParamsWithHTTPClient creates a new HasPermissionParams object +// with the ability to set a custom HTTPClient for a request. +func NewHasPermissionParamsWithHTTPClient(client *http.Client) *HasPermissionParams { + return &HasPermissionParams{ + HTTPClient: client, + } +} + +/* +HasPermissionParams contains all the parameters to send to the API endpoint + + for the has permission operation. + + Typically these are written to a http.Request. +*/ +type HasPermissionParams struct { + + // Body. + Body *models.Permission + + /* ID. + + role name + */ + ID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the has permission params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *HasPermissionParams) WithDefaults() *HasPermissionParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the has permission params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *HasPermissionParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the has permission params +func (o *HasPermissionParams) WithTimeout(timeout time.Duration) *HasPermissionParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the has permission params +func (o *HasPermissionParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the has permission params +func (o *HasPermissionParams) WithContext(ctx context.Context) *HasPermissionParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the has permission params +func (o *HasPermissionParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the has permission params +func (o *HasPermissionParams) WithHTTPClient(client *http.Client) *HasPermissionParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the has permission params +func (o *HasPermissionParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the has permission params +func (o *HasPermissionParams) WithBody(body *models.Permission) *HasPermissionParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the has permission params +func (o *HasPermissionParams) SetBody(body *models.Permission) { + o.Body = body +} + +// WithID adds the id to the has permission params +func (o *HasPermissionParams) WithID(id string) *HasPermissionParams { + o.SetID(id) + return o +} + +// SetID adds the id to the has permission params +func (o *HasPermissionParams) SetID(id string) { + o.ID = id +} + +// WriteToRequest writes these params to a swagger request +func (o *HasPermissionParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if o.Body != 
nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + // path param id + if err := r.SetPathParam("id", o.ID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/authz/has_permission_responses.go b/platform/dbops/binaries/weaviate-src/client/authz/has_permission_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..77db796738aead6765bd5e722157c55ec3ca6674 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/authz/has_permission_responses.go @@ -0,0 +1,470 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// HasPermissionReader is a Reader for the HasPermission structure. +type HasPermissionReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *HasPermissionReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewHasPermissionOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewHasPermissionBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 401: + result := NewHasPermissionUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewHasPermissionForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewHasPermissionUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewHasPermissionInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewHasPermissionOK creates a HasPermissionOK with default headers values +func NewHasPermissionOK() *HasPermissionOK { + return &HasPermissionOK{} +} + +/* +HasPermissionOK describes a response with status code 200, with default header values. 
+ +Permission check was successful +*/ +type HasPermissionOK struct { + Payload bool +} + +// IsSuccess returns true when this has permission o k response has a 2xx status code +func (o *HasPermissionOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this has permission o k response has a 3xx status code +func (o *HasPermissionOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this has permission o k response has a 4xx status code +func (o *HasPermissionOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this has permission o k response has a 5xx status code +func (o *HasPermissionOK) IsServerError() bool { + return false +} + +// IsCode returns true when this has permission o k response a status code equal to that given +func (o *HasPermissionOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the has permission o k response +func (o *HasPermissionOK) Code() int { + return 200 +} + +func (o *HasPermissionOK) Error() string { + return fmt.Sprintf("[POST /authz/roles/{id}/has-permission][%d] hasPermissionOK %+v", 200, o.Payload) +} + +func (o *HasPermissionOK) String() string { + return fmt.Sprintf("[POST /authz/roles/{id}/has-permission][%d] hasPermissionOK %+v", 200, o.Payload) +} + +func (o *HasPermissionOK) GetPayload() bool { + return o.Payload +} + +func (o *HasPermissionOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewHasPermissionBadRequest creates a HasPermissionBadRequest with default headers values +func NewHasPermissionBadRequest() *HasPermissionBadRequest { + return &HasPermissionBadRequest{} +} + +/* +HasPermissionBadRequest describes a response with status code 400, with default header values. + +Malformed request. 
+*/ +type HasPermissionBadRequest struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this has permission bad request response has a 2xx status code +func (o *HasPermissionBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this has permission bad request response has a 3xx status code +func (o *HasPermissionBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this has permission bad request response has a 4xx status code +func (o *HasPermissionBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this has permission bad request response has a 5xx status code +func (o *HasPermissionBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this has permission bad request response a status code equal to that given +func (o *HasPermissionBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the has permission bad request response +func (o *HasPermissionBadRequest) Code() int { + return 400 +} + +func (o *HasPermissionBadRequest) Error() string { + return fmt.Sprintf("[POST /authz/roles/{id}/has-permission][%d] hasPermissionBadRequest %+v", 400, o.Payload) +} + +func (o *HasPermissionBadRequest) String() string { + return fmt.Sprintf("[POST /authz/roles/{id}/has-permission][%d] hasPermissionBadRequest %+v", 400, o.Payload) +} + +func (o *HasPermissionBadRequest) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *HasPermissionBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewHasPermissionUnauthorized creates a HasPermissionUnauthorized with default headers values +func 
NewHasPermissionUnauthorized() *HasPermissionUnauthorized { + return &HasPermissionUnauthorized{} +} + +/* +HasPermissionUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. +*/ +type HasPermissionUnauthorized struct { +} + +// IsSuccess returns true when this has permission unauthorized response has a 2xx status code +func (o *HasPermissionUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this has permission unauthorized response has a 3xx status code +func (o *HasPermissionUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this has permission unauthorized response has a 4xx status code +func (o *HasPermissionUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this has permission unauthorized response has a 5xx status code +func (o *HasPermissionUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this has permission unauthorized response a status code equal to that given +func (o *HasPermissionUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the has permission unauthorized response +func (o *HasPermissionUnauthorized) Code() int { + return 401 +} + +func (o *HasPermissionUnauthorized) Error() string { + return fmt.Sprintf("[POST /authz/roles/{id}/has-permission][%d] hasPermissionUnauthorized ", 401) +} + +func (o *HasPermissionUnauthorized) String() string { + return fmt.Sprintf("[POST /authz/roles/{id}/has-permission][%d] hasPermissionUnauthorized ", 401) +} + +func (o *HasPermissionUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewHasPermissionForbidden creates a HasPermissionForbidden with default headers values +func NewHasPermissionForbidden() *HasPermissionForbidden { + return 
&HasPermissionForbidden{} +} + +/* +HasPermissionForbidden describes a response with status code 403, with default header values. + +Forbidden +*/ +type HasPermissionForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this has permission forbidden response has a 2xx status code +func (o *HasPermissionForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this has permission forbidden response has a 3xx status code +func (o *HasPermissionForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this has permission forbidden response has a 4xx status code +func (o *HasPermissionForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this has permission forbidden response has a 5xx status code +func (o *HasPermissionForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this has permission forbidden response a status code equal to that given +func (o *HasPermissionForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the has permission forbidden response +func (o *HasPermissionForbidden) Code() int { + return 403 +} + +func (o *HasPermissionForbidden) Error() string { + return fmt.Sprintf("[POST /authz/roles/{id}/has-permission][%d] hasPermissionForbidden %+v", 403, o.Payload) +} + +func (o *HasPermissionForbidden) String() string { + return fmt.Sprintf("[POST /authz/roles/{id}/has-permission][%d] hasPermissionForbidden %+v", 403, o.Payload) +} + +func (o *HasPermissionForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *HasPermissionForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// 
NewHasPermissionUnprocessableEntity creates a HasPermissionUnprocessableEntity with default headers values +func NewHasPermissionUnprocessableEntity() *HasPermissionUnprocessableEntity { + return &HasPermissionUnprocessableEntity{} +} + +/* +HasPermissionUnprocessableEntity describes a response with status code 422, with default header values. + +Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file? +*/ +type HasPermissionUnprocessableEntity struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this has permission unprocessable entity response has a 2xx status code +func (o *HasPermissionUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this has permission unprocessable entity response has a 3xx status code +func (o *HasPermissionUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this has permission unprocessable entity response has a 4xx status code +func (o *HasPermissionUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this has permission unprocessable entity response has a 5xx status code +func (o *HasPermissionUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this has permission unprocessable entity response a status code equal to that given +func (o *HasPermissionUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + +// Code gets the status code for the has permission unprocessable entity response +func (o *HasPermissionUnprocessableEntity) Code() int { + return 422 +} + +func (o *HasPermissionUnprocessableEntity) Error() string { + return fmt.Sprintf("[POST /authz/roles/{id}/has-permission][%d] hasPermissionUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *HasPermissionUnprocessableEntity) String() string { + return fmt.Sprintf("[POST 
/authz/roles/{id}/has-permission][%d] hasPermissionUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *HasPermissionUnprocessableEntity) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *HasPermissionUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewHasPermissionInternalServerError creates a HasPermissionInternalServerError with default headers values +func NewHasPermissionInternalServerError() *HasPermissionInternalServerError { + return &HasPermissionInternalServerError{} +} + +/* +HasPermissionInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+*/ +type HasPermissionInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this has permission internal server error response has a 2xx status code +func (o *HasPermissionInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this has permission internal server error response has a 3xx status code +func (o *HasPermissionInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this has permission internal server error response has a 4xx status code +func (o *HasPermissionInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this has permission internal server error response has a 5xx status code +func (o *HasPermissionInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this has permission internal server error response a status code equal to that given +func (o *HasPermissionInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the has permission internal server error response +func (o *HasPermissionInternalServerError) Code() int { + return 500 +} + +func (o *HasPermissionInternalServerError) Error() string { + return fmt.Sprintf("[POST /authz/roles/{id}/has-permission][%d] hasPermissionInternalServerError %+v", 500, o.Payload) +} + +func (o *HasPermissionInternalServerError) String() string { + return fmt.Sprintf("[POST /authz/roles/{id}/has-permission][%d] hasPermissionInternalServerError %+v", 500, o.Payload) +} + +func (o *HasPermissionInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *HasPermissionInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != 
io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/authz/remove_permissions_parameters.go b/platform/dbops/binaries/weaviate-src/client/authz/remove_permissions_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..d8d6fe9ebfa279ba4419595351b667974973993a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/authz/remove_permissions_parameters.go @@ -0,0 +1,179 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewRemovePermissionsParams creates a new RemovePermissionsParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewRemovePermissionsParams() *RemovePermissionsParams { + return &RemovePermissionsParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewRemovePermissionsParamsWithTimeout creates a new RemovePermissionsParams object +// with the ability to set a timeout on a request. 
+func NewRemovePermissionsParamsWithTimeout(timeout time.Duration) *RemovePermissionsParams { + return &RemovePermissionsParams{ + timeout: timeout, + } +} + +// NewRemovePermissionsParamsWithContext creates a new RemovePermissionsParams object +// with the ability to set a context for a request. +func NewRemovePermissionsParamsWithContext(ctx context.Context) *RemovePermissionsParams { + return &RemovePermissionsParams{ + Context: ctx, + } +} + +// NewRemovePermissionsParamsWithHTTPClient creates a new RemovePermissionsParams object +// with the ability to set a custom HTTPClient for a request. +func NewRemovePermissionsParamsWithHTTPClient(client *http.Client) *RemovePermissionsParams { + return &RemovePermissionsParams{ + HTTPClient: client, + } +} + +/* +RemovePermissionsParams contains all the parameters to send to the API endpoint + + for the remove permissions operation. + + Typically these are written to a http.Request. +*/ +type RemovePermissionsParams struct { + + // Body. + Body RemovePermissionsBody + + /* ID. + + role name + */ + ID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the remove permissions params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *RemovePermissionsParams) WithDefaults() *RemovePermissionsParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the remove permissions params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *RemovePermissionsParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the remove permissions params +func (o *RemovePermissionsParams) WithTimeout(timeout time.Duration) *RemovePermissionsParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the remove permissions params +func (o *RemovePermissionsParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the remove permissions params +func (o *RemovePermissionsParams) WithContext(ctx context.Context) *RemovePermissionsParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the remove permissions params +func (o *RemovePermissionsParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the remove permissions params +func (o *RemovePermissionsParams) WithHTTPClient(client *http.Client) *RemovePermissionsParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the remove permissions params +func (o *RemovePermissionsParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the remove permissions params +func (o *RemovePermissionsParams) WithBody(body RemovePermissionsBody) *RemovePermissionsParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the remove permissions params +func (o *RemovePermissionsParams) SetBody(body RemovePermissionsBody) { + o.Body = body +} + +// WithID adds the id to the remove permissions params +func (o *RemovePermissionsParams) WithID(id string) *RemovePermissionsParams { + o.SetID(id) + return o +} + +// SetID adds the id to the remove permissions params +func (o *RemovePermissionsParams) SetID(id string) { + o.ID = id +} + +// WriteToRequest writes these params to a swagger request +func (o *RemovePermissionsParams) WriteToRequest(r runtime.ClientRequest, reg 
strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + + // path param id + if err := r.SetPathParam("id", o.ID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/authz/remove_permissions_responses.go b/platform/dbops/binaries/weaviate-src/client/authz/remove_permissions_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..0c6bec959666db916b7ebd519f809d03808d2048 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/authz/remove_permissions_responses.go @@ -0,0 +1,631 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "fmt" + "io" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" + + "github.com/weaviate/weaviate/entities/models" +) + +// RemovePermissionsReader is a Reader for the RemovePermissions structure. +type RemovePermissionsReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *RemovePermissionsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewRemovePermissionsOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewRemovePermissionsBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 401: + result := NewRemovePermissionsUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewRemovePermissionsForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 404: + result := NewRemovePermissionsNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewRemovePermissionsUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewRemovePermissionsInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewRemovePermissionsOK creates a RemovePermissionsOK with default headers values +func NewRemovePermissionsOK() *RemovePermissionsOK { + return &RemovePermissionsOK{} +} + +/* +RemovePermissionsOK describes a response with status code 200, with default header values. 
+ +Permissions removed successfully +*/ +type RemovePermissionsOK struct { +} + +// IsSuccess returns true when this remove permissions o k response has a 2xx status code +func (o *RemovePermissionsOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this remove permissions o k response has a 3xx status code +func (o *RemovePermissionsOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this remove permissions o k response has a 4xx status code +func (o *RemovePermissionsOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this remove permissions o k response has a 5xx status code +func (o *RemovePermissionsOK) IsServerError() bool { + return false +} + +// IsCode returns true when this remove permissions o k response a status code equal to that given +func (o *RemovePermissionsOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the remove permissions o k response +func (o *RemovePermissionsOK) Code() int { + return 200 +} + +func (o *RemovePermissionsOK) Error() string { + return fmt.Sprintf("[POST /authz/roles/{id}/remove-permissions][%d] removePermissionsOK ", 200) +} + +func (o *RemovePermissionsOK) String() string { + return fmt.Sprintf("[POST /authz/roles/{id}/remove-permissions][%d] removePermissionsOK ", 200) +} + +func (o *RemovePermissionsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewRemovePermissionsBadRequest creates a RemovePermissionsBadRequest with default headers values +func NewRemovePermissionsBadRequest() *RemovePermissionsBadRequest { + return &RemovePermissionsBadRequest{} +} + +/* +RemovePermissionsBadRequest describes a response with status code 400, with default header values. + +Malformed request. 
+*/ +type RemovePermissionsBadRequest struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this remove permissions bad request response has a 2xx status code +func (o *RemovePermissionsBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this remove permissions bad request response has a 3xx status code +func (o *RemovePermissionsBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this remove permissions bad request response has a 4xx status code +func (o *RemovePermissionsBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this remove permissions bad request response has a 5xx status code +func (o *RemovePermissionsBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this remove permissions bad request response a status code equal to that given +func (o *RemovePermissionsBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the remove permissions bad request response +func (o *RemovePermissionsBadRequest) Code() int { + return 400 +} + +func (o *RemovePermissionsBadRequest) Error() string { + return fmt.Sprintf("[POST /authz/roles/{id}/remove-permissions][%d] removePermissionsBadRequest %+v", 400, o.Payload) +} + +func (o *RemovePermissionsBadRequest) String() string { + return fmt.Sprintf("[POST /authz/roles/{id}/remove-permissions][%d] removePermissionsBadRequest %+v", 400, o.Payload) +} + +func (o *RemovePermissionsBadRequest) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *RemovePermissionsBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewRemovePermissionsUnauthorized creates a 
RemovePermissionsUnauthorized with default headers values +func NewRemovePermissionsUnauthorized() *RemovePermissionsUnauthorized { + return &RemovePermissionsUnauthorized{} +} + +/* +RemovePermissionsUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. +*/ +type RemovePermissionsUnauthorized struct { +} + +// IsSuccess returns true when this remove permissions unauthorized response has a 2xx status code +func (o *RemovePermissionsUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this remove permissions unauthorized response has a 3xx status code +func (o *RemovePermissionsUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this remove permissions unauthorized response has a 4xx status code +func (o *RemovePermissionsUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this remove permissions unauthorized response has a 5xx status code +func (o *RemovePermissionsUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this remove permissions unauthorized response a status code equal to that given +func (o *RemovePermissionsUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the remove permissions unauthorized response +func (o *RemovePermissionsUnauthorized) Code() int { + return 401 +} + +func (o *RemovePermissionsUnauthorized) Error() string { + return fmt.Sprintf("[POST /authz/roles/{id}/remove-permissions][%d] removePermissionsUnauthorized ", 401) +} + +func (o *RemovePermissionsUnauthorized) String() string { + return fmt.Sprintf("[POST /authz/roles/{id}/remove-permissions][%d] removePermissionsUnauthorized ", 401) +} + +func (o *RemovePermissionsUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// 
NewRemovePermissionsForbidden creates a RemovePermissionsForbidden with default headers values +func NewRemovePermissionsForbidden() *RemovePermissionsForbidden { + return &RemovePermissionsForbidden{} +} + +/* +RemovePermissionsForbidden describes a response with status code 403, with default header values. + +Forbidden +*/ +type RemovePermissionsForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this remove permissions forbidden response has a 2xx status code +func (o *RemovePermissionsForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this remove permissions forbidden response has a 3xx status code +func (o *RemovePermissionsForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this remove permissions forbidden response has a 4xx status code +func (o *RemovePermissionsForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this remove permissions forbidden response has a 5xx status code +func (o *RemovePermissionsForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this remove permissions forbidden response a status code equal to that given +func (o *RemovePermissionsForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the remove permissions forbidden response +func (o *RemovePermissionsForbidden) Code() int { + return 403 +} + +func (o *RemovePermissionsForbidden) Error() string { + return fmt.Sprintf("[POST /authz/roles/{id}/remove-permissions][%d] removePermissionsForbidden %+v", 403, o.Payload) +} + +func (o *RemovePermissionsForbidden) String() string { + return fmt.Sprintf("[POST /authz/roles/{id}/remove-permissions][%d] removePermissionsForbidden %+v", 403, o.Payload) +} + +func (o *RemovePermissionsForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *RemovePermissionsForbidden) readResponse(response runtime.ClientResponse, 
consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewRemovePermissionsNotFound creates a RemovePermissionsNotFound with default headers values +func NewRemovePermissionsNotFound() *RemovePermissionsNotFound { + return &RemovePermissionsNotFound{} +} + +/* +RemovePermissionsNotFound describes a response with status code 404, with default header values. + +no role found +*/ +type RemovePermissionsNotFound struct { +} + +// IsSuccess returns true when this remove permissions not found response has a 2xx status code +func (o *RemovePermissionsNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this remove permissions not found response has a 3xx status code +func (o *RemovePermissionsNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this remove permissions not found response has a 4xx status code +func (o *RemovePermissionsNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this remove permissions not found response has a 5xx status code +func (o *RemovePermissionsNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this remove permissions not found response a status code equal to that given +func (o *RemovePermissionsNotFound) IsCode(code int) bool { + return code == 404 +} + +// Code gets the status code for the remove permissions not found response +func (o *RemovePermissionsNotFound) Code() int { + return 404 +} + +func (o *RemovePermissionsNotFound) Error() string { + return fmt.Sprintf("[POST /authz/roles/{id}/remove-permissions][%d] removePermissionsNotFound ", 404) +} + +func (o *RemovePermissionsNotFound) String() string { + return fmt.Sprintf("[POST /authz/roles/{id}/remove-permissions][%d] removePermissionsNotFound ", 404) +} + 
+func (o *RemovePermissionsNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewRemovePermissionsUnprocessableEntity creates a RemovePermissionsUnprocessableEntity with default headers values +func NewRemovePermissionsUnprocessableEntity() *RemovePermissionsUnprocessableEntity { + return &RemovePermissionsUnprocessableEntity{} +} + +/* +RemovePermissionsUnprocessableEntity describes a response with status code 422, with default header values. + +Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file? +*/ +type RemovePermissionsUnprocessableEntity struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this remove permissions unprocessable entity response has a 2xx status code +func (o *RemovePermissionsUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this remove permissions unprocessable entity response has a 3xx status code +func (o *RemovePermissionsUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this remove permissions unprocessable entity response has a 4xx status code +func (o *RemovePermissionsUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this remove permissions unprocessable entity response has a 5xx status code +func (o *RemovePermissionsUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this remove permissions unprocessable entity response a status code equal to that given +func (o *RemovePermissionsUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + +// Code gets the status code for the remove permissions unprocessable entity response +func (o *RemovePermissionsUnprocessableEntity) Code() int { + return 422 +} + +func (o 
*RemovePermissionsUnprocessableEntity) Error() string { + return fmt.Sprintf("[POST /authz/roles/{id}/remove-permissions][%d] removePermissionsUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *RemovePermissionsUnprocessableEntity) String() string { + return fmt.Sprintf("[POST /authz/roles/{id}/remove-permissions][%d] removePermissionsUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *RemovePermissionsUnprocessableEntity) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *RemovePermissionsUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewRemovePermissionsInternalServerError creates a RemovePermissionsInternalServerError with default headers values +func NewRemovePermissionsInternalServerError() *RemovePermissionsInternalServerError { + return &RemovePermissionsInternalServerError{} +} + +/* +RemovePermissionsInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+*/ +type RemovePermissionsInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this remove permissions internal server error response has a 2xx status code +func (o *RemovePermissionsInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this remove permissions internal server error response has a 3xx status code +func (o *RemovePermissionsInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this remove permissions internal server error response has a 4xx status code +func (o *RemovePermissionsInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this remove permissions internal server error response has a 5xx status code +func (o *RemovePermissionsInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this remove permissions internal server error response a status code equal to that given +func (o *RemovePermissionsInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the remove permissions internal server error response +func (o *RemovePermissionsInternalServerError) Code() int { + return 500 +} + +func (o *RemovePermissionsInternalServerError) Error() string { + return fmt.Sprintf("[POST /authz/roles/{id}/remove-permissions][%d] removePermissionsInternalServerError %+v", 500, o.Payload) +} + +func (o *RemovePermissionsInternalServerError) String() string { + return fmt.Sprintf("[POST /authz/roles/{id}/remove-permissions][%d] removePermissionsInternalServerError %+v", 500, o.Payload) +} + +func (o *RemovePermissionsInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *RemovePermissionsInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response 
payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +/* +RemovePermissionsBody remove permissions body +swagger:model RemovePermissionsBody +*/ +type RemovePermissionsBody struct { + + // permissions to remove from the role + // Required: true + Permissions []*models.Permission `json:"permissions"` +} + +// Validate validates this remove permissions body +func (o *RemovePermissionsBody) Validate(formats strfmt.Registry) error { + var res []error + + if err := o.validatePermissions(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (o *RemovePermissionsBody) validatePermissions(formats strfmt.Registry) error { + + if err := validate.Required("body"+"."+"permissions", "body", o.Permissions); err != nil { + return err + } + + for i := 0; i < len(o.Permissions); i++ { + if swag.IsZero(o.Permissions[i]) { // not required + continue + } + + if o.Permissions[i] != nil { + if err := o.Permissions[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("body" + "." + "permissions" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("body" + "." + "permissions" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// ContextValidate validate this remove permissions body based on the context it is used +func (o *RemovePermissionsBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := o.contextValidatePermissions(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (o *RemovePermissionsBody) contextValidatePermissions(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(o.Permissions); i++ { + + if o.Permissions[i] != nil { + if err := o.Permissions[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("body" + "." + "permissions" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("body" + "." + "permissions" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (o *RemovePermissionsBody) MarshalBinary() ([]byte, error) { + if o == nil { + return nil, nil + } + return swag.WriteJSON(o) +} + +// UnmarshalBinary interface implementation +func (o *RemovePermissionsBody) UnmarshalBinary(b []byte) error { + var res RemovePermissionsBody + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *o = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/authz/revoke_role_from_group_parameters.go b/platform/dbops/binaries/weaviate-src/client/authz/revoke_role_from_group_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..efd2a4649c2b6fdf90afa89a808baf91d13ce595 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/authz/revoke_role_from_group_parameters.go @@ -0,0 +1,179 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewRevokeRoleFromGroupParams creates a new RevokeRoleFromGroupParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewRevokeRoleFromGroupParams() *RevokeRoleFromGroupParams { + return &RevokeRoleFromGroupParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewRevokeRoleFromGroupParamsWithTimeout creates a new RevokeRoleFromGroupParams object +// with the ability to set a timeout on a request. +func NewRevokeRoleFromGroupParamsWithTimeout(timeout time.Duration) *RevokeRoleFromGroupParams { + return &RevokeRoleFromGroupParams{ + timeout: timeout, + } +} + +// NewRevokeRoleFromGroupParamsWithContext creates a new RevokeRoleFromGroupParams object +// with the ability to set a context for a request. +func NewRevokeRoleFromGroupParamsWithContext(ctx context.Context) *RevokeRoleFromGroupParams { + return &RevokeRoleFromGroupParams{ + Context: ctx, + } +} + +// NewRevokeRoleFromGroupParamsWithHTTPClient creates a new RevokeRoleFromGroupParams object +// with the ability to set a custom HTTPClient for a request. +func NewRevokeRoleFromGroupParamsWithHTTPClient(client *http.Client) *RevokeRoleFromGroupParams { + return &RevokeRoleFromGroupParams{ + HTTPClient: client, + } +} + +/* +RevokeRoleFromGroupParams contains all the parameters to send to the API endpoint + + for the revoke role from group operation. + + Typically these are written to a http.Request. +*/ +type RevokeRoleFromGroupParams struct { + + // Body. + Body RevokeRoleFromGroupBody + + /* ID. 
+ + group name + */ + ID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the revoke role from group params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *RevokeRoleFromGroupParams) WithDefaults() *RevokeRoleFromGroupParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the revoke role from group params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *RevokeRoleFromGroupParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the revoke role from group params +func (o *RevokeRoleFromGroupParams) WithTimeout(timeout time.Duration) *RevokeRoleFromGroupParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the revoke role from group params +func (o *RevokeRoleFromGroupParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the revoke role from group params +func (o *RevokeRoleFromGroupParams) WithContext(ctx context.Context) *RevokeRoleFromGroupParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the revoke role from group params +func (o *RevokeRoleFromGroupParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the revoke role from group params +func (o *RevokeRoleFromGroupParams) WithHTTPClient(client *http.Client) *RevokeRoleFromGroupParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the revoke role from group params +func (o *RevokeRoleFromGroupParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the revoke role from group params +func (o *RevokeRoleFromGroupParams) WithBody(body RevokeRoleFromGroupBody) *RevokeRoleFromGroupParams { + 
o.SetBody(body) + return o +} + +// SetBody adds the body to the revoke role from group params +func (o *RevokeRoleFromGroupParams) SetBody(body RevokeRoleFromGroupBody) { + o.Body = body +} + +// WithID adds the id to the revoke role from group params +func (o *RevokeRoleFromGroupParams) WithID(id string) *RevokeRoleFromGroupParams { + o.SetID(id) + return o +} + +// SetID adds the id to the revoke role from group params +func (o *RevokeRoleFromGroupParams) SetID(id string) { + o.ID = id +} + +// WriteToRequest writes these params to a swagger request +func (o *RevokeRoleFromGroupParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + + // path param id + if err := r.SetPathParam("id", o.ID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/authz/revoke_role_from_group_responses.go b/platform/dbops/binaries/weaviate-src/client/authz/revoke_role_from_group_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..667ed6d43a86a21ed46febd2b364c655ffe084d2 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/authz/revoke_role_from_group_responses.go @@ -0,0 +1,541 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "fmt" + "io" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + + "github.com/weaviate/weaviate/entities/models" +) + +// RevokeRoleFromGroupReader is a Reader for the RevokeRoleFromGroup structure. +type RevokeRoleFromGroupReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *RevokeRoleFromGroupReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewRevokeRoleFromGroupOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewRevokeRoleFromGroupBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 401: + result := NewRevokeRoleFromGroupUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewRevokeRoleFromGroupForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 404: + result := NewRevokeRoleFromGroupNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewRevokeRoleFromGroupInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewRevokeRoleFromGroupOK creates a 
RevokeRoleFromGroupOK with default headers values +func NewRevokeRoleFromGroupOK() *RevokeRoleFromGroupOK { + return &RevokeRoleFromGroupOK{} +} + +/* +RevokeRoleFromGroupOK describes a response with status code 200, with default header values. + +Role revoked successfully +*/ +type RevokeRoleFromGroupOK struct { +} + +// IsSuccess returns true when this revoke role from group o k response has a 2xx status code +func (o *RevokeRoleFromGroupOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this revoke role from group o k response has a 3xx status code +func (o *RevokeRoleFromGroupOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this revoke role from group o k response has a 4xx status code +func (o *RevokeRoleFromGroupOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this revoke role from group o k response has a 5xx status code +func (o *RevokeRoleFromGroupOK) IsServerError() bool { + return false +} + +// IsCode returns true when this revoke role from group o k response a status code equal to that given +func (o *RevokeRoleFromGroupOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the revoke role from group o k response +func (o *RevokeRoleFromGroupOK) Code() int { + return 200 +} + +func (o *RevokeRoleFromGroupOK) Error() string { + return fmt.Sprintf("[POST /authz/groups/{id}/revoke][%d] revokeRoleFromGroupOK ", 200) +} + +func (o *RevokeRoleFromGroupOK) String() string { + return fmt.Sprintf("[POST /authz/groups/{id}/revoke][%d] revokeRoleFromGroupOK ", 200) +} + +func (o *RevokeRoleFromGroupOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewRevokeRoleFromGroupBadRequest creates a RevokeRoleFromGroupBadRequest with default headers values +func NewRevokeRoleFromGroupBadRequest() *RevokeRoleFromGroupBadRequest { + return &RevokeRoleFromGroupBadRequest{} 
+} + +/* +RevokeRoleFromGroupBadRequest describes a response with status code 400, with default header values. + +Bad request +*/ +type RevokeRoleFromGroupBadRequest struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this revoke role from group bad request response has a 2xx status code +func (o *RevokeRoleFromGroupBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this revoke role from group bad request response has a 3xx status code +func (o *RevokeRoleFromGroupBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this revoke role from group bad request response has a 4xx status code +func (o *RevokeRoleFromGroupBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this revoke role from group bad request response has a 5xx status code +func (o *RevokeRoleFromGroupBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this revoke role from group bad request response a status code equal to that given +func (o *RevokeRoleFromGroupBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the revoke role from group bad request response +func (o *RevokeRoleFromGroupBadRequest) Code() int { + return 400 +} + +func (o *RevokeRoleFromGroupBadRequest) Error() string { + return fmt.Sprintf("[POST /authz/groups/{id}/revoke][%d] revokeRoleFromGroupBadRequest %+v", 400, o.Payload) +} + +func (o *RevokeRoleFromGroupBadRequest) String() string { + return fmt.Sprintf("[POST /authz/groups/{id}/revoke][%d] revokeRoleFromGroupBadRequest %+v", 400, o.Payload) +} + +func (o *RevokeRoleFromGroupBadRequest) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *RevokeRoleFromGroupBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := 
consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewRevokeRoleFromGroupUnauthorized creates a RevokeRoleFromGroupUnauthorized with default headers values +func NewRevokeRoleFromGroupUnauthorized() *RevokeRoleFromGroupUnauthorized { + return &RevokeRoleFromGroupUnauthorized{} +} + +/* +RevokeRoleFromGroupUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. +*/ +type RevokeRoleFromGroupUnauthorized struct { +} + +// IsSuccess returns true when this revoke role from group unauthorized response has a 2xx status code +func (o *RevokeRoleFromGroupUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this revoke role from group unauthorized response has a 3xx status code +func (o *RevokeRoleFromGroupUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this revoke role from group unauthorized response has a 4xx status code +func (o *RevokeRoleFromGroupUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this revoke role from group unauthorized response has a 5xx status code +func (o *RevokeRoleFromGroupUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this revoke role from group unauthorized response a status code equal to that given +func (o *RevokeRoleFromGroupUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the revoke role from group unauthorized response +func (o *RevokeRoleFromGroupUnauthorized) Code() int { + return 401 +} + +func (o *RevokeRoleFromGroupUnauthorized) Error() string { + return fmt.Sprintf("[POST /authz/groups/{id}/revoke][%d] revokeRoleFromGroupUnauthorized ", 401) +} + +func (o *RevokeRoleFromGroupUnauthorized) String() string { + return fmt.Sprintf("[POST /authz/groups/{id}/revoke][%d] revokeRoleFromGroupUnauthorized ", 
401) +} + +func (o *RevokeRoleFromGroupUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewRevokeRoleFromGroupForbidden creates a RevokeRoleFromGroupForbidden with default headers values +func NewRevokeRoleFromGroupForbidden() *RevokeRoleFromGroupForbidden { + return &RevokeRoleFromGroupForbidden{} +} + +/* +RevokeRoleFromGroupForbidden describes a response with status code 403, with default header values. + +Forbidden +*/ +type RevokeRoleFromGroupForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this revoke role from group forbidden response has a 2xx status code +func (o *RevokeRoleFromGroupForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this revoke role from group forbidden response has a 3xx status code +func (o *RevokeRoleFromGroupForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this revoke role from group forbidden response has a 4xx status code +func (o *RevokeRoleFromGroupForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this revoke role from group forbidden response has a 5xx status code +func (o *RevokeRoleFromGroupForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this revoke role from group forbidden response a status code equal to that given +func (o *RevokeRoleFromGroupForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the revoke role from group forbidden response +func (o *RevokeRoleFromGroupForbidden) Code() int { + return 403 +} + +func (o *RevokeRoleFromGroupForbidden) Error() string { + return fmt.Sprintf("[POST /authz/groups/{id}/revoke][%d] revokeRoleFromGroupForbidden %+v", 403, o.Payload) +} + +func (o *RevokeRoleFromGroupForbidden) String() string { + return fmt.Sprintf("[POST /authz/groups/{id}/revoke][%d] 
revokeRoleFromGroupForbidden %+v", 403, o.Payload) +} + +func (o *RevokeRoleFromGroupForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *RevokeRoleFromGroupForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewRevokeRoleFromGroupNotFound creates a RevokeRoleFromGroupNotFound with default headers values +func NewRevokeRoleFromGroupNotFound() *RevokeRoleFromGroupNotFound { + return &RevokeRoleFromGroupNotFound{} +} + +/* +RevokeRoleFromGroupNotFound describes a response with status code 404, with default header values. + +role or group is not found. +*/ +type RevokeRoleFromGroupNotFound struct { +} + +// IsSuccess returns true when this revoke role from group not found response has a 2xx status code +func (o *RevokeRoleFromGroupNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this revoke role from group not found response has a 3xx status code +func (o *RevokeRoleFromGroupNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this revoke role from group not found response has a 4xx status code +func (o *RevokeRoleFromGroupNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this revoke role from group not found response has a 5xx status code +func (o *RevokeRoleFromGroupNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this revoke role from group not found response a status code equal to that given +func (o *RevokeRoleFromGroupNotFound) IsCode(code int) bool { + return code == 404 +} + +// Code gets the status code for the revoke role from group not found response +func (o *RevokeRoleFromGroupNotFound) Code() int { + return 404 +} + +func (o 
*RevokeRoleFromGroupNotFound) Error() string { + return fmt.Sprintf("[POST /authz/groups/{id}/revoke][%d] revokeRoleFromGroupNotFound ", 404) +} + +func (o *RevokeRoleFromGroupNotFound) String() string { + return fmt.Sprintf("[POST /authz/groups/{id}/revoke][%d] revokeRoleFromGroupNotFound ", 404) +} + +func (o *RevokeRoleFromGroupNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewRevokeRoleFromGroupInternalServerError creates a RevokeRoleFromGroupInternalServerError with default headers values +func NewRevokeRoleFromGroupInternalServerError() *RevokeRoleFromGroupInternalServerError { + return &RevokeRoleFromGroupInternalServerError{} +} + +/* +RevokeRoleFromGroupInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. +*/ +type RevokeRoleFromGroupInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this revoke role from group internal server error response has a 2xx status code +func (o *RevokeRoleFromGroupInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this revoke role from group internal server error response has a 3xx status code +func (o *RevokeRoleFromGroupInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this revoke role from group internal server error response has a 4xx status code +func (o *RevokeRoleFromGroupInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this revoke role from group internal server error response has a 5xx status code +func (o *RevokeRoleFromGroupInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this revoke role from group internal server error 
response a status code equal to that given +func (o *RevokeRoleFromGroupInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the revoke role from group internal server error response +func (o *RevokeRoleFromGroupInternalServerError) Code() int { + return 500 +} + +func (o *RevokeRoleFromGroupInternalServerError) Error() string { + return fmt.Sprintf("[POST /authz/groups/{id}/revoke][%d] revokeRoleFromGroupInternalServerError %+v", 500, o.Payload) +} + +func (o *RevokeRoleFromGroupInternalServerError) String() string { + return fmt.Sprintf("[POST /authz/groups/{id}/revoke][%d] revokeRoleFromGroupInternalServerError %+v", 500, o.Payload) +} + +func (o *RevokeRoleFromGroupInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *RevokeRoleFromGroupInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +/* +RevokeRoleFromGroupBody revoke role from group body +swagger:model RevokeRoleFromGroupBody +*/ +type RevokeRoleFromGroupBody struct { + + // group type + GroupType models.GroupType `json:"groupType,omitempty"` + + // the roles that revoked from group + Roles []string `json:"roles"` +} + +// Validate validates this revoke role from group body +func (o *RevokeRoleFromGroupBody) Validate(formats strfmt.Registry) error { + var res []error + + if err := o.validateGroupType(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (o *RevokeRoleFromGroupBody) validateGroupType(formats strfmt.Registry) error { + if swag.IsZero(o.GroupType) { // not required + return nil + } + + if err := o.GroupType.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("body" + "." + "groupType") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("body" + "." + "groupType") + } + return err + } + + return nil +} + +// ContextValidate validate this revoke role from group body based on the context it is used +func (o *RevokeRoleFromGroupBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := o.contextValidateGroupType(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (o *RevokeRoleFromGroupBody) contextValidateGroupType(ctx context.Context, formats strfmt.Registry) error { + + if err := o.GroupType.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("body" + "." + "groupType") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("body" + "." 
+ "groupType") + } + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (o *RevokeRoleFromGroupBody) MarshalBinary() ([]byte, error) { + if o == nil { + return nil, nil + } + return swag.WriteJSON(o) +} + +// UnmarshalBinary interface implementation +func (o *RevokeRoleFromGroupBody) UnmarshalBinary(b []byte) error { + var res RevokeRoleFromGroupBody + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *o = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/authz/revoke_role_from_user_parameters.go b/platform/dbops/binaries/weaviate-src/client/authz/revoke_role_from_user_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..97104ba3824158f36c26d815ee4c340bb121b06d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/authz/revoke_role_from_user_parameters.go @@ -0,0 +1,179 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewRevokeRoleFromUserParams creates a new RevokeRoleFromUserParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. 
+func NewRevokeRoleFromUserParams() *RevokeRoleFromUserParams { + return &RevokeRoleFromUserParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewRevokeRoleFromUserParamsWithTimeout creates a new RevokeRoleFromUserParams object +// with the ability to set a timeout on a request. +func NewRevokeRoleFromUserParamsWithTimeout(timeout time.Duration) *RevokeRoleFromUserParams { + return &RevokeRoleFromUserParams{ + timeout: timeout, + } +} + +// NewRevokeRoleFromUserParamsWithContext creates a new RevokeRoleFromUserParams object +// with the ability to set a context for a request. +func NewRevokeRoleFromUserParamsWithContext(ctx context.Context) *RevokeRoleFromUserParams { + return &RevokeRoleFromUserParams{ + Context: ctx, + } +} + +// NewRevokeRoleFromUserParamsWithHTTPClient creates a new RevokeRoleFromUserParams object +// with the ability to set a custom HTTPClient for a request. +func NewRevokeRoleFromUserParamsWithHTTPClient(client *http.Client) *RevokeRoleFromUserParams { + return &RevokeRoleFromUserParams{ + HTTPClient: client, + } +} + +/* +RevokeRoleFromUserParams contains all the parameters to send to the API endpoint + + for the revoke role from user operation. + + Typically these are written to a http.Request. +*/ +type RevokeRoleFromUserParams struct { + + // Body. + Body RevokeRoleFromUserBody + + /* ID. + + user name + */ + ID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the revoke role from user params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *RevokeRoleFromUserParams) WithDefaults() *RevokeRoleFromUserParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the revoke role from user params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *RevokeRoleFromUserParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the revoke role from user params +func (o *RevokeRoleFromUserParams) WithTimeout(timeout time.Duration) *RevokeRoleFromUserParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the revoke role from user params +func (o *RevokeRoleFromUserParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the revoke role from user params +func (o *RevokeRoleFromUserParams) WithContext(ctx context.Context) *RevokeRoleFromUserParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the revoke role from user params +func (o *RevokeRoleFromUserParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the revoke role from user params +func (o *RevokeRoleFromUserParams) WithHTTPClient(client *http.Client) *RevokeRoleFromUserParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the revoke role from user params +func (o *RevokeRoleFromUserParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the revoke role from user params +func (o *RevokeRoleFromUserParams) WithBody(body RevokeRoleFromUserBody) *RevokeRoleFromUserParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the revoke role from user params +func (o *RevokeRoleFromUserParams) SetBody(body RevokeRoleFromUserBody) { + o.Body = body +} + +// WithID adds the id to the revoke role from user params +func (o *RevokeRoleFromUserParams) WithID(id string) *RevokeRoleFromUserParams { + o.SetID(id) + return o +} + +// SetID adds the id to the revoke role from user params +func (o *RevokeRoleFromUserParams) SetID(id string) { + o.ID = id +} + +// WriteToRequest writes these params to a swagger request +func (o *RevokeRoleFromUserParams) 
WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + + // path param id + if err := r.SetPathParam("id", o.ID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/authz/revoke_role_from_user_responses.go b/platform/dbops/binaries/weaviate-src/client/authz/revoke_role_from_user_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..a96ec84e7fb4a367e7b8d30510933246104be3aa --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/authz/revoke_role_from_user_responses.go @@ -0,0 +1,553 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "fmt" + "io" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + + "github.com/weaviate/weaviate/entities/models" +) + +// RevokeRoleFromUserReader is a Reader for the RevokeRoleFromUser structure. +type RevokeRoleFromUserReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *RevokeRoleFromUserReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewRevokeRoleFromUserOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewRevokeRoleFromUserBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 401: + result := NewRevokeRoleFromUserUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewRevokeRoleFromUserForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 404: + result := NewRevokeRoleFromUserNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewRevokeRoleFromUserInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewRevokeRoleFromUserOK creates a RevokeRoleFromUserOK with default headers values +func NewRevokeRoleFromUserOK() *RevokeRoleFromUserOK { + return &RevokeRoleFromUserOK{} +} + +/* +RevokeRoleFromUserOK describes a response with status code 200, with default header values. 
+ +Role revoked successfully +*/ +type RevokeRoleFromUserOK struct { +} + +// IsSuccess returns true when this revoke role from user o k response has a 2xx status code +func (o *RevokeRoleFromUserOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this revoke role from user o k response has a 3xx status code +func (o *RevokeRoleFromUserOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this revoke role from user o k response has a 4xx status code +func (o *RevokeRoleFromUserOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this revoke role from user o k response has a 5xx status code +func (o *RevokeRoleFromUserOK) IsServerError() bool { + return false +} + +// IsCode returns true when this revoke role from user o k response a status code equal to that given +func (o *RevokeRoleFromUserOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the revoke role from user o k response +func (o *RevokeRoleFromUserOK) Code() int { + return 200 +} + +func (o *RevokeRoleFromUserOK) Error() string { + return fmt.Sprintf("[POST /authz/users/{id}/revoke][%d] revokeRoleFromUserOK ", 200) +} + +func (o *RevokeRoleFromUserOK) String() string { + return fmt.Sprintf("[POST /authz/users/{id}/revoke][%d] revokeRoleFromUserOK ", 200) +} + +func (o *RevokeRoleFromUserOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewRevokeRoleFromUserBadRequest creates a RevokeRoleFromUserBadRequest with default headers values +func NewRevokeRoleFromUserBadRequest() *RevokeRoleFromUserBadRequest { + return &RevokeRoleFromUserBadRequest{} +} + +/* +RevokeRoleFromUserBadRequest describes a response with status code 400, with default header values. 
+ +Bad request +*/ +type RevokeRoleFromUserBadRequest struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this revoke role from user bad request response has a 2xx status code +func (o *RevokeRoleFromUserBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this revoke role from user bad request response has a 3xx status code +func (o *RevokeRoleFromUserBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this revoke role from user bad request response has a 4xx status code +func (o *RevokeRoleFromUserBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this revoke role from user bad request response has a 5xx status code +func (o *RevokeRoleFromUserBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this revoke role from user bad request response a status code equal to that given +func (o *RevokeRoleFromUserBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the revoke role from user bad request response +func (o *RevokeRoleFromUserBadRequest) Code() int { + return 400 +} + +func (o *RevokeRoleFromUserBadRequest) Error() string { + return fmt.Sprintf("[POST /authz/users/{id}/revoke][%d] revokeRoleFromUserBadRequest %+v", 400, o.Payload) +} + +func (o *RevokeRoleFromUserBadRequest) String() string { + return fmt.Sprintf("[POST /authz/users/{id}/revoke][%d] revokeRoleFromUserBadRequest %+v", 400, o.Payload) +} + +func (o *RevokeRoleFromUserBadRequest) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *RevokeRoleFromUserBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// 
NewRevokeRoleFromUserUnauthorized creates a RevokeRoleFromUserUnauthorized with default headers values +func NewRevokeRoleFromUserUnauthorized() *RevokeRoleFromUserUnauthorized { + return &RevokeRoleFromUserUnauthorized{} +} + +/* +RevokeRoleFromUserUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. +*/ +type RevokeRoleFromUserUnauthorized struct { +} + +// IsSuccess returns true when this revoke role from user unauthorized response has a 2xx status code +func (o *RevokeRoleFromUserUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this revoke role from user unauthorized response has a 3xx status code +func (o *RevokeRoleFromUserUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this revoke role from user unauthorized response has a 4xx status code +func (o *RevokeRoleFromUserUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this revoke role from user unauthorized response has a 5xx status code +func (o *RevokeRoleFromUserUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this revoke role from user unauthorized response a status code equal to that given +func (o *RevokeRoleFromUserUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the revoke role from user unauthorized response +func (o *RevokeRoleFromUserUnauthorized) Code() int { + return 401 +} + +func (o *RevokeRoleFromUserUnauthorized) Error() string { + return fmt.Sprintf("[POST /authz/users/{id}/revoke][%d] revokeRoleFromUserUnauthorized ", 401) +} + +func (o *RevokeRoleFromUserUnauthorized) String() string { + return fmt.Sprintf("[POST /authz/users/{id}/revoke][%d] revokeRoleFromUserUnauthorized ", 401) +} + +func (o *RevokeRoleFromUserUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats 
strfmt.Registry) error { + + return nil +} + +// NewRevokeRoleFromUserForbidden creates a RevokeRoleFromUserForbidden with default headers values +func NewRevokeRoleFromUserForbidden() *RevokeRoleFromUserForbidden { + return &RevokeRoleFromUserForbidden{} +} + +/* +RevokeRoleFromUserForbidden describes a response with status code 403, with default header values. + +Forbidden +*/ +type RevokeRoleFromUserForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this revoke role from user forbidden response has a 2xx status code +func (o *RevokeRoleFromUserForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this revoke role from user forbidden response has a 3xx status code +func (o *RevokeRoleFromUserForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this revoke role from user forbidden response has a 4xx status code +func (o *RevokeRoleFromUserForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this revoke role from user forbidden response has a 5xx status code +func (o *RevokeRoleFromUserForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this revoke role from user forbidden response a status code equal to that given +func (o *RevokeRoleFromUserForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the revoke role from user forbidden response +func (o *RevokeRoleFromUserForbidden) Code() int { + return 403 +} + +func (o *RevokeRoleFromUserForbidden) Error() string { + return fmt.Sprintf("[POST /authz/users/{id}/revoke][%d] revokeRoleFromUserForbidden %+v", 403, o.Payload) +} + +func (o *RevokeRoleFromUserForbidden) String() string { + return fmt.Sprintf("[POST /authz/users/{id}/revoke][%d] revokeRoleFromUserForbidden %+v", 403, o.Payload) +} + +func (o *RevokeRoleFromUserForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o 
*RevokeRoleFromUserForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewRevokeRoleFromUserNotFound creates a RevokeRoleFromUserNotFound with default headers values +func NewRevokeRoleFromUserNotFound() *RevokeRoleFromUserNotFound { + return &RevokeRoleFromUserNotFound{} +} + +/* +RevokeRoleFromUserNotFound describes a response with status code 404, with default header values. + +role or user is not found. +*/ +type RevokeRoleFromUserNotFound struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this revoke role from user not found response has a 2xx status code +func (o *RevokeRoleFromUserNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this revoke role from user not found response has a 3xx status code +func (o *RevokeRoleFromUserNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this revoke role from user not found response has a 4xx status code +func (o *RevokeRoleFromUserNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this revoke role from user not found response has a 5xx status code +func (o *RevokeRoleFromUserNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this revoke role from user not found response a status code equal to that given +func (o *RevokeRoleFromUserNotFound) IsCode(code int) bool { + return code == 404 +} + +// Code gets the status code for the revoke role from user not found response +func (o *RevokeRoleFromUserNotFound) Code() int { + return 404 +} + +func (o *RevokeRoleFromUserNotFound) Error() string { + return fmt.Sprintf("[POST /authz/users/{id}/revoke][%d] revokeRoleFromUserNotFound %+v", 404, o.Payload) +} + +func (o 
*RevokeRoleFromUserNotFound) String() string { + return fmt.Sprintf("[POST /authz/users/{id}/revoke][%d] revokeRoleFromUserNotFound %+v", 404, o.Payload) +} + +func (o *RevokeRoleFromUserNotFound) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *RevokeRoleFromUserNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewRevokeRoleFromUserInternalServerError creates a RevokeRoleFromUserInternalServerError with default headers values +func NewRevokeRoleFromUserInternalServerError() *RevokeRoleFromUserInternalServerError { + return &RevokeRoleFromUserInternalServerError{} +} + +/* +RevokeRoleFromUserInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+*/ +type RevokeRoleFromUserInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this revoke role from user internal server error response has a 2xx status code +func (o *RevokeRoleFromUserInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this revoke role from user internal server error response has a 3xx status code +func (o *RevokeRoleFromUserInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this revoke role from user internal server error response has a 4xx status code +func (o *RevokeRoleFromUserInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this revoke role from user internal server error response has a 5xx status code +func (o *RevokeRoleFromUserInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this revoke role from user internal server error response a status code equal to that given +func (o *RevokeRoleFromUserInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the revoke role from user internal server error response +func (o *RevokeRoleFromUserInternalServerError) Code() int { + return 500 +} + +func (o *RevokeRoleFromUserInternalServerError) Error() string { + return fmt.Sprintf("[POST /authz/users/{id}/revoke][%d] revokeRoleFromUserInternalServerError %+v", 500, o.Payload) +} + +func (o *RevokeRoleFromUserInternalServerError) String() string { + return fmt.Sprintf("[POST /authz/users/{id}/revoke][%d] revokeRoleFromUserInternalServerError %+v", 500, o.Payload) +} + +func (o *RevokeRoleFromUserInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *RevokeRoleFromUserInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // 
response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +/* +RevokeRoleFromUserBody revoke role from user body +swagger:model RevokeRoleFromUserBody +*/ +type RevokeRoleFromUserBody struct { + + // the roles that revoked from the key or user + Roles []string `json:"roles"` + + // user type + UserType models.UserTypeInput `json:"userType,omitempty"` +} + +// Validate validates this revoke role from user body +func (o *RevokeRoleFromUserBody) Validate(formats strfmt.Registry) error { + var res []error + + if err := o.validateUserType(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (o *RevokeRoleFromUserBody) validateUserType(formats strfmt.Registry) error { + if swag.IsZero(o.UserType) { // not required + return nil + } + + if err := o.UserType.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("body" + "." + "userType") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("body" + "." + "userType") + } + return err + } + + return nil +} + +// ContextValidate validate this revoke role from user body based on the context it is used +func (o *RevokeRoleFromUserBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := o.contextValidateUserType(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (o *RevokeRoleFromUserBody) contextValidateUserType(ctx context.Context, formats strfmt.Registry) error { + + if err := o.UserType.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("body" + "." 
+ "userType") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("body" + "." + "userType") + } + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (o *RevokeRoleFromUserBody) MarshalBinary() ([]byte, error) { + if o == nil { + return nil, nil + } + return swag.WriteJSON(o) +} + +// UnmarshalBinary interface implementation +func (o *RevokeRoleFromUserBody) UnmarshalBinary(b []byte) error { + var res RevokeRoleFromUserBody + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *o = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/backups/backups_cancel_parameters.go b/platform/dbops/binaries/weaviate-src/client/backups/backups_cancel_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..f4531f9eab28edd467549b13f21c1ed620a8657b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/backups/backups_cancel_parameters.go @@ -0,0 +1,252 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package backups + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewBackupsCancelParams creates a new BackupsCancelParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. 
+func NewBackupsCancelParams() *BackupsCancelParams { + return &BackupsCancelParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewBackupsCancelParamsWithTimeout creates a new BackupsCancelParams object +// with the ability to set a timeout on a request. +func NewBackupsCancelParamsWithTimeout(timeout time.Duration) *BackupsCancelParams { + return &BackupsCancelParams{ + timeout: timeout, + } +} + +// NewBackupsCancelParamsWithContext creates a new BackupsCancelParams object +// with the ability to set a context for a request. +func NewBackupsCancelParamsWithContext(ctx context.Context) *BackupsCancelParams { + return &BackupsCancelParams{ + Context: ctx, + } +} + +// NewBackupsCancelParamsWithHTTPClient creates a new BackupsCancelParams object +// with the ability to set a custom HTTPClient for a request. +func NewBackupsCancelParamsWithHTTPClient(client *http.Client) *BackupsCancelParams { + return &BackupsCancelParams{ + HTTPClient: client, + } +} + +/* +BackupsCancelParams contains all the parameters to send to the API endpoint + + for the backups cancel operation. + + Typically these are written to a http.Request. +*/ +type BackupsCancelParams struct { + + /* Backend. + + Backup backend name e.g. filesystem, gcs, s3. + */ + Backend string + + /* Bucket. + + Name of the bucket, container, volume, etc + */ + Bucket *string + + /* ID. + + The ID of a backup. Must be URL-safe and work as a filesystem path, only lowercase, numbers, underscore, minus characters allowed. + */ + ID string + + /* Path. + + The path within the bucket + */ + Path *string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the backups cancel params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *BackupsCancelParams) WithDefaults() *BackupsCancelParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the backups cancel params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *BackupsCancelParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the backups cancel params +func (o *BackupsCancelParams) WithTimeout(timeout time.Duration) *BackupsCancelParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the backups cancel params +func (o *BackupsCancelParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the backups cancel params +func (o *BackupsCancelParams) WithContext(ctx context.Context) *BackupsCancelParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the backups cancel params +func (o *BackupsCancelParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the backups cancel params +func (o *BackupsCancelParams) WithHTTPClient(client *http.Client) *BackupsCancelParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the backups cancel params +func (o *BackupsCancelParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBackend adds the backend to the backups cancel params +func (o *BackupsCancelParams) WithBackend(backend string) *BackupsCancelParams { + o.SetBackend(backend) + return o +} + +// SetBackend adds the backend to the backups cancel params +func (o *BackupsCancelParams) SetBackend(backend string) { + o.Backend = backend +} + +// WithBucket adds the bucket to the backups cancel params +func (o *BackupsCancelParams) WithBucket(bucket *string) *BackupsCancelParams { + o.SetBucket(bucket) + return o +} + +// SetBucket adds the bucket to the backups cancel params +func (o 
*BackupsCancelParams) SetBucket(bucket *string) { + o.Bucket = bucket +} + +// WithID adds the id to the backups cancel params +func (o *BackupsCancelParams) WithID(id string) *BackupsCancelParams { + o.SetID(id) + return o +} + +// SetID adds the id to the backups cancel params +func (o *BackupsCancelParams) SetID(id string) { + o.ID = id +} + +// WithPath adds the path to the backups cancel params +func (o *BackupsCancelParams) WithPath(path *string) *BackupsCancelParams { + o.SetPath(path) + return o +} + +// SetPath adds the path to the backups cancel params +func (o *BackupsCancelParams) SetPath(path *string) { + o.Path = path +} + +// WriteToRequest writes these params to a swagger request +func (o *BackupsCancelParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param backend + if err := r.SetPathParam("backend", o.Backend); err != nil { + return err + } + + if o.Bucket != nil { + + // query param bucket + var qrBucket string + + if o.Bucket != nil { + qrBucket = *o.Bucket + } + qBucket := qrBucket + if qBucket != "" { + + if err := r.SetQueryParam("bucket", qBucket); err != nil { + return err + } + } + } + + // path param id + if err := r.SetPathParam("id", o.ID); err != nil { + return err + } + + if o.Path != nil { + + // query param path + var qrPath string + + if o.Path != nil { + qrPath = *o.Path + } + qPath := qrPath + if qPath != "" { + + if err := r.SetQueryParam("path", qPath); err != nil { + return err + } + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/backups/backups_cancel_responses.go b/platform/dbops/binaries/weaviate-src/client/backups/backups_cancel_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..da4af76ed460b9d285dd8504ccad9240a4baaafd --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/backups/backups_cancel_responses.go @@ -0,0 +1,386 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package backups + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// BackupsCancelReader is a Reader for the BackupsCancel structure. +type BackupsCancelReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *BackupsCancelReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 204: + result := NewBackupsCancelNoContent() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 401: + result := NewBackupsCancelUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewBackupsCancelForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewBackupsCancelUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewBackupsCancelInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewBackupsCancelNoContent creates a BackupsCancelNoContent with default headers values +func NewBackupsCancelNoContent() *BackupsCancelNoContent { + return &BackupsCancelNoContent{} +} + +/* +BackupsCancelNoContent describes a response with status code 204, with default header values. + +Successfully deleted. 
+*/ +type BackupsCancelNoContent struct { +} + +// IsSuccess returns true when this backups cancel no content response has a 2xx status code +func (o *BackupsCancelNoContent) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this backups cancel no content response has a 3xx status code +func (o *BackupsCancelNoContent) IsRedirect() bool { + return false +} + +// IsClientError returns true when this backups cancel no content response has a 4xx status code +func (o *BackupsCancelNoContent) IsClientError() bool { + return false +} + +// IsServerError returns true when this backups cancel no content response has a 5xx status code +func (o *BackupsCancelNoContent) IsServerError() bool { + return false +} + +// IsCode returns true when this backups cancel no content response a status code equal to that given +func (o *BackupsCancelNoContent) IsCode(code int) bool { + return code == 204 +} + +// Code gets the status code for the backups cancel no content response +func (o *BackupsCancelNoContent) Code() int { + return 204 +} + +func (o *BackupsCancelNoContent) Error() string { + return fmt.Sprintf("[DELETE /backups/{backend}/{id}][%d] backupsCancelNoContent ", 204) +} + +func (o *BackupsCancelNoContent) String() string { + return fmt.Sprintf("[DELETE /backups/{backend}/{id}][%d] backupsCancelNoContent ", 204) +} + +func (o *BackupsCancelNoContent) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewBackupsCancelUnauthorized creates a BackupsCancelUnauthorized with default headers values +func NewBackupsCancelUnauthorized() *BackupsCancelUnauthorized { + return &BackupsCancelUnauthorized{} +} + +/* +BackupsCancelUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. 
+*/ +type BackupsCancelUnauthorized struct { +} + +// IsSuccess returns true when this backups cancel unauthorized response has a 2xx status code +func (o *BackupsCancelUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this backups cancel unauthorized response has a 3xx status code +func (o *BackupsCancelUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this backups cancel unauthorized response has a 4xx status code +func (o *BackupsCancelUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this backups cancel unauthorized response has a 5xx status code +func (o *BackupsCancelUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this backups cancel unauthorized response a status code equal to that given +func (o *BackupsCancelUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the backups cancel unauthorized response +func (o *BackupsCancelUnauthorized) Code() int { + return 401 +} + +func (o *BackupsCancelUnauthorized) Error() string { + return fmt.Sprintf("[DELETE /backups/{backend}/{id}][%d] backupsCancelUnauthorized ", 401) +} + +func (o *BackupsCancelUnauthorized) String() string { + return fmt.Sprintf("[DELETE /backups/{backend}/{id}][%d] backupsCancelUnauthorized ", 401) +} + +func (o *BackupsCancelUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewBackupsCancelForbidden creates a BackupsCancelForbidden with default headers values +func NewBackupsCancelForbidden() *BackupsCancelForbidden { + return &BackupsCancelForbidden{} +} + +/* +BackupsCancelForbidden describes a response with status code 403, with default header values. 
+ +Forbidden +*/ +type BackupsCancelForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this backups cancel forbidden response has a 2xx status code +func (o *BackupsCancelForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this backups cancel forbidden response has a 3xx status code +func (o *BackupsCancelForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this backups cancel forbidden response has a 4xx status code +func (o *BackupsCancelForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this backups cancel forbidden response has a 5xx status code +func (o *BackupsCancelForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this backups cancel forbidden response a status code equal to that given +func (o *BackupsCancelForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the backups cancel forbidden response +func (o *BackupsCancelForbidden) Code() int { + return 403 +} + +func (o *BackupsCancelForbidden) Error() string { + return fmt.Sprintf("[DELETE /backups/{backend}/{id}][%d] backupsCancelForbidden %+v", 403, o.Payload) +} + +func (o *BackupsCancelForbidden) String() string { + return fmt.Sprintf("[DELETE /backups/{backend}/{id}][%d] backupsCancelForbidden %+v", 403, o.Payload) +} + +func (o *BackupsCancelForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *BackupsCancelForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewBackupsCancelUnprocessableEntity creates a BackupsCancelUnprocessableEntity with default headers values +func NewBackupsCancelUnprocessableEntity() 
*BackupsCancelUnprocessableEntity { + return &BackupsCancelUnprocessableEntity{} +} + +/* +BackupsCancelUnprocessableEntity describes a response with status code 422, with default header values. + +Invalid backup cancellation attempt. +*/ +type BackupsCancelUnprocessableEntity struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this backups cancel unprocessable entity response has a 2xx status code +func (o *BackupsCancelUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this backups cancel unprocessable entity response has a 3xx status code +func (o *BackupsCancelUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this backups cancel unprocessable entity response has a 4xx status code +func (o *BackupsCancelUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this backups cancel unprocessable entity response has a 5xx status code +func (o *BackupsCancelUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this backups cancel unprocessable entity response a status code equal to that given +func (o *BackupsCancelUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + +// Code gets the status code for the backups cancel unprocessable entity response +func (o *BackupsCancelUnprocessableEntity) Code() int { + return 422 +} + +func (o *BackupsCancelUnprocessableEntity) Error() string { + return fmt.Sprintf("[DELETE /backups/{backend}/{id}][%d] backupsCancelUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *BackupsCancelUnprocessableEntity) String() string { + return fmt.Sprintf("[DELETE /backups/{backend}/{id}][%d] backupsCancelUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *BackupsCancelUnprocessableEntity) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *BackupsCancelUnprocessableEntity) readResponse(response 
runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewBackupsCancelInternalServerError creates a BackupsCancelInternalServerError with default headers values +func NewBackupsCancelInternalServerError() *BackupsCancelInternalServerError { + return &BackupsCancelInternalServerError{} +} + +/* +BackupsCancelInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. +*/ +type BackupsCancelInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this backups cancel internal server error response has a 2xx status code +func (o *BackupsCancelInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this backups cancel internal server error response has a 3xx status code +func (o *BackupsCancelInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this backups cancel internal server error response has a 4xx status code +func (o *BackupsCancelInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this backups cancel internal server error response has a 5xx status code +func (o *BackupsCancelInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this backups cancel internal server error response a status code equal to that given +func (o *BackupsCancelInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the backups cancel internal server error response +func (o *BackupsCancelInternalServerError) Code() int { + return 500 +} + +func (o 
*BackupsCancelInternalServerError) Error() string { + return fmt.Sprintf("[DELETE /backups/{backend}/{id}][%d] backupsCancelInternalServerError %+v", 500, o.Payload) +} + +func (o *BackupsCancelInternalServerError) String() string { + return fmt.Sprintf("[DELETE /backups/{backend}/{id}][%d] backupsCancelInternalServerError %+v", 500, o.Payload) +} + +func (o *BackupsCancelInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *BackupsCancelInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/backups/backups_client.go b/platform/dbops/binaries/weaviate-src/client/backups/backups_client.go new file mode 100644 index 0000000000000000000000000000000000000000..be5fcb930eff39a88d3c5da94a1b5918a55f3f36 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/backups/backups_client.go @@ -0,0 +1,308 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package backups + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" +) + +// New creates a new backups API client. 
+func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService { + return &Client{transport: transport, formats: formats} +} + +/* +Client for backups API +*/ +type Client struct { + transport runtime.ClientTransport + formats strfmt.Registry +} + +// ClientOption is the option for Client methods +type ClientOption func(*runtime.ClientOperation) + +// ClientService is the interface for Client methods +type ClientService interface { + BackupsCancel(params *BackupsCancelParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*BackupsCancelNoContent, error) + + BackupsCreate(params *BackupsCreateParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*BackupsCreateOK, error) + + BackupsCreateStatus(params *BackupsCreateStatusParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*BackupsCreateStatusOK, error) + + BackupsList(params *BackupsListParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*BackupsListOK, error) + + BackupsRestore(params *BackupsRestoreParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*BackupsRestoreOK, error) + + BackupsRestoreStatus(params *BackupsRestoreStatusParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*BackupsRestoreStatusOK, error) + + SetTransport(transport runtime.ClientTransport) +} + +/* +BackupsCancel cancels backup + +Cancel created backup with specified ID +*/ +func (a *Client) BackupsCancel(params *BackupsCancelParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*BackupsCancelNoContent, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewBackupsCancelParams() + } + op := &runtime.ClientOperation{ + ID: "backups.cancel", + Method: "DELETE", + PathPattern: "/backups/{backend}/{id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + 
Reader: &BackupsCancelReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*BackupsCancelNoContent) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for backups.cancel: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +BackupsCreate starts a backup process + +Start creating a backup for a set of collections.

Notes:
- Weaviate uses gzip compression by default.
- Weaviate stays usable while a backup process is ongoing. +*/ +func (a *Client) BackupsCreate(params *BackupsCreateParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*BackupsCreateOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewBackupsCreateParams() + } + op := &runtime.ClientOperation{ + ID: "backups.create", + Method: "POST", + PathPattern: "/backups/{backend}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &BackupsCreateReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*BackupsCreateOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for backups.create: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +BackupsCreateStatus gets backup process status + +Returns status of backup creation attempt for a set of collections.

All client implementations have a `wait for completion` option which will poll the backup status in the background and only return once the backup has completed (successfully or unsuccessfully). If you set the `wait for completion` option to false, you can also check the status yourself using this endpoint. +*/ +func (a *Client) BackupsCreateStatus(params *BackupsCreateStatusParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*BackupsCreateStatusOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewBackupsCreateStatusParams() + } + op := &runtime.ClientOperation{ + ID: "backups.create.status", + Method: "GET", + PathPattern: "/backups/{backend}/{id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &BackupsCreateStatusReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*BackupsCreateStatusOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for backups.create.status: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +BackupsList lists backups in progress + +[Coming soon] List all backups in progress not implemented yet. 
+*/ +func (a *Client) BackupsList(params *BackupsListParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*BackupsListOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewBackupsListParams() + } + op := &runtime.ClientOperation{ + ID: "backups.list", + Method: "GET", + PathPattern: "/backups/{backend}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &BackupsListReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*BackupsListOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for backups.list: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +BackupsRestore starts a restoration process + +Starts a process of restoring a backup for a set of collections.

Any backup can be restored to any machine, as long as the number of nodes between source and target are identical.

Requirements:

- None of the collections to be restored already exist on the target restoration node(s).
- The node names of the backed-up collections' must match those of the target restoration node(s). +*/ +func (a *Client) BackupsRestore(params *BackupsRestoreParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*BackupsRestoreOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewBackupsRestoreParams() + } + op := &runtime.ClientOperation{ + ID: "backups.restore", + Method: "POST", + PathPattern: "/backups/{backend}/{id}/restore", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &BackupsRestoreReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*BackupsRestoreOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for backups.restore: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +BackupsRestoreStatus gets restore process status + +Returns status of a backup restoration attempt for a set of classes.

All client implementations have a `wait for completion` option which will poll the backup status in the background and only return once the backup has completed (successfully or unsuccessfully). If you set the `wait for completion` option to false, you can also check the status yourself using the this endpoint. +*/ +func (a *Client) BackupsRestoreStatus(params *BackupsRestoreStatusParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*BackupsRestoreStatusOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewBackupsRestoreStatusParams() + } + op := &runtime.ClientOperation{ + ID: "backups.restore.status", + Method: "GET", + PathPattern: "/backups/{backend}/{id}/restore", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &BackupsRestoreStatusReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*BackupsRestoreStatusOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for backups.restore.status: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + panic(msg) +} + +// SetTransport changes the transport on the client +func (a *Client) SetTransport(transport runtime.ClientTransport) { + a.transport = transport +} diff --git a/platform/dbops/binaries/weaviate-src/client/backups/backups_create_parameters.go b/platform/dbops/binaries/weaviate-src/client/backups/backups_create_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..d9600c069ba82d47e1a78a02f221435a43e274f3 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/backups/backups_create_parameters.go @@ -0,0 +1,183 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package backups + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// NewBackupsCreateParams creates a new BackupsCreateParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewBackupsCreateParams() *BackupsCreateParams { + return &BackupsCreateParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewBackupsCreateParamsWithTimeout creates a new BackupsCreateParams object +// with the ability to set a timeout on a request. 
+func NewBackupsCreateParamsWithTimeout(timeout time.Duration) *BackupsCreateParams { + return &BackupsCreateParams{ + timeout: timeout, + } +} + +// NewBackupsCreateParamsWithContext creates a new BackupsCreateParams object +// with the ability to set a context for a request. +func NewBackupsCreateParamsWithContext(ctx context.Context) *BackupsCreateParams { + return &BackupsCreateParams{ + Context: ctx, + } +} + +// NewBackupsCreateParamsWithHTTPClient creates a new BackupsCreateParams object +// with the ability to set a custom HTTPClient for a request. +func NewBackupsCreateParamsWithHTTPClient(client *http.Client) *BackupsCreateParams { + return &BackupsCreateParams{ + HTTPClient: client, + } +} + +/* +BackupsCreateParams contains all the parameters to send to the API endpoint + + for the backups create operation. + + Typically these are written to a http.Request. +*/ +type BackupsCreateParams struct { + + /* Backend. + + Backup backend name e.g. `filesystem`, `gcs`, `s3`, `azure`. + */ + Backend string + + // Body. + Body *models.BackupCreateRequest + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the backups create params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *BackupsCreateParams) WithDefaults() *BackupsCreateParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the backups create params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *BackupsCreateParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the backups create params +func (o *BackupsCreateParams) WithTimeout(timeout time.Duration) *BackupsCreateParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the backups create params +func (o *BackupsCreateParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the backups create params +func (o *BackupsCreateParams) WithContext(ctx context.Context) *BackupsCreateParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the backups create params +func (o *BackupsCreateParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the backups create params +func (o *BackupsCreateParams) WithHTTPClient(client *http.Client) *BackupsCreateParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the backups create params +func (o *BackupsCreateParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBackend adds the backend to the backups create params +func (o *BackupsCreateParams) WithBackend(backend string) *BackupsCreateParams { + o.SetBackend(backend) + return o +} + +// SetBackend adds the backend to the backups create params +func (o *BackupsCreateParams) SetBackend(backend string) { + o.Backend = backend +} + +// WithBody adds the body to the backups create params +func (o *BackupsCreateParams) WithBody(body *models.BackupCreateRequest) *BackupsCreateParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the backups create params +func (o *BackupsCreateParams) SetBody(body *models.BackupCreateRequest) { + o.Body = body +} + +// WriteToRequest writes these params to a swagger request +func (o *BackupsCreateParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := 
r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param backend + if err := r.SetPathParam("backend", o.Backend); err != nil { + return err + } + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/backups/backups_create_responses.go b/platform/dbops/binaries/weaviate-src/client/backups/backups_create_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..a05c7d995c1a0072f1f03ab9dcaa8bc50c0a3ed0 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/backups/backups_create_responses.go @@ -0,0 +1,398 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package backups + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// BackupsCreateReader is a Reader for the BackupsCreate structure. +type BackupsCreateReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *BackupsCreateReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewBackupsCreateOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 401: + result := NewBackupsCreateUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewBackupsCreateForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewBackupsCreateUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewBackupsCreateInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewBackupsCreateOK creates a BackupsCreateOK with default headers values +func NewBackupsCreateOK() *BackupsCreateOK { + return &BackupsCreateOK{} +} + +/* +BackupsCreateOK describes a response with status code 200, with default header values. + +Backup create process successfully started. 
+*/ +type BackupsCreateOK struct { + Payload *models.BackupCreateResponse +} + +// IsSuccess returns true when this backups create o k response has a 2xx status code +func (o *BackupsCreateOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this backups create o k response has a 3xx status code +func (o *BackupsCreateOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this backups create o k response has a 4xx status code +func (o *BackupsCreateOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this backups create o k response has a 5xx status code +func (o *BackupsCreateOK) IsServerError() bool { + return false +} + +// IsCode returns true when this backups create o k response a status code equal to that given +func (o *BackupsCreateOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the backups create o k response +func (o *BackupsCreateOK) Code() int { + return 200 +} + +func (o *BackupsCreateOK) Error() string { + return fmt.Sprintf("[POST /backups/{backend}][%d] backupsCreateOK %+v", 200, o.Payload) +} + +func (o *BackupsCreateOK) String() string { + return fmt.Sprintf("[POST /backups/{backend}][%d] backupsCreateOK %+v", 200, o.Payload) +} + +func (o *BackupsCreateOK) GetPayload() *models.BackupCreateResponse { + return o.Payload +} + +func (o *BackupsCreateOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.BackupCreateResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewBackupsCreateUnauthorized creates a BackupsCreateUnauthorized with default headers values +func NewBackupsCreateUnauthorized() *BackupsCreateUnauthorized { + return &BackupsCreateUnauthorized{} +} + +/* +BackupsCreateUnauthorized describes a response with status code 401, with 
default header values. + +Unauthorized or invalid credentials. +*/ +type BackupsCreateUnauthorized struct { +} + +// IsSuccess returns true when this backups create unauthorized response has a 2xx status code +func (o *BackupsCreateUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this backups create unauthorized response has a 3xx status code +func (o *BackupsCreateUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this backups create unauthorized response has a 4xx status code +func (o *BackupsCreateUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this backups create unauthorized response has a 5xx status code +func (o *BackupsCreateUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this backups create unauthorized response a status code equal to that given +func (o *BackupsCreateUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the backups create unauthorized response +func (o *BackupsCreateUnauthorized) Code() int { + return 401 +} + +func (o *BackupsCreateUnauthorized) Error() string { + return fmt.Sprintf("[POST /backups/{backend}][%d] backupsCreateUnauthorized ", 401) +} + +func (o *BackupsCreateUnauthorized) String() string { + return fmt.Sprintf("[POST /backups/{backend}][%d] backupsCreateUnauthorized ", 401) +} + +func (o *BackupsCreateUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewBackupsCreateForbidden creates a BackupsCreateForbidden with default headers values +func NewBackupsCreateForbidden() *BackupsCreateForbidden { + return &BackupsCreateForbidden{} +} + +/* +BackupsCreateForbidden describes a response with status code 403, with default header values. 
+ +Forbidden +*/ +type BackupsCreateForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this backups create forbidden response has a 2xx status code +func (o *BackupsCreateForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this backups create forbidden response has a 3xx status code +func (o *BackupsCreateForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this backups create forbidden response has a 4xx status code +func (o *BackupsCreateForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this backups create forbidden response has a 5xx status code +func (o *BackupsCreateForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this backups create forbidden response a status code equal to that given +func (o *BackupsCreateForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the backups create forbidden response +func (o *BackupsCreateForbidden) Code() int { + return 403 +} + +func (o *BackupsCreateForbidden) Error() string { + return fmt.Sprintf("[POST /backups/{backend}][%d] backupsCreateForbidden %+v", 403, o.Payload) +} + +func (o *BackupsCreateForbidden) String() string { + return fmt.Sprintf("[POST /backups/{backend}][%d] backupsCreateForbidden %+v", 403, o.Payload) +} + +func (o *BackupsCreateForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *BackupsCreateForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewBackupsCreateUnprocessableEntity creates a BackupsCreateUnprocessableEntity with default headers values +func NewBackupsCreateUnprocessableEntity() 
*BackupsCreateUnprocessableEntity { + return &BackupsCreateUnprocessableEntity{} +} + +/* +BackupsCreateUnprocessableEntity describes a response with status code 422, with default header values. + +Invalid backup creation attempt. +*/ +type BackupsCreateUnprocessableEntity struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this backups create unprocessable entity response has a 2xx status code +func (o *BackupsCreateUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this backups create unprocessable entity response has a 3xx status code +func (o *BackupsCreateUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this backups create unprocessable entity response has a 4xx status code +func (o *BackupsCreateUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this backups create unprocessable entity response has a 5xx status code +func (o *BackupsCreateUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this backups create unprocessable entity response a status code equal to that given +func (o *BackupsCreateUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + +// Code gets the status code for the backups create unprocessable entity response +func (o *BackupsCreateUnprocessableEntity) Code() int { + return 422 +} + +func (o *BackupsCreateUnprocessableEntity) Error() string { + return fmt.Sprintf("[POST /backups/{backend}][%d] backupsCreateUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *BackupsCreateUnprocessableEntity) String() string { + return fmt.Sprintf("[POST /backups/{backend}][%d] backupsCreateUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *BackupsCreateUnprocessableEntity) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *BackupsCreateUnprocessableEntity) readResponse(response runtime.ClientResponse, 
consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewBackupsCreateInternalServerError creates a BackupsCreateInternalServerError with default headers values +func NewBackupsCreateInternalServerError() *BackupsCreateInternalServerError { + return &BackupsCreateInternalServerError{} +} + +/* +BackupsCreateInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. +*/ +type BackupsCreateInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this backups create internal server error response has a 2xx status code +func (o *BackupsCreateInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this backups create internal server error response has a 3xx status code +func (o *BackupsCreateInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this backups create internal server error response has a 4xx status code +func (o *BackupsCreateInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this backups create internal server error response has a 5xx status code +func (o *BackupsCreateInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this backups create internal server error response a status code equal to that given +func (o *BackupsCreateInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the backups create internal server error response +func (o *BackupsCreateInternalServerError) Code() int { + return 500 +} + +func (o 
*BackupsCreateInternalServerError) Error() string { + return fmt.Sprintf("[POST /backups/{backend}][%d] backupsCreateInternalServerError %+v", 500, o.Payload) +} + +func (o *BackupsCreateInternalServerError) String() string { + return fmt.Sprintf("[POST /backups/{backend}][%d] backupsCreateInternalServerError %+v", 500, o.Payload) +} + +func (o *BackupsCreateInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *BackupsCreateInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/backups/backups_create_status_parameters.go b/platform/dbops/binaries/weaviate-src/client/backups/backups_create_status_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..822eab373a1b5fcc7e0c3868f7420e83f6b47a1e --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/backups/backups_create_status_parameters.go @@ -0,0 +1,252 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package backups + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewBackupsCreateStatusParams creates a new BackupsCreateStatusParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewBackupsCreateStatusParams() *BackupsCreateStatusParams { + return &BackupsCreateStatusParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewBackupsCreateStatusParamsWithTimeout creates a new BackupsCreateStatusParams object +// with the ability to set a timeout on a request. +func NewBackupsCreateStatusParamsWithTimeout(timeout time.Duration) *BackupsCreateStatusParams { + return &BackupsCreateStatusParams{ + timeout: timeout, + } +} + +// NewBackupsCreateStatusParamsWithContext creates a new BackupsCreateStatusParams object +// with the ability to set a context for a request. +func NewBackupsCreateStatusParamsWithContext(ctx context.Context) *BackupsCreateStatusParams { + return &BackupsCreateStatusParams{ + Context: ctx, + } +} + +// NewBackupsCreateStatusParamsWithHTTPClient creates a new BackupsCreateStatusParams object +// with the ability to set a custom HTTPClient for a request. +func NewBackupsCreateStatusParamsWithHTTPClient(client *http.Client) *BackupsCreateStatusParams { + return &BackupsCreateStatusParams{ + HTTPClient: client, + } +} + +/* +BackupsCreateStatusParams contains all the parameters to send to the API endpoint + + for the backups create status operation. + + Typically these are written to a http.Request. +*/ +type BackupsCreateStatusParams struct { + + /* Backend. + + Backup backend name e.g. filesystem, gcs, s3. + */ + Backend string + + /* Bucket. 
+ + Name of the bucket, container, volume, etc + */ + Bucket *string + + /* ID. + + The ID of a backup. Must be URL-safe and work as a filesystem path, only lowercase, numbers, underscore, minus characters allowed. + */ + ID string + + /* Path. + + The path within the bucket + */ + Path *string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the backups create status params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *BackupsCreateStatusParams) WithDefaults() *BackupsCreateStatusParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the backups create status params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *BackupsCreateStatusParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the backups create status params +func (o *BackupsCreateStatusParams) WithTimeout(timeout time.Duration) *BackupsCreateStatusParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the backups create status params +func (o *BackupsCreateStatusParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the backups create status params +func (o *BackupsCreateStatusParams) WithContext(ctx context.Context) *BackupsCreateStatusParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the backups create status params +func (o *BackupsCreateStatusParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the backups create status params +func (o *BackupsCreateStatusParams) WithHTTPClient(client *http.Client) *BackupsCreateStatusParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the backups create status params +func (o 
*BackupsCreateStatusParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBackend adds the backend to the backups create status params +func (o *BackupsCreateStatusParams) WithBackend(backend string) *BackupsCreateStatusParams { + o.SetBackend(backend) + return o +} + +// SetBackend adds the backend to the backups create status params +func (o *BackupsCreateStatusParams) SetBackend(backend string) { + o.Backend = backend +} + +// WithBucket adds the bucket to the backups create status params +func (o *BackupsCreateStatusParams) WithBucket(bucket *string) *BackupsCreateStatusParams { + o.SetBucket(bucket) + return o +} + +// SetBucket adds the bucket to the backups create status params +func (o *BackupsCreateStatusParams) SetBucket(bucket *string) { + o.Bucket = bucket +} + +// WithID adds the id to the backups create status params +func (o *BackupsCreateStatusParams) WithID(id string) *BackupsCreateStatusParams { + o.SetID(id) + return o +} + +// SetID adds the id to the backups create status params +func (o *BackupsCreateStatusParams) SetID(id string) { + o.ID = id +} + +// WithPath adds the path to the backups create status params +func (o *BackupsCreateStatusParams) WithPath(path *string) *BackupsCreateStatusParams { + o.SetPath(path) + return o +} + +// SetPath adds the path to the backups create status params +func (o *BackupsCreateStatusParams) SetPath(path *string) { + o.Path = path +} + +// WriteToRequest writes these params to a swagger request +func (o *BackupsCreateStatusParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param backend + if err := r.SetPathParam("backend", o.Backend); err != nil { + return err + } + + if o.Bucket != nil { + + // query param bucket + var qrBucket string + + if o.Bucket != nil { + qrBucket = *o.Bucket + } + qBucket := qrBucket + if qBucket != "" { + + if err := 
r.SetQueryParam("bucket", qBucket); err != nil { + return err + } + } + } + + // path param id + if err := r.SetPathParam("id", o.ID); err != nil { + return err + } + + if o.Path != nil { + + // query param path + var qrPath string + + if o.Path != nil { + qrPath = *o.Path + } + qPath := qrPath + if qPath != "" { + + if err := r.SetQueryParam("path", qPath); err != nil { + return err + } + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/backups/backups_create_status_responses.go b/platform/dbops/binaries/weaviate-src/client/backups/backups_create_status_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..8a89c4a75dc9bc7db1ab467a60c127d549c956f5 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/backups/backups_create_status_responses.go @@ -0,0 +1,472 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package backups + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// BackupsCreateStatusReader is a Reader for the BackupsCreateStatus structure. +type BackupsCreateStatusReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *BackupsCreateStatusReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewBackupsCreateStatusOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 401: + result := NewBackupsCreateStatusUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewBackupsCreateStatusForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 404: + result := NewBackupsCreateStatusNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewBackupsCreateStatusUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewBackupsCreateStatusInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewBackupsCreateStatusOK creates a BackupsCreateStatusOK with default headers values +func NewBackupsCreateStatusOK() *BackupsCreateStatusOK { + return &BackupsCreateStatusOK{} +} + +/* +BackupsCreateStatusOK describes a response with status code 200, with default header values. 
+ +Backup creation status successfully returned +*/ +type BackupsCreateStatusOK struct { + Payload *models.BackupCreateStatusResponse +} + +// IsSuccess returns true when this backups create status o k response has a 2xx status code +func (o *BackupsCreateStatusOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this backups create status o k response has a 3xx status code +func (o *BackupsCreateStatusOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this backups create status o k response has a 4xx status code +func (o *BackupsCreateStatusOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this backups create status o k response has a 5xx status code +func (o *BackupsCreateStatusOK) IsServerError() bool { + return false +} + +// IsCode returns true when this backups create status o k response a status code equal to that given +func (o *BackupsCreateStatusOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the backups create status o k response +func (o *BackupsCreateStatusOK) Code() int { + return 200 +} + +func (o *BackupsCreateStatusOK) Error() string { + return fmt.Sprintf("[GET /backups/{backend}/{id}][%d] backupsCreateStatusOK %+v", 200, o.Payload) +} + +func (o *BackupsCreateStatusOK) String() string { + return fmt.Sprintf("[GET /backups/{backend}/{id}][%d] backupsCreateStatusOK %+v", 200, o.Payload) +} + +func (o *BackupsCreateStatusOK) GetPayload() *models.BackupCreateStatusResponse { + return o.Payload +} + +func (o *BackupsCreateStatusOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.BackupCreateStatusResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewBackupsCreateStatusUnauthorized creates a BackupsCreateStatusUnauthorized with default 
headers values +func NewBackupsCreateStatusUnauthorized() *BackupsCreateStatusUnauthorized { + return &BackupsCreateStatusUnauthorized{} +} + +/* +BackupsCreateStatusUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. +*/ +type BackupsCreateStatusUnauthorized struct { +} + +// IsSuccess returns true when this backups create status unauthorized response has a 2xx status code +func (o *BackupsCreateStatusUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this backups create status unauthorized response has a 3xx status code +func (o *BackupsCreateStatusUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this backups create status unauthorized response has a 4xx status code +func (o *BackupsCreateStatusUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this backups create status unauthorized response has a 5xx status code +func (o *BackupsCreateStatusUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this backups create status unauthorized response a status code equal to that given +func (o *BackupsCreateStatusUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the backups create status unauthorized response +func (o *BackupsCreateStatusUnauthorized) Code() int { + return 401 +} + +func (o *BackupsCreateStatusUnauthorized) Error() string { + return fmt.Sprintf("[GET /backups/{backend}/{id}][%d] backupsCreateStatusUnauthorized ", 401) +} + +func (o *BackupsCreateStatusUnauthorized) String() string { + return fmt.Sprintf("[GET /backups/{backend}/{id}][%d] backupsCreateStatusUnauthorized ", 401) +} + +func (o *BackupsCreateStatusUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewBackupsCreateStatusForbidden creates 
a BackupsCreateStatusForbidden with default headers values +func NewBackupsCreateStatusForbidden() *BackupsCreateStatusForbidden { + return &BackupsCreateStatusForbidden{} +} + +/* +BackupsCreateStatusForbidden describes a response with status code 403, with default header values. + +Forbidden +*/ +type BackupsCreateStatusForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this backups create status forbidden response has a 2xx status code +func (o *BackupsCreateStatusForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this backups create status forbidden response has a 3xx status code +func (o *BackupsCreateStatusForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this backups create status forbidden response has a 4xx status code +func (o *BackupsCreateStatusForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this backups create status forbidden response has a 5xx status code +func (o *BackupsCreateStatusForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this backups create status forbidden response a status code equal to that given +func (o *BackupsCreateStatusForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the backups create status forbidden response +func (o *BackupsCreateStatusForbidden) Code() int { + return 403 +} + +func (o *BackupsCreateStatusForbidden) Error() string { + return fmt.Sprintf("[GET /backups/{backend}/{id}][%d] backupsCreateStatusForbidden %+v", 403, o.Payload) +} + +func (o *BackupsCreateStatusForbidden) String() string { + return fmt.Sprintf("[GET /backups/{backend}/{id}][%d] backupsCreateStatusForbidden %+v", 403, o.Payload) +} + +func (o *BackupsCreateStatusForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *BackupsCreateStatusForbidden) readResponse(response runtime.ClientResponse, consumer 
runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewBackupsCreateStatusNotFound creates a BackupsCreateStatusNotFound with default headers values +func NewBackupsCreateStatusNotFound() *BackupsCreateStatusNotFound { + return &BackupsCreateStatusNotFound{} +} + +/* +BackupsCreateStatusNotFound describes a response with status code 404, with default header values. + +Not Found - Backup does not exist +*/ +type BackupsCreateStatusNotFound struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this backups create status not found response has a 2xx status code +func (o *BackupsCreateStatusNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this backups create status not found response has a 3xx status code +func (o *BackupsCreateStatusNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this backups create status not found response has a 4xx status code +func (o *BackupsCreateStatusNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this backups create status not found response has a 5xx status code +func (o *BackupsCreateStatusNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this backups create status not found response a status code equal to that given +func (o *BackupsCreateStatusNotFound) IsCode(code int) bool { + return code == 404 +} + +// Code gets the status code for the backups create status not found response +func (o *BackupsCreateStatusNotFound) Code() int { + return 404 +} + +func (o *BackupsCreateStatusNotFound) Error() string { + return fmt.Sprintf("[GET /backups/{backend}/{id}][%d] backupsCreateStatusNotFound %+v", 404, o.Payload) +} + +func (o *BackupsCreateStatusNotFound) String() string { + return 
fmt.Sprintf("[GET /backups/{backend}/{id}][%d] backupsCreateStatusNotFound %+v", 404, o.Payload) +} + +func (o *BackupsCreateStatusNotFound) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *BackupsCreateStatusNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewBackupsCreateStatusUnprocessableEntity creates a BackupsCreateStatusUnprocessableEntity with default headers values +func NewBackupsCreateStatusUnprocessableEntity() *BackupsCreateStatusUnprocessableEntity { + return &BackupsCreateStatusUnprocessableEntity{} +} + +/* +BackupsCreateStatusUnprocessableEntity describes a response with status code 422, with default header values. + +Invalid backup restoration status attempt. +*/ +type BackupsCreateStatusUnprocessableEntity struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this backups create status unprocessable entity response has a 2xx status code +func (o *BackupsCreateStatusUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this backups create status unprocessable entity response has a 3xx status code +func (o *BackupsCreateStatusUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this backups create status unprocessable entity response has a 4xx status code +func (o *BackupsCreateStatusUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this backups create status unprocessable entity response has a 5xx status code +func (o *BackupsCreateStatusUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this backups create status unprocessable entity response a status code equal to that 
given +func (o *BackupsCreateStatusUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + +// Code gets the status code for the backups create status unprocessable entity response +func (o *BackupsCreateStatusUnprocessableEntity) Code() int { + return 422 +} + +func (o *BackupsCreateStatusUnprocessableEntity) Error() string { + return fmt.Sprintf("[GET /backups/{backend}/{id}][%d] backupsCreateStatusUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *BackupsCreateStatusUnprocessableEntity) String() string { + return fmt.Sprintf("[GET /backups/{backend}/{id}][%d] backupsCreateStatusUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *BackupsCreateStatusUnprocessableEntity) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *BackupsCreateStatusUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewBackupsCreateStatusInternalServerError creates a BackupsCreateStatusInternalServerError with default headers values +func NewBackupsCreateStatusInternalServerError() *BackupsCreateStatusInternalServerError { + return &BackupsCreateStatusInternalServerError{} +} + +/* +BackupsCreateStatusInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+*/ +type BackupsCreateStatusInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this backups create status internal server error response has a 2xx status code +func (o *BackupsCreateStatusInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this backups create status internal server error response has a 3xx status code +func (o *BackupsCreateStatusInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this backups create status internal server error response has a 4xx status code +func (o *BackupsCreateStatusInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this backups create status internal server error response has a 5xx status code +func (o *BackupsCreateStatusInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this backups create status internal server error response a status code equal to that given +func (o *BackupsCreateStatusInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the backups create status internal server error response +func (o *BackupsCreateStatusInternalServerError) Code() int { + return 500 +} + +func (o *BackupsCreateStatusInternalServerError) Error() string { + return fmt.Sprintf("[GET /backups/{backend}/{id}][%d] backupsCreateStatusInternalServerError %+v", 500, o.Payload) +} + +func (o *BackupsCreateStatusInternalServerError) String() string { + return fmt.Sprintf("[GET /backups/{backend}/{id}][%d] backupsCreateStatusInternalServerError %+v", 500, o.Payload) +} + +func (o *BackupsCreateStatusInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *BackupsCreateStatusInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + 
+ // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/backups/backups_list_parameters.go b/platform/dbops/binaries/weaviate-src/client/backups/backups_list_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..e0629c9628f54e511b7ac810e7faf568bcd8c94a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/backups/backups_list_parameters.go @@ -0,0 +1,162 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package backups + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewBackupsListParams creates a new BackupsListParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewBackupsListParams() *BackupsListParams { + return &BackupsListParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewBackupsListParamsWithTimeout creates a new BackupsListParams object +// with the ability to set a timeout on a request. 
+func NewBackupsListParamsWithTimeout(timeout time.Duration) *BackupsListParams { + return &BackupsListParams{ + timeout: timeout, + } +} + +// NewBackupsListParamsWithContext creates a new BackupsListParams object +// with the ability to set a context for a request. +func NewBackupsListParamsWithContext(ctx context.Context) *BackupsListParams { + return &BackupsListParams{ + Context: ctx, + } +} + +// NewBackupsListParamsWithHTTPClient creates a new BackupsListParams object +// with the ability to set a custom HTTPClient for a request. +func NewBackupsListParamsWithHTTPClient(client *http.Client) *BackupsListParams { + return &BackupsListParams{ + HTTPClient: client, + } +} + +/* +BackupsListParams contains all the parameters to send to the API endpoint + + for the backups list operation. + + Typically these are written to a http.Request. +*/ +type BackupsListParams struct { + + /* Backend. + + Backup backend name e.g. filesystem, gcs, s3. + */ + Backend string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the backups list params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *BackupsListParams) WithDefaults() *BackupsListParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the backups list params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *BackupsListParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the backups list params +func (o *BackupsListParams) WithTimeout(timeout time.Duration) *BackupsListParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the backups list params +func (o *BackupsListParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the backups list params +func (o *BackupsListParams) WithContext(ctx context.Context) *BackupsListParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the backups list params +func (o *BackupsListParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the backups list params +func (o *BackupsListParams) WithHTTPClient(client *http.Client) *BackupsListParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the backups list params +func (o *BackupsListParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBackend adds the backend to the backups list params +func (o *BackupsListParams) WithBackend(backend string) *BackupsListParams { + o.SetBackend(backend) + return o +} + +// SetBackend adds the backend to the backups list params +func (o *BackupsListParams) SetBackend(backend string) { + o.Backend = backend +} + +// WriteToRequest writes these params to a swagger request +func (o *BackupsListParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param backend + if err := r.SetPathParam("backend", o.Backend); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/backups/backups_list_responses.go b/platform/dbops/binaries/weaviate-src/client/backups/backups_list_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..063cde89fc635febb38270248100b59fb4874995 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/backups/backups_list_responses.go @@ -0,0 +1,396 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package backups + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// BackupsListReader is a Reader for the BackupsList structure. +type BackupsListReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *BackupsListReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewBackupsListOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 401: + result := NewBackupsListUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewBackupsListForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewBackupsListUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewBackupsListInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewBackupsListOK creates a BackupsListOK with default headers values +func NewBackupsListOK() *BackupsListOK { + return &BackupsListOK{} +} + +/* +BackupsListOK describes a response with status code 200, with default header values. 
+ +Existed backups +*/ +type BackupsListOK struct { + Payload models.BackupListResponse +} + +// IsSuccess returns true when this backups list o k response has a 2xx status code +func (o *BackupsListOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this backups list o k response has a 3xx status code +func (o *BackupsListOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this backups list o k response has a 4xx status code +func (o *BackupsListOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this backups list o k response has a 5xx status code +func (o *BackupsListOK) IsServerError() bool { + return false +} + +// IsCode returns true when this backups list o k response a status code equal to that given +func (o *BackupsListOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the backups list o k response +func (o *BackupsListOK) Code() int { + return 200 +} + +func (o *BackupsListOK) Error() string { + return fmt.Sprintf("[GET /backups/{backend}][%d] backupsListOK %+v", 200, o.Payload) +} + +func (o *BackupsListOK) String() string { + return fmt.Sprintf("[GET /backups/{backend}][%d] backupsListOK %+v", 200, o.Payload) +} + +func (o *BackupsListOK) GetPayload() models.BackupListResponse { + return o.Payload +} + +func (o *BackupsListOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewBackupsListUnauthorized creates a BackupsListUnauthorized with default headers values +func NewBackupsListUnauthorized() *BackupsListUnauthorized { + return &BackupsListUnauthorized{} +} + +/* +BackupsListUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. 
+*/ +type BackupsListUnauthorized struct { +} + +// IsSuccess returns true when this backups list unauthorized response has a 2xx status code +func (o *BackupsListUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this backups list unauthorized response has a 3xx status code +func (o *BackupsListUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this backups list unauthorized response has a 4xx status code +func (o *BackupsListUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this backups list unauthorized response has a 5xx status code +func (o *BackupsListUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this backups list unauthorized response a status code equal to that given +func (o *BackupsListUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the backups list unauthorized response +func (o *BackupsListUnauthorized) Code() int { + return 401 +} + +func (o *BackupsListUnauthorized) Error() string { + return fmt.Sprintf("[GET /backups/{backend}][%d] backupsListUnauthorized ", 401) +} + +func (o *BackupsListUnauthorized) String() string { + return fmt.Sprintf("[GET /backups/{backend}][%d] backupsListUnauthorized ", 401) +} + +func (o *BackupsListUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewBackupsListForbidden creates a BackupsListForbidden with default headers values +func NewBackupsListForbidden() *BackupsListForbidden { + return &BackupsListForbidden{} +} + +/* +BackupsListForbidden describes a response with status code 403, with default header values. 
+ +Forbidden +*/ +type BackupsListForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this backups list forbidden response has a 2xx status code +func (o *BackupsListForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this backups list forbidden response has a 3xx status code +func (o *BackupsListForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this backups list forbidden response has a 4xx status code +func (o *BackupsListForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this backups list forbidden response has a 5xx status code +func (o *BackupsListForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this backups list forbidden response a status code equal to that given +func (o *BackupsListForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the backups list forbidden response +func (o *BackupsListForbidden) Code() int { + return 403 +} + +func (o *BackupsListForbidden) Error() string { + return fmt.Sprintf("[GET /backups/{backend}][%d] backupsListForbidden %+v", 403, o.Payload) +} + +func (o *BackupsListForbidden) String() string { + return fmt.Sprintf("[GET /backups/{backend}][%d] backupsListForbidden %+v", 403, o.Payload) +} + +func (o *BackupsListForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *BackupsListForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewBackupsListUnprocessableEntity creates a BackupsListUnprocessableEntity with default headers values +func NewBackupsListUnprocessableEntity() *BackupsListUnprocessableEntity { + return 
&BackupsListUnprocessableEntity{} +} + +/* +BackupsListUnprocessableEntity describes a response with status code 422, with default header values. + +Invalid backup list. +*/ +type BackupsListUnprocessableEntity struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this backups list unprocessable entity response has a 2xx status code +func (o *BackupsListUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this backups list unprocessable entity response has a 3xx status code +func (o *BackupsListUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this backups list unprocessable entity response has a 4xx status code +func (o *BackupsListUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this backups list unprocessable entity response has a 5xx status code +func (o *BackupsListUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this backups list unprocessable entity response a status code equal to that given +func (o *BackupsListUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + +// Code gets the status code for the backups list unprocessable entity response +func (o *BackupsListUnprocessableEntity) Code() int { + return 422 +} + +func (o *BackupsListUnprocessableEntity) Error() string { + return fmt.Sprintf("[GET /backups/{backend}][%d] backupsListUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *BackupsListUnprocessableEntity) String() string { + return fmt.Sprintf("[GET /backups/{backend}][%d] backupsListUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *BackupsListUnprocessableEntity) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *BackupsListUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // 
response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewBackupsListInternalServerError creates a BackupsListInternalServerError with default headers values +func NewBackupsListInternalServerError() *BackupsListInternalServerError { + return &BackupsListInternalServerError{} +} + +/* +BackupsListInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. +*/ +type BackupsListInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this backups list internal server error response has a 2xx status code +func (o *BackupsListInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this backups list internal server error response has a 3xx status code +func (o *BackupsListInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this backups list internal server error response has a 4xx status code +func (o *BackupsListInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this backups list internal server error response has a 5xx status code +func (o *BackupsListInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this backups list internal server error response a status code equal to that given +func (o *BackupsListInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the backups list internal server error response +func (o *BackupsListInternalServerError) Code() int { + return 500 +} + +func (o *BackupsListInternalServerError) Error() string { + return fmt.Sprintf("[GET /backups/{backend}][%d] backupsListInternalServerError %+v", 500, o.Payload) +} + +func (o 
*BackupsListInternalServerError) String() string { + return fmt.Sprintf("[GET /backups/{backend}][%d] backupsListInternalServerError %+v", 500, o.Payload) +} + +func (o *BackupsListInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *BackupsListInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/backups/backups_restore_parameters.go b/platform/dbops/binaries/weaviate-src/client/backups/backups_restore_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..3d4d00d5d0eeee2ae309de7e32988a40a3d7caa9 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/backups/backups_restore_parameters.go @@ -0,0 +1,205 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package backups + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// NewBackupsRestoreParams creates a new BackupsRestoreParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. 
+// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewBackupsRestoreParams() *BackupsRestoreParams { + return &BackupsRestoreParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewBackupsRestoreParamsWithTimeout creates a new BackupsRestoreParams object +// with the ability to set a timeout on a request. +func NewBackupsRestoreParamsWithTimeout(timeout time.Duration) *BackupsRestoreParams { + return &BackupsRestoreParams{ + timeout: timeout, + } +} + +// NewBackupsRestoreParamsWithContext creates a new BackupsRestoreParams object +// with the ability to set a context for a request. +func NewBackupsRestoreParamsWithContext(ctx context.Context) *BackupsRestoreParams { + return &BackupsRestoreParams{ + Context: ctx, + } +} + +// NewBackupsRestoreParamsWithHTTPClient creates a new BackupsRestoreParams object +// with the ability to set a custom HTTPClient for a request. +func NewBackupsRestoreParamsWithHTTPClient(client *http.Client) *BackupsRestoreParams { + return &BackupsRestoreParams{ + HTTPClient: client, + } +} + +/* +BackupsRestoreParams contains all the parameters to send to the API endpoint + + for the backups restore operation. + + Typically these are written to a http.Request. +*/ +type BackupsRestoreParams struct { + + /* Backend. + + Backup backend name e.g. `filesystem`, `gcs`, `s3`, `azure`. + */ + Backend string + + // Body. + Body *models.BackupRestoreRequest + + /* ID. + + The ID of a backup. Must be URL-safe and work as a filesystem path, only lowercase, numbers, underscore, minus characters allowed. + */ + ID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the backups restore params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *BackupsRestoreParams) WithDefaults() *BackupsRestoreParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the backups restore params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *BackupsRestoreParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the backups restore params +func (o *BackupsRestoreParams) WithTimeout(timeout time.Duration) *BackupsRestoreParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the backups restore params +func (o *BackupsRestoreParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the backups restore params +func (o *BackupsRestoreParams) WithContext(ctx context.Context) *BackupsRestoreParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the backups restore params +func (o *BackupsRestoreParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the backups restore params +func (o *BackupsRestoreParams) WithHTTPClient(client *http.Client) *BackupsRestoreParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the backups restore params +func (o *BackupsRestoreParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBackend adds the backend to the backups restore params +func (o *BackupsRestoreParams) WithBackend(backend string) *BackupsRestoreParams { + o.SetBackend(backend) + return o +} + +// SetBackend adds the backend to the backups restore params +func (o *BackupsRestoreParams) SetBackend(backend string) { + o.Backend = backend +} + +// WithBody adds the body to the backups restore params +func (o *BackupsRestoreParams) WithBody(body *models.BackupRestoreRequest) *BackupsRestoreParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the backups 
restore params +func (o *BackupsRestoreParams) SetBody(body *models.BackupRestoreRequest) { + o.Body = body +} + +// WithID adds the id to the backups restore params +func (o *BackupsRestoreParams) WithID(id string) *BackupsRestoreParams { + o.SetID(id) + return o +} + +// SetID adds the id to the backups restore params +func (o *BackupsRestoreParams) SetID(id string) { + o.ID = id +} + +// WriteToRequest writes these params to a swagger request +func (o *BackupsRestoreParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param backend + if err := r.SetPathParam("backend", o.Backend); err != nil { + return err + } + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + // path param id + if err := r.SetPathParam("id", o.ID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/backups/backups_restore_responses.go b/platform/dbops/binaries/weaviate-src/client/backups/backups_restore_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..4d602e3a747f39da55c3d88b2ec45ea319c9ecfd --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/backups/backups_restore_responses.go @@ -0,0 +1,472 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package backups + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// BackupsRestoreReader is a Reader for the BackupsRestore structure. +type BackupsRestoreReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *BackupsRestoreReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewBackupsRestoreOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 401: + result := NewBackupsRestoreUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewBackupsRestoreForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 404: + result := NewBackupsRestoreNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewBackupsRestoreUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewBackupsRestoreInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewBackupsRestoreOK creates a BackupsRestoreOK with default headers values +func NewBackupsRestoreOK() *BackupsRestoreOK { + return &BackupsRestoreOK{} +} + +/* 
+BackupsRestoreOK describes a response with status code 200, with default header values. + +Backup restoration process successfully started. +*/ +type BackupsRestoreOK struct { + Payload *models.BackupRestoreResponse +} + +// IsSuccess returns true when this backups restore o k response has a 2xx status code +func (o *BackupsRestoreOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this backups restore o k response has a 3xx status code +func (o *BackupsRestoreOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this backups restore o k response has a 4xx status code +func (o *BackupsRestoreOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this backups restore o k response has a 5xx status code +func (o *BackupsRestoreOK) IsServerError() bool { + return false +} + +// IsCode returns true when this backups restore o k response a status code equal to that given +func (o *BackupsRestoreOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the backups restore o k response +func (o *BackupsRestoreOK) Code() int { + return 200 +} + +func (o *BackupsRestoreOK) Error() string { + return fmt.Sprintf("[POST /backups/{backend}/{id}/restore][%d] backupsRestoreOK %+v", 200, o.Payload) +} + +func (o *BackupsRestoreOK) String() string { + return fmt.Sprintf("[POST /backups/{backend}/{id}/restore][%d] backupsRestoreOK %+v", 200, o.Payload) +} + +func (o *BackupsRestoreOK) GetPayload() *models.BackupRestoreResponse { + return o.Payload +} + +func (o *BackupsRestoreOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.BackupRestoreResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewBackupsRestoreUnauthorized creates a BackupsRestoreUnauthorized with default headers values 
+func NewBackupsRestoreUnauthorized() *BackupsRestoreUnauthorized { + return &BackupsRestoreUnauthorized{} +} + +/* +BackupsRestoreUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. +*/ +type BackupsRestoreUnauthorized struct { +} + +// IsSuccess returns true when this backups restore unauthorized response has a 2xx status code +func (o *BackupsRestoreUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this backups restore unauthorized response has a 3xx status code +func (o *BackupsRestoreUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this backups restore unauthorized response has a 4xx status code +func (o *BackupsRestoreUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this backups restore unauthorized response has a 5xx status code +func (o *BackupsRestoreUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this backups restore unauthorized response a status code equal to that given +func (o *BackupsRestoreUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the backups restore unauthorized response +func (o *BackupsRestoreUnauthorized) Code() int { + return 401 +} + +func (o *BackupsRestoreUnauthorized) Error() string { + return fmt.Sprintf("[POST /backups/{backend}/{id}/restore][%d] backupsRestoreUnauthorized ", 401) +} + +func (o *BackupsRestoreUnauthorized) String() string { + return fmt.Sprintf("[POST /backups/{backend}/{id}/restore][%d] backupsRestoreUnauthorized ", 401) +} + +func (o *BackupsRestoreUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewBackupsRestoreForbidden creates a BackupsRestoreForbidden with default headers values +func NewBackupsRestoreForbidden() *BackupsRestoreForbidden { + 
return &BackupsRestoreForbidden{} +} + +/* +BackupsRestoreForbidden describes a response with status code 403, with default header values. + +Forbidden +*/ +type BackupsRestoreForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this backups restore forbidden response has a 2xx status code +func (o *BackupsRestoreForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this backups restore forbidden response has a 3xx status code +func (o *BackupsRestoreForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this backups restore forbidden response has a 4xx status code +func (o *BackupsRestoreForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this backups restore forbidden response has a 5xx status code +func (o *BackupsRestoreForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this backups restore forbidden response a status code equal to that given +func (o *BackupsRestoreForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the backups restore forbidden response +func (o *BackupsRestoreForbidden) Code() int { + return 403 +} + +func (o *BackupsRestoreForbidden) Error() string { + return fmt.Sprintf("[POST /backups/{backend}/{id}/restore][%d] backupsRestoreForbidden %+v", 403, o.Payload) +} + +func (o *BackupsRestoreForbidden) String() string { + return fmt.Sprintf("[POST /backups/{backend}/{id}/restore][%d] backupsRestoreForbidden %+v", 403, o.Payload) +} + +func (o *BackupsRestoreForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *BackupsRestoreForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + 
return nil +} + +// NewBackupsRestoreNotFound creates a BackupsRestoreNotFound with default headers values +func NewBackupsRestoreNotFound() *BackupsRestoreNotFound { + return &BackupsRestoreNotFound{} +} + +/* +BackupsRestoreNotFound describes a response with status code 404, with default header values. + +Not Found - Backup does not exist +*/ +type BackupsRestoreNotFound struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this backups restore not found response has a 2xx status code +func (o *BackupsRestoreNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this backups restore not found response has a 3xx status code +func (o *BackupsRestoreNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this backups restore not found response has a 4xx status code +func (o *BackupsRestoreNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this backups restore not found response has a 5xx status code +func (o *BackupsRestoreNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this backups restore not found response a status code equal to that given +func (o *BackupsRestoreNotFound) IsCode(code int) bool { + return code == 404 +} + +// Code gets the status code for the backups restore not found response +func (o *BackupsRestoreNotFound) Code() int { + return 404 +} + +func (o *BackupsRestoreNotFound) Error() string { + return fmt.Sprintf("[POST /backups/{backend}/{id}/restore][%d] backupsRestoreNotFound %+v", 404, o.Payload) +} + +func (o *BackupsRestoreNotFound) String() string { + return fmt.Sprintf("[POST /backups/{backend}/{id}/restore][%d] backupsRestoreNotFound %+v", 404, o.Payload) +} + +func (o *BackupsRestoreNotFound) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *BackupsRestoreNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { 
+ + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewBackupsRestoreUnprocessableEntity creates a BackupsRestoreUnprocessableEntity with default headers values +func NewBackupsRestoreUnprocessableEntity() *BackupsRestoreUnprocessableEntity { + return &BackupsRestoreUnprocessableEntity{} +} + +/* +BackupsRestoreUnprocessableEntity describes a response with status code 422, with default header values. + +Invalid backup restoration attempt. +*/ +type BackupsRestoreUnprocessableEntity struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this backups restore unprocessable entity response has a 2xx status code +func (o *BackupsRestoreUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this backups restore unprocessable entity response has a 3xx status code +func (o *BackupsRestoreUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this backups restore unprocessable entity response has a 4xx status code +func (o *BackupsRestoreUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this backups restore unprocessable entity response has a 5xx status code +func (o *BackupsRestoreUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this backups restore unprocessable entity response a status code equal to that given +func (o *BackupsRestoreUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + +// Code gets the status code for the backups restore unprocessable entity response +func (o *BackupsRestoreUnprocessableEntity) Code() int { + return 422 +} + +func (o *BackupsRestoreUnprocessableEntity) Error() string { + return fmt.Sprintf("[POST /backups/{backend}/{id}/restore][%d] backupsRestoreUnprocessableEntity %+v", 422, 
o.Payload) +} + +func (o *BackupsRestoreUnprocessableEntity) String() string { + return fmt.Sprintf("[POST /backups/{backend}/{id}/restore][%d] backupsRestoreUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *BackupsRestoreUnprocessableEntity) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *BackupsRestoreUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewBackupsRestoreInternalServerError creates a BackupsRestoreInternalServerError with default headers values +func NewBackupsRestoreInternalServerError() *BackupsRestoreInternalServerError { + return &BackupsRestoreInternalServerError{} +} + +/* +BackupsRestoreInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+*/ +type BackupsRestoreInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this backups restore internal server error response has a 2xx status code +func (o *BackupsRestoreInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this backups restore internal server error response has a 3xx status code +func (o *BackupsRestoreInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this backups restore internal server error response has a 4xx status code +func (o *BackupsRestoreInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this backups restore internal server error response has a 5xx status code +func (o *BackupsRestoreInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this backups restore internal server error response a status code equal to that given +func (o *BackupsRestoreInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the backups restore internal server error response +func (o *BackupsRestoreInternalServerError) Code() int { + return 500 +} + +func (o *BackupsRestoreInternalServerError) Error() string { + return fmt.Sprintf("[POST /backups/{backend}/{id}/restore][%d] backupsRestoreInternalServerError %+v", 500, o.Payload) +} + +func (o *BackupsRestoreInternalServerError) String() string { + return fmt.Sprintf("[POST /backups/{backend}/{id}/restore][%d] backupsRestoreInternalServerError %+v", 500, o.Payload) +} + +func (o *BackupsRestoreInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *BackupsRestoreInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err 
!= nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/backups/backups_restore_status_parameters.go b/platform/dbops/binaries/weaviate-src/client/backups/backups_restore_status_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..f858b2f7cc68fbf9125ba8bcfeb9bcf6bbffef6e --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/backups/backups_restore_status_parameters.go @@ -0,0 +1,252 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package backups + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewBackupsRestoreStatusParams creates a new BackupsRestoreStatusParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewBackupsRestoreStatusParams() *BackupsRestoreStatusParams { + return &BackupsRestoreStatusParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewBackupsRestoreStatusParamsWithTimeout creates a new BackupsRestoreStatusParams object +// with the ability to set a timeout on a request. 
+func NewBackupsRestoreStatusParamsWithTimeout(timeout time.Duration) *BackupsRestoreStatusParams { + return &BackupsRestoreStatusParams{ + timeout: timeout, + } +} + +// NewBackupsRestoreStatusParamsWithContext creates a new BackupsRestoreStatusParams object +// with the ability to set a context for a request. +func NewBackupsRestoreStatusParamsWithContext(ctx context.Context) *BackupsRestoreStatusParams { + return &BackupsRestoreStatusParams{ + Context: ctx, + } +} + +// NewBackupsRestoreStatusParamsWithHTTPClient creates a new BackupsRestoreStatusParams object +// with the ability to set a custom HTTPClient for a request. +func NewBackupsRestoreStatusParamsWithHTTPClient(client *http.Client) *BackupsRestoreStatusParams { + return &BackupsRestoreStatusParams{ + HTTPClient: client, + } +} + +/* +BackupsRestoreStatusParams contains all the parameters to send to the API endpoint + + for the backups restore status operation. + + Typically these are written to a http.Request. +*/ +type BackupsRestoreStatusParams struct { + + /* Backend. + + Backup backend name e.g. `filesystem`, `gcs`, `s3`, `azure`. + */ + Backend string + + /* Bucket. + + Name of the bucket, container, volume, etc + */ + Bucket *string + + /* ID. + + The ID of a backup. Must be URL-safe and work as a filesystem path, only lowercase, numbers, underscore, minus characters allowed. + */ + ID string + + /* Path. + + The path within the bucket + */ + Path *string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the backups restore status params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *BackupsRestoreStatusParams) WithDefaults() *BackupsRestoreStatusParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the backups restore status params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *BackupsRestoreStatusParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the backups restore status params +func (o *BackupsRestoreStatusParams) WithTimeout(timeout time.Duration) *BackupsRestoreStatusParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the backups restore status params +func (o *BackupsRestoreStatusParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the backups restore status params +func (o *BackupsRestoreStatusParams) WithContext(ctx context.Context) *BackupsRestoreStatusParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the backups restore status params +func (o *BackupsRestoreStatusParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the backups restore status params +func (o *BackupsRestoreStatusParams) WithHTTPClient(client *http.Client) *BackupsRestoreStatusParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the backups restore status params +func (o *BackupsRestoreStatusParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBackend adds the backend to the backups restore status params +func (o *BackupsRestoreStatusParams) WithBackend(backend string) *BackupsRestoreStatusParams { + o.SetBackend(backend) + return o +} + +// SetBackend adds the backend to the backups restore status params +func (o *BackupsRestoreStatusParams) SetBackend(backend string) { + o.Backend = backend +} + +// WithBucket adds the bucket to the backups restore status params +func (o *BackupsRestoreStatusParams) WithBucket(bucket *string) *BackupsRestoreStatusParams { + o.SetBucket(bucket) + return o +} + +// SetBucket adds the bucket to the backups restore status params +func (o *BackupsRestoreStatusParams) SetBucket(bucket *string) { + o.Bucket = bucket +} + +// 
WithID adds the id to the backups restore status params +func (o *BackupsRestoreStatusParams) WithID(id string) *BackupsRestoreStatusParams { + o.SetID(id) + return o +} + +// SetID adds the id to the backups restore status params +func (o *BackupsRestoreStatusParams) SetID(id string) { + o.ID = id +} + +// WithPath adds the path to the backups restore status params +func (o *BackupsRestoreStatusParams) WithPath(path *string) *BackupsRestoreStatusParams { + o.SetPath(path) + return o +} + +// SetPath adds the path to the backups restore status params +func (o *BackupsRestoreStatusParams) SetPath(path *string) { + o.Path = path +} + +// WriteToRequest writes these params to a swagger request +func (o *BackupsRestoreStatusParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param backend + if err := r.SetPathParam("backend", o.Backend); err != nil { + return err + } + + if o.Bucket != nil { + + // query param bucket + var qrBucket string + + if o.Bucket != nil { + qrBucket = *o.Bucket + } + qBucket := qrBucket + if qBucket != "" { + + if err := r.SetQueryParam("bucket", qBucket); err != nil { + return err + } + } + } + + // path param id + if err := r.SetPathParam("id", o.ID); err != nil { + return err + } + + if o.Path != nil { + + // query param path + var qrPath string + + if o.Path != nil { + qrPath = *o.Path + } + qPath := qrPath + if qPath != "" { + + if err := r.SetQueryParam("path", qPath); err != nil { + return err + } + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/backups/backups_restore_status_responses.go b/platform/dbops/binaries/weaviate-src/client/backups/backups_restore_status_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..be220b3a4dafb707a1c6f7a35b68df553ec55c52 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/backups/backups_restore_status_responses.go @@ -0,0 +1,398 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package backups + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// BackupsRestoreStatusReader is a Reader for the BackupsRestoreStatus structure. +type BackupsRestoreStatusReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *BackupsRestoreStatusReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewBackupsRestoreStatusOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 401: + result := NewBackupsRestoreStatusUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewBackupsRestoreStatusForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 404: + result := NewBackupsRestoreStatusNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewBackupsRestoreStatusInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewBackupsRestoreStatusOK creates a BackupsRestoreStatusOK with default headers values +func NewBackupsRestoreStatusOK() *BackupsRestoreStatusOK { + return &BackupsRestoreStatusOK{} +} + +/* +BackupsRestoreStatusOK describes a response with status code 200, with default header values. 
+ +Backup restoration status successfully returned +*/ +type BackupsRestoreStatusOK struct { + Payload *models.BackupRestoreStatusResponse +} + +// IsSuccess returns true when this backups restore status o k response has a 2xx status code +func (o *BackupsRestoreStatusOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this backups restore status o k response has a 3xx status code +func (o *BackupsRestoreStatusOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this backups restore status o k response has a 4xx status code +func (o *BackupsRestoreStatusOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this backups restore status o k response has a 5xx status code +func (o *BackupsRestoreStatusOK) IsServerError() bool { + return false +} + +// IsCode returns true when this backups restore status o k response a status code equal to that given +func (o *BackupsRestoreStatusOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the backups restore status o k response +func (o *BackupsRestoreStatusOK) Code() int { + return 200 +} + +func (o *BackupsRestoreStatusOK) Error() string { + return fmt.Sprintf("[GET /backups/{backend}/{id}/restore][%d] backupsRestoreStatusOK %+v", 200, o.Payload) +} + +func (o *BackupsRestoreStatusOK) String() string { + return fmt.Sprintf("[GET /backups/{backend}/{id}/restore][%d] backupsRestoreStatusOK %+v", 200, o.Payload) +} + +func (o *BackupsRestoreStatusOK) GetPayload() *models.BackupRestoreStatusResponse { + return o.Payload +} + +func (o *BackupsRestoreStatusOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.BackupRestoreStatusResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewBackupsRestoreStatusUnauthorized creates a 
BackupsRestoreStatusUnauthorized with default headers values +func NewBackupsRestoreStatusUnauthorized() *BackupsRestoreStatusUnauthorized { + return &BackupsRestoreStatusUnauthorized{} +} + +/* +BackupsRestoreStatusUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. +*/ +type BackupsRestoreStatusUnauthorized struct { +} + +// IsSuccess returns true when this backups restore status unauthorized response has a 2xx status code +func (o *BackupsRestoreStatusUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this backups restore status unauthorized response has a 3xx status code +func (o *BackupsRestoreStatusUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this backups restore status unauthorized response has a 4xx status code +func (o *BackupsRestoreStatusUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this backups restore status unauthorized response has a 5xx status code +func (o *BackupsRestoreStatusUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this backups restore status unauthorized response a status code equal to that given +func (o *BackupsRestoreStatusUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the backups restore status unauthorized response +func (o *BackupsRestoreStatusUnauthorized) Code() int { + return 401 +} + +func (o *BackupsRestoreStatusUnauthorized) Error() string { + return fmt.Sprintf("[GET /backups/{backend}/{id}/restore][%d] backupsRestoreStatusUnauthorized ", 401) +} + +func (o *BackupsRestoreStatusUnauthorized) String() string { + return fmt.Sprintf("[GET /backups/{backend}/{id}/restore][%d] backupsRestoreStatusUnauthorized ", 401) +} + +func (o *BackupsRestoreStatusUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats 
strfmt.Registry) error { + + return nil +} + +// NewBackupsRestoreStatusForbidden creates a BackupsRestoreStatusForbidden with default headers values +func NewBackupsRestoreStatusForbidden() *BackupsRestoreStatusForbidden { + return &BackupsRestoreStatusForbidden{} +} + +/* +BackupsRestoreStatusForbidden describes a response with status code 403, with default header values. + +Forbidden +*/ +type BackupsRestoreStatusForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this backups restore status forbidden response has a 2xx status code +func (o *BackupsRestoreStatusForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this backups restore status forbidden response has a 3xx status code +func (o *BackupsRestoreStatusForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this backups restore status forbidden response has a 4xx status code +func (o *BackupsRestoreStatusForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this backups restore status forbidden response has a 5xx status code +func (o *BackupsRestoreStatusForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this backups restore status forbidden response a status code equal to that given +func (o *BackupsRestoreStatusForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the backups restore status forbidden response +func (o *BackupsRestoreStatusForbidden) Code() int { + return 403 +} + +func (o *BackupsRestoreStatusForbidden) Error() string { + return fmt.Sprintf("[GET /backups/{backend}/{id}/restore][%d] backupsRestoreStatusForbidden %+v", 403, o.Payload) +} + +func (o *BackupsRestoreStatusForbidden) String() string { + return fmt.Sprintf("[GET /backups/{backend}/{id}/restore][%d] backupsRestoreStatusForbidden %+v", 403, o.Payload) +} + +func (o *BackupsRestoreStatusForbidden) GetPayload() 
*models.ErrorResponse { + return o.Payload +} + +func (o *BackupsRestoreStatusForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewBackupsRestoreStatusNotFound creates a BackupsRestoreStatusNotFound with default headers values +func NewBackupsRestoreStatusNotFound() *BackupsRestoreStatusNotFound { + return &BackupsRestoreStatusNotFound{} +} + +/* +BackupsRestoreStatusNotFound describes a response with status code 404, with default header values. + +Not Found - Backup does not exist +*/ +type BackupsRestoreStatusNotFound struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this backups restore status not found response has a 2xx status code +func (o *BackupsRestoreStatusNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this backups restore status not found response has a 3xx status code +func (o *BackupsRestoreStatusNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this backups restore status not found response has a 4xx status code +func (o *BackupsRestoreStatusNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this backups restore status not found response has a 5xx status code +func (o *BackupsRestoreStatusNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this backups restore status not found response a status code equal to that given +func (o *BackupsRestoreStatusNotFound) IsCode(code int) bool { + return code == 404 +} + +// Code gets the status code for the backups restore status not found response +func (o *BackupsRestoreStatusNotFound) Code() int { + return 404 +} + +func (o *BackupsRestoreStatusNotFound) Error() string { + return 
fmt.Sprintf("[GET /backups/{backend}/{id}/restore][%d] backupsRestoreStatusNotFound %+v", 404, o.Payload) +} + +func (o *BackupsRestoreStatusNotFound) String() string { + return fmt.Sprintf("[GET /backups/{backend}/{id}/restore][%d] backupsRestoreStatusNotFound %+v", 404, o.Payload) +} + +func (o *BackupsRestoreStatusNotFound) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *BackupsRestoreStatusNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewBackupsRestoreStatusInternalServerError creates a BackupsRestoreStatusInternalServerError with default headers values +func NewBackupsRestoreStatusInternalServerError() *BackupsRestoreStatusInternalServerError { + return &BackupsRestoreStatusInternalServerError{} +} + +/* +BackupsRestoreStatusInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+*/ +type BackupsRestoreStatusInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this backups restore status internal server error response has a 2xx status code +func (o *BackupsRestoreStatusInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this backups restore status internal server error response has a 3xx status code +func (o *BackupsRestoreStatusInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this backups restore status internal server error response has a 4xx status code +func (o *BackupsRestoreStatusInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this backups restore status internal server error response has a 5xx status code +func (o *BackupsRestoreStatusInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this backups restore status internal server error response a status code equal to that given +func (o *BackupsRestoreStatusInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the backups restore status internal server error response +func (o *BackupsRestoreStatusInternalServerError) Code() int { + return 500 +} + +func (o *BackupsRestoreStatusInternalServerError) Error() string { + return fmt.Sprintf("[GET /backups/{backend}/{id}/restore][%d] backupsRestoreStatusInternalServerError %+v", 500, o.Payload) +} + +func (o *BackupsRestoreStatusInternalServerError) String() string { + return fmt.Sprintf("[GET /backups/{backend}/{id}/restore][%d] backupsRestoreStatusInternalServerError %+v", 500, o.Payload) +} + +func (o *BackupsRestoreStatusInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *BackupsRestoreStatusInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + 
o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/batch/batch_client.go b/platform/dbops/binaries/weaviate-src/client/batch/batch_client.go new file mode 100644 index 0000000000000000000000000000000000000000..9fafd82d4df23512ecaaeb4c02aecfafc36e8470 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/batch/batch_client.go @@ -0,0 +1,179 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package batch + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" +) + +// New creates a new batch API client. 
+func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService { + return &Client{transport: transport, formats: formats} +} + +/* +Client for batch API +*/ +type Client struct { + transport runtime.ClientTransport + formats strfmt.Registry +} + +// ClientOption is the option for Client methods +type ClientOption func(*runtime.ClientOperation) + +// ClientService is the interface for Client methods +type ClientService interface { + BatchObjectsCreate(params *BatchObjectsCreateParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*BatchObjectsCreateOK, error) + + BatchObjectsDelete(params *BatchObjectsDeleteParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*BatchObjectsDeleteOK, error) + + BatchReferencesCreate(params *BatchReferencesCreateParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*BatchReferencesCreateOK, error) + + SetTransport(transport runtime.ClientTransport) +} + +/* +BatchObjectsCreate creates new objects based on a object template as a batch + +Create new objects in bulk.

Meta-data and schema values are validated.

**Note: idempotence of `/batch/objects`**:
`POST /batch/objects` is idempotent, and will overwrite any existing object given the same id. +*/ +func (a *Client) BatchObjectsCreate(params *BatchObjectsCreateParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*BatchObjectsCreateOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewBatchObjectsCreateParams() + } + op := &runtime.ClientOperation{ + ID: "batch.objects.create", + Method: "POST", + PathPattern: "/batch/objects", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &BatchObjectsCreateReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*BatchObjectsCreateOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for batch.objects.create: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +BatchObjectsDelete deletes objects based on a match filter as a batch + +Batch delete objects that match a particular filter.

The request body takes a single `where` filter and will delete all objects matched.

Note that there is a limit to the number of objects to be deleted at once using this filter, in order to protect against unexpected memory surges and very-long-running requests. The default limit is 10,000 and may be configured by setting the `QUERY_MAXIMUM_RESULTS` environment variable.

Objects are deleted in the same order that they would be returned in an equivalent Get query. To delete more objects than the limit, run the same query multiple times. +*/ +func (a *Client) BatchObjectsDelete(params *BatchObjectsDeleteParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*BatchObjectsDeleteOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewBatchObjectsDeleteParams() + } + op := &runtime.ClientOperation{ + ID: "batch.objects.delete", + Method: "DELETE", + PathPattern: "/batch/objects", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &BatchObjectsDeleteReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*BatchObjectsDeleteOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for batch.objects.delete: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +BatchReferencesCreate creates new cross references between arbitrary classes in bulk + +Batch create cross-references between collections items (objects or objects) in bulk. 
+*/ +func (a *Client) BatchReferencesCreate(params *BatchReferencesCreateParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*BatchReferencesCreateOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewBatchReferencesCreateParams() + } + op := &runtime.ClientOperation{ + ID: "batch.references.create", + Method: "POST", + PathPattern: "/batch/references", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &BatchReferencesCreateReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*BatchReferencesCreateOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for batch.references.create: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + panic(msg) +} + +// SetTransport changes the transport on the client +func (a *Client) SetTransport(transport runtime.ClientTransport) { + a.transport = transport +} diff --git a/platform/dbops/binaries/weaviate-src/client/batch/batch_objects_create_parameters.go b/platform/dbops/binaries/weaviate-src/client/batch/batch_objects_create_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..914ed61a24d5d53d0162a76c758e656ccbdb2c11 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/batch/batch_objects_create_parameters.go @@ -0,0 +1,191 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package batch + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewBatchObjectsCreateParams creates a new BatchObjectsCreateParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewBatchObjectsCreateParams() *BatchObjectsCreateParams { + return &BatchObjectsCreateParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewBatchObjectsCreateParamsWithTimeout creates a new BatchObjectsCreateParams object +// with the ability to set a timeout on a request. 
+func NewBatchObjectsCreateParamsWithTimeout(timeout time.Duration) *BatchObjectsCreateParams { + return &BatchObjectsCreateParams{ + timeout: timeout, + } +} + +// NewBatchObjectsCreateParamsWithContext creates a new BatchObjectsCreateParams object +// with the ability to set a context for a request. +func NewBatchObjectsCreateParamsWithContext(ctx context.Context) *BatchObjectsCreateParams { + return &BatchObjectsCreateParams{ + Context: ctx, + } +} + +// NewBatchObjectsCreateParamsWithHTTPClient creates a new BatchObjectsCreateParams object +// with the ability to set a custom HTTPClient for a request. +func NewBatchObjectsCreateParamsWithHTTPClient(client *http.Client) *BatchObjectsCreateParams { + return &BatchObjectsCreateParams{ + HTTPClient: client, + } +} + +/* +BatchObjectsCreateParams contains all the parameters to send to the API endpoint + + for the batch objects create operation. + + Typically these are written to a http.Request. +*/ +type BatchObjectsCreateParams struct { + + // Body. + Body BatchObjectsCreateBody + + /* ConsistencyLevel. + + Determines how many replicas must acknowledge a request before it is considered successful + */ + ConsistencyLevel *string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the batch objects create params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *BatchObjectsCreateParams) WithDefaults() *BatchObjectsCreateParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the batch objects create params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *BatchObjectsCreateParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the batch objects create params +func (o *BatchObjectsCreateParams) WithTimeout(timeout time.Duration) *BatchObjectsCreateParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the batch objects create params +func (o *BatchObjectsCreateParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the batch objects create params +func (o *BatchObjectsCreateParams) WithContext(ctx context.Context) *BatchObjectsCreateParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the batch objects create params +func (o *BatchObjectsCreateParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the batch objects create params +func (o *BatchObjectsCreateParams) WithHTTPClient(client *http.Client) *BatchObjectsCreateParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the batch objects create params +func (o *BatchObjectsCreateParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the batch objects create params +func (o *BatchObjectsCreateParams) WithBody(body BatchObjectsCreateBody) *BatchObjectsCreateParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the batch objects create params +func (o *BatchObjectsCreateParams) SetBody(body BatchObjectsCreateBody) { + o.Body = body +} + +// WithConsistencyLevel adds the consistencyLevel to the batch objects create params +func (o *BatchObjectsCreateParams) WithConsistencyLevel(consistencyLevel *string) *BatchObjectsCreateParams { + o.SetConsistencyLevel(consistencyLevel) + return o +} + +// SetConsistencyLevel adds the consistencyLevel to the batch objects create params +func (o *BatchObjectsCreateParams) SetConsistencyLevel(consistencyLevel 
*string) { + o.ConsistencyLevel = consistencyLevel +} + +// WriteToRequest writes these params to a swagger request +func (o *BatchObjectsCreateParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + + if o.ConsistencyLevel != nil { + + // query param consistency_level + var qrConsistencyLevel string + + if o.ConsistencyLevel != nil { + qrConsistencyLevel = *o.ConsistencyLevel + } + qConsistencyLevel := qrConsistencyLevel + if qConsistencyLevel != "" { + + if err := r.SetQueryParam("consistency_level", qConsistencyLevel); err != nil { + return err + } + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/batch/batch_objects_create_responses.go b/platform/dbops/binaries/weaviate-src/client/batch/batch_objects_create_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..b1bb536447131927287dea47cc4cd595746c7007 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/batch/batch_objects_create_responses.go @@ -0,0 +1,624 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package batch + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + "fmt" + "io" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" + + "github.com/weaviate/weaviate/entities/models" +) + +// BatchObjectsCreateReader is a Reader for the BatchObjectsCreate structure. +type BatchObjectsCreateReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *BatchObjectsCreateReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewBatchObjectsCreateOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewBatchObjectsCreateBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 401: + result := NewBatchObjectsCreateUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewBatchObjectsCreateForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewBatchObjectsCreateUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewBatchObjectsCreateInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, 
response.Code()) + } +} + +// NewBatchObjectsCreateOK creates a BatchObjectsCreateOK with default headers values +func NewBatchObjectsCreateOK() *BatchObjectsCreateOK { + return &BatchObjectsCreateOK{} +} + +/* +BatchObjectsCreateOK describes a response with status code 200, with default header values. + +Request succeeded, see response body to get detailed information about each batched item. +*/ +type BatchObjectsCreateOK struct { + Payload []*models.ObjectsGetResponse +} + +// IsSuccess returns true when this batch objects create o k response has a 2xx status code +func (o *BatchObjectsCreateOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this batch objects create o k response has a 3xx status code +func (o *BatchObjectsCreateOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this batch objects create o k response has a 4xx status code +func (o *BatchObjectsCreateOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this batch objects create o k response has a 5xx status code +func (o *BatchObjectsCreateOK) IsServerError() bool { + return false +} + +// IsCode returns true when this batch objects create o k response a status code equal to that given +func (o *BatchObjectsCreateOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the batch objects create o k response +func (o *BatchObjectsCreateOK) Code() int { + return 200 +} + +func (o *BatchObjectsCreateOK) Error() string { + return fmt.Sprintf("[POST /batch/objects][%d] batchObjectsCreateOK %+v", 200, o.Payload) +} + +func (o *BatchObjectsCreateOK) String() string { + return fmt.Sprintf("[POST /batch/objects][%d] batchObjectsCreateOK %+v", 200, o.Payload) +} + +func (o *BatchObjectsCreateOK) GetPayload() []*models.ObjectsGetResponse { + return o.Payload +} + +func (o *BatchObjectsCreateOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error 
{ + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewBatchObjectsCreateBadRequest creates a BatchObjectsCreateBadRequest with default headers values +func NewBatchObjectsCreateBadRequest() *BatchObjectsCreateBadRequest { + return &BatchObjectsCreateBadRequest{} +} + +/* +BatchObjectsCreateBadRequest describes a response with status code 400, with default header values. + +Malformed request. +*/ +type BatchObjectsCreateBadRequest struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this batch objects create bad request response has a 2xx status code +func (o *BatchObjectsCreateBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this batch objects create bad request response has a 3xx status code +func (o *BatchObjectsCreateBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this batch objects create bad request response has a 4xx status code +func (o *BatchObjectsCreateBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this batch objects create bad request response has a 5xx status code +func (o *BatchObjectsCreateBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this batch objects create bad request response a status code equal to that given +func (o *BatchObjectsCreateBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the batch objects create bad request response +func (o *BatchObjectsCreateBadRequest) Code() int { + return 400 +} + +func (o *BatchObjectsCreateBadRequest) Error() string { + return fmt.Sprintf("[POST /batch/objects][%d] batchObjectsCreateBadRequest %+v", 400, o.Payload) +} + +func (o *BatchObjectsCreateBadRequest) String() string { + return fmt.Sprintf("[POST /batch/objects][%d] batchObjectsCreateBadRequest %+v", 400, o.Payload) +} + 
+func (o *BatchObjectsCreateBadRequest) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *BatchObjectsCreateBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewBatchObjectsCreateUnauthorized creates a BatchObjectsCreateUnauthorized with default headers values +func NewBatchObjectsCreateUnauthorized() *BatchObjectsCreateUnauthorized { + return &BatchObjectsCreateUnauthorized{} +} + +/* +BatchObjectsCreateUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. +*/ +type BatchObjectsCreateUnauthorized struct { +} + +// IsSuccess returns true when this batch objects create unauthorized response has a 2xx status code +func (o *BatchObjectsCreateUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this batch objects create unauthorized response has a 3xx status code +func (o *BatchObjectsCreateUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this batch objects create unauthorized response has a 4xx status code +func (o *BatchObjectsCreateUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this batch objects create unauthorized response has a 5xx status code +func (o *BatchObjectsCreateUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this batch objects create unauthorized response a status code equal to that given +func (o *BatchObjectsCreateUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the batch objects create unauthorized response +func (o *BatchObjectsCreateUnauthorized) Code() int { + return 401 +} + +func (o 
*BatchObjectsCreateUnauthorized) Error() string { + return fmt.Sprintf("[POST /batch/objects][%d] batchObjectsCreateUnauthorized ", 401) +} + +func (o *BatchObjectsCreateUnauthorized) String() string { + return fmt.Sprintf("[POST /batch/objects][%d] batchObjectsCreateUnauthorized ", 401) +} + +func (o *BatchObjectsCreateUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewBatchObjectsCreateForbidden creates a BatchObjectsCreateForbidden with default headers values +func NewBatchObjectsCreateForbidden() *BatchObjectsCreateForbidden { + return &BatchObjectsCreateForbidden{} +} + +/* +BatchObjectsCreateForbidden describes a response with status code 403, with default header values. + +Forbidden +*/ +type BatchObjectsCreateForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this batch objects create forbidden response has a 2xx status code +func (o *BatchObjectsCreateForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this batch objects create forbidden response has a 3xx status code +func (o *BatchObjectsCreateForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this batch objects create forbidden response has a 4xx status code +func (o *BatchObjectsCreateForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this batch objects create forbidden response has a 5xx status code +func (o *BatchObjectsCreateForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this batch objects create forbidden response a status code equal to that given +func (o *BatchObjectsCreateForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the batch objects create forbidden response +func (o *BatchObjectsCreateForbidden) Code() int { + return 403 +} + +func (o *BatchObjectsCreateForbidden) Error() 
string { + return fmt.Sprintf("[POST /batch/objects][%d] batchObjectsCreateForbidden %+v", 403, o.Payload) +} + +func (o *BatchObjectsCreateForbidden) String() string { + return fmt.Sprintf("[POST /batch/objects][%d] batchObjectsCreateForbidden %+v", 403, o.Payload) +} + +func (o *BatchObjectsCreateForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *BatchObjectsCreateForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewBatchObjectsCreateUnprocessableEntity creates a BatchObjectsCreateUnprocessableEntity with default headers values +func NewBatchObjectsCreateUnprocessableEntity() *BatchObjectsCreateUnprocessableEntity { + return &BatchObjectsCreateUnprocessableEntity{} +} + +/* +BatchObjectsCreateUnprocessableEntity describes a response with status code 422, with default header values. + +Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file? 
+*/ +type BatchObjectsCreateUnprocessableEntity struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this batch objects create unprocessable entity response has a 2xx status code +func (o *BatchObjectsCreateUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this batch objects create unprocessable entity response has a 3xx status code +func (o *BatchObjectsCreateUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this batch objects create unprocessable entity response has a 4xx status code +func (o *BatchObjectsCreateUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this batch objects create unprocessable entity response has a 5xx status code +func (o *BatchObjectsCreateUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this batch objects create unprocessable entity response a status code equal to that given +func (o *BatchObjectsCreateUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + +// Code gets the status code for the batch objects create unprocessable entity response +func (o *BatchObjectsCreateUnprocessableEntity) Code() int { + return 422 +} + +func (o *BatchObjectsCreateUnprocessableEntity) Error() string { + return fmt.Sprintf("[POST /batch/objects][%d] batchObjectsCreateUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *BatchObjectsCreateUnprocessableEntity) String() string { + return fmt.Sprintf("[POST /batch/objects][%d] batchObjectsCreateUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *BatchObjectsCreateUnprocessableEntity) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *BatchObjectsCreateUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := 
consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewBatchObjectsCreateInternalServerError creates a BatchObjectsCreateInternalServerError with default headers values +func NewBatchObjectsCreateInternalServerError() *BatchObjectsCreateInternalServerError { + return &BatchObjectsCreateInternalServerError{} +} + +/* +BatchObjectsCreateInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. +*/ +type BatchObjectsCreateInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this batch objects create internal server error response has a 2xx status code +func (o *BatchObjectsCreateInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this batch objects create internal server error response has a 3xx status code +func (o *BatchObjectsCreateInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this batch objects create internal server error response has a 4xx status code +func (o *BatchObjectsCreateInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this batch objects create internal server error response has a 5xx status code +func (o *BatchObjectsCreateInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this batch objects create internal server error response a status code equal to that given +func (o *BatchObjectsCreateInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the batch objects create internal server error response +func (o *BatchObjectsCreateInternalServerError) Code() int { + return 500 +} + +func (o *BatchObjectsCreateInternalServerError) Error() string { + 
return fmt.Sprintf("[POST /batch/objects][%d] batchObjectsCreateInternalServerError %+v", 500, o.Payload) +} + +func (o *BatchObjectsCreateInternalServerError) String() string { + return fmt.Sprintf("[POST /batch/objects][%d] batchObjectsCreateInternalServerError %+v", 500, o.Payload) +} + +func (o *BatchObjectsCreateInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *BatchObjectsCreateInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +/* +BatchObjectsCreateBody batch objects create body +swagger:model BatchObjectsCreateBody +*/ +type BatchObjectsCreateBody struct { + + // Define which fields need to be returned. Default value is ALL + Fields []*string `json:"fields"` + + // objects + Objects []*models.Object `json:"objects"` +} + +// Validate validates this batch objects create body +func (o *BatchObjectsCreateBody) Validate(formats strfmt.Registry) error { + var res []error + + if err := o.validateFields(formats); err != nil { + res = append(res, err) + } + + if err := o.validateObjects(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +var batchObjectsCreateBodyFieldsItemsEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["ALL","class","schema","id","creationTimeUnix"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + batchObjectsCreateBodyFieldsItemsEnum = append(batchObjectsCreateBodyFieldsItemsEnum, v) + } +} + +func (o *BatchObjectsCreateBody) validateFieldsItemsEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, batchObjectsCreateBodyFieldsItemsEnum, true); err != nil { + return err + } + return nil +} + +func (o *BatchObjectsCreateBody) validateFields(formats strfmt.Registry) error { + if swag.IsZero(o.Fields) { // not required + return nil + } + + for i := 0; i < len(o.Fields); i++ { + if swag.IsZero(o.Fields[i]) { // not required + continue + } + + // value enum + if err := o.validateFieldsItemsEnum("body"+"."+"fields"+"."+strconv.Itoa(i), "body", *o.Fields[i]); err != nil { + return err + } + + } + + return nil +} + +func (o *BatchObjectsCreateBody) validateObjects(formats strfmt.Registry) error { + if swag.IsZero(o.Objects) { // not required + return nil + } + + for i := 0; i < len(o.Objects); i++ { + if swag.IsZero(o.Objects[i]) { // not required + continue + } + + if o.Objects[i] != nil { + if err := o.Objects[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("body" + "." + "objects" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("body" + "." + "objects" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// ContextValidate validate this batch objects create body based on the context it is used +func (o *BatchObjectsCreateBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := o.contextValidateObjects(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (o *BatchObjectsCreateBody) contextValidateObjects(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(o.Objects); i++ { + + if o.Objects[i] != nil { + if err := o.Objects[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("body" + "." + "objects" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("body" + "." + "objects" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (o *BatchObjectsCreateBody) MarshalBinary() ([]byte, error) { + if o == nil { + return nil, nil + } + return swag.WriteJSON(o) +} + +// UnmarshalBinary interface implementation +func (o *BatchObjectsCreateBody) UnmarshalBinary(b []byte) error { + var res BatchObjectsCreateBody + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *o = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/batch/batch_objects_delete_parameters.go b/platform/dbops/binaries/weaviate-src/client/batch/batch_objects_delete_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..75e288c5caaade5c38fee0faf8dd86dc89597420 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/batch/batch_objects_delete_parameters.go @@ -0,0 +1,229 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || 
__/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package batch + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// NewBatchObjectsDeleteParams creates a new BatchObjectsDeleteParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewBatchObjectsDeleteParams() *BatchObjectsDeleteParams { + return &BatchObjectsDeleteParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewBatchObjectsDeleteParamsWithTimeout creates a new BatchObjectsDeleteParams object +// with the ability to set a timeout on a request. +func NewBatchObjectsDeleteParamsWithTimeout(timeout time.Duration) *BatchObjectsDeleteParams { + return &BatchObjectsDeleteParams{ + timeout: timeout, + } +} + +// NewBatchObjectsDeleteParamsWithContext creates a new BatchObjectsDeleteParams object +// with the ability to set a context for a request. +func NewBatchObjectsDeleteParamsWithContext(ctx context.Context) *BatchObjectsDeleteParams { + return &BatchObjectsDeleteParams{ + Context: ctx, + } +} + +// NewBatchObjectsDeleteParamsWithHTTPClient creates a new BatchObjectsDeleteParams object +// with the ability to set a custom HTTPClient for a request. 
+func NewBatchObjectsDeleteParamsWithHTTPClient(client *http.Client) *BatchObjectsDeleteParams { + return &BatchObjectsDeleteParams{ + HTTPClient: client, + } +} + +/* +BatchObjectsDeleteParams contains all the parameters to send to the API endpoint + + for the batch objects delete operation. + + Typically these are written to a http.Request. +*/ +type BatchObjectsDeleteParams struct { + + // Body. + Body *models.BatchDelete + + /* ConsistencyLevel. + + Determines how many replicas must acknowledge a request before it is considered successful + */ + ConsistencyLevel *string + + /* Tenant. + + Specifies the tenant in a request targeting a multi-tenant class + */ + Tenant *string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the batch objects delete params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *BatchObjectsDeleteParams) WithDefaults() *BatchObjectsDeleteParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the batch objects delete params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *BatchObjectsDeleteParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the batch objects delete params +func (o *BatchObjectsDeleteParams) WithTimeout(timeout time.Duration) *BatchObjectsDeleteParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the batch objects delete params +func (o *BatchObjectsDeleteParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the batch objects delete params +func (o *BatchObjectsDeleteParams) WithContext(ctx context.Context) *BatchObjectsDeleteParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the batch objects delete params +func (o *BatchObjectsDeleteParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the batch objects delete params +func (o *BatchObjectsDeleteParams) WithHTTPClient(client *http.Client) *BatchObjectsDeleteParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the batch objects delete params +func (o *BatchObjectsDeleteParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the batch objects delete params +func (o *BatchObjectsDeleteParams) WithBody(body *models.BatchDelete) *BatchObjectsDeleteParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the batch objects delete params +func (o *BatchObjectsDeleteParams) SetBody(body *models.BatchDelete) { + o.Body = body +} + +// WithConsistencyLevel adds the consistencyLevel to the batch objects delete params +func (o *BatchObjectsDeleteParams) WithConsistencyLevel(consistencyLevel *string) *BatchObjectsDeleteParams { + o.SetConsistencyLevel(consistencyLevel) + return o +} + +// SetConsistencyLevel adds the consistencyLevel to the batch objects delete params +func (o *BatchObjectsDeleteParams) SetConsistencyLevel(consistencyLevel 
*string) { + o.ConsistencyLevel = consistencyLevel +} + +// WithTenant adds the tenant to the batch objects delete params +func (o *BatchObjectsDeleteParams) WithTenant(tenant *string) *BatchObjectsDeleteParams { + o.SetTenant(tenant) + return o +} + +// SetTenant adds the tenant to the batch objects delete params +func (o *BatchObjectsDeleteParams) SetTenant(tenant *string) { + o.Tenant = tenant +} + +// WriteToRequest writes these params to a swagger request +func (o *BatchObjectsDeleteParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + if o.ConsistencyLevel != nil { + + // query param consistency_level + var qrConsistencyLevel string + + if o.ConsistencyLevel != nil { + qrConsistencyLevel = *o.ConsistencyLevel + } + qConsistencyLevel := qrConsistencyLevel + if qConsistencyLevel != "" { + + if err := r.SetQueryParam("consistency_level", qConsistencyLevel); err != nil { + return err + } + } + } + + if o.Tenant != nil { + + // query param tenant + var qrTenant string + + if o.Tenant != nil { + qrTenant = *o.Tenant + } + qTenant := qrTenant + if qTenant != "" { + + if err := r.SetQueryParam("tenant", qTenant); err != nil { + return err + } + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/batch/batch_objects_delete_responses.go b/platform/dbops/binaries/weaviate-src/client/batch/batch_objects_delete_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..7c073338ff37bd2dbe4e8a6a9aa3609e87e3a0f9 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/batch/batch_objects_delete_responses.go @@ -0,0 +1,472 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package batch + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// BatchObjectsDeleteReader is a Reader for the BatchObjectsDelete structure. +type BatchObjectsDeleteReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *BatchObjectsDeleteReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewBatchObjectsDeleteOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewBatchObjectsDeleteBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 401: + result := NewBatchObjectsDeleteUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewBatchObjectsDeleteForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewBatchObjectsDeleteUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewBatchObjectsDeleteInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewBatchObjectsDeleteOK creates a BatchObjectsDeleteOK with default headers values +func NewBatchObjectsDeleteOK() *BatchObjectsDeleteOK { + return &BatchObjectsDeleteOK{} +} + +/* +BatchObjectsDeleteOK describes a response with status code 200, with default header values. + +Request succeeded, see response body to get detailed information about each batched item. 
+*/ +type BatchObjectsDeleteOK struct { + Payload *models.BatchDeleteResponse +} + +// IsSuccess returns true when this batch objects delete o k response has a 2xx status code +func (o *BatchObjectsDeleteOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this batch objects delete o k response has a 3xx status code +func (o *BatchObjectsDeleteOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this batch objects delete o k response has a 4xx status code +func (o *BatchObjectsDeleteOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this batch objects delete o k response has a 5xx status code +func (o *BatchObjectsDeleteOK) IsServerError() bool { + return false +} + +// IsCode returns true when this batch objects delete o k response a status code equal to that given +func (o *BatchObjectsDeleteOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the batch objects delete o k response +func (o *BatchObjectsDeleteOK) Code() int { + return 200 +} + +func (o *BatchObjectsDeleteOK) Error() string { + return fmt.Sprintf("[DELETE /batch/objects][%d] batchObjectsDeleteOK %+v", 200, o.Payload) +} + +func (o *BatchObjectsDeleteOK) String() string { + return fmt.Sprintf("[DELETE /batch/objects][%d] batchObjectsDeleteOK %+v", 200, o.Payload) +} + +func (o *BatchObjectsDeleteOK) GetPayload() *models.BatchDeleteResponse { + return o.Payload +} + +func (o *BatchObjectsDeleteOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.BatchDeleteResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewBatchObjectsDeleteBadRequest creates a BatchObjectsDeleteBadRequest with default headers values +func NewBatchObjectsDeleteBadRequest() *BatchObjectsDeleteBadRequest { + return 
&BatchObjectsDeleteBadRequest{} +} + +/* +BatchObjectsDeleteBadRequest describes a response with status code 400, with default header values. + +Malformed request. +*/ +type BatchObjectsDeleteBadRequest struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this batch objects delete bad request response has a 2xx status code +func (o *BatchObjectsDeleteBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this batch objects delete bad request response has a 3xx status code +func (o *BatchObjectsDeleteBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this batch objects delete bad request response has a 4xx status code +func (o *BatchObjectsDeleteBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this batch objects delete bad request response has a 5xx status code +func (o *BatchObjectsDeleteBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this batch objects delete bad request response a status code equal to that given +func (o *BatchObjectsDeleteBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the batch objects delete bad request response +func (o *BatchObjectsDeleteBadRequest) Code() int { + return 400 +} + +func (o *BatchObjectsDeleteBadRequest) Error() string { + return fmt.Sprintf("[DELETE /batch/objects][%d] batchObjectsDeleteBadRequest %+v", 400, o.Payload) +} + +func (o *BatchObjectsDeleteBadRequest) String() string { + return fmt.Sprintf("[DELETE /batch/objects][%d] batchObjectsDeleteBadRequest %+v", 400, o.Payload) +} + +func (o *BatchObjectsDeleteBadRequest) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *BatchObjectsDeleteBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := 
consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewBatchObjectsDeleteUnauthorized creates a BatchObjectsDeleteUnauthorized with default headers values +func NewBatchObjectsDeleteUnauthorized() *BatchObjectsDeleteUnauthorized { + return &BatchObjectsDeleteUnauthorized{} +} + +/* +BatchObjectsDeleteUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. +*/ +type BatchObjectsDeleteUnauthorized struct { +} + +// IsSuccess returns true when this batch objects delete unauthorized response has a 2xx status code +func (o *BatchObjectsDeleteUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this batch objects delete unauthorized response has a 3xx status code +func (o *BatchObjectsDeleteUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this batch objects delete unauthorized response has a 4xx status code +func (o *BatchObjectsDeleteUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this batch objects delete unauthorized response has a 5xx status code +func (o *BatchObjectsDeleteUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this batch objects delete unauthorized response a status code equal to that given +func (o *BatchObjectsDeleteUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the batch objects delete unauthorized response +func (o *BatchObjectsDeleteUnauthorized) Code() int { + return 401 +} + +func (o *BatchObjectsDeleteUnauthorized) Error() string { + return fmt.Sprintf("[DELETE /batch/objects][%d] batchObjectsDeleteUnauthorized ", 401) +} + +func (o *BatchObjectsDeleteUnauthorized) String() string { + return fmt.Sprintf("[DELETE /batch/objects][%d] batchObjectsDeleteUnauthorized ", 401) +} + +func (o 
*BatchObjectsDeleteUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewBatchObjectsDeleteForbidden creates a BatchObjectsDeleteForbidden with default headers values +func NewBatchObjectsDeleteForbidden() *BatchObjectsDeleteForbidden { + return &BatchObjectsDeleteForbidden{} +} + +/* +BatchObjectsDeleteForbidden describes a response with status code 403, with default header values. + +Forbidden +*/ +type BatchObjectsDeleteForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this batch objects delete forbidden response has a 2xx status code +func (o *BatchObjectsDeleteForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this batch objects delete forbidden response has a 3xx status code +func (o *BatchObjectsDeleteForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this batch objects delete forbidden response has a 4xx status code +func (o *BatchObjectsDeleteForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this batch objects delete forbidden response has a 5xx status code +func (o *BatchObjectsDeleteForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this batch objects delete forbidden response a status code equal to that given +func (o *BatchObjectsDeleteForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the batch objects delete forbidden response +func (o *BatchObjectsDeleteForbidden) Code() int { + return 403 +} + +func (o *BatchObjectsDeleteForbidden) Error() string { + return fmt.Sprintf("[DELETE /batch/objects][%d] batchObjectsDeleteForbidden %+v", 403, o.Payload) +} + +func (o *BatchObjectsDeleteForbidden) String() string { + return fmt.Sprintf("[DELETE /batch/objects][%d] batchObjectsDeleteForbidden %+v", 403, o.Payload) +} + +func (o 
*BatchObjectsDeleteForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *BatchObjectsDeleteForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewBatchObjectsDeleteUnprocessableEntity creates a BatchObjectsDeleteUnprocessableEntity with default headers values +func NewBatchObjectsDeleteUnprocessableEntity() *BatchObjectsDeleteUnprocessableEntity { + return &BatchObjectsDeleteUnprocessableEntity{} +} + +/* +BatchObjectsDeleteUnprocessableEntity describes a response with status code 422, with default header values. + +Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file? +*/ +type BatchObjectsDeleteUnprocessableEntity struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this batch objects delete unprocessable entity response has a 2xx status code +func (o *BatchObjectsDeleteUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this batch objects delete unprocessable entity response has a 3xx status code +func (o *BatchObjectsDeleteUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this batch objects delete unprocessable entity response has a 4xx status code +func (o *BatchObjectsDeleteUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this batch objects delete unprocessable entity response has a 5xx status code +func (o *BatchObjectsDeleteUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this batch objects delete unprocessable entity response a status code equal to that given +func (o 
*BatchObjectsDeleteUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + +// Code gets the status code for the batch objects delete unprocessable entity response +func (o *BatchObjectsDeleteUnprocessableEntity) Code() int { + return 422 +} + +func (o *BatchObjectsDeleteUnprocessableEntity) Error() string { + return fmt.Sprintf("[DELETE /batch/objects][%d] batchObjectsDeleteUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *BatchObjectsDeleteUnprocessableEntity) String() string { + return fmt.Sprintf("[DELETE /batch/objects][%d] batchObjectsDeleteUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *BatchObjectsDeleteUnprocessableEntity) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *BatchObjectsDeleteUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewBatchObjectsDeleteInternalServerError creates a BatchObjectsDeleteInternalServerError with default headers values +func NewBatchObjectsDeleteInternalServerError() *BatchObjectsDeleteInternalServerError { + return &BatchObjectsDeleteInternalServerError{} +} + +/* +BatchObjectsDeleteInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+*/ +type BatchObjectsDeleteInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this batch objects delete internal server error response has a 2xx status code +func (o *BatchObjectsDeleteInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this batch objects delete internal server error response has a 3xx status code +func (o *BatchObjectsDeleteInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this batch objects delete internal server error response has a 4xx status code +func (o *BatchObjectsDeleteInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this batch objects delete internal server error response has a 5xx status code +func (o *BatchObjectsDeleteInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this batch objects delete internal server error response a status code equal to that given +func (o *BatchObjectsDeleteInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the batch objects delete internal server error response +func (o *BatchObjectsDeleteInternalServerError) Code() int { + return 500 +} + +func (o *BatchObjectsDeleteInternalServerError) Error() string { + return fmt.Sprintf("[DELETE /batch/objects][%d] batchObjectsDeleteInternalServerError %+v", 500, o.Payload) +} + +func (o *BatchObjectsDeleteInternalServerError) String() string { + return fmt.Sprintf("[DELETE /batch/objects][%d] batchObjectsDeleteInternalServerError %+v", 500, o.Payload) +} + +func (o *BatchObjectsDeleteInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *BatchObjectsDeleteInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err 
:= consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/batch/batch_references_create_parameters.go b/platform/dbops/binaries/weaviate-src/client/batch/batch_references_create_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..de16a0bdc4b37de50edf4b68685e5771932e2454 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/batch/batch_references_create_parameters.go @@ -0,0 +1,198 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package batch + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// NewBatchReferencesCreateParams creates a new BatchReferencesCreateParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewBatchReferencesCreateParams() *BatchReferencesCreateParams { + return &BatchReferencesCreateParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewBatchReferencesCreateParamsWithTimeout creates a new BatchReferencesCreateParams object +// with the ability to set a timeout on a request. 
+func NewBatchReferencesCreateParamsWithTimeout(timeout time.Duration) *BatchReferencesCreateParams { + return &BatchReferencesCreateParams{ + timeout: timeout, + } +} + +// NewBatchReferencesCreateParamsWithContext creates a new BatchReferencesCreateParams object +// with the ability to set a context for a request. +func NewBatchReferencesCreateParamsWithContext(ctx context.Context) *BatchReferencesCreateParams { + return &BatchReferencesCreateParams{ + Context: ctx, + } +} + +// NewBatchReferencesCreateParamsWithHTTPClient creates a new BatchReferencesCreateParams object +// with the ability to set a custom HTTPClient for a request. +func NewBatchReferencesCreateParamsWithHTTPClient(client *http.Client) *BatchReferencesCreateParams { + return &BatchReferencesCreateParams{ + HTTPClient: client, + } +} + +/* +BatchReferencesCreateParams contains all the parameters to send to the API endpoint + + for the batch references create operation. + + Typically these are written to a http.Request. +*/ +type BatchReferencesCreateParams struct { + + /* Body. + + A list of references to be batched. The ideal size depends on the used database connector. Please see the documentation of the used connector for help + */ + Body []*models.BatchReference + + /* ConsistencyLevel. + + Determines how many replicas must acknowledge a request before it is considered successful + */ + ConsistencyLevel *string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the batch references create params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *BatchReferencesCreateParams) WithDefaults() *BatchReferencesCreateParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the batch references create params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *BatchReferencesCreateParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the batch references create params +func (o *BatchReferencesCreateParams) WithTimeout(timeout time.Duration) *BatchReferencesCreateParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the batch references create params +func (o *BatchReferencesCreateParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the batch references create params +func (o *BatchReferencesCreateParams) WithContext(ctx context.Context) *BatchReferencesCreateParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the batch references create params +func (o *BatchReferencesCreateParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the batch references create params +func (o *BatchReferencesCreateParams) WithHTTPClient(client *http.Client) *BatchReferencesCreateParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the batch references create params +func (o *BatchReferencesCreateParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the batch references create params +func (o *BatchReferencesCreateParams) WithBody(body []*models.BatchReference) *BatchReferencesCreateParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the batch references create params +func (o *BatchReferencesCreateParams) SetBody(body []*models.BatchReference) { + o.Body = body +} + +// WithConsistencyLevel adds the consistencyLevel to the batch references create params +func (o *BatchReferencesCreateParams) WithConsistencyLevel(consistencyLevel *string) *BatchReferencesCreateParams { + o.SetConsistencyLevel(consistencyLevel) + return o +} + +// SetConsistencyLevel adds the consistencyLevel to the batch references create 
params +func (o *BatchReferencesCreateParams) SetConsistencyLevel(consistencyLevel *string) { + o.ConsistencyLevel = consistencyLevel +} + +// WriteToRequest writes these params to a swagger request +func (o *BatchReferencesCreateParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + if o.ConsistencyLevel != nil { + + // query param consistency_level + var qrConsistencyLevel string + + if o.ConsistencyLevel != nil { + qrConsistencyLevel = *o.ConsistencyLevel + } + qConsistencyLevel := qrConsistencyLevel + if qConsistencyLevel != "" { + + if err := r.SetQueryParam("consistency_level", qConsistencyLevel); err != nil { + return err + } + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/batch/batch_references_create_responses.go b/platform/dbops/binaries/weaviate-src/client/batch/batch_references_create_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..91791190a5fc0030e3690e1b17a0de932cd7e622 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/batch/batch_references_create_responses.go @@ -0,0 +1,470 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package batch + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// BatchReferencesCreateReader is a Reader for the BatchReferencesCreate structure. +type BatchReferencesCreateReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *BatchReferencesCreateReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewBatchReferencesCreateOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewBatchReferencesCreateBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 401: + result := NewBatchReferencesCreateUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewBatchReferencesCreateForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewBatchReferencesCreateUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewBatchReferencesCreateInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewBatchReferencesCreateOK creates a BatchReferencesCreateOK with default headers values 
+func NewBatchReferencesCreateOK() *BatchReferencesCreateOK { + return &BatchReferencesCreateOK{} +} + +/* +BatchReferencesCreateOK describes a response with status code 200, with default header values. + +Request Successful. Warning: A successful request does not guarantee that every batched reference was successfully created. Inspect the response body to see which references succeeded and which failed. +*/ +type BatchReferencesCreateOK struct { + Payload []*models.BatchReferenceResponse +} + +// IsSuccess returns true when this batch references create o k response has a 2xx status code +func (o *BatchReferencesCreateOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this batch references create o k response has a 3xx status code +func (o *BatchReferencesCreateOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this batch references create o k response has a 4xx status code +func (o *BatchReferencesCreateOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this batch references create o k response has a 5xx status code +func (o *BatchReferencesCreateOK) IsServerError() bool { + return false +} + +// IsCode returns true when this batch references create o k response a status code equal to that given +func (o *BatchReferencesCreateOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the batch references create o k response +func (o *BatchReferencesCreateOK) Code() int { + return 200 +} + +func (o *BatchReferencesCreateOK) Error() string { + return fmt.Sprintf("[POST /batch/references][%d] batchReferencesCreateOK %+v", 200, o.Payload) +} + +func (o *BatchReferencesCreateOK) String() string { + return fmt.Sprintf("[POST /batch/references][%d] batchReferencesCreateOK %+v", 200, o.Payload) +} + +func (o *BatchReferencesCreateOK) GetPayload() []*models.BatchReferenceResponse { + return o.Payload +} + +func (o *BatchReferencesCreateOK) readResponse(response 
runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewBatchReferencesCreateBadRequest creates a BatchReferencesCreateBadRequest with default headers values +func NewBatchReferencesCreateBadRequest() *BatchReferencesCreateBadRequest { + return &BatchReferencesCreateBadRequest{} +} + +/* +BatchReferencesCreateBadRequest describes a response with status code 400, with default header values. + +Malformed request. +*/ +type BatchReferencesCreateBadRequest struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this batch references create bad request response has a 2xx status code +func (o *BatchReferencesCreateBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this batch references create bad request response has a 3xx status code +func (o *BatchReferencesCreateBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this batch references create bad request response has a 4xx status code +func (o *BatchReferencesCreateBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this batch references create bad request response has a 5xx status code +func (o *BatchReferencesCreateBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this batch references create bad request response a status code equal to that given +func (o *BatchReferencesCreateBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the batch references create bad request response +func (o *BatchReferencesCreateBadRequest) Code() int { + return 400 +} + +func (o *BatchReferencesCreateBadRequest) Error() string { + return fmt.Sprintf("[POST /batch/references][%d] batchReferencesCreateBadRequest %+v", 400, o.Payload) +} + +func (o 
*BatchReferencesCreateBadRequest) String() string { + return fmt.Sprintf("[POST /batch/references][%d] batchReferencesCreateBadRequest %+v", 400, o.Payload) +} + +func (o *BatchReferencesCreateBadRequest) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *BatchReferencesCreateBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewBatchReferencesCreateUnauthorized creates a BatchReferencesCreateUnauthorized with default headers values +func NewBatchReferencesCreateUnauthorized() *BatchReferencesCreateUnauthorized { + return &BatchReferencesCreateUnauthorized{} +} + +/* +BatchReferencesCreateUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. 
+*/ +type BatchReferencesCreateUnauthorized struct { +} + +// IsSuccess returns true when this batch references create unauthorized response has a 2xx status code +func (o *BatchReferencesCreateUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this batch references create unauthorized response has a 3xx status code +func (o *BatchReferencesCreateUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this batch references create unauthorized response has a 4xx status code +func (o *BatchReferencesCreateUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this batch references create unauthorized response has a 5xx status code +func (o *BatchReferencesCreateUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this batch references create unauthorized response a status code equal to that given +func (o *BatchReferencesCreateUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the batch references create unauthorized response +func (o *BatchReferencesCreateUnauthorized) Code() int { + return 401 +} + +func (o *BatchReferencesCreateUnauthorized) Error() string { + return fmt.Sprintf("[POST /batch/references][%d] batchReferencesCreateUnauthorized ", 401) +} + +func (o *BatchReferencesCreateUnauthorized) String() string { + return fmt.Sprintf("[POST /batch/references][%d] batchReferencesCreateUnauthorized ", 401) +} + +func (o *BatchReferencesCreateUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewBatchReferencesCreateForbidden creates a BatchReferencesCreateForbidden with default headers values +func NewBatchReferencesCreateForbidden() *BatchReferencesCreateForbidden { + return &BatchReferencesCreateForbidden{} +} + +/* +BatchReferencesCreateForbidden describes a response with status code 
403, with default header values. + +Forbidden +*/ +type BatchReferencesCreateForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this batch references create forbidden response has a 2xx status code +func (o *BatchReferencesCreateForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this batch references create forbidden response has a 3xx status code +func (o *BatchReferencesCreateForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this batch references create forbidden response has a 4xx status code +func (o *BatchReferencesCreateForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this batch references create forbidden response has a 5xx status code +func (o *BatchReferencesCreateForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this batch references create forbidden response a status code equal to that given +func (o *BatchReferencesCreateForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the batch references create forbidden response +func (o *BatchReferencesCreateForbidden) Code() int { + return 403 +} + +func (o *BatchReferencesCreateForbidden) Error() string { + return fmt.Sprintf("[POST /batch/references][%d] batchReferencesCreateForbidden %+v", 403, o.Payload) +} + +func (o *BatchReferencesCreateForbidden) String() string { + return fmt.Sprintf("[POST /batch/references][%d] batchReferencesCreateForbidden %+v", 403, o.Payload) +} + +func (o *BatchReferencesCreateForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *BatchReferencesCreateForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + 
+ return nil +} + +// NewBatchReferencesCreateUnprocessableEntity creates a BatchReferencesCreateUnprocessableEntity with default headers values +func NewBatchReferencesCreateUnprocessableEntity() *BatchReferencesCreateUnprocessableEntity { + return &BatchReferencesCreateUnprocessableEntity{} +} + +/* +BatchReferencesCreateUnprocessableEntity describes a response with status code 422, with default header values. + +Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file? +*/ +type BatchReferencesCreateUnprocessableEntity struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this batch references create unprocessable entity response has a 2xx status code +func (o *BatchReferencesCreateUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this batch references create unprocessable entity response has a 3xx status code +func (o *BatchReferencesCreateUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this batch references create unprocessable entity response has a 4xx status code +func (o *BatchReferencesCreateUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this batch references create unprocessable entity response has a 5xx status code +func (o *BatchReferencesCreateUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this batch references create unprocessable entity response a status code equal to that given +func (o *BatchReferencesCreateUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + +// Code gets the status code for the batch references create unprocessable entity response +func (o *BatchReferencesCreateUnprocessableEntity) Code() int { + return 422 +} + +func (o *BatchReferencesCreateUnprocessableEntity) Error() string { + return fmt.Sprintf("[POST 
/batch/references][%d] batchReferencesCreateUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *BatchReferencesCreateUnprocessableEntity) String() string { + return fmt.Sprintf("[POST /batch/references][%d] batchReferencesCreateUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *BatchReferencesCreateUnprocessableEntity) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *BatchReferencesCreateUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewBatchReferencesCreateInternalServerError creates a BatchReferencesCreateInternalServerError with default headers values +func NewBatchReferencesCreateInternalServerError() *BatchReferencesCreateInternalServerError { + return &BatchReferencesCreateInternalServerError{} +} + +/* +BatchReferencesCreateInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+*/ +type BatchReferencesCreateInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this batch references create internal server error response has a 2xx status code +func (o *BatchReferencesCreateInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this batch references create internal server error response has a 3xx status code +func (o *BatchReferencesCreateInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this batch references create internal server error response has a 4xx status code +func (o *BatchReferencesCreateInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this batch references create internal server error response has a 5xx status code +func (o *BatchReferencesCreateInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this batch references create internal server error response a status code equal to that given +func (o *BatchReferencesCreateInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the batch references create internal server error response +func (o *BatchReferencesCreateInternalServerError) Code() int { + return 500 +} + +func (o *BatchReferencesCreateInternalServerError) Error() string { + return fmt.Sprintf("[POST /batch/references][%d] batchReferencesCreateInternalServerError %+v", 500, o.Payload) +} + +func (o *BatchReferencesCreateInternalServerError) String() string { + return fmt.Sprintf("[POST /batch/references][%d] batchReferencesCreateInternalServerError %+v", 500, o.Payload) +} + +func (o *BatchReferencesCreateInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *BatchReferencesCreateInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = 
new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/classifications/classifications_client.go b/platform/dbops/binaries/weaviate-src/client/classifications/classifications_client.go new file mode 100644 index 0000000000000000000000000000000000000000..a33218767b97b22bb023af2fb1694f06edf7fe79 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/classifications/classifications_client.go @@ -0,0 +1,136 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package classifications + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" +) + +// New creates a new classifications API client. 
+func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService { + return &Client{transport: transport, formats: formats} +} + +/* +Client for classifications API +*/ +type Client struct { + transport runtime.ClientTransport + formats strfmt.Registry +} + +// ClientOption is the option for Client methods +type ClientOption func(*runtime.ClientOperation) + +// ClientService is the interface for Client methods +type ClientService interface { + ClassificationsGet(params *ClassificationsGetParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ClassificationsGetOK, error) + + ClassificationsPost(params *ClassificationsPostParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ClassificationsPostCreated, error) + + SetTransport(transport runtime.ClientTransport) +} + +/* +ClassificationsGet views previously created classification + +Get status, results and metadata of a previously created classification +*/ +func (a *Client) ClassificationsGet(params *ClassificationsGetParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ClassificationsGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewClassificationsGetParams() + } + op := &runtime.ClientOperation{ + ID: "classifications.get", + Method: "GET", + PathPattern: "/classifications/{id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &ClassificationsGetReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*ClassificationsGetOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses 
return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for classifications.get: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +ClassificationsPost starts a classification + +Trigger a classification based on the specified params. Classifications will run in the background, use GET /classifications/ to retrieve the status of your classification. +*/ +func (a *Client) ClassificationsPost(params *ClassificationsPostParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ClassificationsPostCreated, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewClassificationsPostParams() + } + op := &runtime.ClientOperation{ + ID: "classifications.post", + Method: "POST", + PathPattern: "/classifications/", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &ClassificationsPostReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*ClassificationsPostCreated) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for classifications.post: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + panic(msg) +} + +// SetTransport changes the transport on the client +func (a *Client) SetTransport(transport runtime.ClientTransport) { + a.transport = transport +} diff --git a/platform/dbops/binaries/weaviate-src/client/classifications/classifications_get_parameters.go b/platform/dbops/binaries/weaviate-src/client/classifications/classifications_get_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..ecd6b6def5152f9411788fa2c57891bf5cebb3c8 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/classifications/classifications_get_parameters.go @@ -0,0 +1,162 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package classifications + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewClassificationsGetParams creates a new ClassificationsGetParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewClassificationsGetParams() *ClassificationsGetParams { + return &ClassificationsGetParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewClassificationsGetParamsWithTimeout creates a new ClassificationsGetParams object +// with the ability to set a timeout on a request. 
+func NewClassificationsGetParamsWithTimeout(timeout time.Duration) *ClassificationsGetParams { + return &ClassificationsGetParams{ + timeout: timeout, + } +} + +// NewClassificationsGetParamsWithContext creates a new ClassificationsGetParams object +// with the ability to set a context for a request. +func NewClassificationsGetParamsWithContext(ctx context.Context) *ClassificationsGetParams { + return &ClassificationsGetParams{ + Context: ctx, + } +} + +// NewClassificationsGetParamsWithHTTPClient creates a new ClassificationsGetParams object +// with the ability to set a custom HTTPClient for a request. +func NewClassificationsGetParamsWithHTTPClient(client *http.Client) *ClassificationsGetParams { + return &ClassificationsGetParams{ + HTTPClient: client, + } +} + +/* +ClassificationsGetParams contains all the parameters to send to the API endpoint + + for the classifications get operation. + + Typically these are written to a http.Request. +*/ +type ClassificationsGetParams struct { + + /* ID. + + classification id + */ + ID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the classifications get params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *ClassificationsGetParams) WithDefaults() *ClassificationsGetParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the classifications get params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *ClassificationsGetParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the classifications get params +func (o *ClassificationsGetParams) WithTimeout(timeout time.Duration) *ClassificationsGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the classifications get params +func (o *ClassificationsGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the classifications get params +func (o *ClassificationsGetParams) WithContext(ctx context.Context) *ClassificationsGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the classifications get params +func (o *ClassificationsGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the classifications get params +func (o *ClassificationsGetParams) WithHTTPClient(client *http.Client) *ClassificationsGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the classifications get params +func (o *ClassificationsGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithID adds the id to the classifications get params +func (o *ClassificationsGetParams) WithID(id string) *ClassificationsGetParams { + o.SetID(id) + return o +} + +// SetID adds the id to the classifications get params +func (o *ClassificationsGetParams) SetID(id string) { + o.ID = id +} + +// WriteToRequest writes these params to a swagger request +func (o *ClassificationsGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param id + if err := r.SetPathParam("id", o.ID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/classifications/classifications_get_responses.go b/platform/dbops/binaries/weaviate-src/client/classifications/classifications_get_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..bc5ddd7ee39ff0a1ff07c2d1c13c3e2072733d89 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/classifications/classifications_get_responses.go @@ -0,0 +1,386 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package classifications + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// ClassificationsGetReader is a Reader for the ClassificationsGet structure. +type ClassificationsGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *ClassificationsGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewClassificationsGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 401: + result := NewClassificationsGetUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewClassificationsGetForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 404: + result := NewClassificationsGetNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewClassificationsGetInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewClassificationsGetOK creates a ClassificationsGetOK with default headers values +func NewClassificationsGetOK() *ClassificationsGetOK { + return &ClassificationsGetOK{} +} + +/* +ClassificationsGetOK describes a response with status code 200, with default header values. 
+ +Found the classification, returned as body +*/ +type ClassificationsGetOK struct { + Payload *models.Classification +} + +// IsSuccess returns true when this classifications get o k response has a 2xx status code +func (o *ClassificationsGetOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this classifications get o k response has a 3xx status code +func (o *ClassificationsGetOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this classifications get o k response has a 4xx status code +func (o *ClassificationsGetOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this classifications get o k response has a 5xx status code +func (o *ClassificationsGetOK) IsServerError() bool { + return false +} + +// IsCode returns true when this classifications get o k response a status code equal to that given +func (o *ClassificationsGetOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the classifications get o k response +func (o *ClassificationsGetOK) Code() int { + return 200 +} + +func (o *ClassificationsGetOK) Error() string { + return fmt.Sprintf("[GET /classifications/{id}][%d] classificationsGetOK %+v", 200, o.Payload) +} + +func (o *ClassificationsGetOK) String() string { + return fmt.Sprintf("[GET /classifications/{id}][%d] classificationsGetOK %+v", 200, o.Payload) +} + +func (o *ClassificationsGetOK) GetPayload() *models.Classification { + return o.Payload +} + +func (o *ClassificationsGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Classification) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewClassificationsGetUnauthorized creates a ClassificationsGetUnauthorized with default headers values +func NewClassificationsGetUnauthorized() 
*ClassificationsGetUnauthorized { + return &ClassificationsGetUnauthorized{} +} + +/* +ClassificationsGetUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. +*/ +type ClassificationsGetUnauthorized struct { +} + +// IsSuccess returns true when this classifications get unauthorized response has a 2xx status code +func (o *ClassificationsGetUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this classifications get unauthorized response has a 3xx status code +func (o *ClassificationsGetUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this classifications get unauthorized response has a 4xx status code +func (o *ClassificationsGetUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this classifications get unauthorized response has a 5xx status code +func (o *ClassificationsGetUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this classifications get unauthorized response a status code equal to that given +func (o *ClassificationsGetUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the classifications get unauthorized response +func (o *ClassificationsGetUnauthorized) Code() int { + return 401 +} + +func (o *ClassificationsGetUnauthorized) Error() string { + return fmt.Sprintf("[GET /classifications/{id}][%d] classificationsGetUnauthorized ", 401) +} + +func (o *ClassificationsGetUnauthorized) String() string { + return fmt.Sprintf("[GET /classifications/{id}][%d] classificationsGetUnauthorized ", 401) +} + +func (o *ClassificationsGetUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewClassificationsGetForbidden creates a ClassificationsGetForbidden with default headers values +func 
NewClassificationsGetForbidden() *ClassificationsGetForbidden { + return &ClassificationsGetForbidden{} +} + +/* +ClassificationsGetForbidden describes a response with status code 403, with default header values. + +Forbidden +*/ +type ClassificationsGetForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this classifications get forbidden response has a 2xx status code +func (o *ClassificationsGetForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this classifications get forbidden response has a 3xx status code +func (o *ClassificationsGetForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this classifications get forbidden response has a 4xx status code +func (o *ClassificationsGetForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this classifications get forbidden response has a 5xx status code +func (o *ClassificationsGetForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this classifications get forbidden response a status code equal to that given +func (o *ClassificationsGetForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the classifications get forbidden response +func (o *ClassificationsGetForbidden) Code() int { + return 403 +} + +func (o *ClassificationsGetForbidden) Error() string { + return fmt.Sprintf("[GET /classifications/{id}][%d] classificationsGetForbidden %+v", 403, o.Payload) +} + +func (o *ClassificationsGetForbidden) String() string { + return fmt.Sprintf("[GET /classifications/{id}][%d] classificationsGetForbidden %+v", 403, o.Payload) +} + +func (o *ClassificationsGetForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ClassificationsGetForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // 
response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewClassificationsGetNotFound creates a ClassificationsGetNotFound with default headers values +func NewClassificationsGetNotFound() *ClassificationsGetNotFound { + return &ClassificationsGetNotFound{} +} + +/* +ClassificationsGetNotFound describes a response with status code 404, with default header values. + +Not Found - Classification does not exist +*/ +type ClassificationsGetNotFound struct { +} + +// IsSuccess returns true when this classifications get not found response has a 2xx status code +func (o *ClassificationsGetNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this classifications get not found response has a 3xx status code +func (o *ClassificationsGetNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this classifications get not found response has a 4xx status code +func (o *ClassificationsGetNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this classifications get not found response has a 5xx status code +func (o *ClassificationsGetNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this classifications get not found response a status code equal to that given +func (o *ClassificationsGetNotFound) IsCode(code int) bool { + return code == 404 +} + +// Code gets the status code for the classifications get not found response +func (o *ClassificationsGetNotFound) Code() int { + return 404 +} + +func (o *ClassificationsGetNotFound) Error() string { + return fmt.Sprintf("[GET /classifications/{id}][%d] classificationsGetNotFound ", 404) +} + +func (o *ClassificationsGetNotFound) String() string { + return fmt.Sprintf("[GET /classifications/{id}][%d] classificationsGetNotFound ", 404) +} + +func (o *ClassificationsGetNotFound) readResponse(response runtime.ClientResponse, 
consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewClassificationsGetInternalServerError creates a ClassificationsGetInternalServerError with default headers values +func NewClassificationsGetInternalServerError() *ClassificationsGetInternalServerError { + return &ClassificationsGetInternalServerError{} +} + +/* +ClassificationsGetInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. +*/ +type ClassificationsGetInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this classifications get internal server error response has a 2xx status code +func (o *ClassificationsGetInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this classifications get internal server error response has a 3xx status code +func (o *ClassificationsGetInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this classifications get internal server error response has a 4xx status code +func (o *ClassificationsGetInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this classifications get internal server error response has a 5xx status code +func (o *ClassificationsGetInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this classifications get internal server error response a status code equal to that given +func (o *ClassificationsGetInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the classifications get internal server error response +func (o *ClassificationsGetInternalServerError) Code() int { + return 500 +} + +func (o *ClassificationsGetInternalServerError) Error() string { + return fmt.Sprintf("[GET 
/classifications/{id}][%d] classificationsGetInternalServerError %+v", 500, o.Payload) +} + +func (o *ClassificationsGetInternalServerError) String() string { + return fmt.Sprintf("[GET /classifications/{id}][%d] classificationsGetInternalServerError %+v", 500, o.Payload) +} + +func (o *ClassificationsGetInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ClassificationsGetInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/classifications/classifications_post_parameters.go b/platform/dbops/binaries/weaviate-src/client/classifications/classifications_post_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..e275c482173292373ecd97fe05a21af7a7fe0280 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/classifications/classifications_post_parameters.go @@ -0,0 +1,164 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package classifications + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// NewClassificationsPostParams creates a new ClassificationsPostParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewClassificationsPostParams() *ClassificationsPostParams { + return &ClassificationsPostParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewClassificationsPostParamsWithTimeout creates a new ClassificationsPostParams object +// with the ability to set a timeout on a request. +func NewClassificationsPostParamsWithTimeout(timeout time.Duration) *ClassificationsPostParams { + return &ClassificationsPostParams{ + timeout: timeout, + } +} + +// NewClassificationsPostParamsWithContext creates a new ClassificationsPostParams object +// with the ability to set a context for a request. +func NewClassificationsPostParamsWithContext(ctx context.Context) *ClassificationsPostParams { + return &ClassificationsPostParams{ + Context: ctx, + } +} + +// NewClassificationsPostParamsWithHTTPClient creates a new ClassificationsPostParams object +// with the ability to set a custom HTTPClient for a request. +func NewClassificationsPostParamsWithHTTPClient(client *http.Client) *ClassificationsPostParams { + return &ClassificationsPostParams{ + HTTPClient: client, + } +} + +/* +ClassificationsPostParams contains all the parameters to send to the API endpoint + + for the classifications post operation. + + Typically these are written to a http.Request. +*/ +type ClassificationsPostParams struct { + + /* Params. 
+ + parameters to start a classification + */ + Params *models.Classification + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the classifications post params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *ClassificationsPostParams) WithDefaults() *ClassificationsPostParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the classifications post params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *ClassificationsPostParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the classifications post params +func (o *ClassificationsPostParams) WithTimeout(timeout time.Duration) *ClassificationsPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the classifications post params +func (o *ClassificationsPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the classifications post params +func (o *ClassificationsPostParams) WithContext(ctx context.Context) *ClassificationsPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the classifications post params +func (o *ClassificationsPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the classifications post params +func (o *ClassificationsPostParams) WithHTTPClient(client *http.Client) *ClassificationsPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the classifications post params +func (o *ClassificationsPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithParams adds the params to the classifications post params +func (o *ClassificationsPostParams) WithParams(params *models.Classification) 
*ClassificationsPostParams { + o.SetParams(params) + return o +} + +// SetParams adds the params to the classifications post params +func (o *ClassificationsPostParams) SetParams(params *models.Classification) { + o.Params = params +} + +// WriteToRequest writes these params to a swagger request +func (o *ClassificationsPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if o.Params != nil { + if err := r.SetBodyParam(o.Params); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/classifications/classifications_post_responses.go b/platform/dbops/binaries/weaviate-src/client/classifications/classifications_post_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..426fed4eab5f6214fc9ee7f5f246d89fa5e1fe30 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/classifications/classifications_post_responses.go @@ -0,0 +1,398 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package classifications + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// ClassificationsPostReader is a Reader for the ClassificationsPost structure. +type ClassificationsPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *ClassificationsPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 201: + result := NewClassificationsPostCreated() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewClassificationsPostBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 401: + result := NewClassificationsPostUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewClassificationsPostForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewClassificationsPostInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewClassificationsPostCreated creates a ClassificationsPostCreated with default headers values +func NewClassificationsPostCreated() *ClassificationsPostCreated { + return &ClassificationsPostCreated{} +} + +/* +ClassificationsPostCreated describes a response with status code 201, with default header values. + +Successfully started classification. 
+*/ +type ClassificationsPostCreated struct { + Payload *models.Classification +} + +// IsSuccess returns true when this classifications post created response has a 2xx status code +func (o *ClassificationsPostCreated) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this classifications post created response has a 3xx status code +func (o *ClassificationsPostCreated) IsRedirect() bool { + return false +} + +// IsClientError returns true when this classifications post created response has a 4xx status code +func (o *ClassificationsPostCreated) IsClientError() bool { + return false +} + +// IsServerError returns true when this classifications post created response has a 5xx status code +func (o *ClassificationsPostCreated) IsServerError() bool { + return false +} + +// IsCode returns true when this classifications post created response a status code equal to that given +func (o *ClassificationsPostCreated) IsCode(code int) bool { + return code == 201 +} + +// Code gets the status code for the classifications post created response +func (o *ClassificationsPostCreated) Code() int { + return 201 +} + +func (o *ClassificationsPostCreated) Error() string { + return fmt.Sprintf("[POST /classifications/][%d] classificationsPostCreated %+v", 201, o.Payload) +} + +func (o *ClassificationsPostCreated) String() string { + return fmt.Sprintf("[POST /classifications/][%d] classificationsPostCreated %+v", 201, o.Payload) +} + +func (o *ClassificationsPostCreated) GetPayload() *models.Classification { + return o.Payload +} + +func (o *ClassificationsPostCreated) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Classification) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewClassificationsPostBadRequest creates a ClassificationsPostBadRequest with default headers values 
+func NewClassificationsPostBadRequest() *ClassificationsPostBadRequest { + return &ClassificationsPostBadRequest{} +} + +/* +ClassificationsPostBadRequest describes a response with status code 400, with default header values. + +Incorrect request +*/ +type ClassificationsPostBadRequest struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this classifications post bad request response has a 2xx status code +func (o *ClassificationsPostBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this classifications post bad request response has a 3xx status code +func (o *ClassificationsPostBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this classifications post bad request response has a 4xx status code +func (o *ClassificationsPostBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this classifications post bad request response has a 5xx status code +func (o *ClassificationsPostBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this classifications post bad request response a status code equal to that given +func (o *ClassificationsPostBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the classifications post bad request response +func (o *ClassificationsPostBadRequest) Code() int { + return 400 +} + +func (o *ClassificationsPostBadRequest) Error() string { + return fmt.Sprintf("[POST /classifications/][%d] classificationsPostBadRequest %+v", 400, o.Payload) +} + +func (o *ClassificationsPostBadRequest) String() string { + return fmt.Sprintf("[POST /classifications/][%d] classificationsPostBadRequest %+v", 400, o.Payload) +} + +func (o *ClassificationsPostBadRequest) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ClassificationsPostBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats 
strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewClassificationsPostUnauthorized creates a ClassificationsPostUnauthorized with default headers values +func NewClassificationsPostUnauthorized() *ClassificationsPostUnauthorized { + return &ClassificationsPostUnauthorized{} +} + +/* +ClassificationsPostUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. +*/ +type ClassificationsPostUnauthorized struct { +} + +// IsSuccess returns true when this classifications post unauthorized response has a 2xx status code +func (o *ClassificationsPostUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this classifications post unauthorized response has a 3xx status code +func (o *ClassificationsPostUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this classifications post unauthorized response has a 4xx status code +func (o *ClassificationsPostUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this classifications post unauthorized response has a 5xx status code +func (o *ClassificationsPostUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this classifications post unauthorized response a status code equal to that given +func (o *ClassificationsPostUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the classifications post unauthorized response +func (o *ClassificationsPostUnauthorized) Code() int { + return 401 +} + +func (o *ClassificationsPostUnauthorized) Error() string { + return fmt.Sprintf("[POST /classifications/][%d] classificationsPostUnauthorized ", 401) +} + +func (o *ClassificationsPostUnauthorized) String() string { + return 
fmt.Sprintf("[POST /classifications/][%d] classificationsPostUnauthorized ", 401) +} + +func (o *ClassificationsPostUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewClassificationsPostForbidden creates a ClassificationsPostForbidden with default headers values +func NewClassificationsPostForbidden() *ClassificationsPostForbidden { + return &ClassificationsPostForbidden{} +} + +/* +ClassificationsPostForbidden describes a response with status code 403, with default header values. + +Forbidden +*/ +type ClassificationsPostForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this classifications post forbidden response has a 2xx status code +func (o *ClassificationsPostForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this classifications post forbidden response has a 3xx status code +func (o *ClassificationsPostForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this classifications post forbidden response has a 4xx status code +func (o *ClassificationsPostForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this classifications post forbidden response has a 5xx status code +func (o *ClassificationsPostForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this classifications post forbidden response a status code equal to that given +func (o *ClassificationsPostForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the classifications post forbidden response +func (o *ClassificationsPostForbidden) Code() int { + return 403 +} + +func (o *ClassificationsPostForbidden) Error() string { + return fmt.Sprintf("[POST /classifications/][%d] classificationsPostForbidden %+v", 403, o.Payload) +} + +func (o *ClassificationsPostForbidden) String() string { + return 
fmt.Sprintf("[POST /classifications/][%d] classificationsPostForbidden %+v", 403, o.Payload) +} + +func (o *ClassificationsPostForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ClassificationsPostForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewClassificationsPostInternalServerError creates a ClassificationsPostInternalServerError with default headers values +func NewClassificationsPostInternalServerError() *ClassificationsPostInternalServerError { + return &ClassificationsPostInternalServerError{} +} + +/* +ClassificationsPostInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+*/ +type ClassificationsPostInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this classifications post internal server error response has a 2xx status code +func (o *ClassificationsPostInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this classifications post internal server error response has a 3xx status code +func (o *ClassificationsPostInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this classifications post internal server error response has a 4xx status code +func (o *ClassificationsPostInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this classifications post internal server error response has a 5xx status code +func (o *ClassificationsPostInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this classifications post internal server error response a status code equal to that given +func (o *ClassificationsPostInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the classifications post internal server error response +func (o *ClassificationsPostInternalServerError) Code() int { + return 500 +} + +func (o *ClassificationsPostInternalServerError) Error() string { + return fmt.Sprintf("[POST /classifications/][%d] classificationsPostInternalServerError %+v", 500, o.Payload) +} + +func (o *ClassificationsPostInternalServerError) String() string { + return fmt.Sprintf("[POST /classifications/][%d] classificationsPostInternalServerError %+v", 500, o.Payload) +} + +func (o *ClassificationsPostInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ClassificationsPostInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response 
payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/cluster/cluster_client.go b/platform/dbops/binaries/weaviate-src/client/cluster/cluster_client.go new file mode 100644 index 0000000000000000000000000000000000000000..3e97935be09440dcf8ec9efd6d19605fac75f4c0 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/cluster/cluster_client.go @@ -0,0 +1,93 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package cluster + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" +) + +// New creates a new cluster API client. +func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService { + return &Client{transport: transport, formats: formats} +} + +/* +Client for cluster API +*/ +type Client struct { + transport runtime.ClientTransport + formats strfmt.Registry +} + +// ClientOption is the option for Client methods +type ClientOption func(*runtime.ClientOperation) + +// ClientService is the interface for Client methods +type ClientService interface { + ClusterGetStatistics(params *ClusterGetStatisticsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ClusterGetStatisticsOK, error) + + SetTransport(transport runtime.ClientTransport) +} + +/* +ClusterGetStatistics sees raft cluster statistics + +Returns Raft cluster statistics of Weaviate DB. 
+*/ +func (a *Client) ClusterGetStatistics(params *ClusterGetStatisticsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ClusterGetStatisticsOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewClusterGetStatisticsParams() + } + op := &runtime.ClientOperation{ + ID: "cluster.get.statistics", + Method: "GET", + PathPattern: "/cluster/statistics", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &ClusterGetStatisticsReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*ClusterGetStatisticsOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for cluster.get.statistics: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + panic(msg) +} + +// SetTransport changes the transport on the client +func (a *Client) SetTransport(transport runtime.ClientTransport) { + a.transport = transport +} diff --git a/platform/dbops/binaries/weaviate-src/client/cluster/cluster_get_statistics_parameters.go b/platform/dbops/binaries/weaviate-src/client/cluster/cluster_get_statistics_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..39caa007c9eea55672f7e344656be43755c109a7 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/cluster/cluster_get_statistics_parameters.go @@ -0,0 +1,139 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package cluster + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewClusterGetStatisticsParams creates a new ClusterGetStatisticsParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewClusterGetStatisticsParams() *ClusterGetStatisticsParams { + return &ClusterGetStatisticsParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewClusterGetStatisticsParamsWithTimeout creates a new ClusterGetStatisticsParams object +// with the ability to set a timeout on a request. 
+func NewClusterGetStatisticsParamsWithTimeout(timeout time.Duration) *ClusterGetStatisticsParams { + return &ClusterGetStatisticsParams{ + timeout: timeout, + } +} + +// NewClusterGetStatisticsParamsWithContext creates a new ClusterGetStatisticsParams object +// with the ability to set a context for a request. +func NewClusterGetStatisticsParamsWithContext(ctx context.Context) *ClusterGetStatisticsParams { + return &ClusterGetStatisticsParams{ + Context: ctx, + } +} + +// NewClusterGetStatisticsParamsWithHTTPClient creates a new ClusterGetStatisticsParams object +// with the ability to set a custom HTTPClient for a request. +func NewClusterGetStatisticsParamsWithHTTPClient(client *http.Client) *ClusterGetStatisticsParams { + return &ClusterGetStatisticsParams{ + HTTPClient: client, + } +} + +/* +ClusterGetStatisticsParams contains all the parameters to send to the API endpoint + + for the cluster get statistics operation. + + Typically these are written to a http.Request. +*/ +type ClusterGetStatisticsParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the cluster get statistics params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *ClusterGetStatisticsParams) WithDefaults() *ClusterGetStatisticsParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the cluster get statistics params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *ClusterGetStatisticsParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the cluster get statistics params +func (o *ClusterGetStatisticsParams) WithTimeout(timeout time.Duration) *ClusterGetStatisticsParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the cluster get statistics params +func (o *ClusterGetStatisticsParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the cluster get statistics params +func (o *ClusterGetStatisticsParams) WithContext(ctx context.Context) *ClusterGetStatisticsParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the cluster get statistics params +func (o *ClusterGetStatisticsParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the cluster get statistics params +func (o *ClusterGetStatisticsParams) WithHTTPClient(client *http.Client) *ClusterGetStatisticsParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the cluster get statistics params +func (o *ClusterGetStatisticsParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *ClusterGetStatisticsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/cluster/cluster_get_statistics_responses.go b/platform/dbops/binaries/weaviate-src/client/cluster/cluster_get_statistics_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..cbd2bd7849e7abdb130f3c82fb707e9f09c6652a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/cluster/cluster_get_statistics_responses.go @@ -0,0 +1,398 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package cluster + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// ClusterGetStatisticsReader is a Reader for the ClusterGetStatistics structure. +type ClusterGetStatisticsReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *ClusterGetStatisticsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewClusterGetStatisticsOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 401: + result := NewClusterGetStatisticsUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewClusterGetStatisticsForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewClusterGetStatisticsUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewClusterGetStatisticsInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewClusterGetStatisticsOK creates a ClusterGetStatisticsOK with default headers values +func NewClusterGetStatisticsOK() *ClusterGetStatisticsOK { + return &ClusterGetStatisticsOK{} +} + +/* +ClusterGetStatisticsOK describes a response with status code 200, with default header values. 
+ +Cluster statistics successfully returned +*/ +type ClusterGetStatisticsOK struct { + Payload *models.ClusterStatisticsResponse +} + +// IsSuccess returns true when this cluster get statistics o k response has a 2xx status code +func (o *ClusterGetStatisticsOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this cluster get statistics o k response has a 3xx status code +func (o *ClusterGetStatisticsOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this cluster get statistics o k response has a 4xx status code +func (o *ClusterGetStatisticsOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this cluster get statistics o k response has a 5xx status code +func (o *ClusterGetStatisticsOK) IsServerError() bool { + return false +} + +// IsCode returns true when this cluster get statistics o k response a status code equal to that given +func (o *ClusterGetStatisticsOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the cluster get statistics o k response +func (o *ClusterGetStatisticsOK) Code() int { + return 200 +} + +func (o *ClusterGetStatisticsOK) Error() string { + return fmt.Sprintf("[GET /cluster/statistics][%d] clusterGetStatisticsOK %+v", 200, o.Payload) +} + +func (o *ClusterGetStatisticsOK) String() string { + return fmt.Sprintf("[GET /cluster/statistics][%d] clusterGetStatisticsOK %+v", 200, o.Payload) +} + +func (o *ClusterGetStatisticsOK) GetPayload() *models.ClusterStatisticsResponse { + return o.Payload +} + +func (o *ClusterGetStatisticsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ClusterStatisticsResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewClusterGetStatisticsUnauthorized creates a ClusterGetStatisticsUnauthorized with 
default headers values +func NewClusterGetStatisticsUnauthorized() *ClusterGetStatisticsUnauthorized { + return &ClusterGetStatisticsUnauthorized{} +} + +/* +ClusterGetStatisticsUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. +*/ +type ClusterGetStatisticsUnauthorized struct { +} + +// IsSuccess returns true when this cluster get statistics unauthorized response has a 2xx status code +func (o *ClusterGetStatisticsUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this cluster get statistics unauthorized response has a 3xx status code +func (o *ClusterGetStatisticsUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this cluster get statistics unauthorized response has a 4xx status code +func (o *ClusterGetStatisticsUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this cluster get statistics unauthorized response has a 5xx status code +func (o *ClusterGetStatisticsUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this cluster get statistics unauthorized response a status code equal to that given +func (o *ClusterGetStatisticsUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the cluster get statistics unauthorized response +func (o *ClusterGetStatisticsUnauthorized) Code() int { + return 401 +} + +func (o *ClusterGetStatisticsUnauthorized) Error() string { + return fmt.Sprintf("[GET /cluster/statistics][%d] clusterGetStatisticsUnauthorized ", 401) +} + +func (o *ClusterGetStatisticsUnauthorized) String() string { + return fmt.Sprintf("[GET /cluster/statistics][%d] clusterGetStatisticsUnauthorized ", 401) +} + +func (o *ClusterGetStatisticsUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// 
NewClusterGetStatisticsForbidden creates a ClusterGetStatisticsForbidden with default headers values +func NewClusterGetStatisticsForbidden() *ClusterGetStatisticsForbidden { + return &ClusterGetStatisticsForbidden{} +} + +/* +ClusterGetStatisticsForbidden describes a response with status code 403, with default header values. + +Forbidden +*/ +type ClusterGetStatisticsForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this cluster get statistics forbidden response has a 2xx status code +func (o *ClusterGetStatisticsForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this cluster get statistics forbidden response has a 3xx status code +func (o *ClusterGetStatisticsForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this cluster get statistics forbidden response has a 4xx status code +func (o *ClusterGetStatisticsForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this cluster get statistics forbidden response has a 5xx status code +func (o *ClusterGetStatisticsForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this cluster get statistics forbidden response a status code equal to that given +func (o *ClusterGetStatisticsForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the cluster get statistics forbidden response +func (o *ClusterGetStatisticsForbidden) Code() int { + return 403 +} + +func (o *ClusterGetStatisticsForbidden) Error() string { + return fmt.Sprintf("[GET /cluster/statistics][%d] clusterGetStatisticsForbidden %+v", 403, o.Payload) +} + +func (o *ClusterGetStatisticsForbidden) String() string { + return fmt.Sprintf("[GET /cluster/statistics][%d] clusterGetStatisticsForbidden %+v", 403, o.Payload) +} + +func (o *ClusterGetStatisticsForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ClusterGetStatisticsForbidden) 
readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewClusterGetStatisticsUnprocessableEntity creates a ClusterGetStatisticsUnprocessableEntity with default headers values +func NewClusterGetStatisticsUnprocessableEntity() *ClusterGetStatisticsUnprocessableEntity { + return &ClusterGetStatisticsUnprocessableEntity{} +} + +/* +ClusterGetStatisticsUnprocessableEntity describes a response with status code 422, with default header values. + +Invalid backup restoration status attempt. +*/ +type ClusterGetStatisticsUnprocessableEntity struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this cluster get statistics unprocessable entity response has a 2xx status code +func (o *ClusterGetStatisticsUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this cluster get statistics unprocessable entity response has a 3xx status code +func (o *ClusterGetStatisticsUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this cluster get statistics unprocessable entity response has a 4xx status code +func (o *ClusterGetStatisticsUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this cluster get statistics unprocessable entity response has a 5xx status code +func (o *ClusterGetStatisticsUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this cluster get statistics unprocessable entity response a status code equal to that given +func (o *ClusterGetStatisticsUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + +// Code gets the status code for the cluster get statistics unprocessable entity response +func (o 
*ClusterGetStatisticsUnprocessableEntity) Code() int { + return 422 +} + +func (o *ClusterGetStatisticsUnprocessableEntity) Error() string { + return fmt.Sprintf("[GET /cluster/statistics][%d] clusterGetStatisticsUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *ClusterGetStatisticsUnprocessableEntity) String() string { + return fmt.Sprintf("[GET /cluster/statistics][%d] clusterGetStatisticsUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *ClusterGetStatisticsUnprocessableEntity) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ClusterGetStatisticsUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewClusterGetStatisticsInternalServerError creates a ClusterGetStatisticsInternalServerError with default headers values +func NewClusterGetStatisticsInternalServerError() *ClusterGetStatisticsInternalServerError { + return &ClusterGetStatisticsInternalServerError{} +} + +/* +ClusterGetStatisticsInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+*/ +type ClusterGetStatisticsInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this cluster get statistics internal server error response has a 2xx status code +func (o *ClusterGetStatisticsInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this cluster get statistics internal server error response has a 3xx status code +func (o *ClusterGetStatisticsInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this cluster get statistics internal server error response has a 4xx status code +func (o *ClusterGetStatisticsInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this cluster get statistics internal server error response has a 5xx status code +func (o *ClusterGetStatisticsInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this cluster get statistics internal server error response a status code equal to that given +func (o *ClusterGetStatisticsInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the cluster get statistics internal server error response +func (o *ClusterGetStatisticsInternalServerError) Code() int { + return 500 +} + +func (o *ClusterGetStatisticsInternalServerError) Error() string { + return fmt.Sprintf("[GET /cluster/statistics][%d] clusterGetStatisticsInternalServerError %+v", 500, o.Payload) +} + +func (o *ClusterGetStatisticsInternalServerError) String() string { + return fmt.Sprintf("[GET /cluster/statistics][%d] clusterGetStatisticsInternalServerError %+v", 500, o.Payload) +} + +func (o *ClusterGetStatisticsInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ClusterGetStatisticsInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = 
new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/distributed_tasks/distributed_tasks_client.go b/platform/dbops/binaries/weaviate-src/client/distributed_tasks/distributed_tasks_client.go new file mode 100644 index 0000000000000000000000000000000000000000..12589943c5c23e0f85330c66675be7aef6984293 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/distributed_tasks/distributed_tasks_client.go @@ -0,0 +1,91 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package distributed_tasks + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" +) + +// New creates a new distributed tasks API client. 
+func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService { + return &Client{transport: transport, formats: formats} +} + +/* +Client for distributed tasks API +*/ +type Client struct { + transport runtime.ClientTransport + formats strfmt.Registry +} + +// ClientOption is the option for Client methods +type ClientOption func(*runtime.ClientOperation) + +// ClientService is the interface for Client methods +type ClientService interface { + DistributedTasksGet(params *DistributedTasksGetParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*DistributedTasksGetOK, error) + + SetTransport(transport runtime.ClientTransport) +} + +/* +DistributedTasksGet lists all distributed tasks in the cluster +*/ +func (a *Client) DistributedTasksGet(params *DistributedTasksGetParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*DistributedTasksGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewDistributedTasksGetParams() + } + op := &runtime.ClientOperation{ + ID: "distributedTasks.get", + Method: "GET", + PathPattern: "/tasks", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &DistributedTasksGetReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*DistributedTasksGetOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for distributedTasks.get: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + panic(msg) +} + +// SetTransport changes the transport on the client +func (a *Client) SetTransport(transport runtime.ClientTransport) { + a.transport = transport +} diff --git a/platform/dbops/binaries/weaviate-src/client/distributed_tasks/distributed_tasks_get_parameters.go b/platform/dbops/binaries/weaviate-src/client/distributed_tasks/distributed_tasks_get_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..39592450b1b88561b01018514254cf84c165093e --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/distributed_tasks/distributed_tasks_get_parameters.go @@ -0,0 +1,139 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package distributed_tasks + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewDistributedTasksGetParams creates a new DistributedTasksGetParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewDistributedTasksGetParams() *DistributedTasksGetParams { + return &DistributedTasksGetParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewDistributedTasksGetParamsWithTimeout creates a new DistributedTasksGetParams object +// with the ability to set a timeout on a request. 
+func NewDistributedTasksGetParamsWithTimeout(timeout time.Duration) *DistributedTasksGetParams { + return &DistributedTasksGetParams{ + timeout: timeout, + } +} + +// NewDistributedTasksGetParamsWithContext creates a new DistributedTasksGetParams object +// with the ability to set a context for a request. +func NewDistributedTasksGetParamsWithContext(ctx context.Context) *DistributedTasksGetParams { + return &DistributedTasksGetParams{ + Context: ctx, + } +} + +// NewDistributedTasksGetParamsWithHTTPClient creates a new DistributedTasksGetParams object +// with the ability to set a custom HTTPClient for a request. +func NewDistributedTasksGetParamsWithHTTPClient(client *http.Client) *DistributedTasksGetParams { + return &DistributedTasksGetParams{ + HTTPClient: client, + } +} + +/* +DistributedTasksGetParams contains all the parameters to send to the API endpoint + + for the distributed tasks get operation. + + Typically these are written to a http.Request. +*/ +type DistributedTasksGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the distributed tasks get params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *DistributedTasksGetParams) WithDefaults() *DistributedTasksGetParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the distributed tasks get params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *DistributedTasksGetParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the distributed tasks get params +func (o *DistributedTasksGetParams) WithTimeout(timeout time.Duration) *DistributedTasksGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the distributed tasks get params +func (o *DistributedTasksGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the distributed tasks get params +func (o *DistributedTasksGetParams) WithContext(ctx context.Context) *DistributedTasksGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the distributed tasks get params +func (o *DistributedTasksGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the distributed tasks get params +func (o *DistributedTasksGetParams) WithHTTPClient(client *http.Client) *DistributedTasksGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the distributed tasks get params +func (o *DistributedTasksGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *DistributedTasksGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/distributed_tasks/distributed_tasks_get_responses.go b/platform/dbops/binaries/weaviate-src/client/distributed_tasks/distributed_tasks_get_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..44f688e2404bdf51509de05303afdf1472249937 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/distributed_tasks/distributed_tasks_get_responses.go @@ -0,0 +1,260 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package distributed_tasks + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// DistributedTasksGetReader is a Reader for the DistributedTasksGet structure. +type DistributedTasksGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *DistributedTasksGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewDistributedTasksGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 403: + result := NewDistributedTasksGetForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewDistributedTasksGetInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewDistributedTasksGetOK creates a DistributedTasksGetOK with default headers values +func NewDistributedTasksGetOK() *DistributedTasksGetOK { + return &DistributedTasksGetOK{} +} + +/* +DistributedTasksGetOK describes a response with status code 200, with default header values. 
+ +Distributed tasks successfully returned +*/ +type DistributedTasksGetOK struct { + Payload models.DistributedTasks +} + +// IsSuccess returns true when this distributed tasks get o k response has a 2xx status code +func (o *DistributedTasksGetOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this distributed tasks get o k response has a 3xx status code +func (o *DistributedTasksGetOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this distributed tasks get o k response has a 4xx status code +func (o *DistributedTasksGetOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this distributed tasks get o k response has a 5xx status code +func (o *DistributedTasksGetOK) IsServerError() bool { + return false +} + +// IsCode returns true when this distributed tasks get o k response a status code equal to that given +func (o *DistributedTasksGetOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the distributed tasks get o k response +func (o *DistributedTasksGetOK) Code() int { + return 200 +} + +func (o *DistributedTasksGetOK) Error() string { + return fmt.Sprintf("[GET /tasks][%d] distributedTasksGetOK %+v", 200, o.Payload) +} + +func (o *DistributedTasksGetOK) String() string { + return fmt.Sprintf("[GET /tasks][%d] distributedTasksGetOK %+v", 200, o.Payload) +} + +func (o *DistributedTasksGetOK) GetPayload() models.DistributedTasks { + return o.Payload +} + +func (o *DistributedTasksGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewDistributedTasksGetForbidden creates a DistributedTasksGetForbidden with default headers values +func NewDistributedTasksGetForbidden() *DistributedTasksGetForbidden { + return 
&DistributedTasksGetForbidden{} +} + +/* +DistributedTasksGetForbidden describes a response with status code 403, with default header values. + +Unauthorized or invalid credentials. +*/ +type DistributedTasksGetForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this distributed tasks get forbidden response has a 2xx status code +func (o *DistributedTasksGetForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this distributed tasks get forbidden response has a 3xx status code +func (o *DistributedTasksGetForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this distributed tasks get forbidden response has a 4xx status code +func (o *DistributedTasksGetForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this distributed tasks get forbidden response has a 5xx status code +func (o *DistributedTasksGetForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this distributed tasks get forbidden response a status code equal to that given +func (o *DistributedTasksGetForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the distributed tasks get forbidden response +func (o *DistributedTasksGetForbidden) Code() int { + return 403 +} + +func (o *DistributedTasksGetForbidden) Error() string { + return fmt.Sprintf("[GET /tasks][%d] distributedTasksGetForbidden %+v", 403, o.Payload) +} + +func (o *DistributedTasksGetForbidden) String() string { + return fmt.Sprintf("[GET /tasks][%d] distributedTasksGetForbidden %+v", 403, o.Payload) +} + +func (o *DistributedTasksGetForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *DistributedTasksGetForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := 
consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewDistributedTasksGetInternalServerError creates a DistributedTasksGetInternalServerError with default headers values +func NewDistributedTasksGetInternalServerError() *DistributedTasksGetInternalServerError { + return &DistributedTasksGetInternalServerError{} +} + +/* +DistributedTasksGetInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. +*/ +type DistributedTasksGetInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this distributed tasks get internal server error response has a 2xx status code +func (o *DistributedTasksGetInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this distributed tasks get internal server error response has a 3xx status code +func (o *DistributedTasksGetInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this distributed tasks get internal server error response has a 4xx status code +func (o *DistributedTasksGetInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this distributed tasks get internal server error response has a 5xx status code +func (o *DistributedTasksGetInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this distributed tasks get internal server error response a status code equal to that given +func (o *DistributedTasksGetInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the distributed tasks get internal server error response +func (o *DistributedTasksGetInternalServerError) Code() int { + return 500 +} + +func (o *DistributedTasksGetInternalServerError) 
Error() string { + return fmt.Sprintf("[GET /tasks][%d] distributedTasksGetInternalServerError %+v", 500, o.Payload) +} + +func (o *DistributedTasksGetInternalServerError) String() string { + return fmt.Sprintf("[GET /tasks][%d] distributedTasksGetInternalServerError %+v", 500, o.Payload) +} + +func (o *DistributedTasksGetInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *DistributedTasksGetInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/graphql/graphql_batch_parameters.go b/platform/dbops/binaries/weaviate-src/client/graphql/graphql_batch_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..36628c6526106bc40c90e3c557e57e720c874d4f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/graphql/graphql_batch_parameters.go @@ -0,0 +1,164 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package graphql + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// NewGraphqlBatchParams creates a new GraphqlBatchParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewGraphqlBatchParams() *GraphqlBatchParams { + return &GraphqlBatchParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewGraphqlBatchParamsWithTimeout creates a new GraphqlBatchParams object +// with the ability to set a timeout on a request. +func NewGraphqlBatchParamsWithTimeout(timeout time.Duration) *GraphqlBatchParams { + return &GraphqlBatchParams{ + timeout: timeout, + } +} + +// NewGraphqlBatchParamsWithContext creates a new GraphqlBatchParams object +// with the ability to set a context for a request. +func NewGraphqlBatchParamsWithContext(ctx context.Context) *GraphqlBatchParams { + return &GraphqlBatchParams{ + Context: ctx, + } +} + +// NewGraphqlBatchParamsWithHTTPClient creates a new GraphqlBatchParams object +// with the ability to set a custom HTTPClient for a request. +func NewGraphqlBatchParamsWithHTTPClient(client *http.Client) *GraphqlBatchParams { + return &GraphqlBatchParams{ + HTTPClient: client, + } +} + +/* +GraphqlBatchParams contains all the parameters to send to the API endpoint + + for the graphql batch operation. + + Typically these are written to a http.Request. +*/ +type GraphqlBatchParams struct { + + /* Body. + + The GraphQL queries. 
+ */ + Body models.GraphQLQueries + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the graphql batch params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *GraphqlBatchParams) WithDefaults() *GraphqlBatchParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the graphql batch params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *GraphqlBatchParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the graphql batch params +func (o *GraphqlBatchParams) WithTimeout(timeout time.Duration) *GraphqlBatchParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the graphql batch params +func (o *GraphqlBatchParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the graphql batch params +func (o *GraphqlBatchParams) WithContext(ctx context.Context) *GraphqlBatchParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the graphql batch params +func (o *GraphqlBatchParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the graphql batch params +func (o *GraphqlBatchParams) WithHTTPClient(client *http.Client) *GraphqlBatchParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the graphql batch params +func (o *GraphqlBatchParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the graphql batch params +func (o *GraphqlBatchParams) WithBody(body models.GraphQLQueries) *GraphqlBatchParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the graphql batch params +func (o *GraphqlBatchParams) SetBody(body models.GraphQLQueries) { + o.Body = body +} + +// 
WriteToRequest writes these params to a swagger request +func (o *GraphqlBatchParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/graphql/graphql_batch_responses.go b/platform/dbops/binaries/weaviate-src/client/graphql/graphql_batch_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..20829827bf6348d4679010678397b1baf7a43946 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/graphql/graphql_batch_responses.go @@ -0,0 +1,396 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package graphql + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// GraphqlBatchReader is a Reader for the GraphqlBatch structure. +type GraphqlBatchReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *GraphqlBatchReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewGraphqlBatchOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 401: + result := NewGraphqlBatchUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewGraphqlBatchForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewGraphqlBatchUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewGraphqlBatchInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewGraphqlBatchOK creates a GraphqlBatchOK with default headers values +func NewGraphqlBatchOK() *GraphqlBatchOK { + return &GraphqlBatchOK{} +} + +/* +GraphqlBatchOK describes a response with status code 200, with default header values. + +Successful query (with select). 
+*/ +type GraphqlBatchOK struct { + Payload models.GraphQLResponses +} + +// IsSuccess returns true when this graphql batch o k response has a 2xx status code +func (o *GraphqlBatchOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this graphql batch o k response has a 3xx status code +func (o *GraphqlBatchOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this graphql batch o k response has a 4xx status code +func (o *GraphqlBatchOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this graphql batch o k response has a 5xx status code +func (o *GraphqlBatchOK) IsServerError() bool { + return false +} + +// IsCode returns true when this graphql batch o k response a status code equal to that given +func (o *GraphqlBatchOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the graphql batch o k response +func (o *GraphqlBatchOK) Code() int { + return 200 +} + +func (o *GraphqlBatchOK) Error() string { + return fmt.Sprintf("[POST /graphql/batch][%d] graphqlBatchOK %+v", 200, o.Payload) +} + +func (o *GraphqlBatchOK) String() string { + return fmt.Sprintf("[POST /graphql/batch][%d] graphqlBatchOK %+v", 200, o.Payload) +} + +func (o *GraphqlBatchOK) GetPayload() models.GraphQLResponses { + return o.Payload +} + +func (o *GraphqlBatchOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGraphqlBatchUnauthorized creates a GraphqlBatchUnauthorized with default headers values +func NewGraphqlBatchUnauthorized() *GraphqlBatchUnauthorized { + return &GraphqlBatchUnauthorized{} +} + +/* +GraphqlBatchUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. 
+*/ +type GraphqlBatchUnauthorized struct { +} + +// IsSuccess returns true when this graphql batch unauthorized response has a 2xx status code +func (o *GraphqlBatchUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this graphql batch unauthorized response has a 3xx status code +func (o *GraphqlBatchUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this graphql batch unauthorized response has a 4xx status code +func (o *GraphqlBatchUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this graphql batch unauthorized response has a 5xx status code +func (o *GraphqlBatchUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this graphql batch unauthorized response a status code equal to that given +func (o *GraphqlBatchUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the graphql batch unauthorized response +func (o *GraphqlBatchUnauthorized) Code() int { + return 401 +} + +func (o *GraphqlBatchUnauthorized) Error() string { + return fmt.Sprintf("[POST /graphql/batch][%d] graphqlBatchUnauthorized ", 401) +} + +func (o *GraphqlBatchUnauthorized) String() string { + return fmt.Sprintf("[POST /graphql/batch][%d] graphqlBatchUnauthorized ", 401) +} + +func (o *GraphqlBatchUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewGraphqlBatchForbidden creates a GraphqlBatchForbidden with default headers values +func NewGraphqlBatchForbidden() *GraphqlBatchForbidden { + return &GraphqlBatchForbidden{} +} + +/* +GraphqlBatchForbidden describes a response with status code 403, with default header values. 
+ +Forbidden +*/ +type GraphqlBatchForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this graphql batch forbidden response has a 2xx status code +func (o *GraphqlBatchForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this graphql batch forbidden response has a 3xx status code +func (o *GraphqlBatchForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this graphql batch forbidden response has a 4xx status code +func (o *GraphqlBatchForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this graphql batch forbidden response has a 5xx status code +func (o *GraphqlBatchForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this graphql batch forbidden response a status code equal to that given +func (o *GraphqlBatchForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the graphql batch forbidden response +func (o *GraphqlBatchForbidden) Code() int { + return 403 +} + +func (o *GraphqlBatchForbidden) Error() string { + return fmt.Sprintf("[POST /graphql/batch][%d] graphqlBatchForbidden %+v", 403, o.Payload) +} + +func (o *GraphqlBatchForbidden) String() string { + return fmt.Sprintf("[POST /graphql/batch][%d] graphqlBatchForbidden %+v", 403, o.Payload) +} + +func (o *GraphqlBatchForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *GraphqlBatchForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGraphqlBatchUnprocessableEntity creates a GraphqlBatchUnprocessableEntity with default headers values +func NewGraphqlBatchUnprocessableEntity() *GraphqlBatchUnprocessableEntity { + return 
&GraphqlBatchUnprocessableEntity{} +} + +/* +GraphqlBatchUnprocessableEntity describes a response with status code 422, with default header values. + +Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file? +*/ +type GraphqlBatchUnprocessableEntity struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this graphql batch unprocessable entity response has a 2xx status code +func (o *GraphqlBatchUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this graphql batch unprocessable entity response has a 3xx status code +func (o *GraphqlBatchUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this graphql batch unprocessable entity response has a 4xx status code +func (o *GraphqlBatchUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this graphql batch unprocessable entity response has a 5xx status code +func (o *GraphqlBatchUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this graphql batch unprocessable entity response a status code equal to that given +func (o *GraphqlBatchUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + +// Code gets the status code for the graphql batch unprocessable entity response +func (o *GraphqlBatchUnprocessableEntity) Code() int { + return 422 +} + +func (o *GraphqlBatchUnprocessableEntity) Error() string { + return fmt.Sprintf("[POST /graphql/batch][%d] graphqlBatchUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *GraphqlBatchUnprocessableEntity) String() string { + return fmt.Sprintf("[POST /graphql/batch][%d] graphqlBatchUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *GraphqlBatchUnprocessableEntity) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *GraphqlBatchUnprocessableEntity) 
readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGraphqlBatchInternalServerError creates a GraphqlBatchInternalServerError with default headers values +func NewGraphqlBatchInternalServerError() *GraphqlBatchInternalServerError { + return &GraphqlBatchInternalServerError{} +} + +/* +GraphqlBatchInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. +*/ +type GraphqlBatchInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this graphql batch internal server error response has a 2xx status code +func (o *GraphqlBatchInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this graphql batch internal server error response has a 3xx status code +func (o *GraphqlBatchInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this graphql batch internal server error response has a 4xx status code +func (o *GraphqlBatchInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this graphql batch internal server error response has a 5xx status code +func (o *GraphqlBatchInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this graphql batch internal server error response a status code equal to that given +func (o *GraphqlBatchInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the graphql batch internal server error response +func (o *GraphqlBatchInternalServerError) Code() int { + return 500 +} + +func 
(o *GraphqlBatchInternalServerError) Error() string { + return fmt.Sprintf("[POST /graphql/batch][%d] graphqlBatchInternalServerError %+v", 500, o.Payload) +} + +func (o *GraphqlBatchInternalServerError) String() string { + return fmt.Sprintf("[POST /graphql/batch][%d] graphqlBatchInternalServerError %+v", 500, o.Payload) +} + +func (o *GraphqlBatchInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *GraphqlBatchInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/graphql/graphql_client.go b/platform/dbops/binaries/weaviate-src/client/graphql/graphql_client.go new file mode 100644 index 0000000000000000000000000000000000000000..6c1cea35255a303caa86735f62d4e66321583c5d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/graphql/graphql_client.go @@ -0,0 +1,136 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package graphql + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" +) + +// New creates a new graphql API client. 
+func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService { + return &Client{transport: transport, formats: formats} +} + +/* +Client for graphql API +*/ +type Client struct { + transport runtime.ClientTransport + formats strfmt.Registry +} + +// ClientOption is the option for Client methods +type ClientOption func(*runtime.ClientOperation) + +// ClientService is the interface for Client methods +type ClientService interface { + GraphqlBatch(params *GraphqlBatchParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GraphqlBatchOK, error) + + GraphqlPost(params *GraphqlPostParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GraphqlPostOK, error) + + SetTransport(transport runtime.ClientTransport) +} + +/* +GraphqlBatch gets a response based on graph q l + +Perform a batched GraphQL query +*/ +func (a *Client) GraphqlBatch(params *GraphqlBatchParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GraphqlBatchOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewGraphqlBatchParams() + } + op := &runtime.ClientOperation{ + ID: "graphql.batch", + Method: "POST", + PathPattern: "/graphql/batch", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &GraphqlBatchReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*GraphqlBatchOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for graphql.batch: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +GraphqlPost gets a response based on graph q l + +Get a response based on a GraphQL query +*/ +func (a *Client) GraphqlPost(params *GraphqlPostParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GraphqlPostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewGraphqlPostParams() + } + op := &runtime.ClientOperation{ + ID: "graphql.post", + Method: "POST", + PathPattern: "/graphql", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &GraphqlPostReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*GraphqlPostOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for graphql.post: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + panic(msg) +} + +// SetTransport changes the transport on the client +func (a *Client) SetTransport(transport runtime.ClientTransport) { + a.transport = transport +} diff --git a/platform/dbops/binaries/weaviate-src/client/graphql/graphql_post_parameters.go b/platform/dbops/binaries/weaviate-src/client/graphql/graphql_post_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..faa5a827d99bbe1afb7d3a2039863bec862c1557 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/graphql/graphql_post_parameters.go @@ -0,0 +1,164 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package graphql + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// NewGraphqlPostParams creates a new GraphqlPostParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewGraphqlPostParams() *GraphqlPostParams { + return &GraphqlPostParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewGraphqlPostParamsWithTimeout creates a new GraphqlPostParams object +// with the ability to set a timeout on a request. 
+func NewGraphqlPostParamsWithTimeout(timeout time.Duration) *GraphqlPostParams { + return &GraphqlPostParams{ + timeout: timeout, + } +} + +// NewGraphqlPostParamsWithContext creates a new GraphqlPostParams object +// with the ability to set a context for a request. +func NewGraphqlPostParamsWithContext(ctx context.Context) *GraphqlPostParams { + return &GraphqlPostParams{ + Context: ctx, + } +} + +// NewGraphqlPostParamsWithHTTPClient creates a new GraphqlPostParams object +// with the ability to set a custom HTTPClient for a request. +func NewGraphqlPostParamsWithHTTPClient(client *http.Client) *GraphqlPostParams { + return &GraphqlPostParams{ + HTTPClient: client, + } +} + +/* +GraphqlPostParams contains all the parameters to send to the API endpoint + + for the graphql post operation. + + Typically these are written to a http.Request. +*/ +type GraphqlPostParams struct { + + /* Body. + + The GraphQL query request parameters. + */ + Body *models.GraphQLQuery + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the graphql post params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *GraphqlPostParams) WithDefaults() *GraphqlPostParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the graphql post params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *GraphqlPostParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the graphql post params +func (o *GraphqlPostParams) WithTimeout(timeout time.Duration) *GraphqlPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the graphql post params +func (o *GraphqlPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the graphql post params +func (o *GraphqlPostParams) WithContext(ctx context.Context) *GraphqlPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the graphql post params +func (o *GraphqlPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the graphql post params +func (o *GraphqlPostParams) WithHTTPClient(client *http.Client) *GraphqlPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the graphql post params +func (o *GraphqlPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the graphql post params +func (o *GraphqlPostParams) WithBody(body *models.GraphQLQuery) *GraphqlPostParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the graphql post params +func (o *GraphqlPostParams) SetBody(body *models.GraphQLQuery) { + o.Body = body +} + +// WriteToRequest writes these params to a swagger request +func (o *GraphqlPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/graphql/graphql_post_responses.go b/platform/dbops/binaries/weaviate-src/client/graphql/graphql_post_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..5ce0eeb7cd4a678e269b6fd4c77f62e415aaa24a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/graphql/graphql_post_responses.go @@ -0,0 +1,398 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package graphql + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// GraphqlPostReader is a Reader for the GraphqlPost structure. +type GraphqlPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *GraphqlPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewGraphqlPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 401: + result := NewGraphqlPostUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewGraphqlPostForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewGraphqlPostUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewGraphqlPostInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewGraphqlPostOK creates a GraphqlPostOK with default headers values +func NewGraphqlPostOK() *GraphqlPostOK { + return &GraphqlPostOK{} +} + +/* +GraphqlPostOK describes a response with status code 200, with default header values. + +Successful query (with select). 
+*/ +type GraphqlPostOK struct { + Payload *models.GraphQLResponse +} + +// IsSuccess returns true when this graphql post o k response has a 2xx status code +func (o *GraphqlPostOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this graphql post o k response has a 3xx status code +func (o *GraphqlPostOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this graphql post o k response has a 4xx status code +func (o *GraphqlPostOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this graphql post o k response has a 5xx status code +func (o *GraphqlPostOK) IsServerError() bool { + return false +} + +// IsCode returns true when this graphql post o k response a status code equal to that given +func (o *GraphqlPostOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the graphql post o k response +func (o *GraphqlPostOK) Code() int { + return 200 +} + +func (o *GraphqlPostOK) Error() string { + return fmt.Sprintf("[POST /graphql][%d] graphqlPostOK %+v", 200, o.Payload) +} + +func (o *GraphqlPostOK) String() string { + return fmt.Sprintf("[POST /graphql][%d] graphqlPostOK %+v", 200, o.Payload) +} + +func (o *GraphqlPostOK) GetPayload() *models.GraphQLResponse { + return o.Payload +} + +func (o *GraphqlPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.GraphQLResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGraphqlPostUnauthorized creates a GraphqlPostUnauthorized with default headers values +func NewGraphqlPostUnauthorized() *GraphqlPostUnauthorized { + return &GraphqlPostUnauthorized{} +} + +/* +GraphqlPostUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. 
+*/ +type GraphqlPostUnauthorized struct { +} + +// IsSuccess returns true when this graphql post unauthorized response has a 2xx status code +func (o *GraphqlPostUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this graphql post unauthorized response has a 3xx status code +func (o *GraphqlPostUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this graphql post unauthorized response has a 4xx status code +func (o *GraphqlPostUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this graphql post unauthorized response has a 5xx status code +func (o *GraphqlPostUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this graphql post unauthorized response a status code equal to that given +func (o *GraphqlPostUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the graphql post unauthorized response +func (o *GraphqlPostUnauthorized) Code() int { + return 401 +} + +func (o *GraphqlPostUnauthorized) Error() string { + return fmt.Sprintf("[POST /graphql][%d] graphqlPostUnauthorized ", 401) +} + +func (o *GraphqlPostUnauthorized) String() string { + return fmt.Sprintf("[POST /graphql][%d] graphqlPostUnauthorized ", 401) +} + +func (o *GraphqlPostUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewGraphqlPostForbidden creates a GraphqlPostForbidden with default headers values +func NewGraphqlPostForbidden() *GraphqlPostForbidden { + return &GraphqlPostForbidden{} +} + +/* +GraphqlPostForbidden describes a response with status code 403, with default header values. 
+ +Forbidden +*/ +type GraphqlPostForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this graphql post forbidden response has a 2xx status code +func (o *GraphqlPostForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this graphql post forbidden response has a 3xx status code +func (o *GraphqlPostForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this graphql post forbidden response has a 4xx status code +func (o *GraphqlPostForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this graphql post forbidden response has a 5xx status code +func (o *GraphqlPostForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this graphql post forbidden response a status code equal to that given +func (o *GraphqlPostForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the graphql post forbidden response +func (o *GraphqlPostForbidden) Code() int { + return 403 +} + +func (o *GraphqlPostForbidden) Error() string { + return fmt.Sprintf("[POST /graphql][%d] graphqlPostForbidden %+v", 403, o.Payload) +} + +func (o *GraphqlPostForbidden) String() string { + return fmt.Sprintf("[POST /graphql][%d] graphqlPostForbidden %+v", 403, o.Payload) +} + +func (o *GraphqlPostForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *GraphqlPostForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGraphqlPostUnprocessableEntity creates a GraphqlPostUnprocessableEntity with default headers values +func NewGraphqlPostUnprocessableEntity() *GraphqlPostUnprocessableEntity { + return &GraphqlPostUnprocessableEntity{} +} 
+ +/* +GraphqlPostUnprocessableEntity describes a response with status code 422, with default header values. + +Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file? +*/ +type GraphqlPostUnprocessableEntity struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this graphql post unprocessable entity response has a 2xx status code +func (o *GraphqlPostUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this graphql post unprocessable entity response has a 3xx status code +func (o *GraphqlPostUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this graphql post unprocessable entity response has a 4xx status code +func (o *GraphqlPostUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this graphql post unprocessable entity response has a 5xx status code +func (o *GraphqlPostUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this graphql post unprocessable entity response a status code equal to that given +func (o *GraphqlPostUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + +// Code gets the status code for the graphql post unprocessable entity response +func (o *GraphqlPostUnprocessableEntity) Code() int { + return 422 +} + +func (o *GraphqlPostUnprocessableEntity) Error() string { + return fmt.Sprintf("[POST /graphql][%d] graphqlPostUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *GraphqlPostUnprocessableEntity) String() string { + return fmt.Sprintf("[POST /graphql][%d] graphqlPostUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *GraphqlPostUnprocessableEntity) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *GraphqlPostUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats 
strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGraphqlPostInternalServerError creates a GraphqlPostInternalServerError with default headers values +func NewGraphqlPostInternalServerError() *GraphqlPostInternalServerError { + return &GraphqlPostInternalServerError{} +} + +/* +GraphqlPostInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. +*/ +type GraphqlPostInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this graphql post internal server error response has a 2xx status code +func (o *GraphqlPostInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this graphql post internal server error response has a 3xx status code +func (o *GraphqlPostInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this graphql post internal server error response has a 4xx status code +func (o *GraphqlPostInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this graphql post internal server error response has a 5xx status code +func (o *GraphqlPostInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this graphql post internal server error response a status code equal to that given +func (o *GraphqlPostInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the graphql post internal server error response +func (o *GraphqlPostInternalServerError) Code() int { + return 500 +} + +func (o *GraphqlPostInternalServerError) Error() string { + return fmt.Sprintf("[POST /graphql][%d] 
graphqlPostInternalServerError %+v", 500, o.Payload) +} + +func (o *GraphqlPostInternalServerError) String() string { + return fmt.Sprintf("[POST /graphql][%d] graphqlPostInternalServerError %+v", 500, o.Payload) +} + +func (o *GraphqlPostInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *GraphqlPostInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/meta/meta_client.go b/platform/dbops/binaries/weaviate-src/client/meta/meta_client.go new file mode 100644 index 0000000000000000000000000000000000000000..209f58f40daa758571c1fe7bd30fdccf5a893d36 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/meta/meta_client.go @@ -0,0 +1,93 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package meta + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" +) + +// New creates a new meta API client. 
+func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService { + return &Client{transport: transport, formats: formats} +} + +/* +Client for meta API +*/ +type Client struct { + transport runtime.ClientTransport + formats strfmt.Registry +} + +// ClientOption is the option for Client methods +type ClientOption func(*runtime.ClientOperation) + +// ClientService is the interface for Client methods +type ClientService interface { + MetaGet(params *MetaGetParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*MetaGetOK, error) + + SetTransport(transport runtime.ClientTransport) +} + +/* +MetaGet returns meta information of the current weaviate instance + +Returns meta information about the server. Can be used to provide information to another Weaviate instance that wants to interact with the current instance. +*/ +func (a *Client) MetaGet(params *MetaGetParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*MetaGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewMetaGetParams() + } + op := &runtime.ClientOperation{ + ID: "meta.get", + Method: "GET", + PathPattern: "/meta", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &MetaGetReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*MetaGetOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for meta.get: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + panic(msg) +} + +// SetTransport changes the transport on the client +func (a *Client) SetTransport(transport runtime.ClientTransport) { + a.transport = transport +} diff --git a/platform/dbops/binaries/weaviate-src/client/meta/meta_get_parameters.go b/platform/dbops/binaries/weaviate-src/client/meta/meta_get_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..1fb25f8035ad433478696c655328afd2fe7d0976 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/meta/meta_get_parameters.go @@ -0,0 +1,139 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package meta + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewMetaGetParams creates a new MetaGetParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewMetaGetParams() *MetaGetParams { + return &MetaGetParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewMetaGetParamsWithTimeout creates a new MetaGetParams object +// with the ability to set a timeout on a request. 
+func NewMetaGetParamsWithTimeout(timeout time.Duration) *MetaGetParams { + return &MetaGetParams{ + timeout: timeout, + } +} + +// NewMetaGetParamsWithContext creates a new MetaGetParams object +// with the ability to set a context for a request. +func NewMetaGetParamsWithContext(ctx context.Context) *MetaGetParams { + return &MetaGetParams{ + Context: ctx, + } +} + +// NewMetaGetParamsWithHTTPClient creates a new MetaGetParams object +// with the ability to set a custom HTTPClient for a request. +func NewMetaGetParamsWithHTTPClient(client *http.Client) *MetaGetParams { + return &MetaGetParams{ + HTTPClient: client, + } +} + +/* +MetaGetParams contains all the parameters to send to the API endpoint + + for the meta get operation. + + Typically these are written to a http.Request. +*/ +type MetaGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the meta get params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *MetaGetParams) WithDefaults() *MetaGetParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the meta get params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *MetaGetParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the meta get params +func (o *MetaGetParams) WithTimeout(timeout time.Duration) *MetaGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the meta get params +func (o *MetaGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the meta get params +func (o *MetaGetParams) WithContext(ctx context.Context) *MetaGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the meta get params +func (o *MetaGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the meta get params +func (o *MetaGetParams) WithHTTPClient(client *http.Client) *MetaGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the meta get params +func (o *MetaGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *MetaGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/meta/meta_get_responses.go b/platform/dbops/binaries/weaviate-src/client/meta/meta_get_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..8d196e6be2ed67857d94bdf1dab19cb083ebb861 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/meta/meta_get_responses.go @@ -0,0 +1,324 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. 
All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package meta + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// MetaGetReader is a Reader for the MetaGet structure. +type MetaGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *MetaGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewMetaGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 401: + result := NewMetaGetUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewMetaGetForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewMetaGetInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewMetaGetOK creates a MetaGetOK with default headers values +func NewMetaGetOK() *MetaGetOK { + return &MetaGetOK{} +} + +/* +MetaGetOK describes a response with status code 200, with default header values. + +Successful response. 
+*/ +type MetaGetOK struct { + Payload *models.Meta +} + +// IsSuccess returns true when this meta get o k response has a 2xx status code +func (o *MetaGetOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this meta get o k response has a 3xx status code +func (o *MetaGetOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this meta get o k response has a 4xx status code +func (o *MetaGetOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this meta get o k response has a 5xx status code +func (o *MetaGetOK) IsServerError() bool { + return false +} + +// IsCode returns true when this meta get o k response a status code equal to that given +func (o *MetaGetOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the meta get o k response +func (o *MetaGetOK) Code() int { + return 200 +} + +func (o *MetaGetOK) Error() string { + return fmt.Sprintf("[GET /meta][%d] metaGetOK %+v", 200, o.Payload) +} + +func (o *MetaGetOK) String() string { + return fmt.Sprintf("[GET /meta][%d] metaGetOK %+v", 200, o.Payload) +} + +func (o *MetaGetOK) GetPayload() *models.Meta { + return o.Payload +} + +func (o *MetaGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Meta) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewMetaGetUnauthorized creates a MetaGetUnauthorized with default headers values +func NewMetaGetUnauthorized() *MetaGetUnauthorized { + return &MetaGetUnauthorized{} +} + +/* +MetaGetUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. 
+*/ +type MetaGetUnauthorized struct { +} + +// IsSuccess returns true when this meta get unauthorized response has a 2xx status code +func (o *MetaGetUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this meta get unauthorized response has a 3xx status code +func (o *MetaGetUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this meta get unauthorized response has a 4xx status code +func (o *MetaGetUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this meta get unauthorized response has a 5xx status code +func (o *MetaGetUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this meta get unauthorized response a status code equal to that given +func (o *MetaGetUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the meta get unauthorized response +func (o *MetaGetUnauthorized) Code() int { + return 401 +} + +func (o *MetaGetUnauthorized) Error() string { + return fmt.Sprintf("[GET /meta][%d] metaGetUnauthorized ", 401) +} + +func (o *MetaGetUnauthorized) String() string { + return fmt.Sprintf("[GET /meta][%d] metaGetUnauthorized ", 401) +} + +func (o *MetaGetUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewMetaGetForbidden creates a MetaGetForbidden with default headers values +func NewMetaGetForbidden() *MetaGetForbidden { + return &MetaGetForbidden{} +} + +/* +MetaGetForbidden describes a response with status code 403, with default header values. 
+ +Forbidden +*/ +type MetaGetForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this meta get forbidden response has a 2xx status code +func (o *MetaGetForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this meta get forbidden response has a 3xx status code +func (o *MetaGetForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this meta get forbidden response has a 4xx status code +func (o *MetaGetForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this meta get forbidden response has a 5xx status code +func (o *MetaGetForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this meta get forbidden response a status code equal to that given +func (o *MetaGetForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the meta get forbidden response +func (o *MetaGetForbidden) Code() int { + return 403 +} + +func (o *MetaGetForbidden) Error() string { + return fmt.Sprintf("[GET /meta][%d] metaGetForbidden %+v", 403, o.Payload) +} + +func (o *MetaGetForbidden) String() string { + return fmt.Sprintf("[GET /meta][%d] metaGetForbidden %+v", 403, o.Payload) +} + +func (o *MetaGetForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *MetaGetForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewMetaGetInternalServerError creates a MetaGetInternalServerError with default headers values +func NewMetaGetInternalServerError() *MetaGetInternalServerError { + return &MetaGetInternalServerError{} +} + +/* +MetaGetInternalServerError describes a response with status code 500, with default header 
values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. +*/ +type MetaGetInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this meta get internal server error response has a 2xx status code +func (o *MetaGetInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this meta get internal server error response has a 3xx status code +func (o *MetaGetInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this meta get internal server error response has a 4xx status code +func (o *MetaGetInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this meta get internal server error response has a 5xx status code +func (o *MetaGetInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this meta get internal server error response a status code equal to that given +func (o *MetaGetInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the meta get internal server error response +func (o *MetaGetInternalServerError) Code() int { + return 500 +} + +func (o *MetaGetInternalServerError) Error() string { + return fmt.Sprintf("[GET /meta][%d] metaGetInternalServerError %+v", 500, o.Payload) +} + +func (o *MetaGetInternalServerError) String() string { + return fmt.Sprintf("[GET /meta][%d] metaGetInternalServerError %+v", 500, o.Payload) +} + +func (o *MetaGetInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *MetaGetInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err 
+ } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/nodes/nodes_client.go b/platform/dbops/binaries/weaviate-src/client/nodes/nodes_client.go new file mode 100644 index 0000000000000000000000000000000000000000..bd99bbaef0281e5b7f788a3ed3793007acbebe16 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/nodes/nodes_client.go @@ -0,0 +1,136 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package nodes + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" +) + +// New creates a new nodes API client. +func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService { + return &Client{transport: transport, formats: formats} +} + +/* +Client for nodes API +*/ +type Client struct { + transport runtime.ClientTransport + formats strfmt.Registry +} + +// ClientOption is the option for Client methods +type ClientOption func(*runtime.ClientOperation) + +// ClientService is the interface for Client methods +type ClientService interface { + NodesGet(params *NodesGetParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*NodesGetOK, error) + + NodesGetClass(params *NodesGetClassParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*NodesGetClassOK, error) + + SetTransport(transport runtime.ClientTransport) +} + +/* +NodesGet nodes information for the database + +Returns node information for the entire database. 
+*/ +func (a *Client) NodesGet(params *NodesGetParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*NodesGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewNodesGetParams() + } + op := &runtime.ClientOperation{ + ID: "nodes.get", + Method: "GET", + PathPattern: "/nodes", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &NodesGetReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*NodesGetOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for nodes.get: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +NodesGetClass nodes information for a collection + +Returns node information for the nodes relevant to the collection. 
+*/ +func (a *Client) NodesGetClass(params *NodesGetClassParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*NodesGetClassOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewNodesGetClassParams() + } + op := &runtime.ClientOperation{ + ID: "nodes.get.class", + Method: "GET", + PathPattern: "/nodes/{className}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &NodesGetClassReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*NodesGetClassOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for nodes.get.class: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + panic(msg) +} + +// SetTransport changes the transport on the client +func (a *Client) SetTransport(transport runtime.ClientTransport) { + a.transport = transport +} diff --git a/platform/dbops/binaries/weaviate-src/client/nodes/nodes_get_class_parameters.go b/platform/dbops/binaries/weaviate-src/client/nodes/nodes_get_class_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..53deea6e4d3557c97e929056b3a7bb31767ea5fb --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/nodes/nodes_get_class_parameters.go @@ -0,0 +1,237 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package nodes + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewNodesGetClassParams creates a new NodesGetClassParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewNodesGetClassParams() *NodesGetClassParams { + return &NodesGetClassParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewNodesGetClassParamsWithTimeout creates a new NodesGetClassParams object +// with the ability to set a timeout on a request. 
+func NewNodesGetClassParamsWithTimeout(timeout time.Duration) *NodesGetClassParams { + return &NodesGetClassParams{ + timeout: timeout, + } +} + +// NewNodesGetClassParamsWithContext creates a new NodesGetClassParams object +// with the ability to set a context for a request. +func NewNodesGetClassParamsWithContext(ctx context.Context) *NodesGetClassParams { + return &NodesGetClassParams{ + Context: ctx, + } +} + +// NewNodesGetClassParamsWithHTTPClient creates a new NodesGetClassParams object +// with the ability to set a custom HTTPClient for a request. +func NewNodesGetClassParamsWithHTTPClient(client *http.Client) *NodesGetClassParams { + return &NodesGetClassParams{ + HTTPClient: client, + } +} + +/* +NodesGetClassParams contains all the parameters to send to the API endpoint + + for the nodes get class operation. + + Typically these are written to a http.Request. +*/ +type NodesGetClassParams struct { + + // ClassName. + ClassName string + + /* Output. + + Controls the verbosity of the output, possible values are: "minimal", "verbose". Defaults to "minimal". + + Default: "minimal" + */ + Output *string + + // ShardName. + ShardName *string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the nodes get class params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *NodesGetClassParams) WithDefaults() *NodesGetClassParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the nodes get class params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *NodesGetClassParams) SetDefaults() { + var ( + outputDefault = string("minimal") + ) + + val := NodesGetClassParams{ + Output: &outputDefault, + } + + val.timeout = o.timeout + val.Context = o.Context + val.HTTPClient = o.HTTPClient + *o = val +} + +// WithTimeout adds the timeout to the nodes get class params +func (o *NodesGetClassParams) WithTimeout(timeout time.Duration) *NodesGetClassParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the nodes get class params +func (o *NodesGetClassParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the nodes get class params +func (o *NodesGetClassParams) WithContext(ctx context.Context) *NodesGetClassParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the nodes get class params +func (o *NodesGetClassParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the nodes get class params +func (o *NodesGetClassParams) WithHTTPClient(client *http.Client) *NodesGetClassParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the nodes get class params +func (o *NodesGetClassParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithClassName adds the className to the nodes get class params +func (o *NodesGetClassParams) WithClassName(className string) *NodesGetClassParams { + o.SetClassName(className) + return o +} + +// SetClassName adds the className to the nodes get class params +func (o *NodesGetClassParams) SetClassName(className string) { + o.ClassName = className +} + +// WithOutput adds the output to the nodes get class params +func (o *NodesGetClassParams) WithOutput(output *string) *NodesGetClassParams { + o.SetOutput(output) + return o +} + +// SetOutput adds the output to the nodes get class params +func (o *NodesGetClassParams) SetOutput(output *string) { + o.Output = output +} + +// 
WithShardName adds the shardName to the nodes get class params +func (o *NodesGetClassParams) WithShardName(shardName *string) *NodesGetClassParams { + o.SetShardName(shardName) + return o +} + +// SetShardName adds the shardName to the nodes get class params +func (o *NodesGetClassParams) SetShardName(shardName *string) { + o.ShardName = shardName +} + +// WriteToRequest writes these params to a swagger request +func (o *NodesGetClassParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param className + if err := r.SetPathParam("className", o.ClassName); err != nil { + return err + } + + if o.Output != nil { + + // query param output + var qrOutput string + + if o.Output != nil { + qrOutput = *o.Output + } + qOutput := qrOutput + if qOutput != "" { + + if err := r.SetQueryParam("output", qOutput); err != nil { + return err + } + } + } + + if o.ShardName != nil { + + // query param shardName + var qrShardName string + + if o.ShardName != nil { + qrShardName = *o.ShardName + } + qShardName := qrShardName + if qShardName != "" { + + if err := r.SetQueryParam("shardName", qShardName); err != nil { + return err + } + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/nodes/nodes_get_class_responses.go b/platform/dbops/binaries/weaviate-src/client/nodes/nodes_get_class_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..d189e2863b30cdef7c341d1428f87910d1b13d52 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/nodes/nodes_get_class_responses.go @@ -0,0 +1,472 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package nodes + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// NodesGetClassReader is a Reader for the NodesGetClass structure. +type NodesGetClassReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *NodesGetClassReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewNodesGetClassOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 401: + result := NewNodesGetClassUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewNodesGetClassForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 404: + result := NewNodesGetClassNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewNodesGetClassUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewNodesGetClassInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// 
NewNodesGetClassOK creates a NodesGetClassOK with default headers values +func NewNodesGetClassOK() *NodesGetClassOK { + return &NodesGetClassOK{} +} + +/* +NodesGetClassOK describes a response with status code 200, with default header values. + +Nodes status successfully returned +*/ +type NodesGetClassOK struct { + Payload *models.NodesStatusResponse +} + +// IsSuccess returns true when this nodes get class o k response has a 2xx status code +func (o *NodesGetClassOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this nodes get class o k response has a 3xx status code +func (o *NodesGetClassOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this nodes get class o k response has a 4xx status code +func (o *NodesGetClassOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this nodes get class o k response has a 5xx status code +func (o *NodesGetClassOK) IsServerError() bool { + return false +} + +// IsCode returns true when this nodes get class o k response a status code equal to that given +func (o *NodesGetClassOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the nodes get class o k response +func (o *NodesGetClassOK) Code() int { + return 200 +} + +func (o *NodesGetClassOK) Error() string { + return fmt.Sprintf("[GET /nodes/{className}][%d] nodesGetClassOK %+v", 200, o.Payload) +} + +func (o *NodesGetClassOK) String() string { + return fmt.Sprintf("[GET /nodes/{className}][%d] nodesGetClassOK %+v", 200, o.Payload) +} + +func (o *NodesGetClassOK) GetPayload() *models.NodesStatusResponse { + return o.Payload +} + +func (o *NodesGetClassOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.NodesStatusResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// 
NewNodesGetClassUnauthorized creates a NodesGetClassUnauthorized with default headers values +func NewNodesGetClassUnauthorized() *NodesGetClassUnauthorized { + return &NodesGetClassUnauthorized{} +} + +/* +NodesGetClassUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. +*/ +type NodesGetClassUnauthorized struct { +} + +// IsSuccess returns true when this nodes get class unauthorized response has a 2xx status code +func (o *NodesGetClassUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this nodes get class unauthorized response has a 3xx status code +func (o *NodesGetClassUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this nodes get class unauthorized response has a 4xx status code +func (o *NodesGetClassUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this nodes get class unauthorized response has a 5xx status code +func (o *NodesGetClassUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this nodes get class unauthorized response a status code equal to that given +func (o *NodesGetClassUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the nodes get class unauthorized response +func (o *NodesGetClassUnauthorized) Code() int { + return 401 +} + +func (o *NodesGetClassUnauthorized) Error() string { + return fmt.Sprintf("[GET /nodes/{className}][%d] nodesGetClassUnauthorized ", 401) +} + +func (o *NodesGetClassUnauthorized) String() string { + return fmt.Sprintf("[GET /nodes/{className}][%d] nodesGetClassUnauthorized ", 401) +} + +func (o *NodesGetClassUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewNodesGetClassForbidden creates a NodesGetClassForbidden with default headers values +func 
NewNodesGetClassForbidden() *NodesGetClassForbidden { + return &NodesGetClassForbidden{} +} + +/* +NodesGetClassForbidden describes a response with status code 403, with default header values. + +Forbidden +*/ +type NodesGetClassForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this nodes get class forbidden response has a 2xx status code +func (o *NodesGetClassForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this nodes get class forbidden response has a 3xx status code +func (o *NodesGetClassForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this nodes get class forbidden response has a 4xx status code +func (o *NodesGetClassForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this nodes get class forbidden response has a 5xx status code +func (o *NodesGetClassForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this nodes get class forbidden response a status code equal to that given +func (o *NodesGetClassForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the nodes get class forbidden response +func (o *NodesGetClassForbidden) Code() int { + return 403 +} + +func (o *NodesGetClassForbidden) Error() string { + return fmt.Sprintf("[GET /nodes/{className}][%d] nodesGetClassForbidden %+v", 403, o.Payload) +} + +func (o *NodesGetClassForbidden) String() string { + return fmt.Sprintf("[GET /nodes/{className}][%d] nodesGetClassForbidden %+v", 403, o.Payload) +} + +func (o *NodesGetClassForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *NodesGetClassForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return 
err + } + + return nil +} + +// NewNodesGetClassNotFound creates a NodesGetClassNotFound with default headers values +func NewNodesGetClassNotFound() *NodesGetClassNotFound { + return &NodesGetClassNotFound{} +} + +/* +NodesGetClassNotFound describes a response with status code 404, with default header values. + +Not Found - Backup does not exist +*/ +type NodesGetClassNotFound struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this nodes get class not found response has a 2xx status code +func (o *NodesGetClassNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this nodes get class not found response has a 3xx status code +func (o *NodesGetClassNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this nodes get class not found response has a 4xx status code +func (o *NodesGetClassNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this nodes get class not found response has a 5xx status code +func (o *NodesGetClassNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this nodes get class not found response a status code equal to that given +func (o *NodesGetClassNotFound) IsCode(code int) bool { + return code == 404 +} + +// Code gets the status code for the nodes get class not found response +func (o *NodesGetClassNotFound) Code() int { + return 404 +} + +func (o *NodesGetClassNotFound) Error() string { + return fmt.Sprintf("[GET /nodes/{className}][%d] nodesGetClassNotFound %+v", 404, o.Payload) +} + +func (o *NodesGetClassNotFound) String() string { + return fmt.Sprintf("[GET /nodes/{className}][%d] nodesGetClassNotFound %+v", 404, o.Payload) +} + +func (o *NodesGetClassNotFound) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *NodesGetClassNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = 
new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewNodesGetClassUnprocessableEntity creates a NodesGetClassUnprocessableEntity with default headers values +func NewNodesGetClassUnprocessableEntity() *NodesGetClassUnprocessableEntity { + return &NodesGetClassUnprocessableEntity{} +} + +/* +NodesGetClassUnprocessableEntity describes a response with status code 422, with default header values. + +Invalid backup restoration status attempt. +*/ +type NodesGetClassUnprocessableEntity struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this nodes get class unprocessable entity response has a 2xx status code +func (o *NodesGetClassUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this nodes get class unprocessable entity response has a 3xx status code +func (o *NodesGetClassUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this nodes get class unprocessable entity response has a 4xx status code +func (o *NodesGetClassUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this nodes get class unprocessable entity response has a 5xx status code +func (o *NodesGetClassUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this nodes get class unprocessable entity response a status code equal to that given +func (o *NodesGetClassUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + +// Code gets the status code for the nodes get class unprocessable entity response +func (o *NodesGetClassUnprocessableEntity) Code() int { + return 422 +} + +func (o *NodesGetClassUnprocessableEntity) Error() string { + return fmt.Sprintf("[GET /nodes/{className}][%d] nodesGetClassUnprocessableEntity %+v", 422, o.Payload) +} + +func (o 
*NodesGetClassUnprocessableEntity) String() string { + return fmt.Sprintf("[GET /nodes/{className}][%d] nodesGetClassUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *NodesGetClassUnprocessableEntity) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *NodesGetClassUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewNodesGetClassInternalServerError creates a NodesGetClassInternalServerError with default headers values +func NewNodesGetClassInternalServerError() *NodesGetClassInternalServerError { + return &NodesGetClassInternalServerError{} +} + +/* +NodesGetClassInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+*/ +type NodesGetClassInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this nodes get class internal server error response has a 2xx status code +func (o *NodesGetClassInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this nodes get class internal server error response has a 3xx status code +func (o *NodesGetClassInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this nodes get class internal server error response has a 4xx status code +func (o *NodesGetClassInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this nodes get class internal server error response has a 5xx status code +func (o *NodesGetClassInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this nodes get class internal server error response a status code equal to that given +func (o *NodesGetClassInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the nodes get class internal server error response +func (o *NodesGetClassInternalServerError) Code() int { + return 500 +} + +func (o *NodesGetClassInternalServerError) Error() string { + return fmt.Sprintf("[GET /nodes/{className}][%d] nodesGetClassInternalServerError %+v", 500, o.Payload) +} + +func (o *NodesGetClassInternalServerError) String() string { + return fmt.Sprintf("[GET /nodes/{className}][%d] nodesGetClassInternalServerError %+v", 500, o.Payload) +} + +func (o *NodesGetClassInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *NodesGetClassInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + 
} + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/nodes/nodes_get_parameters.go b/platform/dbops/binaries/weaviate-src/client/nodes/nodes_get_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..e582938b0cc4f3756710053719fae02676739dd2 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/nodes/nodes_get_parameters.go @@ -0,0 +1,187 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package nodes + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewNodesGetParams creates a new NodesGetParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewNodesGetParams() *NodesGetParams { + return &NodesGetParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewNodesGetParamsWithTimeout creates a new NodesGetParams object +// with the ability to set a timeout on a request. +func NewNodesGetParamsWithTimeout(timeout time.Duration) *NodesGetParams { + return &NodesGetParams{ + timeout: timeout, + } +} + +// NewNodesGetParamsWithContext creates a new NodesGetParams object +// with the ability to set a context for a request. 
+func NewNodesGetParamsWithContext(ctx context.Context) *NodesGetParams { + return &NodesGetParams{ + Context: ctx, + } +} + +// NewNodesGetParamsWithHTTPClient creates a new NodesGetParams object +// with the ability to set a custom HTTPClient for a request. +func NewNodesGetParamsWithHTTPClient(client *http.Client) *NodesGetParams { + return &NodesGetParams{ + HTTPClient: client, + } +} + +/* +NodesGetParams contains all the parameters to send to the API endpoint + + for the nodes get operation. + + Typically these are written to a http.Request. +*/ +type NodesGetParams struct { + + /* Output. + + Controls the verbosity of the output, possible values are: "minimal", "verbose". Defaults to "minimal". + + Default: "minimal" + */ + Output *string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the nodes get params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *NodesGetParams) WithDefaults() *NodesGetParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the nodes get params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *NodesGetParams) SetDefaults() { + var ( + outputDefault = string("minimal") + ) + + val := NodesGetParams{ + Output: &outputDefault, + } + + val.timeout = o.timeout + val.Context = o.Context + val.HTTPClient = o.HTTPClient + *o = val +} + +// WithTimeout adds the timeout to the nodes get params +func (o *NodesGetParams) WithTimeout(timeout time.Duration) *NodesGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the nodes get params +func (o *NodesGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the nodes get params +func (o *NodesGetParams) WithContext(ctx context.Context) *NodesGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the nodes get params +func (o *NodesGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the nodes get params +func (o *NodesGetParams) WithHTTPClient(client *http.Client) *NodesGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the nodes get params +func (o *NodesGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithOutput adds the output to the nodes get params +func (o *NodesGetParams) WithOutput(output *string) *NodesGetParams { + o.SetOutput(output) + return o +} + +// SetOutput adds the output to the nodes get params +func (o *NodesGetParams) SetOutput(output *string) { + o.Output = output +} + +// WriteToRequest writes these params to a swagger request +func (o *NodesGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Output != nil { + + // query param output + var qrOutput string + + if o.Output != nil { + qrOutput = *o.Output + } + qOutput := qrOutput + if qOutput != "" { + + if err := r.SetQueryParam("output", qOutput); err != nil { + return 
err + } + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/nodes/nodes_get_responses.go b/platform/dbops/binaries/weaviate-src/client/nodes/nodes_get_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..4da1e23ccf2c3e2da97580081f47eaac915e40f2 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/nodes/nodes_get_responses.go @@ -0,0 +1,472 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package nodes + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// NodesGetReader is a Reader for the NodesGet structure. +type NodesGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *NodesGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewNodesGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 401: + result := NewNodesGetUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewNodesGetForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 404: + result := NewNodesGetNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewNodesGetUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewNodesGetInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewNodesGetOK creates a NodesGetOK with default headers values +func NewNodesGetOK() *NodesGetOK { + return &NodesGetOK{} +} + +/* +NodesGetOK describes a response with status code 200, with default header values. 
+ +Nodes status successfully returned +*/ +type NodesGetOK struct { + Payload *models.NodesStatusResponse +} + +// IsSuccess returns true when this nodes get o k response has a 2xx status code +func (o *NodesGetOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this nodes get o k response has a 3xx status code +func (o *NodesGetOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this nodes get o k response has a 4xx status code +func (o *NodesGetOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this nodes get o k response has a 5xx status code +func (o *NodesGetOK) IsServerError() bool { + return false +} + +// IsCode returns true when this nodes get o k response a status code equal to that given +func (o *NodesGetOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the nodes get o k response +func (o *NodesGetOK) Code() int { + return 200 +} + +func (o *NodesGetOK) Error() string { + return fmt.Sprintf("[GET /nodes][%d] nodesGetOK %+v", 200, o.Payload) +} + +func (o *NodesGetOK) String() string { + return fmt.Sprintf("[GET /nodes][%d] nodesGetOK %+v", 200, o.Payload) +} + +func (o *NodesGetOK) GetPayload() *models.NodesStatusResponse { + return o.Payload +} + +func (o *NodesGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.NodesStatusResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewNodesGetUnauthorized creates a NodesGetUnauthorized with default headers values +func NewNodesGetUnauthorized() *NodesGetUnauthorized { + return &NodesGetUnauthorized{} +} + +/* +NodesGetUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. 
+*/ +type NodesGetUnauthorized struct { +} + +// IsSuccess returns true when this nodes get unauthorized response has a 2xx status code +func (o *NodesGetUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this nodes get unauthorized response has a 3xx status code +func (o *NodesGetUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this nodes get unauthorized response has a 4xx status code +func (o *NodesGetUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this nodes get unauthorized response has a 5xx status code +func (o *NodesGetUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this nodes get unauthorized response a status code equal to that given +func (o *NodesGetUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the nodes get unauthorized response +func (o *NodesGetUnauthorized) Code() int { + return 401 +} + +func (o *NodesGetUnauthorized) Error() string { + return fmt.Sprintf("[GET /nodes][%d] nodesGetUnauthorized ", 401) +} + +func (o *NodesGetUnauthorized) String() string { + return fmt.Sprintf("[GET /nodes][%d] nodesGetUnauthorized ", 401) +} + +func (o *NodesGetUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewNodesGetForbidden creates a NodesGetForbidden with default headers values +func NewNodesGetForbidden() *NodesGetForbidden { + return &NodesGetForbidden{} +} + +/* +NodesGetForbidden describes a response with status code 403, with default header values. 
+ +Forbidden +*/ +type NodesGetForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this nodes get forbidden response has a 2xx status code +func (o *NodesGetForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this nodes get forbidden response has a 3xx status code +func (o *NodesGetForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this nodes get forbidden response has a 4xx status code +func (o *NodesGetForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this nodes get forbidden response has a 5xx status code +func (o *NodesGetForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this nodes get forbidden response a status code equal to that given +func (o *NodesGetForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the nodes get forbidden response +func (o *NodesGetForbidden) Code() int { + return 403 +} + +func (o *NodesGetForbidden) Error() string { + return fmt.Sprintf("[GET /nodes][%d] nodesGetForbidden %+v", 403, o.Payload) +} + +func (o *NodesGetForbidden) String() string { + return fmt.Sprintf("[GET /nodes][%d] nodesGetForbidden %+v", 403, o.Payload) +} + +func (o *NodesGetForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *NodesGetForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewNodesGetNotFound creates a NodesGetNotFound with default headers values +func NewNodesGetNotFound() *NodesGetNotFound { + return &NodesGetNotFound{} +} + +/* +NodesGetNotFound describes a response with status code 404, with default header values. 
+ +Not Found - Backup does not exist +*/ +type NodesGetNotFound struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this nodes get not found response has a 2xx status code +func (o *NodesGetNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this nodes get not found response has a 3xx status code +func (o *NodesGetNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this nodes get not found response has a 4xx status code +func (o *NodesGetNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this nodes get not found response has a 5xx status code +func (o *NodesGetNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this nodes get not found response a status code equal to that given +func (o *NodesGetNotFound) IsCode(code int) bool { + return code == 404 +} + +// Code gets the status code for the nodes get not found response +func (o *NodesGetNotFound) Code() int { + return 404 +} + +func (o *NodesGetNotFound) Error() string { + return fmt.Sprintf("[GET /nodes][%d] nodesGetNotFound %+v", 404, o.Payload) +} + +func (o *NodesGetNotFound) String() string { + return fmt.Sprintf("[GET /nodes][%d] nodesGetNotFound %+v", 404, o.Payload) +} + +func (o *NodesGetNotFound) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *NodesGetNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewNodesGetUnprocessableEntity creates a NodesGetUnprocessableEntity with default headers values +func NewNodesGetUnprocessableEntity() *NodesGetUnprocessableEntity { + return &NodesGetUnprocessableEntity{} +} + +/* +NodesGetUnprocessableEntity describes a response with 
status code 422, with default header values. + +Invalid backup restoration status attempt. +*/ +type NodesGetUnprocessableEntity struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this nodes get unprocessable entity response has a 2xx status code +func (o *NodesGetUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this nodes get unprocessable entity response has a 3xx status code +func (o *NodesGetUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this nodes get unprocessable entity response has a 4xx status code +func (o *NodesGetUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this nodes get unprocessable entity response has a 5xx status code +func (o *NodesGetUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this nodes get unprocessable entity response a status code equal to that given +func (o *NodesGetUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + +// Code gets the status code for the nodes get unprocessable entity response +func (o *NodesGetUnprocessableEntity) Code() int { + return 422 +} + +func (o *NodesGetUnprocessableEntity) Error() string { + return fmt.Sprintf("[GET /nodes][%d] nodesGetUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *NodesGetUnprocessableEntity) String() string { + return fmt.Sprintf("[GET /nodes][%d] nodesGetUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *NodesGetUnprocessableEntity) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *NodesGetUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// 
NewNodesGetInternalServerError creates a NodesGetInternalServerError with default headers values +func NewNodesGetInternalServerError() *NodesGetInternalServerError { + return &NodesGetInternalServerError{} +} + +/* +NodesGetInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. +*/ +type NodesGetInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this nodes get internal server error response has a 2xx status code +func (o *NodesGetInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this nodes get internal server error response has a 3xx status code +func (o *NodesGetInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this nodes get internal server error response has a 4xx status code +func (o *NodesGetInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this nodes get internal server error response has a 5xx status code +func (o *NodesGetInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this nodes get internal server error response a status code equal to that given +func (o *NodesGetInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the nodes get internal server error response +func (o *NodesGetInternalServerError) Code() int { + return 500 +} + +func (o *NodesGetInternalServerError) Error() string { + return fmt.Sprintf("[GET /nodes][%d] nodesGetInternalServerError %+v", 500, o.Payload) +} + +func (o *NodesGetInternalServerError) String() string { + return fmt.Sprintf("[GET /nodes][%d] nodesGetInternalServerError %+v", 500, o.Payload) +} + +func (o *NodesGetInternalServerError) GetPayload() *models.ErrorResponse { + 
return o.Payload +} + +func (o *NodesGetInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/objects/objects_class_delete_parameters.go b/platform/dbops/binaries/weaviate-src/client/objects/objects_class_delete_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..fd5b8dc476e414afc4ebd5c2fb366737c3a8c274 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/objects/objects_class_delete_parameters.go @@ -0,0 +1,251 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewObjectsClassDeleteParams creates a new ObjectsClassDeleteParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. 
+func NewObjectsClassDeleteParams() *ObjectsClassDeleteParams { + return &ObjectsClassDeleteParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewObjectsClassDeleteParamsWithTimeout creates a new ObjectsClassDeleteParams object +// with the ability to set a timeout on a request. +func NewObjectsClassDeleteParamsWithTimeout(timeout time.Duration) *ObjectsClassDeleteParams { + return &ObjectsClassDeleteParams{ + timeout: timeout, + } +} + +// NewObjectsClassDeleteParamsWithContext creates a new ObjectsClassDeleteParams object +// with the ability to set a context for a request. +func NewObjectsClassDeleteParamsWithContext(ctx context.Context) *ObjectsClassDeleteParams { + return &ObjectsClassDeleteParams{ + Context: ctx, + } +} + +// NewObjectsClassDeleteParamsWithHTTPClient creates a new ObjectsClassDeleteParams object +// with the ability to set a custom HTTPClient for a request. +func NewObjectsClassDeleteParamsWithHTTPClient(client *http.Client) *ObjectsClassDeleteParams { + return &ObjectsClassDeleteParams{ + HTTPClient: client, + } +} + +/* +ObjectsClassDeleteParams contains all the parameters to send to the API endpoint + + for the objects class delete operation. + + Typically these are written to a http.Request. +*/ +type ObjectsClassDeleteParams struct { + + // ClassName. + ClassName string + + /* ConsistencyLevel. + + Determines how many replicas must acknowledge a request before it is considered successful + */ + ConsistencyLevel *string + + /* ID. + + Unique ID of the Object. + + Format: uuid + */ + ID strfmt.UUID + + /* Tenant. + + Specifies the tenant in a request targeting a multi-tenant class + */ + Tenant *string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the objects class delete params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *ObjectsClassDeleteParams) WithDefaults() *ObjectsClassDeleteParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the objects class delete params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *ObjectsClassDeleteParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the objects class delete params +func (o *ObjectsClassDeleteParams) WithTimeout(timeout time.Duration) *ObjectsClassDeleteParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the objects class delete params +func (o *ObjectsClassDeleteParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the objects class delete params +func (o *ObjectsClassDeleteParams) WithContext(ctx context.Context) *ObjectsClassDeleteParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the objects class delete params +func (o *ObjectsClassDeleteParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the objects class delete params +func (o *ObjectsClassDeleteParams) WithHTTPClient(client *http.Client) *ObjectsClassDeleteParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the objects class delete params +func (o *ObjectsClassDeleteParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithClassName adds the className to the objects class delete params +func (o *ObjectsClassDeleteParams) WithClassName(className string) *ObjectsClassDeleteParams { + o.SetClassName(className) + return o +} + +// SetClassName adds the className to the objects class delete params +func (o *ObjectsClassDeleteParams) SetClassName(className string) { + o.ClassName = className +} + +// WithConsistencyLevel adds the consistencyLevel to the objects class delete params +func (o 
*ObjectsClassDeleteParams) WithConsistencyLevel(consistencyLevel *string) *ObjectsClassDeleteParams { + o.SetConsistencyLevel(consistencyLevel) + return o +} + +// SetConsistencyLevel adds the consistencyLevel to the objects class delete params +func (o *ObjectsClassDeleteParams) SetConsistencyLevel(consistencyLevel *string) { + o.ConsistencyLevel = consistencyLevel +} + +// WithID adds the id to the objects class delete params +func (o *ObjectsClassDeleteParams) WithID(id strfmt.UUID) *ObjectsClassDeleteParams { + o.SetID(id) + return o +} + +// SetID adds the id to the objects class delete params +func (o *ObjectsClassDeleteParams) SetID(id strfmt.UUID) { + o.ID = id +} + +// WithTenant adds the tenant to the objects class delete params +func (o *ObjectsClassDeleteParams) WithTenant(tenant *string) *ObjectsClassDeleteParams { + o.SetTenant(tenant) + return o +} + +// SetTenant adds the tenant to the objects class delete params +func (o *ObjectsClassDeleteParams) SetTenant(tenant *string) { + o.Tenant = tenant +} + +// WriteToRequest writes these params to a swagger request +func (o *ObjectsClassDeleteParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param className + if err := r.SetPathParam("className", o.ClassName); err != nil { + return err + } + + if o.ConsistencyLevel != nil { + + // query param consistency_level + var qrConsistencyLevel string + + if o.ConsistencyLevel != nil { + qrConsistencyLevel = *o.ConsistencyLevel + } + qConsistencyLevel := qrConsistencyLevel + if qConsistencyLevel != "" { + + if err := r.SetQueryParam("consistency_level", qConsistencyLevel); err != nil { + return err + } + } + } + + // path param id + if err := r.SetPathParam("id", o.ID.String()); err != nil { + return err + } + + if o.Tenant != nil { + + // query param tenant + var qrTenant string + + if o.Tenant != nil { + qrTenant = *o.Tenant + } + qTenant 
:= qrTenant + if qTenant != "" { + + if err := r.SetQueryParam("tenant", qTenant); err != nil { + return err + } + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/objects/objects_class_delete_responses.go b/platform/dbops/binaries/weaviate-src/client/objects/objects_class_delete_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..a73da716efb7a91c5fc30a625ba5250c9b3dc468 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/objects/objects_class_delete_responses.go @@ -0,0 +1,522 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// ObjectsClassDeleteReader is a Reader for the ObjectsClassDelete structure. +type ObjectsClassDeleteReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *ObjectsClassDeleteReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 204: + result := NewObjectsClassDeleteNoContent() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewObjectsClassDeleteBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 401: + result := NewObjectsClassDeleteUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewObjectsClassDeleteForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 404: + result := NewObjectsClassDeleteNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewObjectsClassDeleteUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewObjectsClassDeleteInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewObjectsClassDeleteNoContent creates a ObjectsClassDeleteNoContent with default headers values +func NewObjectsClassDeleteNoContent() *ObjectsClassDeleteNoContent { + return &ObjectsClassDeleteNoContent{} +} + +/* +ObjectsClassDeleteNoContent describes a response with status code 204, with default header values. + +Successfully deleted. 
+*/ +type ObjectsClassDeleteNoContent struct { +} + +// IsSuccess returns true when this objects class delete no content response has a 2xx status code +func (o *ObjectsClassDeleteNoContent) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this objects class delete no content response has a 3xx status code +func (o *ObjectsClassDeleteNoContent) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects class delete no content response has a 4xx status code +func (o *ObjectsClassDeleteNoContent) IsClientError() bool { + return false +} + +// IsServerError returns true when this objects class delete no content response has a 5xx status code +func (o *ObjectsClassDeleteNoContent) IsServerError() bool { + return false +} + +// IsCode returns true when this objects class delete no content response a status code equal to that given +func (o *ObjectsClassDeleteNoContent) IsCode(code int) bool { + return code == 204 +} + +// Code gets the status code for the objects class delete no content response +func (o *ObjectsClassDeleteNoContent) Code() int { + return 204 +} + +func (o *ObjectsClassDeleteNoContent) Error() string { + return fmt.Sprintf("[DELETE /objects/{className}/{id}][%d] objectsClassDeleteNoContent ", 204) +} + +func (o *ObjectsClassDeleteNoContent) String() string { + return fmt.Sprintf("[DELETE /objects/{className}/{id}][%d] objectsClassDeleteNoContent ", 204) +} + +func (o *ObjectsClassDeleteNoContent) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewObjectsClassDeleteBadRequest creates a ObjectsClassDeleteBadRequest with default headers values +func NewObjectsClassDeleteBadRequest() *ObjectsClassDeleteBadRequest { + return &ObjectsClassDeleteBadRequest{} +} + +/* +ObjectsClassDeleteBadRequest describes a response with status code 400, with default header values. + +Malformed request. 
+*/ +type ObjectsClassDeleteBadRequest struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this objects class delete bad request response has a 2xx status code +func (o *ObjectsClassDeleteBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects class delete bad request response has a 3xx status code +func (o *ObjectsClassDeleteBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects class delete bad request response has a 4xx status code +func (o *ObjectsClassDeleteBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects class delete bad request response has a 5xx status code +func (o *ObjectsClassDeleteBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this objects class delete bad request response a status code equal to that given +func (o *ObjectsClassDeleteBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the objects class delete bad request response +func (o *ObjectsClassDeleteBadRequest) Code() int { + return 400 +} + +func (o *ObjectsClassDeleteBadRequest) Error() string { + return fmt.Sprintf("[DELETE /objects/{className}/{id}][%d] objectsClassDeleteBadRequest %+v", 400, o.Payload) +} + +func (o *ObjectsClassDeleteBadRequest) String() string { + return fmt.Sprintf("[DELETE /objects/{className}/{id}][%d] objectsClassDeleteBadRequest %+v", 400, o.Payload) +} + +func (o *ObjectsClassDeleteBadRequest) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ObjectsClassDeleteBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewObjectsClassDeleteUnauthorized 
creates a ObjectsClassDeleteUnauthorized with default headers values +func NewObjectsClassDeleteUnauthorized() *ObjectsClassDeleteUnauthorized { + return &ObjectsClassDeleteUnauthorized{} +} + +/* +ObjectsClassDeleteUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. +*/ +type ObjectsClassDeleteUnauthorized struct { +} + +// IsSuccess returns true when this objects class delete unauthorized response has a 2xx status code +func (o *ObjectsClassDeleteUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects class delete unauthorized response has a 3xx status code +func (o *ObjectsClassDeleteUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects class delete unauthorized response has a 4xx status code +func (o *ObjectsClassDeleteUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects class delete unauthorized response has a 5xx status code +func (o *ObjectsClassDeleteUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this objects class delete unauthorized response a status code equal to that given +func (o *ObjectsClassDeleteUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the objects class delete unauthorized response +func (o *ObjectsClassDeleteUnauthorized) Code() int { + return 401 +} + +func (o *ObjectsClassDeleteUnauthorized) Error() string { + return fmt.Sprintf("[DELETE /objects/{className}/{id}][%d] objectsClassDeleteUnauthorized ", 401) +} + +func (o *ObjectsClassDeleteUnauthorized) String() string { + return fmt.Sprintf("[DELETE /objects/{className}/{id}][%d] objectsClassDeleteUnauthorized ", 401) +} + +func (o *ObjectsClassDeleteUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + 
+// NewObjectsClassDeleteForbidden creates a ObjectsClassDeleteForbidden with default headers values +func NewObjectsClassDeleteForbidden() *ObjectsClassDeleteForbidden { + return &ObjectsClassDeleteForbidden{} +} + +/* +ObjectsClassDeleteForbidden describes a response with status code 403, with default header values. + +Forbidden +*/ +type ObjectsClassDeleteForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this objects class delete forbidden response has a 2xx status code +func (o *ObjectsClassDeleteForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects class delete forbidden response has a 3xx status code +func (o *ObjectsClassDeleteForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects class delete forbidden response has a 4xx status code +func (o *ObjectsClassDeleteForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects class delete forbidden response has a 5xx status code +func (o *ObjectsClassDeleteForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this objects class delete forbidden response a status code equal to that given +func (o *ObjectsClassDeleteForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the objects class delete forbidden response +func (o *ObjectsClassDeleteForbidden) Code() int { + return 403 +} + +func (o *ObjectsClassDeleteForbidden) Error() string { + return fmt.Sprintf("[DELETE /objects/{className}/{id}][%d] objectsClassDeleteForbidden %+v", 403, o.Payload) +} + +func (o *ObjectsClassDeleteForbidden) String() string { + return fmt.Sprintf("[DELETE /objects/{className}/{id}][%d] objectsClassDeleteForbidden %+v", 403, o.Payload) +} + +func (o *ObjectsClassDeleteForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ObjectsClassDeleteForbidden) readResponse(response 
runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewObjectsClassDeleteNotFound creates a ObjectsClassDeleteNotFound with default headers values +func NewObjectsClassDeleteNotFound() *ObjectsClassDeleteNotFound { + return &ObjectsClassDeleteNotFound{} +} + +/* +ObjectsClassDeleteNotFound describes a response with status code 404, with default header values. + +Successful query result but no resource was found. +*/ +type ObjectsClassDeleteNotFound struct { +} + +// IsSuccess returns true when this objects class delete not found response has a 2xx status code +func (o *ObjectsClassDeleteNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects class delete not found response has a 3xx status code +func (o *ObjectsClassDeleteNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects class delete not found response has a 4xx status code +func (o *ObjectsClassDeleteNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects class delete not found response has a 5xx status code +func (o *ObjectsClassDeleteNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this objects class delete not found response a status code equal to that given +func (o *ObjectsClassDeleteNotFound) IsCode(code int) bool { + return code == 404 +} + +// Code gets the status code for the objects class delete not found response +func (o *ObjectsClassDeleteNotFound) Code() int { + return 404 +} + +func (o *ObjectsClassDeleteNotFound) Error() string { + return fmt.Sprintf("[DELETE /objects/{className}/{id}][%d] objectsClassDeleteNotFound ", 404) +} + +func (o *ObjectsClassDeleteNotFound) String() string { + return fmt.Sprintf("[DELETE 
/objects/{className}/{id}][%d] objectsClassDeleteNotFound ", 404) +} + +func (o *ObjectsClassDeleteNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewObjectsClassDeleteUnprocessableEntity creates a ObjectsClassDeleteUnprocessableEntity with default headers values +func NewObjectsClassDeleteUnprocessableEntity() *ObjectsClassDeleteUnprocessableEntity { + return &ObjectsClassDeleteUnprocessableEntity{} +} + +/* +ObjectsClassDeleteUnprocessableEntity describes a response with status code 422, with default header values. + +Request is well-formed (i.e., syntactically correct), but erroneous. +*/ +type ObjectsClassDeleteUnprocessableEntity struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this objects class delete unprocessable entity response has a 2xx status code +func (o *ObjectsClassDeleteUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects class delete unprocessable entity response has a 3xx status code +func (o *ObjectsClassDeleteUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects class delete unprocessable entity response has a 4xx status code +func (o *ObjectsClassDeleteUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects class delete unprocessable entity response has a 5xx status code +func (o *ObjectsClassDeleteUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this objects class delete unprocessable entity response a status code equal to that given +func (o *ObjectsClassDeleteUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + +// Code gets the status code for the objects class delete unprocessable entity response +func (o *ObjectsClassDeleteUnprocessableEntity) Code() int { + return 422 +} + +func (o 
*ObjectsClassDeleteUnprocessableEntity) Error() string { + return fmt.Sprintf("[DELETE /objects/{className}/{id}][%d] objectsClassDeleteUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *ObjectsClassDeleteUnprocessableEntity) String() string { + return fmt.Sprintf("[DELETE /objects/{className}/{id}][%d] objectsClassDeleteUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *ObjectsClassDeleteUnprocessableEntity) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ObjectsClassDeleteUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewObjectsClassDeleteInternalServerError creates a ObjectsClassDeleteInternalServerError with default headers values +func NewObjectsClassDeleteInternalServerError() *ObjectsClassDeleteInternalServerError { + return &ObjectsClassDeleteInternalServerError{} +} + +/* +ObjectsClassDeleteInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+*/ +type ObjectsClassDeleteInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this objects class delete internal server error response has a 2xx status code +func (o *ObjectsClassDeleteInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects class delete internal server error response has a 3xx status code +func (o *ObjectsClassDeleteInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects class delete internal server error response has a 4xx status code +func (o *ObjectsClassDeleteInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this objects class delete internal server error response has a 5xx status code +func (o *ObjectsClassDeleteInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this objects class delete internal server error response a status code equal to that given +func (o *ObjectsClassDeleteInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the objects class delete internal server error response +func (o *ObjectsClassDeleteInternalServerError) Code() int { + return 500 +} + +func (o *ObjectsClassDeleteInternalServerError) Error() string { + return fmt.Sprintf("[DELETE /objects/{className}/{id}][%d] objectsClassDeleteInternalServerError %+v", 500, o.Payload) +} + +func (o *ObjectsClassDeleteInternalServerError) String() string { + return fmt.Sprintf("[DELETE /objects/{className}/{id}][%d] objectsClassDeleteInternalServerError %+v", 500, o.Payload) +} + +func (o *ObjectsClassDeleteInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ObjectsClassDeleteInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // 
response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/objects/objects_class_get_parameters.go b/platform/dbops/binaries/weaviate-src/client/objects/objects_class_get_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..ce4fd2f031d5904c29dcafe6af4943db553ef18f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/objects/objects_class_get_parameters.go @@ -0,0 +1,319 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewObjectsClassGetParams creates a new ObjectsClassGetParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewObjectsClassGetParams() *ObjectsClassGetParams { + return &ObjectsClassGetParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewObjectsClassGetParamsWithTimeout creates a new ObjectsClassGetParams object +// with the ability to set a timeout on a request. 
+func NewObjectsClassGetParamsWithTimeout(timeout time.Duration) *ObjectsClassGetParams { + return &ObjectsClassGetParams{ + timeout: timeout, + } +} + +// NewObjectsClassGetParamsWithContext creates a new ObjectsClassGetParams object +// with the ability to set a context for a request. +func NewObjectsClassGetParamsWithContext(ctx context.Context) *ObjectsClassGetParams { + return &ObjectsClassGetParams{ + Context: ctx, + } +} + +// NewObjectsClassGetParamsWithHTTPClient creates a new ObjectsClassGetParams object +// with the ability to set a custom HTTPClient for a request. +func NewObjectsClassGetParamsWithHTTPClient(client *http.Client) *ObjectsClassGetParams { + return &ObjectsClassGetParams{ + HTTPClient: client, + } +} + +/* +ObjectsClassGetParams contains all the parameters to send to the API endpoint + + for the objects class get operation. + + Typically these are written to a http.Request. +*/ +type ObjectsClassGetParams struct { + + // ClassName. + ClassName string + + /* ConsistencyLevel. + + Determines how many replicas must acknowledge a request before it is considered successful + */ + ConsistencyLevel *string + + /* ID. + + Unique ID of the Object. + + Format: uuid + */ + ID strfmt.UUID + + /* Include. + + Include additional information, such as classification infos. Allowed values include: classification, vector, interpretation + */ + Include *string + + /* NodeName. + + The target node which should fulfill the request + */ + NodeName *string + + /* Tenant. + + Specifies the tenant in a request targeting a multi-tenant class + */ + Tenant *string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the objects class get params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *ObjectsClassGetParams) WithDefaults() *ObjectsClassGetParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the objects class get params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *ObjectsClassGetParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the objects class get params +func (o *ObjectsClassGetParams) WithTimeout(timeout time.Duration) *ObjectsClassGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the objects class get params +func (o *ObjectsClassGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the objects class get params +func (o *ObjectsClassGetParams) WithContext(ctx context.Context) *ObjectsClassGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the objects class get params +func (o *ObjectsClassGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the objects class get params +func (o *ObjectsClassGetParams) WithHTTPClient(client *http.Client) *ObjectsClassGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the objects class get params +func (o *ObjectsClassGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithClassName adds the className to the objects class get params +func (o *ObjectsClassGetParams) WithClassName(className string) *ObjectsClassGetParams { + o.SetClassName(className) + return o +} + +// SetClassName adds the className to the objects class get params +func (o *ObjectsClassGetParams) SetClassName(className string) { + o.ClassName = className +} + +// WithConsistencyLevel adds the consistencyLevel to the objects class get params +func (o *ObjectsClassGetParams) WithConsistencyLevel(consistencyLevel *string) 
*ObjectsClassGetParams { + o.SetConsistencyLevel(consistencyLevel) + return o +} + +// SetConsistencyLevel adds the consistencyLevel to the objects class get params +func (o *ObjectsClassGetParams) SetConsistencyLevel(consistencyLevel *string) { + o.ConsistencyLevel = consistencyLevel +} + +// WithID adds the id to the objects class get params +func (o *ObjectsClassGetParams) WithID(id strfmt.UUID) *ObjectsClassGetParams { + o.SetID(id) + return o +} + +// SetID adds the id to the objects class get params +func (o *ObjectsClassGetParams) SetID(id strfmt.UUID) { + o.ID = id +} + +// WithInclude adds the include to the objects class get params +func (o *ObjectsClassGetParams) WithInclude(include *string) *ObjectsClassGetParams { + o.SetInclude(include) + return o +} + +// SetInclude adds the include to the objects class get params +func (o *ObjectsClassGetParams) SetInclude(include *string) { + o.Include = include +} + +// WithNodeName adds the nodeName to the objects class get params +func (o *ObjectsClassGetParams) WithNodeName(nodeName *string) *ObjectsClassGetParams { + o.SetNodeName(nodeName) + return o +} + +// SetNodeName adds the nodeName to the objects class get params +func (o *ObjectsClassGetParams) SetNodeName(nodeName *string) { + o.NodeName = nodeName +} + +// WithTenant adds the tenant to the objects class get params +func (o *ObjectsClassGetParams) WithTenant(tenant *string) *ObjectsClassGetParams { + o.SetTenant(tenant) + return o +} + +// SetTenant adds the tenant to the objects class get params +func (o *ObjectsClassGetParams) SetTenant(tenant *string) { + o.Tenant = tenant +} + +// WriteToRequest writes these params to a swagger request +func (o *ObjectsClassGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param className + if err := r.SetPathParam("className", o.ClassName); err != nil { + return err + } + + if 
o.ConsistencyLevel != nil { + + // query param consistency_level + var qrConsistencyLevel string + + if o.ConsistencyLevel != nil { + qrConsistencyLevel = *o.ConsistencyLevel + } + qConsistencyLevel := qrConsistencyLevel + if qConsistencyLevel != "" { + + if err := r.SetQueryParam("consistency_level", qConsistencyLevel); err != nil { + return err + } + } + } + + // path param id + if err := r.SetPathParam("id", o.ID.String()); err != nil { + return err + } + + if o.Include != nil { + + // query param include + var qrInclude string + + if o.Include != nil { + qrInclude = *o.Include + } + qInclude := qrInclude + if qInclude != "" { + + if err := r.SetQueryParam("include", qInclude); err != nil { + return err + } + } + } + + if o.NodeName != nil { + + // query param node_name + var qrNodeName string + + if o.NodeName != nil { + qrNodeName = *o.NodeName + } + qNodeName := qrNodeName + if qNodeName != "" { + + if err := r.SetQueryParam("node_name", qNodeName); err != nil { + return err + } + } + } + + if o.Tenant != nil { + + // query param tenant + var qrTenant string + + if o.Tenant != nil { + qrTenant = *o.Tenant + } + qTenant := qrTenant + if qTenant != "" { + + if err := r.SetQueryParam("tenant", qTenant); err != nil { + return err + } + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/objects/objects_class_get_responses.go b/platform/dbops/binaries/weaviate-src/client/objects/objects_class_get_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..81048f154e33783b79d46d10a7ab4dff2a0370ba --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/objects/objects_class_get_responses.go @@ -0,0 +1,534 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. 
All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// ObjectsClassGetReader is a Reader for the ObjectsClassGet structure. +type ObjectsClassGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ObjectsClassGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewObjectsClassGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewObjectsClassGetBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 401: + result := NewObjectsClassGetUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewObjectsClassGetForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 404: + result := NewObjectsClassGetNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewObjectsClassGetUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewObjectsClassGetInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return 
nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewObjectsClassGetOK creates a ObjectsClassGetOK with default headers values +func NewObjectsClassGetOK() *ObjectsClassGetOK { + return &ObjectsClassGetOK{} +} + +/* +ObjectsClassGetOK describes a response with status code 200, with default header values. + +Successful response. +*/ +type ObjectsClassGetOK struct { + Payload *models.Object +} + +// IsSuccess returns true when this objects class get o k response has a 2xx status code +func (o *ObjectsClassGetOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this objects class get o k response has a 3xx status code +func (o *ObjectsClassGetOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects class get o k response has a 4xx status code +func (o *ObjectsClassGetOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this objects class get o k response has a 5xx status code +func (o *ObjectsClassGetOK) IsServerError() bool { + return false +} + +// IsCode returns true when this objects class get o k response a status code equal to that given +func (o *ObjectsClassGetOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the objects class get o k response +func (o *ObjectsClassGetOK) Code() int { + return 200 +} + +func (o *ObjectsClassGetOK) Error() string { + return fmt.Sprintf("[GET /objects/{className}/{id}][%d] objectsClassGetOK %+v", 200, o.Payload) +} + +func (o *ObjectsClassGetOK) String() string { + return fmt.Sprintf("[GET /objects/{className}/{id}][%d] objectsClassGetOK %+v", 200, o.Payload) +} + +func (o *ObjectsClassGetOK) GetPayload() *models.Object { + return o.Payload +} + +func (o *ObjectsClassGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats 
strfmt.Registry) error { + + o.Payload = new(models.Object) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewObjectsClassGetBadRequest creates a ObjectsClassGetBadRequest with default headers values +func NewObjectsClassGetBadRequest() *ObjectsClassGetBadRequest { + return &ObjectsClassGetBadRequest{} +} + +/* +ObjectsClassGetBadRequest describes a response with status code 400, with default header values. + +Malformed request. +*/ +type ObjectsClassGetBadRequest struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this objects class get bad request response has a 2xx status code +func (o *ObjectsClassGetBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects class get bad request response has a 3xx status code +func (o *ObjectsClassGetBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects class get bad request response has a 4xx status code +func (o *ObjectsClassGetBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects class get bad request response has a 5xx status code +func (o *ObjectsClassGetBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this objects class get bad request response a status code equal to that given +func (o *ObjectsClassGetBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the objects class get bad request response +func (o *ObjectsClassGetBadRequest) Code() int { + return 400 +} + +func (o *ObjectsClassGetBadRequest) Error() string { + return fmt.Sprintf("[GET /objects/{className}/{id}][%d] objectsClassGetBadRequest %+v", 400, o.Payload) +} + +func (o *ObjectsClassGetBadRequest) String() string { + return fmt.Sprintf("[GET /objects/{className}/{id}][%d] objectsClassGetBadRequest %+v", 400, o.Payload) 
+} + +func (o *ObjectsClassGetBadRequest) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ObjectsClassGetBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewObjectsClassGetUnauthorized creates a ObjectsClassGetUnauthorized with default headers values +func NewObjectsClassGetUnauthorized() *ObjectsClassGetUnauthorized { + return &ObjectsClassGetUnauthorized{} +} + +/* +ObjectsClassGetUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. +*/ +type ObjectsClassGetUnauthorized struct { +} + +// IsSuccess returns true when this objects class get unauthorized response has a 2xx status code +func (o *ObjectsClassGetUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects class get unauthorized response has a 3xx status code +func (o *ObjectsClassGetUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects class get unauthorized response has a 4xx status code +func (o *ObjectsClassGetUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects class get unauthorized response has a 5xx status code +func (o *ObjectsClassGetUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this objects class get unauthorized response a status code equal to that given +func (o *ObjectsClassGetUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the objects class get unauthorized response +func (o *ObjectsClassGetUnauthorized) Code() int { + return 401 +} + +func (o *ObjectsClassGetUnauthorized) Error() string { + return 
fmt.Sprintf("[GET /objects/{className}/{id}][%d] objectsClassGetUnauthorized ", 401) +} + +func (o *ObjectsClassGetUnauthorized) String() string { + return fmt.Sprintf("[GET /objects/{className}/{id}][%d] objectsClassGetUnauthorized ", 401) +} + +func (o *ObjectsClassGetUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewObjectsClassGetForbidden creates a ObjectsClassGetForbidden with default headers values +func NewObjectsClassGetForbidden() *ObjectsClassGetForbidden { + return &ObjectsClassGetForbidden{} +} + +/* +ObjectsClassGetForbidden describes a response with status code 403, with default header values. + +Forbidden +*/ +type ObjectsClassGetForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this objects class get forbidden response has a 2xx status code +func (o *ObjectsClassGetForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects class get forbidden response has a 3xx status code +func (o *ObjectsClassGetForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects class get forbidden response has a 4xx status code +func (o *ObjectsClassGetForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects class get forbidden response has a 5xx status code +func (o *ObjectsClassGetForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this objects class get forbidden response a status code equal to that given +func (o *ObjectsClassGetForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the objects class get forbidden response +func (o *ObjectsClassGetForbidden) Code() int { + return 403 +} + +func (o *ObjectsClassGetForbidden) Error() string { + return fmt.Sprintf("[GET /objects/{className}/{id}][%d] objectsClassGetForbidden %+v", 403, o.Payload) 
+} + +func (o *ObjectsClassGetForbidden) String() string { + return fmt.Sprintf("[GET /objects/{className}/{id}][%d] objectsClassGetForbidden %+v", 403, o.Payload) +} + +func (o *ObjectsClassGetForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ObjectsClassGetForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewObjectsClassGetNotFound creates a ObjectsClassGetNotFound with default headers values +func NewObjectsClassGetNotFound() *ObjectsClassGetNotFound { + return &ObjectsClassGetNotFound{} +} + +/* +ObjectsClassGetNotFound describes a response with status code 404, with default header values. + +Successful query result but no resource was found. +*/ +type ObjectsClassGetNotFound struct { +} + +// IsSuccess returns true when this objects class get not found response has a 2xx status code +func (o *ObjectsClassGetNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects class get not found response has a 3xx status code +func (o *ObjectsClassGetNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects class get not found response has a 4xx status code +func (o *ObjectsClassGetNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects class get not found response has a 5xx status code +func (o *ObjectsClassGetNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this objects class get not found response a status code equal to that given +func (o *ObjectsClassGetNotFound) IsCode(code int) bool { + return code == 404 +} + +// Code gets the status code for the objects class get not found response +func (o *ObjectsClassGetNotFound) 
Code() int { + return 404 +} + +func (o *ObjectsClassGetNotFound) Error() string { + return fmt.Sprintf("[GET /objects/{className}/{id}][%d] objectsClassGetNotFound ", 404) +} + +func (o *ObjectsClassGetNotFound) String() string { + return fmt.Sprintf("[GET /objects/{className}/{id}][%d] objectsClassGetNotFound ", 404) +} + +func (o *ObjectsClassGetNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewObjectsClassGetUnprocessableEntity creates a ObjectsClassGetUnprocessableEntity with default headers values +func NewObjectsClassGetUnprocessableEntity() *ObjectsClassGetUnprocessableEntity { + return &ObjectsClassGetUnprocessableEntity{} +} + +/* +ObjectsClassGetUnprocessableEntity describes a response with status code 422, with default header values. + +Request is well-formed (i.e., syntactically correct), but erroneous. +*/ +type ObjectsClassGetUnprocessableEntity struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this objects class get unprocessable entity response has a 2xx status code +func (o *ObjectsClassGetUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects class get unprocessable entity response has a 3xx status code +func (o *ObjectsClassGetUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects class get unprocessable entity response has a 4xx status code +func (o *ObjectsClassGetUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects class get unprocessable entity response has a 5xx status code +func (o *ObjectsClassGetUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this objects class get unprocessable entity response a status code equal to that given +func (o *ObjectsClassGetUnprocessableEntity) IsCode(code int) bool { + return code == 
422 +} + +// Code gets the status code for the objects class get unprocessable entity response +func (o *ObjectsClassGetUnprocessableEntity) Code() int { + return 422 +} + +func (o *ObjectsClassGetUnprocessableEntity) Error() string { + return fmt.Sprintf("[GET /objects/{className}/{id}][%d] objectsClassGetUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *ObjectsClassGetUnprocessableEntity) String() string { + return fmt.Sprintf("[GET /objects/{className}/{id}][%d] objectsClassGetUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *ObjectsClassGetUnprocessableEntity) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ObjectsClassGetUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewObjectsClassGetInternalServerError creates a ObjectsClassGetInternalServerError with default headers values +func NewObjectsClassGetInternalServerError() *ObjectsClassGetInternalServerError { + return &ObjectsClassGetInternalServerError{} +} + +/* +ObjectsClassGetInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+*/ +type ObjectsClassGetInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this objects class get internal server error response has a 2xx status code +func (o *ObjectsClassGetInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects class get internal server error response has a 3xx status code +func (o *ObjectsClassGetInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects class get internal server error response has a 4xx status code +func (o *ObjectsClassGetInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this objects class get internal server error response has a 5xx status code +func (o *ObjectsClassGetInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this objects class get internal server error response a status code equal to that given +func (o *ObjectsClassGetInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the objects class get internal server error response +func (o *ObjectsClassGetInternalServerError) Code() int { + return 500 +} + +func (o *ObjectsClassGetInternalServerError) Error() string { + return fmt.Sprintf("[GET /objects/{className}/{id}][%d] objectsClassGetInternalServerError %+v", 500, o.Payload) +} + +func (o *ObjectsClassGetInternalServerError) String() string { + return fmt.Sprintf("[GET /objects/{className}/{id}][%d] objectsClassGetInternalServerError %+v", 500, o.Payload) +} + +func (o *ObjectsClassGetInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ObjectsClassGetInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), 
o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/objects/objects_class_head_parameters.go b/platform/dbops/binaries/weaviate-src/client/objects/objects_class_head_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..72dd5a1a9eb6685f09c4ece8ea6cbef5cb944ace --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/objects/objects_class_head_parameters.go @@ -0,0 +1,254 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewObjectsClassHeadParams creates a new ObjectsClassHeadParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewObjectsClassHeadParams() *ObjectsClassHeadParams { + return &ObjectsClassHeadParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewObjectsClassHeadParamsWithTimeout creates a new ObjectsClassHeadParams object +// with the ability to set a timeout on a request. 
+func NewObjectsClassHeadParamsWithTimeout(timeout time.Duration) *ObjectsClassHeadParams { + return &ObjectsClassHeadParams{ + timeout: timeout, + } +} + +// NewObjectsClassHeadParamsWithContext creates a new ObjectsClassHeadParams object +// with the ability to set a context for a request. +func NewObjectsClassHeadParamsWithContext(ctx context.Context) *ObjectsClassHeadParams { + return &ObjectsClassHeadParams{ + Context: ctx, + } +} + +// NewObjectsClassHeadParamsWithHTTPClient creates a new ObjectsClassHeadParams object +// with the ability to set a custom HTTPClient for a request. +func NewObjectsClassHeadParamsWithHTTPClient(client *http.Client) *ObjectsClassHeadParams { + return &ObjectsClassHeadParams{ + HTTPClient: client, + } +} + +/* +ObjectsClassHeadParams contains all the parameters to send to the API endpoint + + for the objects class head operation. + + Typically these are written to a http.Request. +*/ +type ObjectsClassHeadParams struct { + + /* ClassName. + + The class name as defined in the schema + */ + ClassName string + + /* ConsistencyLevel. + + Determines how many replicas must acknowledge a request before it is considered successful + */ + ConsistencyLevel *string + + /* ID. + + The uuid of the data object + + Format: uuid + */ + ID strfmt.UUID + + /* Tenant. + + Specifies the tenant in a request targeting a multi-tenant class + */ + Tenant *string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the objects class head params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *ObjectsClassHeadParams) WithDefaults() *ObjectsClassHeadParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the objects class head params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *ObjectsClassHeadParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the objects class head params +func (o *ObjectsClassHeadParams) WithTimeout(timeout time.Duration) *ObjectsClassHeadParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the objects class head params +func (o *ObjectsClassHeadParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the objects class head params +func (o *ObjectsClassHeadParams) WithContext(ctx context.Context) *ObjectsClassHeadParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the objects class head params +func (o *ObjectsClassHeadParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the objects class head params +func (o *ObjectsClassHeadParams) WithHTTPClient(client *http.Client) *ObjectsClassHeadParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the objects class head params +func (o *ObjectsClassHeadParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithClassName adds the className to the objects class head params +func (o *ObjectsClassHeadParams) WithClassName(className string) *ObjectsClassHeadParams { + o.SetClassName(className) + return o +} + +// SetClassName adds the className to the objects class head params +func (o *ObjectsClassHeadParams) SetClassName(className string) { + o.ClassName = className +} + +// WithConsistencyLevel adds the consistencyLevel to the objects class head params +func (o *ObjectsClassHeadParams) WithConsistencyLevel(consistencyLevel *string) *ObjectsClassHeadParams { + o.SetConsistencyLevel(consistencyLevel) + return o +} + +// SetConsistencyLevel adds the consistencyLevel to the objects class head params +func (o *ObjectsClassHeadParams) SetConsistencyLevel(consistencyLevel *string) { + 
o.ConsistencyLevel = consistencyLevel +} + +// WithID adds the id to the objects class head params +func (o *ObjectsClassHeadParams) WithID(id strfmt.UUID) *ObjectsClassHeadParams { + o.SetID(id) + return o +} + +// SetID adds the id to the objects class head params +func (o *ObjectsClassHeadParams) SetID(id strfmt.UUID) { + o.ID = id +} + +// WithTenant adds the tenant to the objects class head params +func (o *ObjectsClassHeadParams) WithTenant(tenant *string) *ObjectsClassHeadParams { + o.SetTenant(tenant) + return o +} + +// SetTenant adds the tenant to the objects class head params +func (o *ObjectsClassHeadParams) SetTenant(tenant *string) { + o.Tenant = tenant +} + +// WriteToRequest writes these params to a swagger request +func (o *ObjectsClassHeadParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param className + if err := r.SetPathParam("className", o.ClassName); err != nil { + return err + } + + if o.ConsistencyLevel != nil { + + // query param consistency_level + var qrConsistencyLevel string + + if o.ConsistencyLevel != nil { + qrConsistencyLevel = *o.ConsistencyLevel + } + qConsistencyLevel := qrConsistencyLevel + if qConsistencyLevel != "" { + + if err := r.SetQueryParam("consistency_level", qConsistencyLevel); err != nil { + return err + } + } + } + + // path param id + if err := r.SetPathParam("id", o.ID.String()); err != nil { + return err + } + + if o.Tenant != nil { + + // query param tenant + var qrTenant string + + if o.Tenant != nil { + qrTenant = *o.Tenant + } + qTenant := qrTenant + if qTenant != "" { + + if err := r.SetQueryParam("tenant", qTenant); err != nil { + return err + } + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/objects/objects_class_head_responses.go b/platform/dbops/binaries/weaviate-src/client/objects/objects_class_head_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..134443a7b88affcd2d33c5e387b9292f32d9ff11 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/objects/objects_class_head_responses.go @@ -0,0 +1,448 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// ObjectsClassHeadReader is a Reader for the ObjectsClassHead structure. +type ObjectsClassHeadReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *ObjectsClassHeadReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 204: + result := NewObjectsClassHeadNoContent() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 401: + result := NewObjectsClassHeadUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewObjectsClassHeadForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 404: + result := NewObjectsClassHeadNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewObjectsClassHeadUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewObjectsClassHeadInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewObjectsClassHeadNoContent creates a ObjectsClassHeadNoContent with default headers values +func NewObjectsClassHeadNoContent() *ObjectsClassHeadNoContent { + return &ObjectsClassHeadNoContent{} +} + +/* +ObjectsClassHeadNoContent describes a response with status code 204, with default header values. + +Object exists. 
+*/ +type ObjectsClassHeadNoContent struct { +} + +// IsSuccess returns true when this objects class head no content response has a 2xx status code +func (o *ObjectsClassHeadNoContent) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this objects class head no content response has a 3xx status code +func (o *ObjectsClassHeadNoContent) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects class head no content response has a 4xx status code +func (o *ObjectsClassHeadNoContent) IsClientError() bool { + return false +} + +// IsServerError returns true when this objects class head no content response has a 5xx status code +func (o *ObjectsClassHeadNoContent) IsServerError() bool { + return false +} + +// IsCode returns true when this objects class head no content response a status code equal to that given +func (o *ObjectsClassHeadNoContent) IsCode(code int) bool { + return code == 204 +} + +// Code gets the status code for the objects class head no content response +func (o *ObjectsClassHeadNoContent) Code() int { + return 204 +} + +func (o *ObjectsClassHeadNoContent) Error() string { + return fmt.Sprintf("[HEAD /objects/{className}/{id}][%d] objectsClassHeadNoContent ", 204) +} + +func (o *ObjectsClassHeadNoContent) String() string { + return fmt.Sprintf("[HEAD /objects/{className}/{id}][%d] objectsClassHeadNoContent ", 204) +} + +func (o *ObjectsClassHeadNoContent) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewObjectsClassHeadUnauthorized creates a ObjectsClassHeadUnauthorized with default headers values +func NewObjectsClassHeadUnauthorized() *ObjectsClassHeadUnauthorized { + return &ObjectsClassHeadUnauthorized{} +} + +/* +ObjectsClassHeadUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. 
+*/ +type ObjectsClassHeadUnauthorized struct { +} + +// IsSuccess returns true when this objects class head unauthorized response has a 2xx status code +func (o *ObjectsClassHeadUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects class head unauthorized response has a 3xx status code +func (o *ObjectsClassHeadUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects class head unauthorized response has a 4xx status code +func (o *ObjectsClassHeadUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects class head unauthorized response has a 5xx status code +func (o *ObjectsClassHeadUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this objects class head unauthorized response a status code equal to that given +func (o *ObjectsClassHeadUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the objects class head unauthorized response +func (o *ObjectsClassHeadUnauthorized) Code() int { + return 401 +} + +func (o *ObjectsClassHeadUnauthorized) Error() string { + return fmt.Sprintf("[HEAD /objects/{className}/{id}][%d] objectsClassHeadUnauthorized ", 401) +} + +func (o *ObjectsClassHeadUnauthorized) String() string { + return fmt.Sprintf("[HEAD /objects/{className}/{id}][%d] objectsClassHeadUnauthorized ", 401) +} + +func (o *ObjectsClassHeadUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewObjectsClassHeadForbidden creates a ObjectsClassHeadForbidden with default headers values +func NewObjectsClassHeadForbidden() *ObjectsClassHeadForbidden { + return &ObjectsClassHeadForbidden{} +} + +/* +ObjectsClassHeadForbidden describes a response with status code 403, with default header values. 
+ +Forbidden +*/ +type ObjectsClassHeadForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this objects class head forbidden response has a 2xx status code +func (o *ObjectsClassHeadForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects class head forbidden response has a 3xx status code +func (o *ObjectsClassHeadForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects class head forbidden response has a 4xx status code +func (o *ObjectsClassHeadForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects class head forbidden response has a 5xx status code +func (o *ObjectsClassHeadForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this objects class head forbidden response a status code equal to that given +func (o *ObjectsClassHeadForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the objects class head forbidden response +func (o *ObjectsClassHeadForbidden) Code() int { + return 403 +} + +func (o *ObjectsClassHeadForbidden) Error() string { + return fmt.Sprintf("[HEAD /objects/{className}/{id}][%d] objectsClassHeadForbidden %+v", 403, o.Payload) +} + +func (o *ObjectsClassHeadForbidden) String() string { + return fmt.Sprintf("[HEAD /objects/{className}/{id}][%d] objectsClassHeadForbidden %+v", 403, o.Payload) +} + +func (o *ObjectsClassHeadForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ObjectsClassHeadForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewObjectsClassHeadNotFound creates a ObjectsClassHeadNotFound with default headers 
values +func NewObjectsClassHeadNotFound() *ObjectsClassHeadNotFound { + return &ObjectsClassHeadNotFound{} +} + +/* +ObjectsClassHeadNotFound describes a response with status code 404, with default header values. + +Object doesn't exist. +*/ +type ObjectsClassHeadNotFound struct { +} + +// IsSuccess returns true when this objects class head not found response has a 2xx status code +func (o *ObjectsClassHeadNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects class head not found response has a 3xx status code +func (o *ObjectsClassHeadNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects class head not found response has a 4xx status code +func (o *ObjectsClassHeadNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects class head not found response has a 5xx status code +func (o *ObjectsClassHeadNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this objects class head not found response a status code equal to that given +func (o *ObjectsClassHeadNotFound) IsCode(code int) bool { + return code == 404 +} + +// Code gets the status code for the objects class head not found response +func (o *ObjectsClassHeadNotFound) Code() int { + return 404 +} + +func (o *ObjectsClassHeadNotFound) Error() string { + return fmt.Sprintf("[HEAD /objects/{className}/{id}][%d] objectsClassHeadNotFound ", 404) +} + +func (o *ObjectsClassHeadNotFound) String() string { + return fmt.Sprintf("[HEAD /objects/{className}/{id}][%d] objectsClassHeadNotFound ", 404) +} + +func (o *ObjectsClassHeadNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewObjectsClassHeadUnprocessableEntity creates a ObjectsClassHeadUnprocessableEntity with default headers values +func NewObjectsClassHeadUnprocessableEntity() *ObjectsClassHeadUnprocessableEntity { + 
return &ObjectsClassHeadUnprocessableEntity{} +} + +/* +ObjectsClassHeadUnprocessableEntity describes a response with status code 422, with default header values. + +Request is well-formed (i.e., syntactically correct), but erroneous. +*/ +type ObjectsClassHeadUnprocessableEntity struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this objects class head unprocessable entity response has a 2xx status code +func (o *ObjectsClassHeadUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects class head unprocessable entity response has a 3xx status code +func (o *ObjectsClassHeadUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects class head unprocessable entity response has a 4xx status code +func (o *ObjectsClassHeadUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects class head unprocessable entity response has a 5xx status code +func (o *ObjectsClassHeadUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this objects class head unprocessable entity response a status code equal to that given +func (o *ObjectsClassHeadUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + +// Code gets the status code for the objects class head unprocessable entity response +func (o *ObjectsClassHeadUnprocessableEntity) Code() int { + return 422 +} + +func (o *ObjectsClassHeadUnprocessableEntity) Error() string { + return fmt.Sprintf("[HEAD /objects/{className}/{id}][%d] objectsClassHeadUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *ObjectsClassHeadUnprocessableEntity) String() string { + return fmt.Sprintf("[HEAD /objects/{className}/{id}][%d] objectsClassHeadUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *ObjectsClassHeadUnprocessableEntity) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o 
*ObjectsClassHeadUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewObjectsClassHeadInternalServerError creates a ObjectsClassHeadInternalServerError with default headers values +func NewObjectsClassHeadInternalServerError() *ObjectsClassHeadInternalServerError { + return &ObjectsClassHeadInternalServerError{} +} + +/* +ObjectsClassHeadInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. +*/ +type ObjectsClassHeadInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this objects class head internal server error response has a 2xx status code +func (o *ObjectsClassHeadInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects class head internal server error response has a 3xx status code +func (o *ObjectsClassHeadInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects class head internal server error response has a 4xx status code +func (o *ObjectsClassHeadInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this objects class head internal server error response has a 5xx status code +func (o *ObjectsClassHeadInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this objects class head internal server error response a status code equal to that given +func (o *ObjectsClassHeadInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the objects class 
head internal server error response +func (o *ObjectsClassHeadInternalServerError) Code() int { + return 500 +} + +func (o *ObjectsClassHeadInternalServerError) Error() string { + return fmt.Sprintf("[HEAD /objects/{className}/{id}][%d] objectsClassHeadInternalServerError %+v", 500, o.Payload) +} + +func (o *ObjectsClassHeadInternalServerError) String() string { + return fmt.Sprintf("[HEAD /objects/{className}/{id}][%d] objectsClassHeadInternalServerError %+v", 500, o.Payload) +} + +func (o *ObjectsClassHeadInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ObjectsClassHeadInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/objects/objects_class_patch_parameters.go b/platform/dbops/binaries/weaviate-src/client/objects/objects_class_patch_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..e559df6ee2787ebbdc9de64712033ebcc80e6c34 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/objects/objects_class_patch_parameters.go @@ -0,0 +1,244 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// NewObjectsClassPatchParams creates a new ObjectsClassPatchParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewObjectsClassPatchParams() *ObjectsClassPatchParams { + return &ObjectsClassPatchParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewObjectsClassPatchParamsWithTimeout creates a new ObjectsClassPatchParams object +// with the ability to set a timeout on a request. +func NewObjectsClassPatchParamsWithTimeout(timeout time.Duration) *ObjectsClassPatchParams { + return &ObjectsClassPatchParams{ + timeout: timeout, + } +} + +// NewObjectsClassPatchParamsWithContext creates a new ObjectsClassPatchParams object +// with the ability to set a context for a request. +func NewObjectsClassPatchParamsWithContext(ctx context.Context) *ObjectsClassPatchParams { + return &ObjectsClassPatchParams{ + Context: ctx, + } +} + +// NewObjectsClassPatchParamsWithHTTPClient creates a new ObjectsClassPatchParams object +// with the ability to set a custom HTTPClient for a request. +func NewObjectsClassPatchParamsWithHTTPClient(client *http.Client) *ObjectsClassPatchParams { + return &ObjectsClassPatchParams{ + HTTPClient: client, + } +} + +/* +ObjectsClassPatchParams contains all the parameters to send to the API endpoint + + for the objects class patch operation. + + Typically these are written to a http.Request. +*/ +type ObjectsClassPatchParams struct { + + /* Body. 
+ + RFC 7396-style patch, the body contains the object to merge into the existing object. + */ + Body *models.Object + + /* ClassName. + + The class name as defined in the schema + */ + ClassName string + + /* ConsistencyLevel. + + Determines how many replicas must acknowledge a request before it is considered successful + */ + ConsistencyLevel *string + + /* ID. + + The uuid of the data object to update. + + Format: uuid + */ + ID strfmt.UUID + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the objects class patch params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *ObjectsClassPatchParams) WithDefaults() *ObjectsClassPatchParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the objects class patch params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *ObjectsClassPatchParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the objects class patch params +func (o *ObjectsClassPatchParams) WithTimeout(timeout time.Duration) *ObjectsClassPatchParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the objects class patch params +func (o *ObjectsClassPatchParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the objects class patch params +func (o *ObjectsClassPatchParams) WithContext(ctx context.Context) *ObjectsClassPatchParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the objects class patch params +func (o *ObjectsClassPatchParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the objects class patch params +func (o *ObjectsClassPatchParams) WithHTTPClient(client *http.Client) *ObjectsClassPatchParams { + o.SetHTTPClient(client) + 
return o +} + +// SetHTTPClient adds the HTTPClient to the objects class patch params +func (o *ObjectsClassPatchParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the objects class patch params +func (o *ObjectsClassPatchParams) WithBody(body *models.Object) *ObjectsClassPatchParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the objects class patch params +func (o *ObjectsClassPatchParams) SetBody(body *models.Object) { + o.Body = body +} + +// WithClassName adds the className to the objects class patch params +func (o *ObjectsClassPatchParams) WithClassName(className string) *ObjectsClassPatchParams { + o.SetClassName(className) + return o +} + +// SetClassName adds the className to the objects class patch params +func (o *ObjectsClassPatchParams) SetClassName(className string) { + o.ClassName = className +} + +// WithConsistencyLevel adds the consistencyLevel to the objects class patch params +func (o *ObjectsClassPatchParams) WithConsistencyLevel(consistencyLevel *string) *ObjectsClassPatchParams { + o.SetConsistencyLevel(consistencyLevel) + return o +} + +// SetConsistencyLevel adds the consistencyLevel to the objects class patch params +func (o *ObjectsClassPatchParams) SetConsistencyLevel(consistencyLevel *string) { + o.ConsistencyLevel = consistencyLevel +} + +// WithID adds the id to the objects class patch params +func (o *ObjectsClassPatchParams) WithID(id strfmt.UUID) *ObjectsClassPatchParams { + o.SetID(id) + return o +} + +// SetID adds the id to the objects class patch params +func (o *ObjectsClassPatchParams) SetID(id strfmt.UUID) { + o.ID = id +} + +// WriteToRequest writes these params to a swagger request +func (o *ObjectsClassPatchParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return 
err + } + } + + // path param className + if err := r.SetPathParam("className", o.ClassName); err != nil { + return err + } + + if o.ConsistencyLevel != nil { + + // query param consistency_level + var qrConsistencyLevel string + + if o.ConsistencyLevel != nil { + qrConsistencyLevel = *o.ConsistencyLevel + } + qConsistencyLevel := qrConsistencyLevel + if qConsistencyLevel != "" { + + if err := r.SetQueryParam("consistency_level", qConsistencyLevel); err != nil { + return err + } + } + } + + // path param id + if err := r.SetPathParam("id", o.ID.String()); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/objects/objects_class_patch_responses.go b/platform/dbops/binaries/weaviate-src/client/objects/objects_class_patch_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..66f5bb5bd4fea2cbd6956416057a52bdaba0c259 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/objects/objects_class_patch_responses.go @@ -0,0 +1,522 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// ObjectsClassPatchReader is a Reader for the ObjectsClassPatch structure. +type ObjectsClassPatchReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *ObjectsClassPatchReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 204: + result := NewObjectsClassPatchNoContent() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewObjectsClassPatchBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 401: + result := NewObjectsClassPatchUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewObjectsClassPatchForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 404: + result := NewObjectsClassPatchNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewObjectsClassPatchUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewObjectsClassPatchInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewObjectsClassPatchNoContent creates a ObjectsClassPatchNoContent with default headers values +func NewObjectsClassPatchNoContent() *ObjectsClassPatchNoContent { + return &ObjectsClassPatchNoContent{} +} + +/* +ObjectsClassPatchNoContent describes a response with status code 204, with default header values. + +Successfully applied. No content provided. 
+*/ +type ObjectsClassPatchNoContent struct { +} + +// IsSuccess returns true when this objects class patch no content response has a 2xx status code +func (o *ObjectsClassPatchNoContent) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this objects class patch no content response has a 3xx status code +func (o *ObjectsClassPatchNoContent) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects class patch no content response has a 4xx status code +func (o *ObjectsClassPatchNoContent) IsClientError() bool { + return false +} + +// IsServerError returns true when this objects class patch no content response has a 5xx status code +func (o *ObjectsClassPatchNoContent) IsServerError() bool { + return false +} + +// IsCode returns true when this objects class patch no content response a status code equal to that given +func (o *ObjectsClassPatchNoContent) IsCode(code int) bool { + return code == 204 +} + +// Code gets the status code for the objects class patch no content response +func (o *ObjectsClassPatchNoContent) Code() int { + return 204 +} + +func (o *ObjectsClassPatchNoContent) Error() string { + return fmt.Sprintf("[PATCH /objects/{className}/{id}][%d] objectsClassPatchNoContent ", 204) +} + +func (o *ObjectsClassPatchNoContent) String() string { + return fmt.Sprintf("[PATCH /objects/{className}/{id}][%d] objectsClassPatchNoContent ", 204) +} + +func (o *ObjectsClassPatchNoContent) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewObjectsClassPatchBadRequest creates a ObjectsClassPatchBadRequest with default headers values +func NewObjectsClassPatchBadRequest() *ObjectsClassPatchBadRequest { + return &ObjectsClassPatchBadRequest{} +} + +/* +ObjectsClassPatchBadRequest describes a response with status code 400, with default header values. + +The patch-JSON is malformed. 
+*/ +type ObjectsClassPatchBadRequest struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this objects class patch bad request response has a 2xx status code +func (o *ObjectsClassPatchBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects class patch bad request response has a 3xx status code +func (o *ObjectsClassPatchBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects class patch bad request response has a 4xx status code +func (o *ObjectsClassPatchBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects class patch bad request response has a 5xx status code +func (o *ObjectsClassPatchBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this objects class patch bad request response a status code equal to that given +func (o *ObjectsClassPatchBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the objects class patch bad request response +func (o *ObjectsClassPatchBadRequest) Code() int { + return 400 +} + +func (o *ObjectsClassPatchBadRequest) Error() string { + return fmt.Sprintf("[PATCH /objects/{className}/{id}][%d] objectsClassPatchBadRequest %+v", 400, o.Payload) +} + +func (o *ObjectsClassPatchBadRequest) String() string { + return fmt.Sprintf("[PATCH /objects/{className}/{id}][%d] objectsClassPatchBadRequest %+v", 400, o.Payload) +} + +func (o *ObjectsClassPatchBadRequest) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ObjectsClassPatchBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewObjectsClassPatchUnauthorized creates a 
ObjectsClassPatchUnauthorized with default headers values +func NewObjectsClassPatchUnauthorized() *ObjectsClassPatchUnauthorized { + return &ObjectsClassPatchUnauthorized{} +} + +/* +ObjectsClassPatchUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. +*/ +type ObjectsClassPatchUnauthorized struct { +} + +// IsSuccess returns true when this objects class patch unauthorized response has a 2xx status code +func (o *ObjectsClassPatchUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects class patch unauthorized response has a 3xx status code +func (o *ObjectsClassPatchUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects class patch unauthorized response has a 4xx status code +func (o *ObjectsClassPatchUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects class patch unauthorized response has a 5xx status code +func (o *ObjectsClassPatchUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this objects class patch unauthorized response a status code equal to that given +func (o *ObjectsClassPatchUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the objects class patch unauthorized response +func (o *ObjectsClassPatchUnauthorized) Code() int { + return 401 +} + +func (o *ObjectsClassPatchUnauthorized) Error() string { + return fmt.Sprintf("[PATCH /objects/{className}/{id}][%d] objectsClassPatchUnauthorized ", 401) +} + +func (o *ObjectsClassPatchUnauthorized) String() string { + return fmt.Sprintf("[PATCH /objects/{className}/{id}][%d] objectsClassPatchUnauthorized ", 401) +} + +func (o *ObjectsClassPatchUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewObjectsClassPatchForbidden 
creates a ObjectsClassPatchForbidden with default headers values +func NewObjectsClassPatchForbidden() *ObjectsClassPatchForbidden { + return &ObjectsClassPatchForbidden{} +} + +/* +ObjectsClassPatchForbidden describes a response with status code 403, with default header values. + +Forbidden +*/ +type ObjectsClassPatchForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this objects class patch forbidden response has a 2xx status code +func (o *ObjectsClassPatchForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects class patch forbidden response has a 3xx status code +func (o *ObjectsClassPatchForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects class patch forbidden response has a 4xx status code +func (o *ObjectsClassPatchForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects class patch forbidden response has a 5xx status code +func (o *ObjectsClassPatchForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this objects class patch forbidden response a status code equal to that given +func (o *ObjectsClassPatchForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the objects class patch forbidden response +func (o *ObjectsClassPatchForbidden) Code() int { + return 403 +} + +func (o *ObjectsClassPatchForbidden) Error() string { + return fmt.Sprintf("[PATCH /objects/{className}/{id}][%d] objectsClassPatchForbidden %+v", 403, o.Payload) +} + +func (o *ObjectsClassPatchForbidden) String() string { + return fmt.Sprintf("[PATCH /objects/{className}/{id}][%d] objectsClassPatchForbidden %+v", 403, o.Payload) +} + +func (o *ObjectsClassPatchForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ObjectsClassPatchForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats 
strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewObjectsClassPatchNotFound creates a ObjectsClassPatchNotFound with default headers values +func NewObjectsClassPatchNotFound() *ObjectsClassPatchNotFound { + return &ObjectsClassPatchNotFound{} +} + +/* +ObjectsClassPatchNotFound describes a response with status code 404, with default header values. + +Successful query result but no resource was found. +*/ +type ObjectsClassPatchNotFound struct { +} + +// IsSuccess returns true when this objects class patch not found response has a 2xx status code +func (o *ObjectsClassPatchNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects class patch not found response has a 3xx status code +func (o *ObjectsClassPatchNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects class patch not found response has a 4xx status code +func (o *ObjectsClassPatchNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects class patch not found response has a 5xx status code +func (o *ObjectsClassPatchNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this objects class patch not found response a status code equal to that given +func (o *ObjectsClassPatchNotFound) IsCode(code int) bool { + return code == 404 +} + +// Code gets the status code for the objects class patch not found response +func (o *ObjectsClassPatchNotFound) Code() int { + return 404 +} + +func (o *ObjectsClassPatchNotFound) Error() string { + return fmt.Sprintf("[PATCH /objects/{className}/{id}][%d] objectsClassPatchNotFound ", 404) +} + +func (o *ObjectsClassPatchNotFound) String() string { + return fmt.Sprintf("[PATCH /objects/{className}/{id}][%d] objectsClassPatchNotFound ", 404) +} + +func (o 
*ObjectsClassPatchNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewObjectsClassPatchUnprocessableEntity creates a ObjectsClassPatchUnprocessableEntity with default headers values +func NewObjectsClassPatchUnprocessableEntity() *ObjectsClassPatchUnprocessableEntity { + return &ObjectsClassPatchUnprocessableEntity{} +} + +/* +ObjectsClassPatchUnprocessableEntity describes a response with status code 422, with default header values. + +The patch-JSON is valid but unprocessable. +*/ +type ObjectsClassPatchUnprocessableEntity struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this objects class patch unprocessable entity response has a 2xx status code +func (o *ObjectsClassPatchUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects class patch unprocessable entity response has a 3xx status code +func (o *ObjectsClassPatchUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects class patch unprocessable entity response has a 4xx status code +func (o *ObjectsClassPatchUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects class patch unprocessable entity response has a 5xx status code +func (o *ObjectsClassPatchUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this objects class patch unprocessable entity response a status code equal to that given +func (o *ObjectsClassPatchUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + +// Code gets the status code for the objects class patch unprocessable entity response +func (o *ObjectsClassPatchUnprocessableEntity) Code() int { + return 422 +} + +func (o *ObjectsClassPatchUnprocessableEntity) Error() string { + return fmt.Sprintf("[PATCH /objects/{className}/{id}][%d] 
objectsClassPatchUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *ObjectsClassPatchUnprocessableEntity) String() string { + return fmt.Sprintf("[PATCH /objects/{className}/{id}][%d] objectsClassPatchUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *ObjectsClassPatchUnprocessableEntity) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ObjectsClassPatchUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewObjectsClassPatchInternalServerError creates a ObjectsClassPatchInternalServerError with default headers values +func NewObjectsClassPatchInternalServerError() *ObjectsClassPatchInternalServerError { + return &ObjectsClassPatchInternalServerError{} +} + +/* +ObjectsClassPatchInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+*/ +type ObjectsClassPatchInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this objects class patch internal server error response has a 2xx status code +func (o *ObjectsClassPatchInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects class patch internal server error response has a 3xx status code +func (o *ObjectsClassPatchInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects class patch internal server error response has a 4xx status code +func (o *ObjectsClassPatchInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this objects class patch internal server error response has a 5xx status code +func (o *ObjectsClassPatchInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this objects class patch internal server error response a status code equal to that given +func (o *ObjectsClassPatchInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the objects class patch internal server error response +func (o *ObjectsClassPatchInternalServerError) Code() int { + return 500 +} + +func (o *ObjectsClassPatchInternalServerError) Error() string { + return fmt.Sprintf("[PATCH /objects/{className}/{id}][%d] objectsClassPatchInternalServerError %+v", 500, o.Payload) +} + +func (o *ObjectsClassPatchInternalServerError) String() string { + return fmt.Sprintf("[PATCH /objects/{className}/{id}][%d] objectsClassPatchInternalServerError %+v", 500, o.Payload) +} + +func (o *ObjectsClassPatchInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ObjectsClassPatchInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err 
:= consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/objects/objects_class_put_parameters.go b/platform/dbops/binaries/weaviate-src/client/objects/objects_class_put_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..746d69cec6c9cec90f286475b27849b80babe962 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/objects/objects_class_put_parameters.go @@ -0,0 +1,238 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// NewObjectsClassPutParams creates a new ObjectsClassPutParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewObjectsClassPutParams() *ObjectsClassPutParams { + return &ObjectsClassPutParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewObjectsClassPutParamsWithTimeout creates a new ObjectsClassPutParams object +// with the ability to set a timeout on a request. 
+func NewObjectsClassPutParamsWithTimeout(timeout time.Duration) *ObjectsClassPutParams { + return &ObjectsClassPutParams{ + timeout: timeout, + } +} + +// NewObjectsClassPutParamsWithContext creates a new ObjectsClassPutParams object +// with the ability to set a context for a request. +func NewObjectsClassPutParamsWithContext(ctx context.Context) *ObjectsClassPutParams { + return &ObjectsClassPutParams{ + Context: ctx, + } +} + +// NewObjectsClassPutParamsWithHTTPClient creates a new ObjectsClassPutParams object +// with the ability to set a custom HTTPClient for a request. +func NewObjectsClassPutParamsWithHTTPClient(client *http.Client) *ObjectsClassPutParams { + return &ObjectsClassPutParams{ + HTTPClient: client, + } +} + +/* +ObjectsClassPutParams contains all the parameters to send to the API endpoint + + for the objects class put operation. + + Typically these are written to a http.Request. +*/ +type ObjectsClassPutParams struct { + + // Body. + Body *models.Object + + // ClassName. + ClassName string + + /* ConsistencyLevel. + + Determines how many replicas must acknowledge a request before it is considered successful + */ + ConsistencyLevel *string + + /* ID. + + The uuid of the data object to update. + + Format: uuid + */ + ID strfmt.UUID + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the objects class put params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *ObjectsClassPutParams) WithDefaults() *ObjectsClassPutParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the objects class put params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *ObjectsClassPutParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the objects class put params +func (o *ObjectsClassPutParams) WithTimeout(timeout time.Duration) *ObjectsClassPutParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the objects class put params +func (o *ObjectsClassPutParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the objects class put params +func (o *ObjectsClassPutParams) WithContext(ctx context.Context) *ObjectsClassPutParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the objects class put params +func (o *ObjectsClassPutParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the objects class put params +func (o *ObjectsClassPutParams) WithHTTPClient(client *http.Client) *ObjectsClassPutParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the objects class put params +func (o *ObjectsClassPutParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the objects class put params +func (o *ObjectsClassPutParams) WithBody(body *models.Object) *ObjectsClassPutParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the objects class put params +func (o *ObjectsClassPutParams) SetBody(body *models.Object) { + o.Body = body +} + +// WithClassName adds the className to the objects class put params +func (o *ObjectsClassPutParams) WithClassName(className string) *ObjectsClassPutParams { + o.SetClassName(className) + return o +} + +// SetClassName adds the className to the objects class put params +func (o *ObjectsClassPutParams) SetClassName(className string) { + o.ClassName = className +} + +// WithConsistencyLevel adds the consistencyLevel to the objects class put params +func (o *ObjectsClassPutParams) 
WithConsistencyLevel(consistencyLevel *string) *ObjectsClassPutParams { + o.SetConsistencyLevel(consistencyLevel) + return o +} + +// SetConsistencyLevel adds the consistencyLevel to the objects class put params +func (o *ObjectsClassPutParams) SetConsistencyLevel(consistencyLevel *string) { + o.ConsistencyLevel = consistencyLevel +} + +// WithID adds the id to the objects class put params +func (o *ObjectsClassPutParams) WithID(id strfmt.UUID) *ObjectsClassPutParams { + o.SetID(id) + return o +} + +// SetID adds the id to the objects class put params +func (o *ObjectsClassPutParams) SetID(id strfmt.UUID) { + o.ID = id +} + +// WriteToRequest writes these params to a swagger request +func (o *ObjectsClassPutParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + // path param className + if err := r.SetPathParam("className", o.ClassName); err != nil { + return err + } + + if o.ConsistencyLevel != nil { + + // query param consistency_level + var qrConsistencyLevel string + + if o.ConsistencyLevel != nil { + qrConsistencyLevel = *o.ConsistencyLevel + } + qConsistencyLevel := qrConsistencyLevel + if qConsistencyLevel != "" { + + if err := r.SetQueryParam("consistency_level", qConsistencyLevel); err != nil { + return err + } + } + } + + // path param id + if err := r.SetPathParam("id", o.ID.String()); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/objects/objects_class_put_responses.go b/platform/dbops/binaries/weaviate-src/client/objects/objects_class_put_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..22bf9977a11e09cc236f8dcd005cd000b5f6b819 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/objects/objects_class_put_responses.go @@ -0,0 +1,460 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// ObjectsClassPutReader is a Reader for the ObjectsClassPut structure. +type ObjectsClassPutReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *ObjectsClassPutReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewObjectsClassPutOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 401: + result := NewObjectsClassPutUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewObjectsClassPutForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 404: + result := NewObjectsClassPutNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewObjectsClassPutUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewObjectsClassPutInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewObjectsClassPutOK creates a ObjectsClassPutOK with default headers values +func NewObjectsClassPutOK() *ObjectsClassPutOK { + return &ObjectsClassPutOK{} +} + +/* +ObjectsClassPutOK describes a response with status code 200, with default header values. + +Successfully received. 
+*/ +type ObjectsClassPutOK struct { + Payload *models.Object +} + +// IsSuccess returns true when this objects class put o k response has a 2xx status code +func (o *ObjectsClassPutOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this objects class put o k response has a 3xx status code +func (o *ObjectsClassPutOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects class put o k response has a 4xx status code +func (o *ObjectsClassPutOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this objects class put o k response has a 5xx status code +func (o *ObjectsClassPutOK) IsServerError() bool { + return false +} + +// IsCode returns true when this objects class put o k response a status code equal to that given +func (o *ObjectsClassPutOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the objects class put o k response +func (o *ObjectsClassPutOK) Code() int { + return 200 +} + +func (o *ObjectsClassPutOK) Error() string { + return fmt.Sprintf("[PUT /objects/{className}/{id}][%d] objectsClassPutOK %+v", 200, o.Payload) +} + +func (o *ObjectsClassPutOK) String() string { + return fmt.Sprintf("[PUT /objects/{className}/{id}][%d] objectsClassPutOK %+v", 200, o.Payload) +} + +func (o *ObjectsClassPutOK) GetPayload() *models.Object { + return o.Payload +} + +func (o *ObjectsClassPutOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Object) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewObjectsClassPutUnauthorized creates a ObjectsClassPutUnauthorized with default headers values +func NewObjectsClassPutUnauthorized() *ObjectsClassPutUnauthorized { + return &ObjectsClassPutUnauthorized{} +} + +/* +ObjectsClassPutUnauthorized describes a response with 
status code 401, with default header values. + +Unauthorized or invalid credentials. +*/ +type ObjectsClassPutUnauthorized struct { +} + +// IsSuccess returns true when this objects class put unauthorized response has a 2xx status code +func (o *ObjectsClassPutUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects class put unauthorized response has a 3xx status code +func (o *ObjectsClassPutUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects class put unauthorized response has a 4xx status code +func (o *ObjectsClassPutUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects class put unauthorized response has a 5xx status code +func (o *ObjectsClassPutUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this objects class put unauthorized response a status code equal to that given +func (o *ObjectsClassPutUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the objects class put unauthorized response +func (o *ObjectsClassPutUnauthorized) Code() int { + return 401 +} + +func (o *ObjectsClassPutUnauthorized) Error() string { + return fmt.Sprintf("[PUT /objects/{className}/{id}][%d] objectsClassPutUnauthorized ", 401) +} + +func (o *ObjectsClassPutUnauthorized) String() string { + return fmt.Sprintf("[PUT /objects/{className}/{id}][%d] objectsClassPutUnauthorized ", 401) +} + +func (o *ObjectsClassPutUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewObjectsClassPutForbidden creates a ObjectsClassPutForbidden with default headers values +func NewObjectsClassPutForbidden() *ObjectsClassPutForbidden { + return &ObjectsClassPutForbidden{} +} + +/* +ObjectsClassPutForbidden describes a response with status code 403, with default header values. 
+ +Forbidden +*/ +type ObjectsClassPutForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this objects class put forbidden response has a 2xx status code +func (o *ObjectsClassPutForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects class put forbidden response has a 3xx status code +func (o *ObjectsClassPutForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects class put forbidden response has a 4xx status code +func (o *ObjectsClassPutForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects class put forbidden response has a 5xx status code +func (o *ObjectsClassPutForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this objects class put forbidden response a status code equal to that given +func (o *ObjectsClassPutForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the objects class put forbidden response +func (o *ObjectsClassPutForbidden) Code() int { + return 403 +} + +func (o *ObjectsClassPutForbidden) Error() string { + return fmt.Sprintf("[PUT /objects/{className}/{id}][%d] objectsClassPutForbidden %+v", 403, o.Payload) +} + +func (o *ObjectsClassPutForbidden) String() string { + return fmt.Sprintf("[PUT /objects/{className}/{id}][%d] objectsClassPutForbidden %+v", 403, o.Payload) +} + +func (o *ObjectsClassPutForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ObjectsClassPutForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewObjectsClassPutNotFound creates a ObjectsClassPutNotFound with default headers values +func 
NewObjectsClassPutNotFound() *ObjectsClassPutNotFound { + return &ObjectsClassPutNotFound{} +} + +/* +ObjectsClassPutNotFound describes a response with status code 404, with default header values. + +Successful query result but no resource was found. +*/ +type ObjectsClassPutNotFound struct { +} + +// IsSuccess returns true when this objects class put not found response has a 2xx status code +func (o *ObjectsClassPutNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects class put not found response has a 3xx status code +func (o *ObjectsClassPutNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects class put not found response has a 4xx status code +func (o *ObjectsClassPutNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects class put not found response has a 5xx status code +func (o *ObjectsClassPutNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this objects class put not found response a status code equal to that given +func (o *ObjectsClassPutNotFound) IsCode(code int) bool { + return code == 404 +} + +// Code gets the status code for the objects class put not found response +func (o *ObjectsClassPutNotFound) Code() int { + return 404 +} + +func (o *ObjectsClassPutNotFound) Error() string { + return fmt.Sprintf("[PUT /objects/{className}/{id}][%d] objectsClassPutNotFound ", 404) +} + +func (o *ObjectsClassPutNotFound) String() string { + return fmt.Sprintf("[PUT /objects/{className}/{id}][%d] objectsClassPutNotFound ", 404) +} + +func (o *ObjectsClassPutNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewObjectsClassPutUnprocessableEntity creates a ObjectsClassPutUnprocessableEntity with default headers values +func NewObjectsClassPutUnprocessableEntity() *ObjectsClassPutUnprocessableEntity { + return 
&ObjectsClassPutUnprocessableEntity{} +} + +/* +ObjectsClassPutUnprocessableEntity describes a response with status code 422, with default header values. + +Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file? +*/ +type ObjectsClassPutUnprocessableEntity struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this objects class put unprocessable entity response has a 2xx status code +func (o *ObjectsClassPutUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects class put unprocessable entity response has a 3xx status code +func (o *ObjectsClassPutUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects class put unprocessable entity response has a 4xx status code +func (o *ObjectsClassPutUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects class put unprocessable entity response has a 5xx status code +func (o *ObjectsClassPutUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this objects class put unprocessable entity response a status code equal to that given +func (o *ObjectsClassPutUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + +// Code gets the status code for the objects class put unprocessable entity response +func (o *ObjectsClassPutUnprocessableEntity) Code() int { + return 422 +} + +func (o *ObjectsClassPutUnprocessableEntity) Error() string { + return fmt.Sprintf("[PUT /objects/{className}/{id}][%d] objectsClassPutUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *ObjectsClassPutUnprocessableEntity) String() string { + return fmt.Sprintf("[PUT /objects/{className}/{id}][%d] objectsClassPutUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *ObjectsClassPutUnprocessableEntity) GetPayload() 
*models.ErrorResponse { + return o.Payload +} + +func (o *ObjectsClassPutUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewObjectsClassPutInternalServerError creates a ObjectsClassPutInternalServerError with default headers values +func NewObjectsClassPutInternalServerError() *ObjectsClassPutInternalServerError { + return &ObjectsClassPutInternalServerError{} +} + +/* +ObjectsClassPutInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. +*/ +type ObjectsClassPutInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this objects class put internal server error response has a 2xx status code +func (o *ObjectsClassPutInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects class put internal server error response has a 3xx status code +func (o *ObjectsClassPutInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects class put internal server error response has a 4xx status code +func (o *ObjectsClassPutInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this objects class put internal server error response has a 5xx status code +func (o *ObjectsClassPutInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this objects class put internal server error response a status code equal to that given +func (o *ObjectsClassPutInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets 
the status code for the objects class put internal server error response +func (o *ObjectsClassPutInternalServerError) Code() int { + return 500 +} + +func (o *ObjectsClassPutInternalServerError) Error() string { + return fmt.Sprintf("[PUT /objects/{className}/{id}][%d] objectsClassPutInternalServerError %+v", 500, o.Payload) +} + +func (o *ObjectsClassPutInternalServerError) String() string { + return fmt.Sprintf("[PUT /objects/{className}/{id}][%d] objectsClassPutInternalServerError %+v", 500, o.Payload) +} + +func (o *ObjectsClassPutInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ObjectsClassPutInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/objects/objects_class_references_create_parameters.go b/platform/dbops/binaries/weaviate-src/client/objects/objects_class_references_create_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..c274d0a6aa076c095de13c6b411ce6948035d872 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/objects/objects_class_references_create_parameters.go @@ -0,0 +1,297 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// NewObjectsClassReferencesCreateParams creates a new ObjectsClassReferencesCreateParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewObjectsClassReferencesCreateParams() *ObjectsClassReferencesCreateParams { + return &ObjectsClassReferencesCreateParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewObjectsClassReferencesCreateParamsWithTimeout creates a new ObjectsClassReferencesCreateParams object +// with the ability to set a timeout on a request. +func NewObjectsClassReferencesCreateParamsWithTimeout(timeout time.Duration) *ObjectsClassReferencesCreateParams { + return &ObjectsClassReferencesCreateParams{ + timeout: timeout, + } +} + +// NewObjectsClassReferencesCreateParamsWithContext creates a new ObjectsClassReferencesCreateParams object +// with the ability to set a context for a request. +func NewObjectsClassReferencesCreateParamsWithContext(ctx context.Context) *ObjectsClassReferencesCreateParams { + return &ObjectsClassReferencesCreateParams{ + Context: ctx, + } +} + +// NewObjectsClassReferencesCreateParamsWithHTTPClient creates a new ObjectsClassReferencesCreateParams object +// with the ability to set a custom HTTPClient for a request. 
+func NewObjectsClassReferencesCreateParamsWithHTTPClient(client *http.Client) *ObjectsClassReferencesCreateParams { + return &ObjectsClassReferencesCreateParams{ + HTTPClient: client, + } +} + +/* +ObjectsClassReferencesCreateParams contains all the parameters to send to the API endpoint + + for the objects class references create operation. + + Typically these are written to a http.Request. +*/ +type ObjectsClassReferencesCreateParams struct { + + // Body. + Body *models.SingleRef + + /* ClassName. + + The class name as defined in the schema + */ + ClassName string + + /* ConsistencyLevel. + + Determines how many replicas must acknowledge a request before it is considered successful + */ + ConsistencyLevel *string + + /* ID. + + Unique ID of the Object. + + Format: uuid + */ + ID strfmt.UUID + + /* PropertyName. + + Unique name of the property related to the Object. + */ + PropertyName string + + /* Tenant. + + Specifies the tenant in a request targeting a multi-tenant class + */ + Tenant *string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the objects class references create params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *ObjectsClassReferencesCreateParams) WithDefaults() *ObjectsClassReferencesCreateParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the objects class references create params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *ObjectsClassReferencesCreateParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the objects class references create params +func (o *ObjectsClassReferencesCreateParams) WithTimeout(timeout time.Duration) *ObjectsClassReferencesCreateParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the objects class references create params +func (o *ObjectsClassReferencesCreateParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the objects class references create params +func (o *ObjectsClassReferencesCreateParams) WithContext(ctx context.Context) *ObjectsClassReferencesCreateParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the objects class references create params +func (o *ObjectsClassReferencesCreateParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the objects class references create params +func (o *ObjectsClassReferencesCreateParams) WithHTTPClient(client *http.Client) *ObjectsClassReferencesCreateParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the objects class references create params +func (o *ObjectsClassReferencesCreateParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the objects class references create params +func (o *ObjectsClassReferencesCreateParams) WithBody(body *models.SingleRef) *ObjectsClassReferencesCreateParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the objects class references create params +func (o *ObjectsClassReferencesCreateParams) SetBody(body *models.SingleRef) { + o.Body = body +} + +// WithClassName adds the className to the objects class references create params +func (o *ObjectsClassReferencesCreateParams) WithClassName(className string) *ObjectsClassReferencesCreateParams { + 
o.SetClassName(className) + return o +} + +// SetClassName adds the className to the objects class references create params +func (o *ObjectsClassReferencesCreateParams) SetClassName(className string) { + o.ClassName = className +} + +// WithConsistencyLevel adds the consistencyLevel to the objects class references create params +func (o *ObjectsClassReferencesCreateParams) WithConsistencyLevel(consistencyLevel *string) *ObjectsClassReferencesCreateParams { + o.SetConsistencyLevel(consistencyLevel) + return o +} + +// SetConsistencyLevel adds the consistencyLevel to the objects class references create params +func (o *ObjectsClassReferencesCreateParams) SetConsistencyLevel(consistencyLevel *string) { + o.ConsistencyLevel = consistencyLevel +} + +// WithID adds the id to the objects class references create params +func (o *ObjectsClassReferencesCreateParams) WithID(id strfmt.UUID) *ObjectsClassReferencesCreateParams { + o.SetID(id) + return o +} + +// SetID adds the id to the objects class references create params +func (o *ObjectsClassReferencesCreateParams) SetID(id strfmt.UUID) { + o.ID = id +} + +// WithPropertyName adds the propertyName to the objects class references create params +func (o *ObjectsClassReferencesCreateParams) WithPropertyName(propertyName string) *ObjectsClassReferencesCreateParams { + o.SetPropertyName(propertyName) + return o +} + +// SetPropertyName adds the propertyName to the objects class references create params +func (o *ObjectsClassReferencesCreateParams) SetPropertyName(propertyName string) { + o.PropertyName = propertyName +} + +// WithTenant adds the tenant to the objects class references create params +func (o *ObjectsClassReferencesCreateParams) WithTenant(tenant *string) *ObjectsClassReferencesCreateParams { + o.SetTenant(tenant) + return o +} + +// SetTenant adds the tenant to the objects class references create params +func (o *ObjectsClassReferencesCreateParams) SetTenant(tenant *string) { + o.Tenant = tenant +} + +// 
WriteToRequest writes these params to a swagger request +func (o *ObjectsClassReferencesCreateParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + // path param className + if err := r.SetPathParam("className", o.ClassName); err != nil { + return err + } + + if o.ConsistencyLevel != nil { + + // query param consistency_level + var qrConsistencyLevel string + + if o.ConsistencyLevel != nil { + qrConsistencyLevel = *o.ConsistencyLevel + } + qConsistencyLevel := qrConsistencyLevel + if qConsistencyLevel != "" { + + if err := r.SetQueryParam("consistency_level", qConsistencyLevel); err != nil { + return err + } + } + } + + // path param id + if err := r.SetPathParam("id", o.ID.String()); err != nil { + return err + } + + // path param propertyName + if err := r.SetPathParam("propertyName", o.PropertyName); err != nil { + return err + } + + if o.Tenant != nil { + + // query param tenant + var qrTenant string + + if o.Tenant != nil { + qrTenant = *o.Tenant + } + qTenant := qrTenant + if qTenant != "" { + + if err := r.SetQueryParam("tenant", qTenant); err != nil { + return err + } + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/objects/objects_class_references_create_responses.go b/platform/dbops/binaries/weaviate-src/client/objects/objects_class_references_create_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..c4fa38812ee364a6b0553649a914827964a11453 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/objects/objects_class_references_create_responses.go @@ -0,0 +1,522 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// ObjectsClassReferencesCreateReader is a Reader for the ObjectsClassReferencesCreate structure. +type ObjectsClassReferencesCreateReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
// ReadResponse reads a server response into the received o. Only the 200
// response is returned as a value; every error status is returned as a Go
// error (the typed response structs all implement error via Error()).
func (o *ObjectsClassReferencesCreateReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
	switch response.Code() {
	case 200:
		result := NewObjectsClassReferencesCreateOK()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return result, nil
	case 400:
		result := NewObjectsClassReferencesCreateBadRequest()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, result
	case 401:
		result := NewObjectsClassReferencesCreateUnauthorized()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, result
	case 403:
		result := NewObjectsClassReferencesCreateForbidden()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, result
	case 404:
		result := NewObjectsClassReferencesCreateNotFound()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, result
	case 422:
		result := NewObjectsClassReferencesCreateUnprocessableEntity()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, result
	case 500:
		result := NewObjectsClassReferencesCreateInternalServerError()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, result
	default:
		return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
	}
}

// NewObjectsClassReferencesCreateOK creates a ObjectsClassReferencesCreateOK with default headers values
func NewObjectsClassReferencesCreateOK() *ObjectsClassReferencesCreateOK {
	return &ObjectsClassReferencesCreateOK{}
}

/*
ObjectsClassReferencesCreateOK describes a response with status code 200, with default header values.

Successfully added the reference.
*/
type ObjectsClassReferencesCreateOK struct {
}

// IsSuccess returns true when this objects class references create o k response has a 2xx status code
func (o *ObjectsClassReferencesCreateOK) IsSuccess() bool {
	return true
}

// IsRedirect returns true when this objects class references create o k response has a 3xx status code
func (o *ObjectsClassReferencesCreateOK) IsRedirect() bool {
	return false
}

// IsClientError returns true when this objects class references create o k response has a 4xx status code
func (o *ObjectsClassReferencesCreateOK) IsClientError() bool {
	return false
}

// IsServerError returns true when this objects class references create o k response has a 5xx status code
func (o *ObjectsClassReferencesCreateOK) IsServerError() bool {
	return false
}

// IsCode returns true when this objects class references create o k response has a status code equal to that given
func (o *ObjectsClassReferencesCreateOK) IsCode(code int) bool {
	return code == 200
}

// Code gets the status code for the objects class references create o k response
func (o *ObjectsClassReferencesCreateOK) Code() int {
	return 200
}

func (o *ObjectsClassReferencesCreateOK) Error() string {
	return fmt.Sprintf("[POST /objects/{className}/{id}/references/{propertyName}][%d] objectsClassReferencesCreateOK ", 200)
}

func (o *ObjectsClassReferencesCreateOK) String() string {
	return fmt.Sprintf("[POST /objects/{className}/{id}/references/{propertyName}][%d] objectsClassReferencesCreateOK ", 200)
}

// readResponse is a no-op: the 200 response carries no body or headers.
func (o *ObjectsClassReferencesCreateOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {

	return nil
}

// NewObjectsClassReferencesCreateBadRequest creates a ObjectsClassReferencesCreateBadRequest with default headers values
func NewObjectsClassReferencesCreateBadRequest() *ObjectsClassReferencesCreateBadRequest {
	return &ObjectsClassReferencesCreateBadRequest{}
}

/*
ObjectsClassReferencesCreateBadRequest describes a response with status code 400, with default header values.

Malformed request.
*/
type ObjectsClassReferencesCreateBadRequest struct {
	Payload *models.ErrorResponse
}

// IsSuccess returns true when this objects class references create bad request response has a 2xx status code
func (o *ObjectsClassReferencesCreateBadRequest) IsSuccess() bool {
	return false
}

// IsRedirect returns true when this objects class references create bad request response has a 3xx status code
func (o *ObjectsClassReferencesCreateBadRequest) IsRedirect() bool {
	return false
}

// IsClientError returns true when this objects class references create bad request response has a 4xx status code
func (o *ObjectsClassReferencesCreateBadRequest) IsClientError() bool {
	return true
}

// IsServerError returns true when this objects class references create bad request response has a 5xx status code
func (o *ObjectsClassReferencesCreateBadRequest) IsServerError() bool {
	return false
}

// IsCode returns true when this objects class references create bad request response has a status code equal to that given
func (o *ObjectsClassReferencesCreateBadRequest) IsCode(code int) bool {
	return code == 400
}

// Code gets the status code for the objects class references create bad request response
func (o *ObjectsClassReferencesCreateBadRequest) Code() int {
	return 400
}

func (o *ObjectsClassReferencesCreateBadRequest) Error() string {
	return fmt.Sprintf("[POST /objects/{className}/{id}/references/{propertyName}][%d] objectsClassReferencesCreateBadRequest %+v", 400, o.Payload)
}

func (o *ObjectsClassReferencesCreateBadRequest) String() string {
	return fmt.Sprintf("[POST /objects/{className}/{id}/references/{propertyName}][%d] objectsClassReferencesCreateBadRequest %+v", 400, o.Payload)
}

// GetPayload returns the decoded ErrorResponse body of the 400 response.
func (o *ObjectsClassReferencesCreateBadRequest) GetPayload() *models.ErrorResponse {
	return o.Payload
}

func (o *ObjectsClassReferencesCreateBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {

	o.Payload = new(models.ErrorResponse)

	// response payload; io.EOF is tolerated so an empty body is not an error
	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
		return err
	}

	return nil
}

// NewObjectsClassReferencesCreateUnauthorized creates a ObjectsClassReferencesCreateUnauthorized with default headers values
func NewObjectsClassReferencesCreateUnauthorized() *ObjectsClassReferencesCreateUnauthorized {
	return &ObjectsClassReferencesCreateUnauthorized{}
}

/*
ObjectsClassReferencesCreateUnauthorized describes a response with status code 401, with default header values.

Unauthorized or invalid credentials.
*/
type ObjectsClassReferencesCreateUnauthorized struct {
}

// IsSuccess returns true when this objects class references create unauthorized response has a 2xx status code
func (o *ObjectsClassReferencesCreateUnauthorized) IsSuccess() bool {
	return false
}

// IsRedirect returns true when this objects class references create unauthorized response has a 3xx status code
func (o *ObjectsClassReferencesCreateUnauthorized) IsRedirect() bool {
	return false
}

// IsClientError returns true when this objects class references create unauthorized response has a 4xx status code
func (o *ObjectsClassReferencesCreateUnauthorized) IsClientError() bool {
	return true
}

// IsServerError returns true when this objects class references create unauthorized response has a 5xx status code
func (o *ObjectsClassReferencesCreateUnauthorized) IsServerError() bool {
	return false
}

// IsCode returns true when this objects class references create unauthorized response has a status code equal to that given
func (o *ObjectsClassReferencesCreateUnauthorized) IsCode(code int) bool {
	return code == 401
}

// Code gets the status code for the objects class references create unauthorized response
func (o *ObjectsClassReferencesCreateUnauthorized) Code() int {
	return 401
}

func (o *ObjectsClassReferencesCreateUnauthorized) Error() string {
	return fmt.Sprintf("[POST /objects/{className}/{id}/references/{propertyName}][%d] objectsClassReferencesCreateUnauthorized ", 401)
}

func (o *ObjectsClassReferencesCreateUnauthorized) String() string {
	return fmt.Sprintf("[POST /objects/{className}/{id}/references/{propertyName}][%d] objectsClassReferencesCreateUnauthorized ", 401)
}

// readResponse is a no-op: the 401 response carries no body or headers.
func (o *ObjectsClassReferencesCreateUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {

	return nil
}

// NewObjectsClassReferencesCreateForbidden creates a ObjectsClassReferencesCreateForbidden with default headers values
func NewObjectsClassReferencesCreateForbidden() *ObjectsClassReferencesCreateForbidden {
	return &ObjectsClassReferencesCreateForbidden{}
}

/*
ObjectsClassReferencesCreateForbidden describes a response with status code 403, with default header values.

Forbidden
*/
type ObjectsClassReferencesCreateForbidden struct {
	Payload *models.ErrorResponse
}

// IsSuccess returns true when this objects class references create forbidden response has a 2xx status code
func (o *ObjectsClassReferencesCreateForbidden) IsSuccess() bool {
	return false
}

// IsRedirect returns true when this objects class references create forbidden response has a 3xx status code
func (o *ObjectsClassReferencesCreateForbidden) IsRedirect() bool {
	return false
}

// IsClientError returns true when this objects class references create forbidden response has a 4xx status code
func (o *ObjectsClassReferencesCreateForbidden) IsClientError() bool {
	return true
}

// IsServerError returns true when this objects class references create forbidden response has a 5xx status code
func (o *ObjectsClassReferencesCreateForbidden) IsServerError() bool {
	return false
}

// IsCode returns true when this objects class references create forbidden response has a status code equal to that given
func (o *ObjectsClassReferencesCreateForbidden) IsCode(code int) bool {
	return code == 403
}

// Code gets the status code for the objects class references create forbidden response
func (o *ObjectsClassReferencesCreateForbidden) Code() int {
	return 403
}

func (o *ObjectsClassReferencesCreateForbidden) Error() string {
	return fmt.Sprintf("[POST /objects/{className}/{id}/references/{propertyName}][%d] objectsClassReferencesCreateForbidden %+v", 403, o.Payload)
}

func (o *ObjectsClassReferencesCreateForbidden) String() string {
	return fmt.Sprintf("[POST /objects/{className}/{id}/references/{propertyName}][%d] objectsClassReferencesCreateForbidden %+v", 403, o.Payload)
}

// GetPayload returns the decoded ErrorResponse body of the 403 response.
func (o *ObjectsClassReferencesCreateForbidden) GetPayload() *models.ErrorResponse {
	return o.Payload
}

func (o *ObjectsClassReferencesCreateForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {

	o.Payload = new(models.ErrorResponse)

	// response payload; io.EOF is tolerated so an empty body is not an error
	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
		return err
	}

	return nil
}

// NewObjectsClassReferencesCreateNotFound creates a ObjectsClassReferencesCreateNotFound with default headers values
func NewObjectsClassReferencesCreateNotFound() *ObjectsClassReferencesCreateNotFound {
	return &ObjectsClassReferencesCreateNotFound{}
}

/*
ObjectsClassReferencesCreateNotFound describes a response with status code 404, with default header values.

Source object doesn't exist.
*/
type ObjectsClassReferencesCreateNotFound struct {
}

// IsSuccess returns true when this objects class references create not found response has a 2xx status code
func (o *ObjectsClassReferencesCreateNotFound) IsSuccess() bool {
	return false
}

// IsRedirect returns true when this objects class references create not found response has a 3xx status code
func (o *ObjectsClassReferencesCreateNotFound) IsRedirect() bool {
	return false
}

// IsClientError returns true when this objects class references create not found response has a 4xx status code
func (o *ObjectsClassReferencesCreateNotFound) IsClientError() bool {
	return true
}

// IsServerError returns true when this objects class references create not found response has a 5xx status code
func (o *ObjectsClassReferencesCreateNotFound) IsServerError() bool {
	return false
}

// IsCode returns true when this objects class references create not found response has a status code equal to that given
func (o *ObjectsClassReferencesCreateNotFound) IsCode(code int) bool {
	return code == 404
}

// Code gets the status code for the objects class references create not found response
func (o *ObjectsClassReferencesCreateNotFound) Code() int {
	return 404
}

func (o *ObjectsClassReferencesCreateNotFound) Error() string {
	return fmt.Sprintf("[POST /objects/{className}/{id}/references/{propertyName}][%d] objectsClassReferencesCreateNotFound ", 404)
}

func (o *ObjectsClassReferencesCreateNotFound) String() string {
	return fmt.Sprintf("[POST /objects/{className}/{id}/references/{propertyName}][%d] objectsClassReferencesCreateNotFound ", 404)
}

// readResponse is a no-op: the 404 response carries no body or headers.
func (o *ObjectsClassReferencesCreateNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {

	return nil
}

// NewObjectsClassReferencesCreateUnprocessableEntity creates a ObjectsClassReferencesCreateUnprocessableEntity with default headers values
func NewObjectsClassReferencesCreateUnprocessableEntity() *ObjectsClassReferencesCreateUnprocessableEntity {
	return &ObjectsClassReferencesCreateUnprocessableEntity{}
}

/*
ObjectsClassReferencesCreateUnprocessableEntity describes a response with status code 422, with default header values.

Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the property exists or that it is a class?
*/
type ObjectsClassReferencesCreateUnprocessableEntity struct {
	Payload *models.ErrorResponse
}

// IsSuccess returns true when this objects class references create unprocessable entity response has a 2xx status code
func (o *ObjectsClassReferencesCreateUnprocessableEntity) IsSuccess() bool {
	return false
}

// IsRedirect returns true when this objects class references create unprocessable entity response has a 3xx status code
func (o *ObjectsClassReferencesCreateUnprocessableEntity) IsRedirect() bool {
	return false
}

// IsClientError returns true when this objects class references create unprocessable entity response has a 4xx status code
func (o *ObjectsClassReferencesCreateUnprocessableEntity) IsClientError() bool {
	return true
}

// IsServerError returns true when this objects class references create unprocessable entity response has a 5xx status code
func (o *ObjectsClassReferencesCreateUnprocessableEntity) IsServerError() bool {
	return false
}

// IsCode returns true when this objects class references create unprocessable entity response has a status code equal to that given
func (o *ObjectsClassReferencesCreateUnprocessableEntity) IsCode(code int) bool {
	return code == 422
}

// Code gets the status code for the objects class references create unprocessable entity response
func (o *ObjectsClassReferencesCreateUnprocessableEntity) Code() int {
	return 422
}

func (o *ObjectsClassReferencesCreateUnprocessableEntity) Error() string {
	return fmt.Sprintf("[POST /objects/{className}/{id}/references/{propertyName}][%d] objectsClassReferencesCreateUnprocessableEntity %+v", 422, o.Payload)
}

func (o *ObjectsClassReferencesCreateUnprocessableEntity) String() string {
	return fmt.Sprintf("[POST /objects/{className}/{id}/references/{propertyName}][%d] objectsClassReferencesCreateUnprocessableEntity %+v", 422, o.Payload)
}

// GetPayload returns the decoded ErrorResponse body of the 422 response.
func (o *ObjectsClassReferencesCreateUnprocessableEntity) GetPayload() *models.ErrorResponse {
	return o.Payload
}

func (o *ObjectsClassReferencesCreateUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {

	o.Payload = new(models.ErrorResponse)

	// response payload; io.EOF is tolerated so an empty body is not an error
	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
		return err
	}

	return nil
}

// NewObjectsClassReferencesCreateInternalServerError creates a ObjectsClassReferencesCreateInternalServerError with default headers values
func NewObjectsClassReferencesCreateInternalServerError() *ObjectsClassReferencesCreateInternalServerError {
	return &ObjectsClassReferencesCreateInternalServerError{}
}

/*
ObjectsClassReferencesCreateInternalServerError describes a response with status code 500, with default header values.

An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.
*/
type ObjectsClassReferencesCreateInternalServerError struct {
	Payload *models.ErrorResponse
}

// IsSuccess returns true when this objects class references create internal server error response has a 2xx status code
func (o *ObjectsClassReferencesCreateInternalServerError) IsSuccess() bool {
	return false
}

// IsRedirect returns true when this objects class references create internal server error response has a 3xx status code
func (o *ObjectsClassReferencesCreateInternalServerError) IsRedirect() bool {
	return false
}

// IsClientError returns true when this objects class references create internal server error response has a 4xx status code
func (o *ObjectsClassReferencesCreateInternalServerError) IsClientError() bool {
	return false
}

// IsServerError returns true when this objects class references create internal server error response has a 5xx status code
func (o *ObjectsClassReferencesCreateInternalServerError) IsServerError() bool {
	return true
}

// IsCode returns true when this objects class references create internal server error response has a status code equal to that given
func (o *ObjectsClassReferencesCreateInternalServerError) IsCode(code int) bool {
	return code == 500
}

// Code gets the status code for the objects class references create internal server error response
func (o *ObjectsClassReferencesCreateInternalServerError) Code() int {
	return 500
}

func (o *ObjectsClassReferencesCreateInternalServerError) Error() string {
	return fmt.Sprintf("[POST /objects/{className}/{id}/references/{propertyName}][%d] objectsClassReferencesCreateInternalServerError %+v", 500, o.Payload)
}

func (o *ObjectsClassReferencesCreateInternalServerError) String() string {
	return fmt.Sprintf("[POST /objects/{className}/{id}/references/{propertyName}][%d] objectsClassReferencesCreateInternalServerError %+v", 500, o.Payload)
}

// GetPayload returns the decoded ErrorResponse body of the 500 response.
func (o *ObjectsClassReferencesCreateInternalServerError) GetPayload() *models.ErrorResponse {
	return o.Payload
}

func (o *ObjectsClassReferencesCreateInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {

	o.Payload = new(models.ErrorResponse)

	// response payload; io.EOF is tolerated so an empty body is not an error
	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
		return err
	}

	return nil
}

// --- next generated file in the patch: client/objects/objects_class_references_delete_parameters.go ---
//
// Copyright © 2016 -
// 2025 Weaviate B.V. All rights reserved.
//
// CONTACT: hello@weaviate.io
//

// Code generated by go-swagger; DO NOT EDIT.

package objects

// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command

import (
	"context"
	"net/http"
	"time"

	"github.com/go-openapi/errors"
	"github.com/go-openapi/runtime"
	cr "github.com/go-openapi/runtime/client"
	"github.com/go-openapi/strfmt"

	"github.com/weaviate/weaviate/entities/models"
)

// NewObjectsClassReferencesDeleteParams creates a new ObjectsClassReferencesDeleteParams object,
// with the default timeout for this client.
//
// Default values are not hydrated, since defaults are normally applied by the API server side.
//
// To enforce default values in parameter, use SetDefaults or WithDefaults.
func NewObjectsClassReferencesDeleteParams() *ObjectsClassReferencesDeleteParams {
	return &ObjectsClassReferencesDeleteParams{
		timeout: cr.DefaultTimeout,
	}
}

// NewObjectsClassReferencesDeleteParamsWithTimeout creates a new ObjectsClassReferencesDeleteParams object
// with the ability to set a timeout on a request.
func NewObjectsClassReferencesDeleteParamsWithTimeout(timeout time.Duration) *ObjectsClassReferencesDeleteParams {
	return &ObjectsClassReferencesDeleteParams{
		timeout: timeout,
	}
}

// NewObjectsClassReferencesDeleteParamsWithContext creates a new ObjectsClassReferencesDeleteParams object
// with the ability to set a context for a request.
func NewObjectsClassReferencesDeleteParamsWithContext(ctx context.Context) *ObjectsClassReferencesDeleteParams {
	return &ObjectsClassReferencesDeleteParams{
		Context: ctx,
	}
}

// NewObjectsClassReferencesDeleteParamsWithHTTPClient creates a new ObjectsClassReferencesDeleteParams object
// with the ability to set a custom HTTPClient for a request.
func NewObjectsClassReferencesDeleteParamsWithHTTPClient(client *http.Client) *ObjectsClassReferencesDeleteParams {
	return &ObjectsClassReferencesDeleteParams{
		HTTPClient: client,
	}
}

/*
ObjectsClassReferencesDeleteParams contains all the parameters to send to the API endpoint

	for the objects class references delete operation.

	Typically these are written to a http.Request.
*/
type ObjectsClassReferencesDeleteParams struct {

	// Body.
	Body *models.SingleRef

	/* ClassName.

	   The class name as defined in the schema
	*/
	ClassName string

	/* ConsistencyLevel.

	   Determines how many replicas must acknowledge a request before it is considered successful
	*/
	ConsistencyLevel *string

	/* ID.

	   Unique ID of the Object.

	   Format: uuid
	*/
	ID strfmt.UUID

	/* PropertyName.

	   Unique name of the property related to the Object.
	*/
	PropertyName string

	/* Tenant.

	   Specifies the tenant in a request targeting a multi-tenant class
	*/
	Tenant *string

	timeout    time.Duration
	Context    context.Context
	HTTPClient *http.Client
}

// WithDefaults hydrates default values in the objects class references delete params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *ObjectsClassReferencesDeleteParams) WithDefaults() *ObjectsClassReferencesDeleteParams {
	o.SetDefaults()
	return o
}

// SetDefaults hydrates default values in the objects class references delete params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *ObjectsClassReferencesDeleteParams) SetDefaults() {
	// no default values defined for this parameter
}

// WithTimeout adds the timeout to the objects class references delete params
func (o *ObjectsClassReferencesDeleteParams) WithTimeout(timeout time.Duration) *ObjectsClassReferencesDeleteParams {
	o.SetTimeout(timeout)
	return o
}

// SetTimeout adds the timeout to the objects class references delete params
func (o *ObjectsClassReferencesDeleteParams) SetTimeout(timeout time.Duration) {
	o.timeout = timeout
}

// WithContext adds the context to the objects class references delete params
func (o *ObjectsClassReferencesDeleteParams) WithContext(ctx context.Context) *ObjectsClassReferencesDeleteParams {
	o.SetContext(ctx)
	return o
}

// SetContext adds the context to the objects class references delete params
func (o *ObjectsClassReferencesDeleteParams) SetContext(ctx context.Context) {
	o.Context = ctx
}

// WithHTTPClient adds the HTTPClient to the objects class references delete params
func (o *ObjectsClassReferencesDeleteParams) WithHTTPClient(client *http.Client) *ObjectsClassReferencesDeleteParams {
	o.SetHTTPClient(client)
	return o
}

// SetHTTPClient adds the HTTPClient to the objects class references delete params
func (o *ObjectsClassReferencesDeleteParams) SetHTTPClient(client *http.Client) {
	o.HTTPClient = client
}

// WithBody adds the body to the objects class references delete params
func (o *ObjectsClassReferencesDeleteParams) WithBody(body *models.SingleRef) *ObjectsClassReferencesDeleteParams {
	o.SetBody(body)
	return o
}

// SetBody adds the body to the objects class references delete params
func (o *ObjectsClassReferencesDeleteParams) SetBody(body *models.SingleRef) {
	o.Body = body
}

// WithClassName adds the className to the objects class references delete params
func (o *ObjectsClassReferencesDeleteParams) WithClassName(className string) *ObjectsClassReferencesDeleteParams {
	o.SetClassName(className)
	return o
}

// SetClassName adds the className to the objects class references delete params
func (o *ObjectsClassReferencesDeleteParams) SetClassName(className string) {
	o.ClassName = className
}

// WithConsistencyLevel adds the consistencyLevel to the objects class references delete params
func (o *ObjectsClassReferencesDeleteParams) WithConsistencyLevel(consistencyLevel *string) *ObjectsClassReferencesDeleteParams {
	o.SetConsistencyLevel(consistencyLevel)
	return o
}

// SetConsistencyLevel adds the consistencyLevel to the objects class references delete params
func (o *ObjectsClassReferencesDeleteParams) SetConsistencyLevel(consistencyLevel *string) {
	o.ConsistencyLevel = consistencyLevel
}

// WithID adds the id to the objects class references delete params
func (o *ObjectsClassReferencesDeleteParams) WithID(id strfmt.UUID) *ObjectsClassReferencesDeleteParams {
	o.SetID(id)
	return o
}

// SetID adds the id to the objects class references delete params
func (o *ObjectsClassReferencesDeleteParams) SetID(id strfmt.UUID) {
	o.ID = id
}

// WithPropertyName adds the propertyName to the objects class references delete params
func (o *ObjectsClassReferencesDeleteParams) WithPropertyName(propertyName string) *ObjectsClassReferencesDeleteParams {
	o.SetPropertyName(propertyName)
	return o
}

// SetPropertyName adds the propertyName to the objects class references delete params
func (o *ObjectsClassReferencesDeleteParams) SetPropertyName(propertyName string) {
	o.PropertyName = propertyName
}

// WithTenant adds the tenant to the objects class references delete params
func (o *ObjectsClassReferencesDeleteParams) WithTenant(tenant *string) *ObjectsClassReferencesDeleteParams {
	o.SetTenant(tenant)
	return o
}

// SetTenant adds the tenant to the objects class references delete params
func (o *ObjectsClassReferencesDeleteParams) SetTenant(tenant *string) {
	o.Tenant = tenant
}
// WriteToRequest writes these params to a swagger request
func (o *ObjectsClassReferencesDeleteParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {

	if err := r.SetTimeout(o.timeout); err != nil {
		return err
	}
	// NOTE: nothing ever appends to res, so the CompositeValidationError
	// branch at the bottom is unreachable in practice; it is kept for
	// generated-code symmetry with params that do validate.
	var res []error
	if o.Body != nil {
		if err := r.SetBodyParam(o.Body); err != nil {
			return err
		}
	}

	// path param className
	if err := r.SetPathParam("className", o.ClassName); err != nil {
		return err
	}

	if o.ConsistencyLevel != nil {

		// query param consistency_level
		var qrConsistencyLevel string

		// inner nil check is redundant (guarded above) — generator artifact
		if o.ConsistencyLevel != nil {
			qrConsistencyLevel = *o.ConsistencyLevel
		}
		qConsistencyLevel := qrConsistencyLevel
		// empty consistency_level is omitted from the query string entirely
		if qConsistencyLevel != "" {

			if err := r.SetQueryParam("consistency_level", qConsistencyLevel); err != nil {
				return err
			}
		}
	}

	// path param id
	if err := r.SetPathParam("id", o.ID.String()); err != nil {
		return err
	}

	// path param propertyName
	if err := r.SetPathParam("propertyName", o.PropertyName); err != nil {
		return err
	}

	if o.Tenant != nil {

		// query param tenant
		var qrTenant string

		// inner nil check is redundant (guarded above) — generator artifact
		if o.Tenant != nil {
			qrTenant = *o.Tenant
		}
		qTenant := qrTenant
		// empty tenant is omitted from the query string entirely
		if qTenant != "" {

			if err := r.SetQueryParam("tenant", qTenant); err != nil {
				return err
			}
		}
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

// --- next generated file in the patch: client/objects/objects_class_references_delete_responses.go ---
//
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
//
// CONTACT: hello@weaviate.io
//

// Code generated by go-swagger; DO NOT EDIT.

package objects

// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command

import (
	"fmt"
	"io"

	"github.com/go-openapi/runtime"
	"github.com/go-openapi/strfmt"

	"github.com/weaviate/weaviate/entities/models"
)

// ObjectsClassReferencesDeleteReader is a Reader for the ObjectsClassReferencesDelete structure.
type ObjectsClassReferencesDeleteReader struct {
	formats strfmt.Registry
}

// ReadResponse reads a server response into the received o.
// ReadResponse reads a server response into the received o. Only the 204
// response is returned as a value; every error status is returned as a Go
// error (the typed response structs all implement error via Error()).
func (o *ObjectsClassReferencesDeleteReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
	switch response.Code() {
	case 204:
		result := NewObjectsClassReferencesDeleteNoContent()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return result, nil
	case 400:
		result := NewObjectsClassReferencesDeleteBadRequest()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, result
	case 401:
		result := NewObjectsClassReferencesDeleteUnauthorized()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, result
	case 403:
		result := NewObjectsClassReferencesDeleteForbidden()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, result
	case 404:
		result := NewObjectsClassReferencesDeleteNotFound()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, result
	case 422:
		result := NewObjectsClassReferencesDeleteUnprocessableEntity()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, result
	case 500:
		result := NewObjectsClassReferencesDeleteInternalServerError()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, result
	default:
		return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
	}
}

// NewObjectsClassReferencesDeleteNoContent creates a ObjectsClassReferencesDeleteNoContent with default headers values
func NewObjectsClassReferencesDeleteNoContent() *ObjectsClassReferencesDeleteNoContent {
	return &ObjectsClassReferencesDeleteNoContent{}
}

/*
ObjectsClassReferencesDeleteNoContent describes a response with status code 204, with default header values.

Successfully deleted.
*/
type ObjectsClassReferencesDeleteNoContent struct {
}

// IsSuccess returns true when this objects class references delete no content response has a 2xx status code
func (o *ObjectsClassReferencesDeleteNoContent) IsSuccess() bool {
	return true
}

// IsRedirect returns true when this objects class references delete no content response has a 3xx status code
func (o *ObjectsClassReferencesDeleteNoContent) IsRedirect() bool {
	return false
}

// IsClientError returns true when this objects class references delete no content response has a 4xx status code
func (o *ObjectsClassReferencesDeleteNoContent) IsClientError() bool {
	return false
}

// IsServerError returns true when this objects class references delete no content response has a 5xx status code
func (o *ObjectsClassReferencesDeleteNoContent) IsServerError() bool {
	return false
}

// IsCode returns true when this objects class references delete no content response has a status code equal to that given
func (o *ObjectsClassReferencesDeleteNoContent) IsCode(code int) bool {
	return code == 204
}

// Code gets the status code for the objects class references delete no content response
func (o *ObjectsClassReferencesDeleteNoContent) Code() int {
	return 204
}

func (o *ObjectsClassReferencesDeleteNoContent) Error() string {
	return fmt.Sprintf("[DELETE /objects/{className}/{id}/references/{propertyName}][%d] objectsClassReferencesDeleteNoContent ", 204)
}

func (o *ObjectsClassReferencesDeleteNoContent) String() string {
	return fmt.Sprintf("[DELETE /objects/{className}/{id}/references/{propertyName}][%d] objectsClassReferencesDeleteNoContent ", 204)
}

// readResponse is a no-op: the 204 response carries no body or headers.
func (o *ObjectsClassReferencesDeleteNoContent) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {

	return nil
}

// NewObjectsClassReferencesDeleteBadRequest creates a
ObjectsClassReferencesDeleteBadRequest with default headers values +func NewObjectsClassReferencesDeleteBadRequest() *ObjectsClassReferencesDeleteBadRequest { + return &ObjectsClassReferencesDeleteBadRequest{} +} + +/* +ObjectsClassReferencesDeleteBadRequest describes a response with status code 400, with default header values. + +Malformed request. +*/ +type ObjectsClassReferencesDeleteBadRequest struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this objects class references delete bad request response has a 2xx status code +func (o *ObjectsClassReferencesDeleteBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects class references delete bad request response has a 3xx status code +func (o *ObjectsClassReferencesDeleteBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects class references delete bad request response has a 4xx status code +func (o *ObjectsClassReferencesDeleteBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects class references delete bad request response has a 5xx status code +func (o *ObjectsClassReferencesDeleteBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this objects class references delete bad request response a status code equal to that given +func (o *ObjectsClassReferencesDeleteBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the objects class references delete bad request response +func (o *ObjectsClassReferencesDeleteBadRequest) Code() int { + return 400 +} + +func (o *ObjectsClassReferencesDeleteBadRequest) Error() string { + return fmt.Sprintf("[DELETE /objects/{className}/{id}/references/{propertyName}][%d] objectsClassReferencesDeleteBadRequest %+v", 400, o.Payload) +} + +func (o *ObjectsClassReferencesDeleteBadRequest) String() string { + return fmt.Sprintf("[DELETE 
/objects/{className}/{id}/references/{propertyName}][%d] objectsClassReferencesDeleteBadRequest %+v", 400, o.Payload) +} + +func (o *ObjectsClassReferencesDeleteBadRequest) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ObjectsClassReferencesDeleteBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewObjectsClassReferencesDeleteUnauthorized creates a ObjectsClassReferencesDeleteUnauthorized with default headers values +func NewObjectsClassReferencesDeleteUnauthorized() *ObjectsClassReferencesDeleteUnauthorized { + return &ObjectsClassReferencesDeleteUnauthorized{} +} + +/* +ObjectsClassReferencesDeleteUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. 
+*/ +type ObjectsClassReferencesDeleteUnauthorized struct { +} + +// IsSuccess returns true when this objects class references delete unauthorized response has a 2xx status code +func (o *ObjectsClassReferencesDeleteUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects class references delete unauthorized response has a 3xx status code +func (o *ObjectsClassReferencesDeleteUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects class references delete unauthorized response has a 4xx status code +func (o *ObjectsClassReferencesDeleteUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects class references delete unauthorized response has a 5xx status code +func (o *ObjectsClassReferencesDeleteUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this objects class references delete unauthorized response a status code equal to that given +func (o *ObjectsClassReferencesDeleteUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the objects class references delete unauthorized response +func (o *ObjectsClassReferencesDeleteUnauthorized) Code() int { + return 401 +} + +func (o *ObjectsClassReferencesDeleteUnauthorized) Error() string { + return fmt.Sprintf("[DELETE /objects/{className}/{id}/references/{propertyName}][%d] objectsClassReferencesDeleteUnauthorized ", 401) +} + +func (o *ObjectsClassReferencesDeleteUnauthorized) String() string { + return fmt.Sprintf("[DELETE /objects/{className}/{id}/references/{propertyName}][%d] objectsClassReferencesDeleteUnauthorized ", 401) +} + +func (o *ObjectsClassReferencesDeleteUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewObjectsClassReferencesDeleteForbidden creates a ObjectsClassReferencesDeleteForbidden with 
default headers values +func NewObjectsClassReferencesDeleteForbidden() *ObjectsClassReferencesDeleteForbidden { + return &ObjectsClassReferencesDeleteForbidden{} +} + +/* +ObjectsClassReferencesDeleteForbidden describes a response with status code 403, with default header values. + +Forbidden +*/ +type ObjectsClassReferencesDeleteForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this objects class references delete forbidden response has a 2xx status code +func (o *ObjectsClassReferencesDeleteForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects class references delete forbidden response has a 3xx status code +func (o *ObjectsClassReferencesDeleteForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects class references delete forbidden response has a 4xx status code +func (o *ObjectsClassReferencesDeleteForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects class references delete forbidden response has a 5xx status code +func (o *ObjectsClassReferencesDeleteForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this objects class references delete forbidden response a status code equal to that given +func (o *ObjectsClassReferencesDeleteForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the objects class references delete forbidden response +func (o *ObjectsClassReferencesDeleteForbidden) Code() int { + return 403 +} + +func (o *ObjectsClassReferencesDeleteForbidden) Error() string { + return fmt.Sprintf("[DELETE /objects/{className}/{id}/references/{propertyName}][%d] objectsClassReferencesDeleteForbidden %+v", 403, o.Payload) +} + +func (o *ObjectsClassReferencesDeleteForbidden) String() string { + return fmt.Sprintf("[DELETE /objects/{className}/{id}/references/{propertyName}][%d] objectsClassReferencesDeleteForbidden 
%+v", 403, o.Payload) +} + +func (o *ObjectsClassReferencesDeleteForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ObjectsClassReferencesDeleteForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewObjectsClassReferencesDeleteNotFound creates a ObjectsClassReferencesDeleteNotFound with default headers values +func NewObjectsClassReferencesDeleteNotFound() *ObjectsClassReferencesDeleteNotFound { + return &ObjectsClassReferencesDeleteNotFound{} +} + +/* +ObjectsClassReferencesDeleteNotFound describes a response with status code 404, with default header values. + +Successful query result but no resource was found. +*/ +type ObjectsClassReferencesDeleteNotFound struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this objects class references delete not found response has a 2xx status code +func (o *ObjectsClassReferencesDeleteNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects class references delete not found response has a 3xx status code +func (o *ObjectsClassReferencesDeleteNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects class references delete not found response has a 4xx status code +func (o *ObjectsClassReferencesDeleteNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects class references delete not found response has a 5xx status code +func (o *ObjectsClassReferencesDeleteNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this objects class references delete not found response a status code equal to that given +func (o *ObjectsClassReferencesDeleteNotFound) IsCode(code int) bool { 
+ return code == 404 +} + +// Code gets the status code for the objects class references delete not found response +func (o *ObjectsClassReferencesDeleteNotFound) Code() int { + return 404 +} + +func (o *ObjectsClassReferencesDeleteNotFound) Error() string { + return fmt.Sprintf("[DELETE /objects/{className}/{id}/references/{propertyName}][%d] objectsClassReferencesDeleteNotFound %+v", 404, o.Payload) +} + +func (o *ObjectsClassReferencesDeleteNotFound) String() string { + return fmt.Sprintf("[DELETE /objects/{className}/{id}/references/{propertyName}][%d] objectsClassReferencesDeleteNotFound %+v", 404, o.Payload) +} + +func (o *ObjectsClassReferencesDeleteNotFound) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ObjectsClassReferencesDeleteNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewObjectsClassReferencesDeleteUnprocessableEntity creates a ObjectsClassReferencesDeleteUnprocessableEntity with default headers values +func NewObjectsClassReferencesDeleteUnprocessableEntity() *ObjectsClassReferencesDeleteUnprocessableEntity { + return &ObjectsClassReferencesDeleteUnprocessableEntity{} +} + +/* +ObjectsClassReferencesDeleteUnprocessableEntity describes a response with status code 422, with default header values. + +Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the property exists or that it is a class? 
+*/ +type ObjectsClassReferencesDeleteUnprocessableEntity struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this objects class references delete unprocessable entity response has a 2xx status code +func (o *ObjectsClassReferencesDeleteUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects class references delete unprocessable entity response has a 3xx status code +func (o *ObjectsClassReferencesDeleteUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects class references delete unprocessable entity response has a 4xx status code +func (o *ObjectsClassReferencesDeleteUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects class references delete unprocessable entity response has a 5xx status code +func (o *ObjectsClassReferencesDeleteUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this objects class references delete unprocessable entity response a status code equal to that given +func (o *ObjectsClassReferencesDeleteUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + +// Code gets the status code for the objects class references delete unprocessable entity response +func (o *ObjectsClassReferencesDeleteUnprocessableEntity) Code() int { + return 422 +} + +func (o *ObjectsClassReferencesDeleteUnprocessableEntity) Error() string { + return fmt.Sprintf("[DELETE /objects/{className}/{id}/references/{propertyName}][%d] objectsClassReferencesDeleteUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *ObjectsClassReferencesDeleteUnprocessableEntity) String() string { + return fmt.Sprintf("[DELETE /objects/{className}/{id}/references/{propertyName}][%d] objectsClassReferencesDeleteUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *ObjectsClassReferencesDeleteUnprocessableEntity) GetPayload() *models.ErrorResponse { 
+ return o.Payload +} + +func (o *ObjectsClassReferencesDeleteUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewObjectsClassReferencesDeleteInternalServerError creates a ObjectsClassReferencesDeleteInternalServerError with default headers values +func NewObjectsClassReferencesDeleteInternalServerError() *ObjectsClassReferencesDeleteInternalServerError { + return &ObjectsClassReferencesDeleteInternalServerError{} +} + +/* +ObjectsClassReferencesDeleteInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. +*/ +type ObjectsClassReferencesDeleteInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this objects class references delete internal server error response has a 2xx status code +func (o *ObjectsClassReferencesDeleteInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects class references delete internal server error response has a 3xx status code +func (o *ObjectsClassReferencesDeleteInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects class references delete internal server error response has a 4xx status code +func (o *ObjectsClassReferencesDeleteInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this objects class references delete internal server error response has a 5xx status code +func (o *ObjectsClassReferencesDeleteInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this 
objects class references delete internal server error response a status code equal to that given +func (o *ObjectsClassReferencesDeleteInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the objects class references delete internal server error response +func (o *ObjectsClassReferencesDeleteInternalServerError) Code() int { + return 500 +} + +func (o *ObjectsClassReferencesDeleteInternalServerError) Error() string { + return fmt.Sprintf("[DELETE /objects/{className}/{id}/references/{propertyName}][%d] objectsClassReferencesDeleteInternalServerError %+v", 500, o.Payload) +} + +func (o *ObjectsClassReferencesDeleteInternalServerError) String() string { + return fmt.Sprintf("[DELETE /objects/{className}/{id}/references/{propertyName}][%d] objectsClassReferencesDeleteInternalServerError %+v", 500, o.Payload) +} + +func (o *ObjectsClassReferencesDeleteInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ObjectsClassReferencesDeleteInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/objects/objects_class_references_put_parameters.go b/platform/dbops/binaries/weaviate-src/client/objects/objects_class_references_put_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..5e21682c74820cb53256bcbbd5da13dd22b688a6 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/objects/objects_class_references_put_parameters.go @@ -0,0 +1,297 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 
2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// NewObjectsClassReferencesPutParams creates a new ObjectsClassReferencesPutParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewObjectsClassReferencesPutParams() *ObjectsClassReferencesPutParams { + return &ObjectsClassReferencesPutParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewObjectsClassReferencesPutParamsWithTimeout creates a new ObjectsClassReferencesPutParams object +// with the ability to set a timeout on a request. +func NewObjectsClassReferencesPutParamsWithTimeout(timeout time.Duration) *ObjectsClassReferencesPutParams { + return &ObjectsClassReferencesPutParams{ + timeout: timeout, + } +} + +// NewObjectsClassReferencesPutParamsWithContext creates a new ObjectsClassReferencesPutParams object +// with the ability to set a context for a request. +func NewObjectsClassReferencesPutParamsWithContext(ctx context.Context) *ObjectsClassReferencesPutParams { + return &ObjectsClassReferencesPutParams{ + Context: ctx, + } +} + +// NewObjectsClassReferencesPutParamsWithHTTPClient creates a new ObjectsClassReferencesPutParams object +// with the ability to set a custom HTTPClient for a request. 
+func NewObjectsClassReferencesPutParamsWithHTTPClient(client *http.Client) *ObjectsClassReferencesPutParams { + return &ObjectsClassReferencesPutParams{ + HTTPClient: client, + } +} + +/* +ObjectsClassReferencesPutParams contains all the parameters to send to the API endpoint + + for the objects class references put operation. + + Typically these are written to a http.Request. +*/ +type ObjectsClassReferencesPutParams struct { + + // Body. + Body models.MultipleRef + + /* ClassName. + + The class name as defined in the schema + */ + ClassName string + + /* ConsistencyLevel. + + Determines how many replicas must acknowledge a request before it is considered successful + */ + ConsistencyLevel *string + + /* ID. + + Unique ID of the Object. + + Format: uuid + */ + ID strfmt.UUID + + /* PropertyName. + + Unique name of the property related to the Object. + */ + PropertyName string + + /* Tenant. + + Specifies the tenant in a request targeting a multi-tenant class + */ + Tenant *string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the objects class references put params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *ObjectsClassReferencesPutParams) WithDefaults() *ObjectsClassReferencesPutParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the objects class references put params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *ObjectsClassReferencesPutParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the objects class references put params +func (o *ObjectsClassReferencesPutParams) WithTimeout(timeout time.Duration) *ObjectsClassReferencesPutParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the objects class references put params +func (o *ObjectsClassReferencesPutParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the objects class references put params +func (o *ObjectsClassReferencesPutParams) WithContext(ctx context.Context) *ObjectsClassReferencesPutParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the objects class references put params +func (o *ObjectsClassReferencesPutParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the objects class references put params +func (o *ObjectsClassReferencesPutParams) WithHTTPClient(client *http.Client) *ObjectsClassReferencesPutParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the objects class references put params +func (o *ObjectsClassReferencesPutParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the objects class references put params +func (o *ObjectsClassReferencesPutParams) WithBody(body models.MultipleRef) *ObjectsClassReferencesPutParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the objects class references put params +func (o *ObjectsClassReferencesPutParams) SetBody(body models.MultipleRef) { + o.Body = body +} + +// WithClassName adds the className to the objects class references put params +func (o *ObjectsClassReferencesPutParams) WithClassName(className string) *ObjectsClassReferencesPutParams { + o.SetClassName(className) + return o +} + +// SetClassName adds the 
className to the objects class references put params +func (o *ObjectsClassReferencesPutParams) SetClassName(className string) { + o.ClassName = className +} + +// WithConsistencyLevel adds the consistencyLevel to the objects class references put params +func (o *ObjectsClassReferencesPutParams) WithConsistencyLevel(consistencyLevel *string) *ObjectsClassReferencesPutParams { + o.SetConsistencyLevel(consistencyLevel) + return o +} + +// SetConsistencyLevel adds the consistencyLevel to the objects class references put params +func (o *ObjectsClassReferencesPutParams) SetConsistencyLevel(consistencyLevel *string) { + o.ConsistencyLevel = consistencyLevel +} + +// WithID adds the id to the objects class references put params +func (o *ObjectsClassReferencesPutParams) WithID(id strfmt.UUID) *ObjectsClassReferencesPutParams { + o.SetID(id) + return o +} + +// SetID adds the id to the objects class references put params +func (o *ObjectsClassReferencesPutParams) SetID(id strfmt.UUID) { + o.ID = id +} + +// WithPropertyName adds the propertyName to the objects class references put params +func (o *ObjectsClassReferencesPutParams) WithPropertyName(propertyName string) *ObjectsClassReferencesPutParams { + o.SetPropertyName(propertyName) + return o +} + +// SetPropertyName adds the propertyName to the objects class references put params +func (o *ObjectsClassReferencesPutParams) SetPropertyName(propertyName string) { + o.PropertyName = propertyName +} + +// WithTenant adds the tenant to the objects class references put params +func (o *ObjectsClassReferencesPutParams) WithTenant(tenant *string) *ObjectsClassReferencesPutParams { + o.SetTenant(tenant) + return o +} + +// SetTenant adds the tenant to the objects class references put params +func (o *ObjectsClassReferencesPutParams) SetTenant(tenant *string) { + o.Tenant = tenant +} + +// WriteToRequest writes these params to a swagger request +func (o *ObjectsClassReferencesPutParams) WriteToRequest(r runtime.ClientRequest, 
reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + // path param className + if err := r.SetPathParam("className", o.ClassName); err != nil { + return err + } + + if o.ConsistencyLevel != nil { + + // query param consistency_level + var qrConsistencyLevel string + + if o.ConsistencyLevel != nil { + qrConsistencyLevel = *o.ConsistencyLevel + } + qConsistencyLevel := qrConsistencyLevel + if qConsistencyLevel != "" { + + if err := r.SetQueryParam("consistency_level", qConsistencyLevel); err != nil { + return err + } + } + } + + // path param id + if err := r.SetPathParam("id", o.ID.String()); err != nil { + return err + } + + // path param propertyName + if err := r.SetPathParam("propertyName", o.PropertyName); err != nil { + return err + } + + if o.Tenant != nil { + + // query param tenant + var qrTenant string + + if o.Tenant != nil { + qrTenant = *o.Tenant + } + qTenant := qrTenant + if qTenant != "" { + + if err := r.SetQueryParam("tenant", qTenant); err != nil { + return err + } + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/objects/objects_class_references_put_responses.go b/platform/dbops/binaries/weaviate-src/client/objects/objects_class_references_put_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..f0a8f312ff2531dbdc2c7581e9bf5395dc235932 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/objects/objects_class_references_put_responses.go @@ -0,0 +1,522 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// ObjectsClassReferencesPutReader is a Reader for the ObjectsClassReferencesPut structure. +type ObjectsClassReferencesPutReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ObjectsClassReferencesPutReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewObjectsClassReferencesPutOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewObjectsClassReferencesPutBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 401: + result := NewObjectsClassReferencesPutUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewObjectsClassReferencesPutForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 404: + result := NewObjectsClassReferencesPutNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewObjectsClassReferencesPutUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewObjectsClassReferencesPutInternalServerError() + if err := 
result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewObjectsClassReferencesPutOK creates a ObjectsClassReferencesPutOK with default headers values +func NewObjectsClassReferencesPutOK() *ObjectsClassReferencesPutOK { + return &ObjectsClassReferencesPutOK{} +} + +/* +ObjectsClassReferencesPutOK describes a response with status code 200, with default header values. + +Successfully replaced all the references. +*/ +type ObjectsClassReferencesPutOK struct { +} + +// IsSuccess returns true when this objects class references put o k response has a 2xx status code +func (o *ObjectsClassReferencesPutOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this objects class references put o k response has a 3xx status code +func (o *ObjectsClassReferencesPutOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects class references put o k response has a 4xx status code +func (o *ObjectsClassReferencesPutOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this objects class references put o k response has a 5xx status code +func (o *ObjectsClassReferencesPutOK) IsServerError() bool { + return false +} + +// IsCode returns true when this objects class references put o k response a status code equal to that given +func (o *ObjectsClassReferencesPutOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the objects class references put o k response +func (o *ObjectsClassReferencesPutOK) Code() int { + return 200 +} + +func (o *ObjectsClassReferencesPutOK) Error() string { + return fmt.Sprintf("[PUT /objects/{className}/{id}/references/{propertyName}][%d] objectsClassReferencesPutOK ", 200) +} + +func (o 
*ObjectsClassReferencesPutOK) String() string { + return fmt.Sprintf("[PUT /objects/{className}/{id}/references/{propertyName}][%d] objectsClassReferencesPutOK ", 200) +} + +func (o *ObjectsClassReferencesPutOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewObjectsClassReferencesPutBadRequest creates a ObjectsClassReferencesPutBadRequest with default headers values +func NewObjectsClassReferencesPutBadRequest() *ObjectsClassReferencesPutBadRequest { + return &ObjectsClassReferencesPutBadRequest{} +} + +/* +ObjectsClassReferencesPutBadRequest describes a response with status code 400, with default header values. + +Malformed request. +*/ +type ObjectsClassReferencesPutBadRequest struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this objects class references put bad request response has a 2xx status code +func (o *ObjectsClassReferencesPutBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects class references put bad request response has a 3xx status code +func (o *ObjectsClassReferencesPutBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects class references put bad request response has a 4xx status code +func (o *ObjectsClassReferencesPutBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects class references put bad request response has a 5xx status code +func (o *ObjectsClassReferencesPutBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this objects class references put bad request response a status code equal to that given +func (o *ObjectsClassReferencesPutBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the objects class references put bad request response +func (o *ObjectsClassReferencesPutBadRequest) Code() int { + return 400 +} + 
+func (o *ObjectsClassReferencesPutBadRequest) Error() string { + return fmt.Sprintf("[PUT /objects/{className}/{id}/references/{propertyName}][%d] objectsClassReferencesPutBadRequest %+v", 400, o.Payload) +} + +func (o *ObjectsClassReferencesPutBadRequest) String() string { + return fmt.Sprintf("[PUT /objects/{className}/{id}/references/{propertyName}][%d] objectsClassReferencesPutBadRequest %+v", 400, o.Payload) +} + +func (o *ObjectsClassReferencesPutBadRequest) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ObjectsClassReferencesPutBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewObjectsClassReferencesPutUnauthorized creates a ObjectsClassReferencesPutUnauthorized with default headers values +func NewObjectsClassReferencesPutUnauthorized() *ObjectsClassReferencesPutUnauthorized { + return &ObjectsClassReferencesPutUnauthorized{} +} + +/* +ObjectsClassReferencesPutUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. 
+*/ +type ObjectsClassReferencesPutUnauthorized struct { +} + +// IsSuccess returns true when this objects class references put unauthorized response has a 2xx status code +func (o *ObjectsClassReferencesPutUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects class references put unauthorized response has a 3xx status code +func (o *ObjectsClassReferencesPutUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects class references put unauthorized response has a 4xx status code +func (o *ObjectsClassReferencesPutUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects class references put unauthorized response has a 5xx status code +func (o *ObjectsClassReferencesPutUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this objects class references put unauthorized response a status code equal to that given +func (o *ObjectsClassReferencesPutUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the objects class references put unauthorized response +func (o *ObjectsClassReferencesPutUnauthorized) Code() int { + return 401 +} + +func (o *ObjectsClassReferencesPutUnauthorized) Error() string { + return fmt.Sprintf("[PUT /objects/{className}/{id}/references/{propertyName}][%d] objectsClassReferencesPutUnauthorized ", 401) +} + +func (o *ObjectsClassReferencesPutUnauthorized) String() string { + return fmt.Sprintf("[PUT /objects/{className}/{id}/references/{propertyName}][%d] objectsClassReferencesPutUnauthorized ", 401) +} + +func (o *ObjectsClassReferencesPutUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewObjectsClassReferencesPutForbidden creates a ObjectsClassReferencesPutForbidden with default headers values +func NewObjectsClassReferencesPutForbidden() 
*ObjectsClassReferencesPutForbidden { + return &ObjectsClassReferencesPutForbidden{} +} + +/* +ObjectsClassReferencesPutForbidden describes a response with status code 403, with default header values. + +Forbidden +*/ +type ObjectsClassReferencesPutForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this objects class references put forbidden response has a 2xx status code +func (o *ObjectsClassReferencesPutForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects class references put forbidden response has a 3xx status code +func (o *ObjectsClassReferencesPutForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects class references put forbidden response has a 4xx status code +func (o *ObjectsClassReferencesPutForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects class references put forbidden response has a 5xx status code +func (o *ObjectsClassReferencesPutForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this objects class references put forbidden response a status code equal to that given +func (o *ObjectsClassReferencesPutForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the objects class references put forbidden response +func (o *ObjectsClassReferencesPutForbidden) Code() int { + return 403 +} + +func (o *ObjectsClassReferencesPutForbidden) Error() string { + return fmt.Sprintf("[PUT /objects/{className}/{id}/references/{propertyName}][%d] objectsClassReferencesPutForbidden %+v", 403, o.Payload) +} + +func (o *ObjectsClassReferencesPutForbidden) String() string { + return fmt.Sprintf("[PUT /objects/{className}/{id}/references/{propertyName}][%d] objectsClassReferencesPutForbidden %+v", 403, o.Payload) +} + +func (o *ObjectsClassReferencesPutForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o 
*ObjectsClassReferencesPutForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewObjectsClassReferencesPutNotFound creates a ObjectsClassReferencesPutNotFound with default headers values +func NewObjectsClassReferencesPutNotFound() *ObjectsClassReferencesPutNotFound { + return &ObjectsClassReferencesPutNotFound{} +} + +/* +ObjectsClassReferencesPutNotFound describes a response with status code 404, with default header values. + +Source object doesn't exist. +*/ +type ObjectsClassReferencesPutNotFound struct { +} + +// IsSuccess returns true when this objects class references put not found response has a 2xx status code +func (o *ObjectsClassReferencesPutNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects class references put not found response has a 3xx status code +func (o *ObjectsClassReferencesPutNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects class references put not found response has a 4xx status code +func (o *ObjectsClassReferencesPutNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects class references put not found response has a 5xx status code +func (o *ObjectsClassReferencesPutNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this objects class references put not found response a status code equal to that given +func (o *ObjectsClassReferencesPutNotFound) IsCode(code int) bool { + return code == 404 +} + +// Code gets the status code for the objects class references put not found response +func (o *ObjectsClassReferencesPutNotFound) Code() int { + return 404 +} + +func (o *ObjectsClassReferencesPutNotFound) Error() string { + 
return fmt.Sprintf("[PUT /objects/{className}/{id}/references/{propertyName}][%d] objectsClassReferencesPutNotFound ", 404) +} + +func (o *ObjectsClassReferencesPutNotFound) String() string { + return fmt.Sprintf("[PUT /objects/{className}/{id}/references/{propertyName}][%d] objectsClassReferencesPutNotFound ", 404) +} + +func (o *ObjectsClassReferencesPutNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewObjectsClassReferencesPutUnprocessableEntity creates a ObjectsClassReferencesPutUnprocessableEntity with default headers values +func NewObjectsClassReferencesPutUnprocessableEntity() *ObjectsClassReferencesPutUnprocessableEntity { + return &ObjectsClassReferencesPutUnprocessableEntity{} +} + +/* +ObjectsClassReferencesPutUnprocessableEntity describes a response with status code 422, with default header values. + +Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the property exists or that it is a class? 
+*/ +type ObjectsClassReferencesPutUnprocessableEntity struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this objects class references put unprocessable entity response has a 2xx status code +func (o *ObjectsClassReferencesPutUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects class references put unprocessable entity response has a 3xx status code +func (o *ObjectsClassReferencesPutUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects class references put unprocessable entity response has a 4xx status code +func (o *ObjectsClassReferencesPutUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects class references put unprocessable entity response has a 5xx status code +func (o *ObjectsClassReferencesPutUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this objects class references put unprocessable entity response a status code equal to that given +func (o *ObjectsClassReferencesPutUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + +// Code gets the status code for the objects class references put unprocessable entity response +func (o *ObjectsClassReferencesPutUnprocessableEntity) Code() int { + return 422 +} + +func (o *ObjectsClassReferencesPutUnprocessableEntity) Error() string { + return fmt.Sprintf("[PUT /objects/{className}/{id}/references/{propertyName}][%d] objectsClassReferencesPutUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *ObjectsClassReferencesPutUnprocessableEntity) String() string { + return fmt.Sprintf("[PUT /objects/{className}/{id}/references/{propertyName}][%d] objectsClassReferencesPutUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *ObjectsClassReferencesPutUnprocessableEntity) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o 
*ObjectsClassReferencesPutUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewObjectsClassReferencesPutInternalServerError creates a ObjectsClassReferencesPutInternalServerError with default headers values +func NewObjectsClassReferencesPutInternalServerError() *ObjectsClassReferencesPutInternalServerError { + return &ObjectsClassReferencesPutInternalServerError{} +} + +/* +ObjectsClassReferencesPutInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. +*/ +type ObjectsClassReferencesPutInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this objects class references put internal server error response has a 2xx status code +func (o *ObjectsClassReferencesPutInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects class references put internal server error response has a 3xx status code +func (o *ObjectsClassReferencesPutInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects class references put internal server error response has a 4xx status code +func (o *ObjectsClassReferencesPutInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this objects class references put internal server error response has a 5xx status code +func (o *ObjectsClassReferencesPutInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this objects class references put internal server error response a status code equal to 
that given +func (o *ObjectsClassReferencesPutInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the objects class references put internal server error response +func (o *ObjectsClassReferencesPutInternalServerError) Code() int { + return 500 +} + +func (o *ObjectsClassReferencesPutInternalServerError) Error() string { + return fmt.Sprintf("[PUT /objects/{className}/{id}/references/{propertyName}][%d] objectsClassReferencesPutInternalServerError %+v", 500, o.Payload) +} + +func (o *ObjectsClassReferencesPutInternalServerError) String() string { + return fmt.Sprintf("[PUT /objects/{className}/{id}/references/{propertyName}][%d] objectsClassReferencesPutInternalServerError %+v", 500, o.Payload) +} + +func (o *ObjectsClassReferencesPutInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ObjectsClassReferencesPutInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/objects/objects_client.go b/platform/dbops/binaries/weaviate-src/client/objects/objects_client.go new file mode 100644 index 0000000000000000000000000000000000000000..8178d4bcad31e48144c53dac2c830af81ee1cce7 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/objects/objects_client.go @@ -0,0 +1,867 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. 
+ +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" +) + +// New creates a new objects API client. +func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService { + return &Client{transport: transport, formats: formats} +} + +/* +Client for objects API +*/ +type Client struct { + transport runtime.ClientTransport + formats strfmt.Registry +} + +// ClientOption is the option for Client methods +type ClientOption func(*runtime.ClientOperation) + +// ClientService is the interface for Client methods +type ClientService interface { + ObjectsClassDelete(params *ObjectsClassDeleteParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ObjectsClassDeleteNoContent, error) + + ObjectsClassGet(params *ObjectsClassGetParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ObjectsClassGetOK, error) + + ObjectsClassHead(params *ObjectsClassHeadParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ObjectsClassHeadNoContent, error) + + ObjectsClassPatch(params *ObjectsClassPatchParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ObjectsClassPatchNoContent, error) + + ObjectsClassPut(params *ObjectsClassPutParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ObjectsClassPutOK, error) + + ObjectsClassReferencesCreate(params *ObjectsClassReferencesCreateParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ObjectsClassReferencesCreateOK, error) + + ObjectsClassReferencesDelete(params *ObjectsClassReferencesDeleteParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ObjectsClassReferencesDeleteNoContent, error) + + ObjectsClassReferencesPut(params *ObjectsClassReferencesPutParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) 
(*ObjectsClassReferencesPutOK, error) + + ObjectsCreate(params *ObjectsCreateParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ObjectsCreateOK, error) + + ObjectsDelete(params *ObjectsDeleteParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ObjectsDeleteNoContent, error) + + ObjectsGet(params *ObjectsGetParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ObjectsGetOK, error) + + ObjectsHead(params *ObjectsHeadParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ObjectsHeadNoContent, error) + + ObjectsList(params *ObjectsListParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ObjectsListOK, error) + + ObjectsPatch(params *ObjectsPatchParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ObjectsPatchNoContent, error) + + ObjectsReferencesCreate(params *ObjectsReferencesCreateParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ObjectsReferencesCreateOK, error) + + ObjectsReferencesDelete(params *ObjectsReferencesDeleteParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ObjectsReferencesDeleteNoContent, error) + + ObjectsReferencesUpdate(params *ObjectsReferencesUpdateParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ObjectsReferencesUpdateOK, error) + + ObjectsUpdate(params *ObjectsUpdateParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ObjectsUpdateOK, error) + + ObjectsValidate(params *ObjectsValidateParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ObjectsValidateOK, error) + + SetTransport(transport runtime.ClientTransport) +} + +/* +ObjectsClassDelete deletes object based on its class and UUID + +Delete an object based on its collection and UUID.

Note: For backward compatibility, beacons also support an older, deprecated format without the collection name. As a result, when deleting a reference, the beacon specified has to match the beacon to be deleted exactly. In other words, if a beacon is present using the old format (without collection name) you also need to specify it the same way.

In the beacon format, you need to always use `localhost` as the host, rather than the actual hostname. `localhost` here refers to the fact that the beacon's target is on the same Weaviate instance, as opposed to a foreign instance. +*/ +func (a *Client) ObjectsClassDelete(params *ObjectsClassDeleteParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ObjectsClassDeleteNoContent, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewObjectsClassDeleteParams() + } + op := &runtime.ClientOperation{ + ID: "objects.class.delete", + Method: "DELETE", + PathPattern: "/objects/{className}/{id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &ObjectsClassDeleteReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*ObjectsClassDeleteNoContent) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for objects.class.delete: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +ObjectsClassGet gets a specific object based on its class and UUID also available as websocket bus + +Get a data object based on its collection and UUID. 
+*/ +func (a *Client) ObjectsClassGet(params *ObjectsClassGetParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ObjectsClassGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewObjectsClassGetParams() + } + op := &runtime.ClientOperation{ + ID: "objects.class.get", + Method: "GET", + PathPattern: "/objects/{className}/{id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &ObjectsClassGetReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*ObjectsClassGetOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for objects.class.get: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +ObjectsClassHead checks object s existence based on its class and uuid + +Checks if a data object exists based on its collection and uuid without retrieving it.

Internally it skips reading the object from disk other than checking if it is present. Thus it does not use resources on marshalling, parsing, etc., and is faster. Note the resulting HTTP request has no body; the existence of an object is indicated solely by the status code. +*/ +func (a *Client) ObjectsClassHead(params *ObjectsClassHeadParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ObjectsClassHeadNoContent, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewObjectsClassHeadParams() + } + op := &runtime.ClientOperation{ + ID: "objects.class.head", + Method: "HEAD", + PathPattern: "/objects/{className}/{id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &ObjectsClassHeadReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*ObjectsClassHeadNoContent) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for objects.class.head: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +ObjectsClassPatch updates an object based on its UUID using patch semantics + +Update an individual data object based on its class and uuid. This method supports json-merge style patch semantics (RFC 7396). Provided meta-data and schema values are validated. LastUpdateTime is set to the time this function is called. 
+*/ +func (a *Client) ObjectsClassPatch(params *ObjectsClassPatchParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ObjectsClassPatchNoContent, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewObjectsClassPatchParams() + } + op := &runtime.ClientOperation{ + ID: "objects.class.patch", + Method: "PATCH", + PathPattern: "/objects/{className}/{id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &ObjectsClassPatchReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*ObjectsClassPatchNoContent) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for objects.class.patch: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +ObjectsClassPut updates a class object based on its uuid + +Update an object based on its uuid and collection. This (`put`) method replaces the object with the provided object. 
+*/ +func (a *Client) ObjectsClassPut(params *ObjectsClassPutParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ObjectsClassPutOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewObjectsClassPutParams() + } + op := &runtime.ClientOperation{ + ID: "objects.class.put", + Method: "PUT", + PathPattern: "/objects/{className}/{id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &ObjectsClassPutReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*ObjectsClassPutOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for objects.class.put: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +ObjectsClassReferencesCreate adds a single reference to a class property + +Add a single reference to an object. 
This adds a reference to the array of cross-references of the given property in the source object specified by its collection name and id +*/ +func (a *Client) ObjectsClassReferencesCreate(params *ObjectsClassReferencesCreateParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ObjectsClassReferencesCreateOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewObjectsClassReferencesCreateParams() + } + op := &runtime.ClientOperation{ + ID: "objects.class.references.create", + Method: "POST", + PathPattern: "/objects/{className}/{id}/references/{propertyName}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &ObjectsClassReferencesCreateReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*ObjectsClassReferencesCreateOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for objects.class.references.create: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +ObjectsClassReferencesDelete deletes a single reference from the list of references + +Delete the single reference that is given in the body from the list of references that this property has. 
+*/ +func (a *Client) ObjectsClassReferencesDelete(params *ObjectsClassReferencesDeleteParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ObjectsClassReferencesDeleteNoContent, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewObjectsClassReferencesDeleteParams() + } + op := &runtime.ClientOperation{ + ID: "objects.class.references.delete", + Method: "DELETE", + PathPattern: "/objects/{className}/{id}/references/{propertyName}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &ObjectsClassReferencesDeleteReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*ObjectsClassReferencesDeleteNoContent) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for objects.class.references.delete: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +ObjectsClassReferencesPut replaces all references to a class property + +Replace **all** references in cross-reference property of an object. 
+*/ +func (a *Client) ObjectsClassReferencesPut(params *ObjectsClassReferencesPutParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ObjectsClassReferencesPutOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewObjectsClassReferencesPutParams() + } + op := &runtime.ClientOperation{ + ID: "objects.class.references.put", + Method: "PUT", + PathPattern: "/objects/{className}/{id}/references/{propertyName}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &ObjectsClassReferencesPutReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*ObjectsClassReferencesPutOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for objects.class.references.put: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +ObjectsCreate creates a new object + +Create a new object.

Meta-data and schema values are validated.

**Note: Use `/batch` for importing many objects**:
If you plan on importing a large number of objects, it's much more efficient to use the `/batch` endpoint. Otherwise, sending multiple single requests sequentially would incur a large performance penalty.

**Note: idempotence of `/objects`**:
POST /objects will fail if an id is provided which already exists in the class. To update an existing object with the objects endpoint, use the PUT or PATCH method. +*/ +func (a *Client) ObjectsCreate(params *ObjectsCreateParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ObjectsCreateOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewObjectsCreateParams() + } + op := &runtime.ClientOperation{ + ID: "objects.create", + Method: "POST", + PathPattern: "/objects", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &ObjectsCreateReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*ObjectsCreateOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for objects.create: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +ObjectsDelete deletes an object based on its UUID + +Deletes an object from the database based on its UUID.

**Note**: This endpoint is deprecated and will be removed in a future version. Use the `/objects/{className}/{id}` endpoint instead. +*/ +func (a *Client) ObjectsDelete(params *ObjectsDeleteParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ObjectsDeleteNoContent, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewObjectsDeleteParams() + } + op := &runtime.ClientOperation{ + ID: "objects.delete", + Method: "DELETE", + PathPattern: "/objects/{id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &ObjectsDeleteReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*ObjectsDeleteNoContent) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for objects.delete: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +ObjectsGet gets a specific object based on its UUID + +Get a specific object based on its UUID. Also available as Websocket bus.

**Note**: This endpoint is deprecated and will be removed in a future version. Use the `/objects/{className}/{id}` endpoint instead. +*/ +func (a *Client) ObjectsGet(params *ObjectsGetParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ObjectsGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewObjectsGetParams() + } + op := &runtime.ClientOperation{ + ID: "objects.get", + Method: "GET", + PathPattern: "/objects/{id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &ObjectsGetReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*ObjectsGetOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for objects.get: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +ObjectsHead checks object s existence based on its UUID + +Checks if an object exists in the system based on its UUID.

**Note**: This endpoint is deprecated and will be removed in a future version. Use the `/objects/{className}/{id}` endpoint instead. +*/ +func (a *Client) ObjectsHead(params *ObjectsHeadParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ObjectsHeadNoContent, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewObjectsHeadParams() + } + op := &runtime.ClientOperation{ + ID: "objects.head", + Method: "HEAD", + PathPattern: "/objects/{id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &ObjectsHeadReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*ObjectsHeadNoContent) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for objects.head: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +ObjectsList gets a list of objects + +Lists all Objects in reverse order of creation, owned by the user that belongs to the used token. 
+*/ +func (a *Client) ObjectsList(params *ObjectsListParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ObjectsListOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewObjectsListParams() + } + op := &runtime.ClientOperation{ + ID: "objects.list", + Method: "GET", + PathPattern: "/objects", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &ObjectsListReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*ObjectsListOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for objects.list: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +ObjectsPatch updates an object based on its UUID using patch semantics + +Update an object based on its UUID (using patch semantics). This method supports json-merge style patch semantics (RFC 7396). Provided meta-data and schema values are validated. LastUpdateTime is set to the time this function is called.

**Note**: This endpoint is deprecated and will be removed in a future version. Use the `/objects/{className}/{id}` endpoint instead. +*/ +func (a *Client) ObjectsPatch(params *ObjectsPatchParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ObjectsPatchNoContent, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewObjectsPatchParams() + } + op := &runtime.ClientOperation{ + ID: "objects.patch", + Method: "PATCH", + PathPattern: "/objects/{id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &ObjectsPatchReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*ObjectsPatchNoContent) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for objects.patch: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +ObjectsReferencesCreate adds a single reference to a class property + +Add a cross-reference.

**Note**: This endpoint is deprecated and will be removed in a future version. Use the `/objects/{className}/{id}/references/{propertyName}` endpoint instead. +*/ +func (a *Client) ObjectsReferencesCreate(params *ObjectsReferencesCreateParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ObjectsReferencesCreateOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewObjectsReferencesCreateParams() + } + op := &runtime.ClientOperation{ + ID: "objects.references.create", + Method: "POST", + PathPattern: "/objects/{id}/references/{propertyName}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &ObjectsReferencesCreateReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*ObjectsReferencesCreateOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for objects.references.create: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +ObjectsReferencesDelete deletes a single reference from the list of references + +Delete the single reference that is given in the body from the list of references that this property has.

**Note**: This endpoint is deprecated and will be removed in a future version. Use the `/objects/{className}/{id}/references/{propertyName}` endpoint instead. +*/ +func (a *Client) ObjectsReferencesDelete(params *ObjectsReferencesDeleteParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ObjectsReferencesDeleteNoContent, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewObjectsReferencesDeleteParams() + } + op := &runtime.ClientOperation{ + ID: "objects.references.delete", + Method: "DELETE", + PathPattern: "/objects/{id}/references/{propertyName}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &ObjectsReferencesDeleteReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*ObjectsReferencesDeleteNoContent) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for objects.references.delete: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +ObjectsReferencesUpdate replaces all references to a class property + +Replace all references in cross-reference property of an object.

**Note**: This endpoint is deprecated and will be removed in a future version. Use the `/objects/{className}/{id}/references/{propertyName}` endpoint instead. +*/ +func (a *Client) ObjectsReferencesUpdate(params *ObjectsReferencesUpdateParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ObjectsReferencesUpdateOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewObjectsReferencesUpdateParams() + } + op := &runtime.ClientOperation{ + ID: "objects.references.update", + Method: "PUT", + PathPattern: "/objects/{id}/references/{propertyName}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &ObjectsReferencesUpdateReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*ObjectsReferencesUpdateOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for objects.references.update: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +ObjectsUpdate updates an object based on its UUID + +Updates an object based on its UUID. Given meta-data and schema values are validated. LastUpdateTime is set to the time this function is called.

**Note**: This endpoint is deprecated and will be removed in a future version. Use the `/objects/{className}/{id}` endpoint instead. +*/ +func (a *Client) ObjectsUpdate(params *ObjectsUpdateParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ObjectsUpdateOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewObjectsUpdateParams() + } + op := &runtime.ClientOperation{ + ID: "objects.update", + Method: "PUT", + PathPattern: "/objects/{id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &ObjectsUpdateReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*ObjectsUpdateOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for objects.update: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +ObjectsValidate validates an object based on a schema + +Validate an object's schema and meta-data without creating it.

If the schema of the object is valid, the request should return nothing with a plain RESTful request. Otherwise, an error object will be returned. +*/ +func (a *Client) ObjectsValidate(params *ObjectsValidateParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ObjectsValidateOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewObjectsValidateParams() + } + op := &runtime.ClientOperation{ + ID: "objects.validate", + Method: "POST", + PathPattern: "/objects/validate", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &ObjectsValidateReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*ObjectsValidateOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for objects.validate: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + panic(msg) +} + +// SetTransport changes the transport on the client +func (a *Client) SetTransport(transport runtime.ClientTransport) { + a.transport = transport +} diff --git a/platform/dbops/binaries/weaviate-src/client/objects/objects_create_parameters.go b/platform/dbops/binaries/weaviate-src/client/objects/objects_create_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..03476c1cdeeefebfbec40ba8e487c895f037b242 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/objects/objects_create_parameters.go @@ -0,0 +1,195 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// NewObjectsCreateParams creates a new ObjectsCreateParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewObjectsCreateParams() *ObjectsCreateParams { + return &ObjectsCreateParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewObjectsCreateParamsWithTimeout creates a new ObjectsCreateParams object +// with the ability to set a timeout on a request. 
+func NewObjectsCreateParamsWithTimeout(timeout time.Duration) *ObjectsCreateParams { + return &ObjectsCreateParams{ + timeout: timeout, + } +} + +// NewObjectsCreateParamsWithContext creates a new ObjectsCreateParams object +// with the ability to set a context for a request. +func NewObjectsCreateParamsWithContext(ctx context.Context) *ObjectsCreateParams { + return &ObjectsCreateParams{ + Context: ctx, + } +} + +// NewObjectsCreateParamsWithHTTPClient creates a new ObjectsCreateParams object +// with the ability to set a custom HTTPClient for a request. +func NewObjectsCreateParamsWithHTTPClient(client *http.Client) *ObjectsCreateParams { + return &ObjectsCreateParams{ + HTTPClient: client, + } +} + +/* +ObjectsCreateParams contains all the parameters to send to the API endpoint + + for the objects create operation. + + Typically these are written to a http.Request. +*/ +type ObjectsCreateParams struct { + + // Body. + Body *models.Object + + /* ConsistencyLevel. + + Determines how many replicas must acknowledge a request before it is considered successful + */ + ConsistencyLevel *string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the objects create params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *ObjectsCreateParams) WithDefaults() *ObjectsCreateParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the objects create params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *ObjectsCreateParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the objects create params +func (o *ObjectsCreateParams) WithTimeout(timeout time.Duration) *ObjectsCreateParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the objects create params +func (o *ObjectsCreateParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the objects create params +func (o *ObjectsCreateParams) WithContext(ctx context.Context) *ObjectsCreateParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the objects create params +func (o *ObjectsCreateParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the objects create params +func (o *ObjectsCreateParams) WithHTTPClient(client *http.Client) *ObjectsCreateParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the objects create params +func (o *ObjectsCreateParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the objects create params +func (o *ObjectsCreateParams) WithBody(body *models.Object) *ObjectsCreateParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the objects create params +func (o *ObjectsCreateParams) SetBody(body *models.Object) { + o.Body = body +} + +// WithConsistencyLevel adds the consistencyLevel to the objects create params +func (o *ObjectsCreateParams) WithConsistencyLevel(consistencyLevel *string) *ObjectsCreateParams { + o.SetConsistencyLevel(consistencyLevel) + return o +} + +// SetConsistencyLevel adds the consistencyLevel to the objects create params +func (o *ObjectsCreateParams) SetConsistencyLevel(consistencyLevel *string) { + o.ConsistencyLevel = consistencyLevel +} + +// WriteToRequest writes these params to a swagger request +func (o *ObjectsCreateParams) 
WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + if o.ConsistencyLevel != nil { + + // query param consistency_level + var qrConsistencyLevel string + + if o.ConsistencyLevel != nil { + qrConsistencyLevel = *o.ConsistencyLevel + } + qConsistencyLevel := qrConsistencyLevel + if qConsistencyLevel != "" { + + if err := r.SetQueryParam("consistency_level", qConsistencyLevel); err != nil { + return err + } + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/objects/objects_create_responses.go b/platform/dbops/binaries/weaviate-src/client/objects/objects_create_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..08537e4a466383777321bb1f23258ee6b0fee5e2 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/objects/objects_create_responses.go @@ -0,0 +1,472 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// ObjectsCreateReader is a Reader for the ObjectsCreate structure. +type ObjectsCreateReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *ObjectsCreateReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewObjectsCreateOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewObjectsCreateBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 401: + result := NewObjectsCreateUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewObjectsCreateForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewObjectsCreateUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewObjectsCreateInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewObjectsCreateOK creates a ObjectsCreateOK with default headers values +func NewObjectsCreateOK() *ObjectsCreateOK { + return &ObjectsCreateOK{} +} + +/* +ObjectsCreateOK describes a response with status code 200, with default header values. + +Object created. 
+*/ +type ObjectsCreateOK struct { + Payload *models.Object +} + +// IsSuccess returns true when this objects create o k response has a 2xx status code +func (o *ObjectsCreateOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this objects create o k response has a 3xx status code +func (o *ObjectsCreateOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects create o k response has a 4xx status code +func (o *ObjectsCreateOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this objects create o k response has a 5xx status code +func (o *ObjectsCreateOK) IsServerError() bool { + return false +} + +// IsCode returns true when this objects create o k response a status code equal to that given +func (o *ObjectsCreateOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the objects create o k response +func (o *ObjectsCreateOK) Code() int { + return 200 +} + +func (o *ObjectsCreateOK) Error() string { + return fmt.Sprintf("[POST /objects][%d] objectsCreateOK %+v", 200, o.Payload) +} + +func (o *ObjectsCreateOK) String() string { + return fmt.Sprintf("[POST /objects][%d] objectsCreateOK %+v", 200, o.Payload) +} + +func (o *ObjectsCreateOK) GetPayload() *models.Object { + return o.Payload +} + +func (o *ObjectsCreateOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Object) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewObjectsCreateBadRequest creates a ObjectsCreateBadRequest with default headers values +func NewObjectsCreateBadRequest() *ObjectsCreateBadRequest { + return &ObjectsCreateBadRequest{} +} + +/* +ObjectsCreateBadRequest describes a response with status code 400, with default header values. + +Malformed request. 
+*/ +type ObjectsCreateBadRequest struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this objects create bad request response has a 2xx status code +func (o *ObjectsCreateBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects create bad request response has a 3xx status code +func (o *ObjectsCreateBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects create bad request response has a 4xx status code +func (o *ObjectsCreateBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects create bad request response has a 5xx status code +func (o *ObjectsCreateBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this objects create bad request response a status code equal to that given +func (o *ObjectsCreateBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the objects create bad request response +func (o *ObjectsCreateBadRequest) Code() int { + return 400 +} + +func (o *ObjectsCreateBadRequest) Error() string { + return fmt.Sprintf("[POST /objects][%d] objectsCreateBadRequest %+v", 400, o.Payload) +} + +func (o *ObjectsCreateBadRequest) String() string { + return fmt.Sprintf("[POST /objects][%d] objectsCreateBadRequest %+v", 400, o.Payload) +} + +func (o *ObjectsCreateBadRequest) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ObjectsCreateBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewObjectsCreateUnauthorized creates a ObjectsCreateUnauthorized with default headers values +func NewObjectsCreateUnauthorized() *ObjectsCreateUnauthorized { + return 
&ObjectsCreateUnauthorized{} +} + +/* +ObjectsCreateUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. +*/ +type ObjectsCreateUnauthorized struct { +} + +// IsSuccess returns true when this objects create unauthorized response has a 2xx status code +func (o *ObjectsCreateUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects create unauthorized response has a 3xx status code +func (o *ObjectsCreateUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects create unauthorized response has a 4xx status code +func (o *ObjectsCreateUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects create unauthorized response has a 5xx status code +func (o *ObjectsCreateUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this objects create unauthorized response a status code equal to that given +func (o *ObjectsCreateUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the objects create unauthorized response +func (o *ObjectsCreateUnauthorized) Code() int { + return 401 +} + +func (o *ObjectsCreateUnauthorized) Error() string { + return fmt.Sprintf("[POST /objects][%d] objectsCreateUnauthorized ", 401) +} + +func (o *ObjectsCreateUnauthorized) String() string { + return fmt.Sprintf("[POST /objects][%d] objectsCreateUnauthorized ", 401) +} + +func (o *ObjectsCreateUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewObjectsCreateForbidden creates a ObjectsCreateForbidden with default headers values +func NewObjectsCreateForbidden() *ObjectsCreateForbidden { + return &ObjectsCreateForbidden{} +} + +/* +ObjectsCreateForbidden describes a response with status code 403, with default header values. 
+ +Forbidden +*/ +type ObjectsCreateForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this objects create forbidden response has a 2xx status code +func (o *ObjectsCreateForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects create forbidden response has a 3xx status code +func (o *ObjectsCreateForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects create forbidden response has a 4xx status code +func (o *ObjectsCreateForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects create forbidden response has a 5xx status code +func (o *ObjectsCreateForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this objects create forbidden response a status code equal to that given +func (o *ObjectsCreateForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the objects create forbidden response +func (o *ObjectsCreateForbidden) Code() int { + return 403 +} + +func (o *ObjectsCreateForbidden) Error() string { + return fmt.Sprintf("[POST /objects][%d] objectsCreateForbidden %+v", 403, o.Payload) +} + +func (o *ObjectsCreateForbidden) String() string { + return fmt.Sprintf("[POST /objects][%d] objectsCreateForbidden %+v", 403, o.Payload) +} + +func (o *ObjectsCreateForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ObjectsCreateForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewObjectsCreateUnprocessableEntity creates a ObjectsCreateUnprocessableEntity with default headers values +func NewObjectsCreateUnprocessableEntity() *ObjectsCreateUnprocessableEntity { 
+ return &ObjectsCreateUnprocessableEntity{} +} + +/* +ObjectsCreateUnprocessableEntity describes a response with status code 422, with default header values. + +Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file? +*/ +type ObjectsCreateUnprocessableEntity struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this objects create unprocessable entity response has a 2xx status code +func (o *ObjectsCreateUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects create unprocessable entity response has a 3xx status code +func (o *ObjectsCreateUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects create unprocessable entity response has a 4xx status code +func (o *ObjectsCreateUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects create unprocessable entity response has a 5xx status code +func (o *ObjectsCreateUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this objects create unprocessable entity response a status code equal to that given +func (o *ObjectsCreateUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + +// Code gets the status code for the objects create unprocessable entity response +func (o *ObjectsCreateUnprocessableEntity) Code() int { + return 422 +} + +func (o *ObjectsCreateUnprocessableEntity) Error() string { + return fmt.Sprintf("[POST /objects][%d] objectsCreateUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *ObjectsCreateUnprocessableEntity) String() string { + return fmt.Sprintf("[POST /objects][%d] objectsCreateUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *ObjectsCreateUnprocessableEntity) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o 
*ObjectsCreateUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewObjectsCreateInternalServerError creates a ObjectsCreateInternalServerError with default headers values +func NewObjectsCreateInternalServerError() *ObjectsCreateInternalServerError { + return &ObjectsCreateInternalServerError{} +} + +/* +ObjectsCreateInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. +*/ +type ObjectsCreateInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this objects create internal server error response has a 2xx status code +func (o *ObjectsCreateInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects create internal server error response has a 3xx status code +func (o *ObjectsCreateInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects create internal server error response has a 4xx status code +func (o *ObjectsCreateInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this objects create internal server error response has a 5xx status code +func (o *ObjectsCreateInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this objects create internal server error response a status code equal to that given +func (o *ObjectsCreateInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the objects create internal server error response +func (o 
*ObjectsCreateInternalServerError) Code() int { + return 500 +} + +func (o *ObjectsCreateInternalServerError) Error() string { + return fmt.Sprintf("[POST /objects][%d] objectsCreateInternalServerError %+v", 500, o.Payload) +} + +func (o *ObjectsCreateInternalServerError) String() string { + return fmt.Sprintf("[POST /objects][%d] objectsCreateInternalServerError %+v", 500, o.Payload) +} + +func (o *ObjectsCreateInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ObjectsCreateInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/objects/objects_delete_parameters.go b/platform/dbops/binaries/weaviate-src/client/objects/objects_delete_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..40fcd73fd6c2f198c60ccdab783fff8b66e05941 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/objects/objects_delete_parameters.go @@ -0,0 +1,232 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewObjectsDeleteParams creates a new ObjectsDeleteParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewObjectsDeleteParams() *ObjectsDeleteParams { + return &ObjectsDeleteParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewObjectsDeleteParamsWithTimeout creates a new ObjectsDeleteParams object +// with the ability to set a timeout on a request. +func NewObjectsDeleteParamsWithTimeout(timeout time.Duration) *ObjectsDeleteParams { + return &ObjectsDeleteParams{ + timeout: timeout, + } +} + +// NewObjectsDeleteParamsWithContext creates a new ObjectsDeleteParams object +// with the ability to set a context for a request. +func NewObjectsDeleteParamsWithContext(ctx context.Context) *ObjectsDeleteParams { + return &ObjectsDeleteParams{ + Context: ctx, + } +} + +// NewObjectsDeleteParamsWithHTTPClient creates a new ObjectsDeleteParams object +// with the ability to set a custom HTTPClient for a request. +func NewObjectsDeleteParamsWithHTTPClient(client *http.Client) *ObjectsDeleteParams { + return &ObjectsDeleteParams{ + HTTPClient: client, + } +} + +/* +ObjectsDeleteParams contains all the parameters to send to the API endpoint + + for the objects delete operation. + + Typically these are written to a http.Request. +*/ +type ObjectsDeleteParams struct { + + /* ConsistencyLevel. + + Determines how many replicas must acknowledge a request before it is considered successful + */ + ConsistencyLevel *string + + /* ID. + + Unique ID of the Object. 
+ + Format: uuid + */ + ID strfmt.UUID + + /* Tenant. + + Specifies the tenant in a request targeting a multi-tenant class + */ + Tenant *string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the objects delete params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *ObjectsDeleteParams) WithDefaults() *ObjectsDeleteParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the objects delete params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *ObjectsDeleteParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the objects delete params +func (o *ObjectsDeleteParams) WithTimeout(timeout time.Duration) *ObjectsDeleteParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the objects delete params +func (o *ObjectsDeleteParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the objects delete params +func (o *ObjectsDeleteParams) WithContext(ctx context.Context) *ObjectsDeleteParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the objects delete params +func (o *ObjectsDeleteParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the objects delete params +func (o *ObjectsDeleteParams) WithHTTPClient(client *http.Client) *ObjectsDeleteParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the objects delete params +func (o *ObjectsDeleteParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithConsistencyLevel adds the consistencyLevel to the objects delete params +func (o *ObjectsDeleteParams) WithConsistencyLevel(consistencyLevel *string) *ObjectsDeleteParams { + 
o.SetConsistencyLevel(consistencyLevel) + return o +} + +// SetConsistencyLevel adds the consistencyLevel to the objects delete params +func (o *ObjectsDeleteParams) SetConsistencyLevel(consistencyLevel *string) { + o.ConsistencyLevel = consistencyLevel +} + +// WithID adds the id to the objects delete params +func (o *ObjectsDeleteParams) WithID(id strfmt.UUID) *ObjectsDeleteParams { + o.SetID(id) + return o +} + +// SetID adds the id to the objects delete params +func (o *ObjectsDeleteParams) SetID(id strfmt.UUID) { + o.ID = id +} + +// WithTenant adds the tenant to the objects delete params +func (o *ObjectsDeleteParams) WithTenant(tenant *string) *ObjectsDeleteParams { + o.SetTenant(tenant) + return o +} + +// SetTenant adds the tenant to the objects delete params +func (o *ObjectsDeleteParams) SetTenant(tenant *string) { + o.Tenant = tenant +} + +// WriteToRequest writes these params to a swagger request +func (o *ObjectsDeleteParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.ConsistencyLevel != nil { + + // query param consistency_level + var qrConsistencyLevel string + + if o.ConsistencyLevel != nil { + qrConsistencyLevel = *o.ConsistencyLevel + } + qConsistencyLevel := qrConsistencyLevel + if qConsistencyLevel != "" { + + if err := r.SetQueryParam("consistency_level", qConsistencyLevel); err != nil { + return err + } + } + } + + // path param id + if err := r.SetPathParam("id", o.ID.String()); err != nil { + return err + } + + if o.Tenant != nil { + + // query param tenant + var qrTenant string + + if o.Tenant != nil { + qrTenant = *o.Tenant + } + qTenant := qrTenant + if qTenant != "" { + + if err := r.SetQueryParam("tenant", qTenant); err != nil { + return err + } + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/objects/objects_delete_responses.go b/platform/dbops/binaries/weaviate-src/client/objects/objects_delete_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..e54a3d97b36d50a4f2fa5b89bd01ab33032b9c4a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/objects/objects_delete_responses.go @@ -0,0 +1,374 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// ObjectsDeleteReader is a Reader for the ObjectsDelete structure. +type ObjectsDeleteReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *ObjectsDeleteReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 204: + result := NewObjectsDeleteNoContent() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 401: + result := NewObjectsDeleteUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewObjectsDeleteForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 404: + result := NewObjectsDeleteNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewObjectsDeleteInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewObjectsDeleteNoContent creates a ObjectsDeleteNoContent with default headers values +func NewObjectsDeleteNoContent() *ObjectsDeleteNoContent { + return &ObjectsDeleteNoContent{} +} + +/* +ObjectsDeleteNoContent describes a response with status code 204, with default header values. + +Successfully deleted. 
+*/ +type ObjectsDeleteNoContent struct { +} + +// IsSuccess returns true when this objects delete no content response has a 2xx status code +func (o *ObjectsDeleteNoContent) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this objects delete no content response has a 3xx status code +func (o *ObjectsDeleteNoContent) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects delete no content response has a 4xx status code +func (o *ObjectsDeleteNoContent) IsClientError() bool { + return false +} + +// IsServerError returns true when this objects delete no content response has a 5xx status code +func (o *ObjectsDeleteNoContent) IsServerError() bool { + return false +} + +// IsCode returns true when this objects delete no content response a status code equal to that given +func (o *ObjectsDeleteNoContent) IsCode(code int) bool { + return code == 204 +} + +// Code gets the status code for the objects delete no content response +func (o *ObjectsDeleteNoContent) Code() int { + return 204 +} + +func (o *ObjectsDeleteNoContent) Error() string { + return fmt.Sprintf("[DELETE /objects/{id}][%d] objectsDeleteNoContent ", 204) +} + +func (o *ObjectsDeleteNoContent) String() string { + return fmt.Sprintf("[DELETE /objects/{id}][%d] objectsDeleteNoContent ", 204) +} + +func (o *ObjectsDeleteNoContent) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewObjectsDeleteUnauthorized creates a ObjectsDeleteUnauthorized with default headers values +func NewObjectsDeleteUnauthorized() *ObjectsDeleteUnauthorized { + return &ObjectsDeleteUnauthorized{} +} + +/* +ObjectsDeleteUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. 
+*/ +type ObjectsDeleteUnauthorized struct { +} + +// IsSuccess returns true when this objects delete unauthorized response has a 2xx status code +func (o *ObjectsDeleteUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects delete unauthorized response has a 3xx status code +func (o *ObjectsDeleteUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects delete unauthorized response has a 4xx status code +func (o *ObjectsDeleteUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects delete unauthorized response has a 5xx status code +func (o *ObjectsDeleteUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this objects delete unauthorized response a status code equal to that given +func (o *ObjectsDeleteUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the objects delete unauthorized response +func (o *ObjectsDeleteUnauthorized) Code() int { + return 401 +} + +func (o *ObjectsDeleteUnauthorized) Error() string { + return fmt.Sprintf("[DELETE /objects/{id}][%d] objectsDeleteUnauthorized ", 401) +} + +func (o *ObjectsDeleteUnauthorized) String() string { + return fmt.Sprintf("[DELETE /objects/{id}][%d] objectsDeleteUnauthorized ", 401) +} + +func (o *ObjectsDeleteUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewObjectsDeleteForbidden creates a ObjectsDeleteForbidden with default headers values +func NewObjectsDeleteForbidden() *ObjectsDeleteForbidden { + return &ObjectsDeleteForbidden{} +} + +/* +ObjectsDeleteForbidden describes a response with status code 403, with default header values. 
+ +Forbidden +*/ +type ObjectsDeleteForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this objects delete forbidden response has a 2xx status code +func (o *ObjectsDeleteForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects delete forbidden response has a 3xx status code +func (o *ObjectsDeleteForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects delete forbidden response has a 4xx status code +func (o *ObjectsDeleteForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects delete forbidden response has a 5xx status code +func (o *ObjectsDeleteForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this objects delete forbidden response a status code equal to that given +func (o *ObjectsDeleteForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the objects delete forbidden response +func (o *ObjectsDeleteForbidden) Code() int { + return 403 +} + +func (o *ObjectsDeleteForbidden) Error() string { + return fmt.Sprintf("[DELETE /objects/{id}][%d] objectsDeleteForbidden %+v", 403, o.Payload) +} + +func (o *ObjectsDeleteForbidden) String() string { + return fmt.Sprintf("[DELETE /objects/{id}][%d] objectsDeleteForbidden %+v", 403, o.Payload) +} + +func (o *ObjectsDeleteForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ObjectsDeleteForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewObjectsDeleteNotFound creates a ObjectsDeleteNotFound with default headers values +func NewObjectsDeleteNotFound() *ObjectsDeleteNotFound { + return 
&ObjectsDeleteNotFound{} +} + +/* +ObjectsDeleteNotFound describes a response with status code 404, with default header values. + +Successful query result but no resource was found. +*/ +type ObjectsDeleteNotFound struct { +} + +// IsSuccess returns true when this objects delete not found response has a 2xx status code +func (o *ObjectsDeleteNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects delete not found response has a 3xx status code +func (o *ObjectsDeleteNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects delete not found response has a 4xx status code +func (o *ObjectsDeleteNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects delete not found response has a 5xx status code +func (o *ObjectsDeleteNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this objects delete not found response a status code equal to that given +func (o *ObjectsDeleteNotFound) IsCode(code int) bool { + return code == 404 +} + +// Code gets the status code for the objects delete not found response +func (o *ObjectsDeleteNotFound) Code() int { + return 404 +} + +func (o *ObjectsDeleteNotFound) Error() string { + return fmt.Sprintf("[DELETE /objects/{id}][%d] objectsDeleteNotFound ", 404) +} + +func (o *ObjectsDeleteNotFound) String() string { + return fmt.Sprintf("[DELETE /objects/{id}][%d] objectsDeleteNotFound ", 404) +} + +func (o *ObjectsDeleteNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewObjectsDeleteInternalServerError creates a ObjectsDeleteInternalServerError with default headers values +func NewObjectsDeleteInternalServerError() *ObjectsDeleteInternalServerError { + return &ObjectsDeleteInternalServerError{} +} + +/* +ObjectsDeleteInternalServerError describes a response with status code 500, with default header 
values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. +*/ +type ObjectsDeleteInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this objects delete internal server error response has a 2xx status code +func (o *ObjectsDeleteInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects delete internal server error response has a 3xx status code +func (o *ObjectsDeleteInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects delete internal server error response has a 4xx status code +func (o *ObjectsDeleteInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this objects delete internal server error response has a 5xx status code +func (o *ObjectsDeleteInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this objects delete internal server error response a status code equal to that given +func (o *ObjectsDeleteInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the objects delete internal server error response +func (o *ObjectsDeleteInternalServerError) Code() int { + return 500 +} + +func (o *ObjectsDeleteInternalServerError) Error() string { + return fmt.Sprintf("[DELETE /objects/{id}][%d] objectsDeleteInternalServerError %+v", 500, o.Payload) +} + +func (o *ObjectsDeleteInternalServerError) String() string { + return fmt.Sprintf("[DELETE /objects/{id}][%d] objectsDeleteInternalServerError %+v", 500, o.Payload) +} + +func (o *ObjectsDeleteInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ObjectsDeleteInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = 
new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/objects/objects_get_parameters.go b/platform/dbops/binaries/weaviate-src/client/objects/objects_get_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..fa48149ad162409f360cd4bcd9ae11d1028cbde1 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/objects/objects_get_parameters.go @@ -0,0 +1,198 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewObjectsGetParams creates a new ObjectsGetParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewObjectsGetParams() *ObjectsGetParams { + return &ObjectsGetParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewObjectsGetParamsWithTimeout creates a new ObjectsGetParams object +// with the ability to set a timeout on a request. 
+func NewObjectsGetParamsWithTimeout(timeout time.Duration) *ObjectsGetParams { + return &ObjectsGetParams{ + timeout: timeout, + } +} + +// NewObjectsGetParamsWithContext creates a new ObjectsGetParams object +// with the ability to set a context for a request. +func NewObjectsGetParamsWithContext(ctx context.Context) *ObjectsGetParams { + return &ObjectsGetParams{ + Context: ctx, + } +} + +// NewObjectsGetParamsWithHTTPClient creates a new ObjectsGetParams object +// with the ability to set a custom HTTPClient for a request. +func NewObjectsGetParamsWithHTTPClient(client *http.Client) *ObjectsGetParams { + return &ObjectsGetParams{ + HTTPClient: client, + } +} + +/* +ObjectsGetParams contains all the parameters to send to the API endpoint + + for the objects get operation. + + Typically these are written to a http.Request. +*/ +type ObjectsGetParams struct { + + /* ID. + + Unique ID of the Object. + + Format: uuid + */ + ID strfmt.UUID + + /* Include. + + Include additional information, such as classification infos. Allowed values include: classification, vector, interpretation + */ + Include *string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the objects get params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *ObjectsGetParams) WithDefaults() *ObjectsGetParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the objects get params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *ObjectsGetParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the objects get params +func (o *ObjectsGetParams) WithTimeout(timeout time.Duration) *ObjectsGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the objects get params +func (o *ObjectsGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the objects get params +func (o *ObjectsGetParams) WithContext(ctx context.Context) *ObjectsGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the objects get params +func (o *ObjectsGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the objects get params +func (o *ObjectsGetParams) WithHTTPClient(client *http.Client) *ObjectsGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the objects get params +func (o *ObjectsGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithID adds the id to the objects get params +func (o *ObjectsGetParams) WithID(id strfmt.UUID) *ObjectsGetParams { + o.SetID(id) + return o +} + +// SetID adds the id to the objects get params +func (o *ObjectsGetParams) SetID(id strfmt.UUID) { + o.ID = id +} + +// WithInclude adds the include to the objects get params +func (o *ObjectsGetParams) WithInclude(include *string) *ObjectsGetParams { + o.SetInclude(include) + return o +} + +// SetInclude adds the include to the objects get params +func (o *ObjectsGetParams) SetInclude(include *string) { + o.Include = include +} + +// WriteToRequest writes these params to a swagger request +func (o *ObjectsGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param id + if err := r.SetPathParam("id", o.ID.String()); 
err != nil { + return err + } + + if o.Include != nil { + + // query param include + var qrInclude string + + if o.Include != nil { + qrInclude = *o.Include + } + qInclude := qrInclude + if qInclude != "" { + + if err := r.SetQueryParam("include", qInclude); err != nil { + return err + } + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/objects/objects_get_responses.go b/platform/dbops/binaries/weaviate-src/client/objects/objects_get_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..9367a2faec14df5c0dbcea299d07226df3752a8d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/objects/objects_get_responses.go @@ -0,0 +1,460 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// ObjectsGetReader is a Reader for the ObjectsGet structure. +type ObjectsGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *ObjectsGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewObjectsGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewObjectsGetBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 401: + result := NewObjectsGetUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewObjectsGetForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 404: + result := NewObjectsGetNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewObjectsGetInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewObjectsGetOK creates a ObjectsGetOK with default headers values +func NewObjectsGetOK() *ObjectsGetOK { + return &ObjectsGetOK{} +} + +/* +ObjectsGetOK describes a response with status code 200, with default header values. + +Successful response. 
+*/ +type ObjectsGetOK struct { + Payload *models.Object +} + +// IsSuccess returns true when this objects get o k response has a 2xx status code +func (o *ObjectsGetOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this objects get o k response has a 3xx status code +func (o *ObjectsGetOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects get o k response has a 4xx status code +func (o *ObjectsGetOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this objects get o k response has a 5xx status code +func (o *ObjectsGetOK) IsServerError() bool { + return false +} + +// IsCode returns true when this objects get o k response a status code equal to that given +func (o *ObjectsGetOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the objects get o k response +func (o *ObjectsGetOK) Code() int { + return 200 +} + +func (o *ObjectsGetOK) Error() string { + return fmt.Sprintf("[GET /objects/{id}][%d] objectsGetOK %+v", 200, o.Payload) +} + +func (o *ObjectsGetOK) String() string { + return fmt.Sprintf("[GET /objects/{id}][%d] objectsGetOK %+v", 200, o.Payload) +} + +func (o *ObjectsGetOK) GetPayload() *models.Object { + return o.Payload +} + +func (o *ObjectsGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Object) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewObjectsGetBadRequest creates a ObjectsGetBadRequest with default headers values +func NewObjectsGetBadRequest() *ObjectsGetBadRequest { + return &ObjectsGetBadRequest{} +} + +/* +ObjectsGetBadRequest describes a response with status code 400, with default header values. + +Malformed request. 
+*/ +type ObjectsGetBadRequest struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this objects get bad request response has a 2xx status code +func (o *ObjectsGetBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects get bad request response has a 3xx status code +func (o *ObjectsGetBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects get bad request response has a 4xx status code +func (o *ObjectsGetBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects get bad request response has a 5xx status code +func (o *ObjectsGetBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this objects get bad request response a status code equal to that given +func (o *ObjectsGetBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the objects get bad request response +func (o *ObjectsGetBadRequest) Code() int { + return 400 +} + +func (o *ObjectsGetBadRequest) Error() string { + return fmt.Sprintf("[GET /objects/{id}][%d] objectsGetBadRequest %+v", 400, o.Payload) +} + +func (o *ObjectsGetBadRequest) String() string { + return fmt.Sprintf("[GET /objects/{id}][%d] objectsGetBadRequest %+v", 400, o.Payload) +} + +func (o *ObjectsGetBadRequest) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ObjectsGetBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewObjectsGetUnauthorized creates a ObjectsGetUnauthorized with default headers values +func NewObjectsGetUnauthorized() *ObjectsGetUnauthorized { + return &ObjectsGetUnauthorized{} +} + +/* +ObjectsGetUnauthorized 
describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. +*/ +type ObjectsGetUnauthorized struct { +} + +// IsSuccess returns true when this objects get unauthorized response has a 2xx status code +func (o *ObjectsGetUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects get unauthorized response has a 3xx status code +func (o *ObjectsGetUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects get unauthorized response has a 4xx status code +func (o *ObjectsGetUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects get unauthorized response has a 5xx status code +func (o *ObjectsGetUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this objects get unauthorized response a status code equal to that given +func (o *ObjectsGetUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the objects get unauthorized response +func (o *ObjectsGetUnauthorized) Code() int { + return 401 +} + +func (o *ObjectsGetUnauthorized) Error() string { + return fmt.Sprintf("[GET /objects/{id}][%d] objectsGetUnauthorized ", 401) +} + +func (o *ObjectsGetUnauthorized) String() string { + return fmt.Sprintf("[GET /objects/{id}][%d] objectsGetUnauthorized ", 401) +} + +func (o *ObjectsGetUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewObjectsGetForbidden creates a ObjectsGetForbidden with default headers values +func NewObjectsGetForbidden() *ObjectsGetForbidden { + return &ObjectsGetForbidden{} +} + +/* +ObjectsGetForbidden describes a response with status code 403, with default header values. 
+ +Forbidden +*/ +type ObjectsGetForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this objects get forbidden response has a 2xx status code +func (o *ObjectsGetForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects get forbidden response has a 3xx status code +func (o *ObjectsGetForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects get forbidden response has a 4xx status code +func (o *ObjectsGetForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects get forbidden response has a 5xx status code +func (o *ObjectsGetForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this objects get forbidden response a status code equal to that given +func (o *ObjectsGetForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the objects get forbidden response +func (o *ObjectsGetForbidden) Code() int { + return 403 +} + +func (o *ObjectsGetForbidden) Error() string { + return fmt.Sprintf("[GET /objects/{id}][%d] objectsGetForbidden %+v", 403, o.Payload) +} + +func (o *ObjectsGetForbidden) String() string { + return fmt.Sprintf("[GET /objects/{id}][%d] objectsGetForbidden %+v", 403, o.Payload) +} + +func (o *ObjectsGetForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ObjectsGetForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewObjectsGetNotFound creates a ObjectsGetNotFound with default headers values +func NewObjectsGetNotFound() *ObjectsGetNotFound { + return &ObjectsGetNotFound{} +} + +/* +ObjectsGetNotFound describes a response with status code 404, 
with default header values. + +Successful query result but no resource was found. +*/ +type ObjectsGetNotFound struct { +} + +// IsSuccess returns true when this objects get not found response has a 2xx status code +func (o *ObjectsGetNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects get not found response has a 3xx status code +func (o *ObjectsGetNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects get not found response has a 4xx status code +func (o *ObjectsGetNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects get not found response has a 5xx status code +func (o *ObjectsGetNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this objects get not found response a status code equal to that given +func (o *ObjectsGetNotFound) IsCode(code int) bool { + return code == 404 +} + +// Code gets the status code for the objects get not found response +func (o *ObjectsGetNotFound) Code() int { + return 404 +} + +func (o *ObjectsGetNotFound) Error() string { + return fmt.Sprintf("[GET /objects/{id}][%d] objectsGetNotFound ", 404) +} + +func (o *ObjectsGetNotFound) String() string { + return fmt.Sprintf("[GET /objects/{id}][%d] objectsGetNotFound ", 404) +} + +func (o *ObjectsGetNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewObjectsGetInternalServerError creates a ObjectsGetInternalServerError with default headers values +func NewObjectsGetInternalServerError() *ObjectsGetInternalServerError { + return &ObjectsGetInternalServerError{} +} + +/* +ObjectsGetInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+*/ +type ObjectsGetInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this objects get internal server error response has a 2xx status code +func (o *ObjectsGetInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects get internal server error response has a 3xx status code +func (o *ObjectsGetInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects get internal server error response has a 4xx status code +func (o *ObjectsGetInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this objects get internal server error response has a 5xx status code +func (o *ObjectsGetInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this objects get internal server error response a status code equal to that given +func (o *ObjectsGetInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the objects get internal server error response +func (o *ObjectsGetInternalServerError) Code() int { + return 500 +} + +func (o *ObjectsGetInternalServerError) Error() string { + return fmt.Sprintf("[GET /objects/{id}][%d] objectsGetInternalServerError %+v", 500, o.Payload) +} + +func (o *ObjectsGetInternalServerError) String() string { + return fmt.Sprintf("[GET /objects/{id}][%d] objectsGetInternalServerError %+v", 500, o.Payload) +} + +func (o *ObjectsGetInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ObjectsGetInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git 
a/platform/dbops/binaries/weaviate-src/client/objects/objects_head_parameters.go b/platform/dbops/binaries/weaviate-src/client/objects/objects_head_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..3d677dc9f46639f8f3d851905b873ce13c0cd136 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/objects/objects_head_parameters.go @@ -0,0 +1,164 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewObjectsHeadParams creates a new ObjectsHeadParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewObjectsHeadParams() *ObjectsHeadParams { + return &ObjectsHeadParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewObjectsHeadParamsWithTimeout creates a new ObjectsHeadParams object +// with the ability to set a timeout on a request. +func NewObjectsHeadParamsWithTimeout(timeout time.Duration) *ObjectsHeadParams { + return &ObjectsHeadParams{ + timeout: timeout, + } +} + +// NewObjectsHeadParamsWithContext creates a new ObjectsHeadParams object +// with the ability to set a context for a request. 
+func NewObjectsHeadParamsWithContext(ctx context.Context) *ObjectsHeadParams { + return &ObjectsHeadParams{ + Context: ctx, + } +} + +// NewObjectsHeadParamsWithHTTPClient creates a new ObjectsHeadParams object +// with the ability to set a custom HTTPClient for a request. +func NewObjectsHeadParamsWithHTTPClient(client *http.Client) *ObjectsHeadParams { + return &ObjectsHeadParams{ + HTTPClient: client, + } +} + +/* +ObjectsHeadParams contains all the parameters to send to the API endpoint + + for the objects head operation. + + Typically these are written to a http.Request. +*/ +type ObjectsHeadParams struct { + + /* ID. + + Unique ID of the Object. + + Format: uuid + */ + ID strfmt.UUID + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the objects head params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *ObjectsHeadParams) WithDefaults() *ObjectsHeadParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the objects head params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *ObjectsHeadParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the objects head params +func (o *ObjectsHeadParams) WithTimeout(timeout time.Duration) *ObjectsHeadParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the objects head params +func (o *ObjectsHeadParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the objects head params +func (o *ObjectsHeadParams) WithContext(ctx context.Context) *ObjectsHeadParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the objects head params +func (o *ObjectsHeadParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the objects head params +func (o *ObjectsHeadParams) WithHTTPClient(client *http.Client) *ObjectsHeadParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the objects head params +func (o *ObjectsHeadParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithID adds the id to the objects head params +func (o *ObjectsHeadParams) WithID(id strfmt.UUID) *ObjectsHeadParams { + o.SetID(id) + return o +} + +// SetID adds the id to the objects head params +func (o *ObjectsHeadParams) SetID(id strfmt.UUID) { + o.ID = id +} + +// WriteToRequest writes these params to a swagger request +func (o *ObjectsHeadParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param id + if err := r.SetPathParam("id", o.ID.String()); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/objects/objects_head_responses.go b/platform/dbops/binaries/weaviate-src/client/objects/objects_head_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..560930484bba9e0073dad89ca2a79c18b2988ec5 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/objects/objects_head_responses.go @@ -0,0 +1,374 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// ObjectsHeadReader is a Reader for the ObjectsHead structure. +type ObjectsHeadReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *ObjectsHeadReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 204: + result := NewObjectsHeadNoContent() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 401: + result := NewObjectsHeadUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewObjectsHeadForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 404: + result := NewObjectsHeadNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewObjectsHeadInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewObjectsHeadNoContent creates a ObjectsHeadNoContent with default headers values +func NewObjectsHeadNoContent() *ObjectsHeadNoContent { + return &ObjectsHeadNoContent{} +} + +/* +ObjectsHeadNoContent describes a response with status code 204, with default header values. + +Object exists. 
+*/ +type ObjectsHeadNoContent struct { +} + +// IsSuccess returns true when this objects head no content response has a 2xx status code +func (o *ObjectsHeadNoContent) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this objects head no content response has a 3xx status code +func (o *ObjectsHeadNoContent) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects head no content response has a 4xx status code +func (o *ObjectsHeadNoContent) IsClientError() bool { + return false +} + +// IsServerError returns true when this objects head no content response has a 5xx status code +func (o *ObjectsHeadNoContent) IsServerError() bool { + return false +} + +// IsCode returns true when this objects head no content response a status code equal to that given +func (o *ObjectsHeadNoContent) IsCode(code int) bool { + return code == 204 +} + +// Code gets the status code for the objects head no content response +func (o *ObjectsHeadNoContent) Code() int { + return 204 +} + +func (o *ObjectsHeadNoContent) Error() string { + return fmt.Sprintf("[HEAD /objects/{id}][%d] objectsHeadNoContent ", 204) +} + +func (o *ObjectsHeadNoContent) String() string { + return fmt.Sprintf("[HEAD /objects/{id}][%d] objectsHeadNoContent ", 204) +} + +func (o *ObjectsHeadNoContent) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewObjectsHeadUnauthorized creates a ObjectsHeadUnauthorized with default headers values +func NewObjectsHeadUnauthorized() *ObjectsHeadUnauthorized { + return &ObjectsHeadUnauthorized{} +} + +/* +ObjectsHeadUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. 
+*/ +type ObjectsHeadUnauthorized struct { +} + +// IsSuccess returns true when this objects head unauthorized response has a 2xx status code +func (o *ObjectsHeadUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects head unauthorized response has a 3xx status code +func (o *ObjectsHeadUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects head unauthorized response has a 4xx status code +func (o *ObjectsHeadUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects head unauthorized response has a 5xx status code +func (o *ObjectsHeadUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this objects head unauthorized response a status code equal to that given +func (o *ObjectsHeadUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the objects head unauthorized response +func (o *ObjectsHeadUnauthorized) Code() int { + return 401 +} + +func (o *ObjectsHeadUnauthorized) Error() string { + return fmt.Sprintf("[HEAD /objects/{id}][%d] objectsHeadUnauthorized ", 401) +} + +func (o *ObjectsHeadUnauthorized) String() string { + return fmt.Sprintf("[HEAD /objects/{id}][%d] objectsHeadUnauthorized ", 401) +} + +func (o *ObjectsHeadUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewObjectsHeadForbidden creates a ObjectsHeadForbidden with default headers values +func NewObjectsHeadForbidden() *ObjectsHeadForbidden { + return &ObjectsHeadForbidden{} +} + +/* +ObjectsHeadForbidden describes a response with status code 403, with default header values. 
+ +Forbidden +*/ +type ObjectsHeadForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this objects head forbidden response has a 2xx status code +func (o *ObjectsHeadForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects head forbidden response has a 3xx status code +func (o *ObjectsHeadForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects head forbidden response has a 4xx status code +func (o *ObjectsHeadForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects head forbidden response has a 5xx status code +func (o *ObjectsHeadForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this objects head forbidden response a status code equal to that given +func (o *ObjectsHeadForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the objects head forbidden response +func (o *ObjectsHeadForbidden) Code() int { + return 403 +} + +func (o *ObjectsHeadForbidden) Error() string { + return fmt.Sprintf("[HEAD /objects/{id}][%d] objectsHeadForbidden %+v", 403, o.Payload) +} + +func (o *ObjectsHeadForbidden) String() string { + return fmt.Sprintf("[HEAD /objects/{id}][%d] objectsHeadForbidden %+v", 403, o.Payload) +} + +func (o *ObjectsHeadForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ObjectsHeadForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewObjectsHeadNotFound creates a ObjectsHeadNotFound with default headers values +func NewObjectsHeadNotFound() *ObjectsHeadNotFound { + return &ObjectsHeadNotFound{} +} + +/* +ObjectsHeadNotFound describes a 
response with status code 404, with default header values. + +Object doesn't exist. +*/ +type ObjectsHeadNotFound struct { +} + +// IsSuccess returns true when this objects head not found response has a 2xx status code +func (o *ObjectsHeadNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects head not found response has a 3xx status code +func (o *ObjectsHeadNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects head not found response has a 4xx status code +func (o *ObjectsHeadNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects head not found response has a 5xx status code +func (o *ObjectsHeadNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this objects head not found response a status code equal to that given +func (o *ObjectsHeadNotFound) IsCode(code int) bool { + return code == 404 +} + +// Code gets the status code for the objects head not found response +func (o *ObjectsHeadNotFound) Code() int { + return 404 +} + +func (o *ObjectsHeadNotFound) Error() string { + return fmt.Sprintf("[HEAD /objects/{id}][%d] objectsHeadNotFound ", 404) +} + +func (o *ObjectsHeadNotFound) String() string { + return fmt.Sprintf("[HEAD /objects/{id}][%d] objectsHeadNotFound ", 404) +} + +func (o *ObjectsHeadNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewObjectsHeadInternalServerError creates a ObjectsHeadInternalServerError with default headers values +func NewObjectsHeadInternalServerError() *ObjectsHeadInternalServerError { + return &ObjectsHeadInternalServerError{} +} + +/* +ObjectsHeadInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+*/ +type ObjectsHeadInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this objects head internal server error response has a 2xx status code +func (o *ObjectsHeadInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects head internal server error response has a 3xx status code +func (o *ObjectsHeadInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects head internal server error response has a 4xx status code +func (o *ObjectsHeadInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this objects head internal server error response has a 5xx status code +func (o *ObjectsHeadInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this objects head internal server error response a status code equal to that given +func (o *ObjectsHeadInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the objects head internal server error response +func (o *ObjectsHeadInternalServerError) Code() int { + return 500 +} + +func (o *ObjectsHeadInternalServerError) Error() string { + return fmt.Sprintf("[HEAD /objects/{id}][%d] objectsHeadInternalServerError %+v", 500, o.Payload) +} + +func (o *ObjectsHeadInternalServerError) String() string { + return fmt.Sprintf("[HEAD /objects/{id}][%d] objectsHeadInternalServerError %+v", 500, o.Payload) +} + +func (o *ObjectsHeadInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ObjectsHeadInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git 
a/platform/dbops/binaries/weaviate-src/client/objects/objects_list_parameters.go b/platform/dbops/binaries/weaviate-src/client/objects/objects_list_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..172184a7c3986d2ed94dd85a3d366c3a54f50eed --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/objects/objects_list_parameters.go @@ -0,0 +1,428 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewObjectsListParams creates a new ObjectsListParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewObjectsListParams() *ObjectsListParams { + return &ObjectsListParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewObjectsListParamsWithTimeout creates a new ObjectsListParams object +// with the ability to set a timeout on a request. +func NewObjectsListParamsWithTimeout(timeout time.Duration) *ObjectsListParams { + return &ObjectsListParams{ + timeout: timeout, + } +} + +// NewObjectsListParamsWithContext creates a new ObjectsListParams object +// with the ability to set a context for a request. 
+func NewObjectsListParamsWithContext(ctx context.Context) *ObjectsListParams { + return &ObjectsListParams{ + Context: ctx, + } +} + +// NewObjectsListParamsWithHTTPClient creates a new ObjectsListParams object +// with the ability to set a custom HTTPClient for a request. +func NewObjectsListParamsWithHTTPClient(client *http.Client) *ObjectsListParams { + return &ObjectsListParams{ + HTTPClient: client, + } +} + +/* +ObjectsListParams contains all the parameters to send to the API endpoint + + for the objects list operation. + + Typically these are written to a http.Request. +*/ +type ObjectsListParams struct { + + /* After. + + A threshold UUID of the objects to retrieve after, using an UUID-based ordering. This object is not part of the set.

Must be used with `class`, typically in conjunction with `limit`.

Note `after` cannot be used with `offset` or `sort`.

For a null value similar to offset=0, set an empty string in the request, i.e. `after=` or `after`. + */ + After *string + + /* Class. + + The collection from which to query objects.

Note that if `class` is not provided, the response will not include any objects. + */ + Class *string + + /* Include. + + Include additional information, such as classification infos. Allowed values include: classification, vector, interpretation + */ + Include *string + + /* Limit. + + The maximum number of items to be returned per page. The default is 25 unless set otherwise as an environment variable. + + Format: int64 + */ + Limit *int64 + + /* Offset. + + The starting index of the result window. Note `offset` will retrieve `offset+limit` results and return `limit` results from the object with index `offset` onwards. Limited by the value of `QUERY_MAXIMUM_RESULTS`.

Should be used in conjunction with `limit`.

Cannot be used with `after`. + + Format: int64 + */ + Offset *int64 + + /* Order. + + Order parameter to tell how to order (asc or desc) data within given field. Should be used in conjunction with `sort` parameter. If providing multiple `sort` values, provide multiple `order` values in corresponding order, e.g.: `sort=author_name,title&order=desc,asc`. + */ + Order *string + + /* Sort. + + Name(s) of the property to sort by - e.g. `city`, or `country,city`. + */ + Sort *string + + /* Tenant. + + Specifies the tenant in a request targeting a multi-tenant class + */ + Tenant *string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the objects list params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *ObjectsListParams) WithDefaults() *ObjectsListParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the objects list params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *ObjectsListParams) SetDefaults() { + var ( + offsetDefault = int64(0) + ) + + val := ObjectsListParams{ + Offset: &offsetDefault, + } + + val.timeout = o.timeout + val.Context = o.Context + val.HTTPClient = o.HTTPClient + *o = val +} + +// WithTimeout adds the timeout to the objects list params +func (o *ObjectsListParams) WithTimeout(timeout time.Duration) *ObjectsListParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the objects list params +func (o *ObjectsListParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the objects list params +func (o *ObjectsListParams) WithContext(ctx context.Context) *ObjectsListParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the objects list params +func (o *ObjectsListParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the objects list params +func (o *ObjectsListParams) WithHTTPClient(client *http.Client) *ObjectsListParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the objects list params +func (o *ObjectsListParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithAfter adds the after to the objects list params +func (o *ObjectsListParams) WithAfter(after *string) *ObjectsListParams { + o.SetAfter(after) + return o +} + +// SetAfter adds the after to the objects list params +func (o *ObjectsListParams) SetAfter(after *string) { + o.After = after +} + +// WithClass adds the class to the objects list params +func (o *ObjectsListParams) WithClass(class *string) *ObjectsListParams { + o.SetClass(class) + return o +} + +// SetClass adds the class to the objects list params +func (o *ObjectsListParams) SetClass(class *string) { + o.Class = class +} + +// WithInclude adds the include to the objects list params +func (o *ObjectsListParams) WithInclude(include *string) *ObjectsListParams { 
+ o.SetInclude(include) + return o +} + +// SetInclude adds the include to the objects list params +func (o *ObjectsListParams) SetInclude(include *string) { + o.Include = include +} + +// WithLimit adds the limit to the objects list params +func (o *ObjectsListParams) WithLimit(limit *int64) *ObjectsListParams { + o.SetLimit(limit) + return o +} + +// SetLimit adds the limit to the objects list params +func (o *ObjectsListParams) SetLimit(limit *int64) { + o.Limit = limit +} + +// WithOffset adds the offset to the objects list params +func (o *ObjectsListParams) WithOffset(offset *int64) *ObjectsListParams { + o.SetOffset(offset) + return o +} + +// SetOffset adds the offset to the objects list params +func (o *ObjectsListParams) SetOffset(offset *int64) { + o.Offset = offset +} + +// WithOrder adds the order to the objects list params +func (o *ObjectsListParams) WithOrder(order *string) *ObjectsListParams { + o.SetOrder(order) + return o +} + +// SetOrder adds the order to the objects list params +func (o *ObjectsListParams) SetOrder(order *string) { + o.Order = order +} + +// WithSort adds the sort to the objects list params +func (o *ObjectsListParams) WithSort(sort *string) *ObjectsListParams { + o.SetSort(sort) + return o +} + +// SetSort adds the sort to the objects list params +func (o *ObjectsListParams) SetSort(sort *string) { + o.Sort = sort +} + +// WithTenant adds the tenant to the objects list params +func (o *ObjectsListParams) WithTenant(tenant *string) *ObjectsListParams { + o.SetTenant(tenant) + return o +} + +// SetTenant adds the tenant to the objects list params +func (o *ObjectsListParams) SetTenant(tenant *string) { + o.Tenant = tenant +} + +// WriteToRequest writes these params to a swagger request +func (o *ObjectsListParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.After != nil { + + // query param after + var 
qrAfter string + + if o.After != nil { + qrAfter = *o.After + } + qAfter := qrAfter + if qAfter != "" { + + if err := r.SetQueryParam("after", qAfter); err != nil { + return err + } + } + } + + if o.Class != nil { + + // query param class + var qrClass string + + if o.Class != nil { + qrClass = *o.Class + } + qClass := qrClass + if qClass != "" { + + if err := r.SetQueryParam("class", qClass); err != nil { + return err + } + } + } + + if o.Include != nil { + + // query param include + var qrInclude string + + if o.Include != nil { + qrInclude = *o.Include + } + qInclude := qrInclude + if qInclude != "" { + + if err := r.SetQueryParam("include", qInclude); err != nil { + return err + } + } + } + + if o.Limit != nil { + + // query param limit + var qrLimit int64 + + if o.Limit != nil { + qrLimit = *o.Limit + } + qLimit := swag.FormatInt64(qrLimit) + if qLimit != "" { + + if err := r.SetQueryParam("limit", qLimit); err != nil { + return err + } + } + } + + if o.Offset != nil { + + // query param offset + var qrOffset int64 + + if o.Offset != nil { + qrOffset = *o.Offset + } + qOffset := swag.FormatInt64(qrOffset) + if qOffset != "" { + + if err := r.SetQueryParam("offset", qOffset); err != nil { + return err + } + } + } + + if o.Order != nil { + + // query param order + var qrOrder string + + if o.Order != nil { + qrOrder = *o.Order + } + qOrder := qrOrder + if qOrder != "" { + + if err := r.SetQueryParam("order", qOrder); err != nil { + return err + } + } + } + + if o.Sort != nil { + + // query param sort + var qrSort string + + if o.Sort != nil { + qrSort = *o.Sort + } + qSort := qrSort + if qSort != "" { + + if err := r.SetQueryParam("sort", qSort); err != nil { + return err + } + } + } + + if o.Tenant != nil { + + // query param tenant + var qrTenant string + + if o.Tenant != nil { + qrTenant = *o.Tenant + } + qTenant := qrTenant + if qTenant != "" { + + if err := r.SetQueryParam("tenant", qTenant); err != nil { + return err + } + } + } + + if len(res) > 0 { + 
return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/objects/objects_list_responses.go b/platform/dbops/binaries/weaviate-src/client/objects/objects_list_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..898850c3d51661d0b698aa16acc24ab5851cc21c --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/objects/objects_list_responses.go @@ -0,0 +1,534 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// ObjectsListReader is a Reader for the ObjectsList structure. +type ObjectsListReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *ObjectsListReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewObjectsListOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewObjectsListBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 401: + result := NewObjectsListUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewObjectsListForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 404: + result := NewObjectsListNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewObjectsListUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewObjectsListInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewObjectsListOK creates a ObjectsListOK with default headers values +func NewObjectsListOK() *ObjectsListOK { + return &ObjectsListOK{} +} + +/* +ObjectsListOK describes a response with status code 200, with default header values. + +Successful response.

If `class` is not provided, the response will not include any objects. +*/ +type ObjectsListOK struct { + Payload *models.ObjectsListResponse +} + +// IsSuccess returns true when this objects list o k response has a 2xx status code +func (o *ObjectsListOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this objects list o k response has a 3xx status code +func (o *ObjectsListOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects list o k response has a 4xx status code +func (o *ObjectsListOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this objects list o k response has a 5xx status code +func (o *ObjectsListOK) IsServerError() bool { + return false +} + +// IsCode returns true when this objects list o k response a status code equal to that given +func (o *ObjectsListOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the objects list o k response +func (o *ObjectsListOK) Code() int { + return 200 +} + +func (o *ObjectsListOK) Error() string { + return fmt.Sprintf("[GET /objects][%d] objectsListOK %+v", 200, o.Payload) +} + +func (o *ObjectsListOK) String() string { + return fmt.Sprintf("[GET /objects][%d] objectsListOK %+v", 200, o.Payload) +} + +func (o *ObjectsListOK) GetPayload() *models.ObjectsListResponse { + return o.Payload +} + +func (o *ObjectsListOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ObjectsListResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewObjectsListBadRequest creates a ObjectsListBadRequest with default headers values +func NewObjectsListBadRequest() *ObjectsListBadRequest { + return &ObjectsListBadRequest{} +} + +/* +ObjectsListBadRequest describes a response with status code 400, with default header 
values. + +Malformed request. +*/ +type ObjectsListBadRequest struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this objects list bad request response has a 2xx status code +func (o *ObjectsListBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects list bad request response has a 3xx status code +func (o *ObjectsListBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects list bad request response has a 4xx status code +func (o *ObjectsListBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects list bad request response has a 5xx status code +func (o *ObjectsListBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this objects list bad request response a status code equal to that given +func (o *ObjectsListBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the objects list bad request response +func (o *ObjectsListBadRequest) Code() int { + return 400 +} + +func (o *ObjectsListBadRequest) Error() string { + return fmt.Sprintf("[GET /objects][%d] objectsListBadRequest %+v", 400, o.Payload) +} + +func (o *ObjectsListBadRequest) String() string { + return fmt.Sprintf("[GET /objects][%d] objectsListBadRequest %+v", 400, o.Payload) +} + +func (o *ObjectsListBadRequest) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ObjectsListBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewObjectsListUnauthorized creates a ObjectsListUnauthorized with default headers values +func NewObjectsListUnauthorized() *ObjectsListUnauthorized { + return 
&ObjectsListUnauthorized{} +} + +/* +ObjectsListUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. +*/ +type ObjectsListUnauthorized struct { +} + +// IsSuccess returns true when this objects list unauthorized response has a 2xx status code +func (o *ObjectsListUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects list unauthorized response has a 3xx status code +func (o *ObjectsListUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects list unauthorized response has a 4xx status code +func (o *ObjectsListUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects list unauthorized response has a 5xx status code +func (o *ObjectsListUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this objects list unauthorized response a status code equal to that given +func (o *ObjectsListUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the objects list unauthorized response +func (o *ObjectsListUnauthorized) Code() int { + return 401 +} + +func (o *ObjectsListUnauthorized) Error() string { + return fmt.Sprintf("[GET /objects][%d] objectsListUnauthorized ", 401) +} + +func (o *ObjectsListUnauthorized) String() string { + return fmt.Sprintf("[GET /objects][%d] objectsListUnauthorized ", 401) +} + +func (o *ObjectsListUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewObjectsListForbidden creates a ObjectsListForbidden with default headers values +func NewObjectsListForbidden() *ObjectsListForbidden { + return &ObjectsListForbidden{} +} + +/* +ObjectsListForbidden describes a response with status code 403, with default header values. 
+ +Forbidden +*/ +type ObjectsListForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this objects list forbidden response has a 2xx status code +func (o *ObjectsListForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects list forbidden response has a 3xx status code +func (o *ObjectsListForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects list forbidden response has a 4xx status code +func (o *ObjectsListForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects list forbidden response has a 5xx status code +func (o *ObjectsListForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this objects list forbidden response a status code equal to that given +func (o *ObjectsListForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the objects list forbidden response +func (o *ObjectsListForbidden) Code() int { + return 403 +} + +func (o *ObjectsListForbidden) Error() string { + return fmt.Sprintf("[GET /objects][%d] objectsListForbidden %+v", 403, o.Payload) +} + +func (o *ObjectsListForbidden) String() string { + return fmt.Sprintf("[GET /objects][%d] objectsListForbidden %+v", 403, o.Payload) +} + +func (o *ObjectsListForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ObjectsListForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewObjectsListNotFound creates a ObjectsListNotFound with default headers values +func NewObjectsListNotFound() *ObjectsListNotFound { + return &ObjectsListNotFound{} +} + +/* +ObjectsListNotFound describes a response with 
status code 404, with default header values. + +Successful query result but no resource was found. +*/ +type ObjectsListNotFound struct { +} + +// IsSuccess returns true when this objects list not found response has a 2xx status code +func (o *ObjectsListNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects list not found response has a 3xx status code +func (o *ObjectsListNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects list not found response has a 4xx status code +func (o *ObjectsListNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects list not found response has a 5xx status code +func (o *ObjectsListNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this objects list not found response a status code equal to that given +func (o *ObjectsListNotFound) IsCode(code int) bool { + return code == 404 +} + +// Code gets the status code for the objects list not found response +func (o *ObjectsListNotFound) Code() int { + return 404 +} + +func (o *ObjectsListNotFound) Error() string { + return fmt.Sprintf("[GET /objects][%d] objectsListNotFound ", 404) +} + +func (o *ObjectsListNotFound) String() string { + return fmt.Sprintf("[GET /objects][%d] objectsListNotFound ", 404) +} + +func (o *ObjectsListNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewObjectsListUnprocessableEntity creates a ObjectsListUnprocessableEntity with default headers values +func NewObjectsListUnprocessableEntity() *ObjectsListUnprocessableEntity { + return &ObjectsListUnprocessableEntity{} +} + +/* +ObjectsListUnprocessableEntity describes a response with status code 422, with default header values. + +Request body is well-formed (i.e., syntactically correct), but semantically erroneous. 
Are you sure the class is defined in the configuration file? +*/ +type ObjectsListUnprocessableEntity struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this objects list unprocessable entity response has a 2xx status code +func (o *ObjectsListUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects list unprocessable entity response has a 3xx status code +func (o *ObjectsListUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects list unprocessable entity response has a 4xx status code +func (o *ObjectsListUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects list unprocessable entity response has a 5xx status code +func (o *ObjectsListUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this objects list unprocessable entity response a status code equal to that given +func (o *ObjectsListUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + +// Code gets the status code for the objects list unprocessable entity response +func (o *ObjectsListUnprocessableEntity) Code() int { + return 422 +} + +func (o *ObjectsListUnprocessableEntity) Error() string { + return fmt.Sprintf("[GET /objects][%d] objectsListUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *ObjectsListUnprocessableEntity) String() string { + return fmt.Sprintf("[GET /objects][%d] objectsListUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *ObjectsListUnprocessableEntity) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ObjectsListUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + 
return nil +} + +// NewObjectsListInternalServerError creates a ObjectsListInternalServerError with default headers values +func NewObjectsListInternalServerError() *ObjectsListInternalServerError { + return &ObjectsListInternalServerError{} +} + +/* +ObjectsListInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. +*/ +type ObjectsListInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this objects list internal server error response has a 2xx status code +func (o *ObjectsListInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects list internal server error response has a 3xx status code +func (o *ObjectsListInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects list internal server error response has a 4xx status code +func (o *ObjectsListInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this objects list internal server error response has a 5xx status code +func (o *ObjectsListInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this objects list internal server error response a status code equal to that given +func (o *ObjectsListInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the objects list internal server error response +func (o *ObjectsListInternalServerError) Code() int { + return 500 +} + +func (o *ObjectsListInternalServerError) Error() string { + return fmt.Sprintf("[GET /objects][%d] objectsListInternalServerError %+v", 500, o.Payload) +} + +func (o *ObjectsListInternalServerError) String() string { + return fmt.Sprintf("[GET /objects][%d] objectsListInternalServerError %+v", 500, 
o.Payload) +} + +func (o *ObjectsListInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ObjectsListInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/objects/objects_patch_parameters.go b/platform/dbops/binaries/weaviate-src/client/objects/objects_patch_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..5f2e7297553afcedfa27113f1afa24a843599009 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/objects/objects_patch_parameters.go @@ -0,0 +1,222 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// NewObjectsPatchParams creates a new ObjectsPatchParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. 
+func NewObjectsPatchParams() *ObjectsPatchParams { + return &ObjectsPatchParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewObjectsPatchParamsWithTimeout creates a new ObjectsPatchParams object +// with the ability to set a timeout on a request. +func NewObjectsPatchParamsWithTimeout(timeout time.Duration) *ObjectsPatchParams { + return &ObjectsPatchParams{ + timeout: timeout, + } +} + +// NewObjectsPatchParamsWithContext creates a new ObjectsPatchParams object +// with the ability to set a context for a request. +func NewObjectsPatchParamsWithContext(ctx context.Context) *ObjectsPatchParams { + return &ObjectsPatchParams{ + Context: ctx, + } +} + +// NewObjectsPatchParamsWithHTTPClient creates a new ObjectsPatchParams object +// with the ability to set a custom HTTPClient for a request. +func NewObjectsPatchParamsWithHTTPClient(client *http.Client) *ObjectsPatchParams { + return &ObjectsPatchParams{ + HTTPClient: client, + } +} + +/* +ObjectsPatchParams contains all the parameters to send to the API endpoint + + for the objects patch operation. + + Typically these are written to a http.Request. +*/ +type ObjectsPatchParams struct { + + /* Body. + + RFC 7396-style patch, the body contains the object to merge into the existing object. + */ + Body *models.Object + + /* ConsistencyLevel. + + Determines how many replicas must acknowledge a request before it is considered successful + */ + ConsistencyLevel *string + + /* ID. + + Unique ID of the Object. + + Format: uuid + */ + ID strfmt.UUID + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the objects patch params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *ObjectsPatchParams) WithDefaults() *ObjectsPatchParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the objects patch params (not the query body). 
+// +// All values with no default are reset to their zero value. +func (o *ObjectsPatchParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the objects patch params +func (o *ObjectsPatchParams) WithTimeout(timeout time.Duration) *ObjectsPatchParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the objects patch params +func (o *ObjectsPatchParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the objects patch params +func (o *ObjectsPatchParams) WithContext(ctx context.Context) *ObjectsPatchParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the objects patch params +func (o *ObjectsPatchParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the objects patch params +func (o *ObjectsPatchParams) WithHTTPClient(client *http.Client) *ObjectsPatchParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the objects patch params +func (o *ObjectsPatchParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the objects patch params +func (o *ObjectsPatchParams) WithBody(body *models.Object) *ObjectsPatchParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the objects patch params +func (o *ObjectsPatchParams) SetBody(body *models.Object) { + o.Body = body +} + +// WithConsistencyLevel adds the consistencyLevel to the objects patch params +func (o *ObjectsPatchParams) WithConsistencyLevel(consistencyLevel *string) *ObjectsPatchParams { + o.SetConsistencyLevel(consistencyLevel) + return o +} + +// SetConsistencyLevel adds the consistencyLevel to the objects patch params +func (o *ObjectsPatchParams) SetConsistencyLevel(consistencyLevel *string) { + o.ConsistencyLevel = consistencyLevel +} + +// WithID adds the id to the objects patch params +func (o 
*ObjectsPatchParams) WithID(id strfmt.UUID) *ObjectsPatchParams { + o.SetID(id) + return o +} + +// SetID adds the id to the objects patch params +func (o *ObjectsPatchParams) SetID(id strfmt.UUID) { + o.ID = id +} + +// WriteToRequest writes these params to a swagger request +func (o *ObjectsPatchParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + if o.ConsistencyLevel != nil { + + // query param consistency_level + var qrConsistencyLevel string + + if o.ConsistencyLevel != nil { + qrConsistencyLevel = *o.ConsistencyLevel + } + qConsistencyLevel := qrConsistencyLevel + if qConsistencyLevel != "" { + + if err := r.SetQueryParam("consistency_level", qConsistencyLevel); err != nil { + return err + } + } + } + + // path param id + if err := r.SetPathParam("id", o.ID.String()); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/objects/objects_patch_responses.go b/platform/dbops/binaries/weaviate-src/client/objects/objects_patch_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..e67df42f5b854864f3ebc8b4258d2b8109537aa6 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/objects/objects_patch_responses.go @@ -0,0 +1,510 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// ObjectsPatchReader is a Reader for the ObjectsPatch structure. +type ObjectsPatchReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ObjectsPatchReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 204: + result := NewObjectsPatchNoContent() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewObjectsPatchBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 401: + result := NewObjectsPatchUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewObjectsPatchForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 404: + result := NewObjectsPatchNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewObjectsPatchUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewObjectsPatchInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// 
NewObjectsPatchNoContent creates a ObjectsPatchNoContent with default headers values +func NewObjectsPatchNoContent() *ObjectsPatchNoContent { + return &ObjectsPatchNoContent{} +} + +/* +ObjectsPatchNoContent describes a response with status code 204, with default header values. + +Successfully applied. No content provided. +*/ +type ObjectsPatchNoContent struct { +} + +// IsSuccess returns true when this objects patch no content response has a 2xx status code +func (o *ObjectsPatchNoContent) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this objects patch no content response has a 3xx status code +func (o *ObjectsPatchNoContent) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects patch no content response has a 4xx status code +func (o *ObjectsPatchNoContent) IsClientError() bool { + return false +} + +// IsServerError returns true when this objects patch no content response has a 5xx status code +func (o *ObjectsPatchNoContent) IsServerError() bool { + return false +} + +// IsCode returns true when this objects patch no content response a status code equal to that given +func (o *ObjectsPatchNoContent) IsCode(code int) bool { + return code == 204 +} + +// Code gets the status code for the objects patch no content response +func (o *ObjectsPatchNoContent) Code() int { + return 204 +} + +func (o *ObjectsPatchNoContent) Error() string { + return fmt.Sprintf("[PATCH /objects/{id}][%d] objectsPatchNoContent ", 204) +} + +func (o *ObjectsPatchNoContent) String() string { + return fmt.Sprintf("[PATCH /objects/{id}][%d] objectsPatchNoContent ", 204) +} + +func (o *ObjectsPatchNoContent) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewObjectsPatchBadRequest creates a ObjectsPatchBadRequest with default headers values +func NewObjectsPatchBadRequest() *ObjectsPatchBadRequest { + return &ObjectsPatchBadRequest{} +} + +/* 
+ObjectsPatchBadRequest describes a response with status code 400, with default header values. + +The patch-JSON is malformed. +*/ +type ObjectsPatchBadRequest struct { +} + +// IsSuccess returns true when this objects patch bad request response has a 2xx status code +func (o *ObjectsPatchBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects patch bad request response has a 3xx status code +func (o *ObjectsPatchBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects patch bad request response has a 4xx status code +func (o *ObjectsPatchBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects patch bad request response has a 5xx status code +func (o *ObjectsPatchBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this objects patch bad request response a status code equal to that given +func (o *ObjectsPatchBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the objects patch bad request response +func (o *ObjectsPatchBadRequest) Code() int { + return 400 +} + +func (o *ObjectsPatchBadRequest) Error() string { + return fmt.Sprintf("[PATCH /objects/{id}][%d] objectsPatchBadRequest ", 400) +} + +func (o *ObjectsPatchBadRequest) String() string { + return fmt.Sprintf("[PATCH /objects/{id}][%d] objectsPatchBadRequest ", 400) +} + +func (o *ObjectsPatchBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewObjectsPatchUnauthorized creates a ObjectsPatchUnauthorized with default headers values +func NewObjectsPatchUnauthorized() *ObjectsPatchUnauthorized { + return &ObjectsPatchUnauthorized{} +} + +/* +ObjectsPatchUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. 
+*/ +type ObjectsPatchUnauthorized struct { +} + +// IsSuccess returns true when this objects patch unauthorized response has a 2xx status code +func (o *ObjectsPatchUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects patch unauthorized response has a 3xx status code +func (o *ObjectsPatchUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects patch unauthorized response has a 4xx status code +func (o *ObjectsPatchUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects patch unauthorized response has a 5xx status code +func (o *ObjectsPatchUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this objects patch unauthorized response a status code equal to that given +func (o *ObjectsPatchUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the objects patch unauthorized response +func (o *ObjectsPatchUnauthorized) Code() int { + return 401 +} + +func (o *ObjectsPatchUnauthorized) Error() string { + return fmt.Sprintf("[PATCH /objects/{id}][%d] objectsPatchUnauthorized ", 401) +} + +func (o *ObjectsPatchUnauthorized) String() string { + return fmt.Sprintf("[PATCH /objects/{id}][%d] objectsPatchUnauthorized ", 401) +} + +func (o *ObjectsPatchUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewObjectsPatchForbidden creates a ObjectsPatchForbidden with default headers values +func NewObjectsPatchForbidden() *ObjectsPatchForbidden { + return &ObjectsPatchForbidden{} +} + +/* +ObjectsPatchForbidden describes a response with status code 403, with default header values. 
+ +Forbidden +*/ +type ObjectsPatchForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this objects patch forbidden response has a 2xx status code +func (o *ObjectsPatchForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects patch forbidden response has a 3xx status code +func (o *ObjectsPatchForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects patch forbidden response has a 4xx status code +func (o *ObjectsPatchForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects patch forbidden response has a 5xx status code +func (o *ObjectsPatchForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this objects patch forbidden response a status code equal to that given +func (o *ObjectsPatchForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the objects patch forbidden response +func (o *ObjectsPatchForbidden) Code() int { + return 403 +} + +func (o *ObjectsPatchForbidden) Error() string { + return fmt.Sprintf("[PATCH /objects/{id}][%d] objectsPatchForbidden %+v", 403, o.Payload) +} + +func (o *ObjectsPatchForbidden) String() string { + return fmt.Sprintf("[PATCH /objects/{id}][%d] objectsPatchForbidden %+v", 403, o.Payload) +} + +func (o *ObjectsPatchForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ObjectsPatchForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewObjectsPatchNotFound creates a ObjectsPatchNotFound with default headers values +func NewObjectsPatchNotFound() *ObjectsPatchNotFound { + return &ObjectsPatchNotFound{} +} + +/* 
+ObjectsPatchNotFound describes a response with status code 404, with default header values. + +Successful query result but no resource was found. +*/ +type ObjectsPatchNotFound struct { +} + +// IsSuccess returns true when this objects patch not found response has a 2xx status code +func (o *ObjectsPatchNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects patch not found response has a 3xx status code +func (o *ObjectsPatchNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects patch not found response has a 4xx status code +func (o *ObjectsPatchNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects patch not found response has a 5xx status code +func (o *ObjectsPatchNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this objects patch not found response a status code equal to that given +func (o *ObjectsPatchNotFound) IsCode(code int) bool { + return code == 404 +} + +// Code gets the status code for the objects patch not found response +func (o *ObjectsPatchNotFound) Code() int { + return 404 +} + +func (o *ObjectsPatchNotFound) Error() string { + return fmt.Sprintf("[PATCH /objects/{id}][%d] objectsPatchNotFound ", 404) +} + +func (o *ObjectsPatchNotFound) String() string { + return fmt.Sprintf("[PATCH /objects/{id}][%d] objectsPatchNotFound ", 404) +} + +func (o *ObjectsPatchNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewObjectsPatchUnprocessableEntity creates a ObjectsPatchUnprocessableEntity with default headers values +func NewObjectsPatchUnprocessableEntity() *ObjectsPatchUnprocessableEntity { + return &ObjectsPatchUnprocessableEntity{} +} + +/* +ObjectsPatchUnprocessableEntity describes a response with status code 422, with default header values. + +The patch-JSON is valid but unprocessable. 
+*/ +type ObjectsPatchUnprocessableEntity struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this objects patch unprocessable entity response has a 2xx status code +func (o *ObjectsPatchUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects patch unprocessable entity response has a 3xx status code +func (o *ObjectsPatchUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects patch unprocessable entity response has a 4xx status code +func (o *ObjectsPatchUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects patch unprocessable entity response has a 5xx status code +func (o *ObjectsPatchUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this objects patch unprocessable entity response a status code equal to that given +func (o *ObjectsPatchUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + +// Code gets the status code for the objects patch unprocessable entity response +func (o *ObjectsPatchUnprocessableEntity) Code() int { + return 422 +} + +func (o *ObjectsPatchUnprocessableEntity) Error() string { + return fmt.Sprintf("[PATCH /objects/{id}][%d] objectsPatchUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *ObjectsPatchUnprocessableEntity) String() string { + return fmt.Sprintf("[PATCH /objects/{id}][%d] objectsPatchUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *ObjectsPatchUnprocessableEntity) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ObjectsPatchUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// 
NewObjectsPatchInternalServerError creates a ObjectsPatchInternalServerError with default headers values +func NewObjectsPatchInternalServerError() *ObjectsPatchInternalServerError { + return &ObjectsPatchInternalServerError{} +} + +/* +ObjectsPatchInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. +*/ +type ObjectsPatchInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this objects patch internal server error response has a 2xx status code +func (o *ObjectsPatchInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects patch internal server error response has a 3xx status code +func (o *ObjectsPatchInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects patch internal server error response has a 4xx status code +func (o *ObjectsPatchInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this objects patch internal server error response has a 5xx status code +func (o *ObjectsPatchInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this objects patch internal server error response a status code equal to that given +func (o *ObjectsPatchInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the objects patch internal server error response +func (o *ObjectsPatchInternalServerError) Code() int { + return 500 +} + +func (o *ObjectsPatchInternalServerError) Error() string { + return fmt.Sprintf("[PATCH /objects/{id}][%d] objectsPatchInternalServerError %+v", 500, o.Payload) +} + +func (o *ObjectsPatchInternalServerError) String() string { + return fmt.Sprintf("[PATCH /objects/{id}][%d] objectsPatchInternalServerError 
%+v", 500, o.Payload) +} + +func (o *ObjectsPatchInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ObjectsPatchInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/objects/objects_references_create_parameters.go b/platform/dbops/binaries/weaviate-src/client/objects/objects_references_create_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..02a62be6bed85f4a6efa419f52b4c31da1e77248 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/objects/objects_references_create_parameters.go @@ -0,0 +1,241 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// NewObjectsReferencesCreateParams creates a new ObjectsReferencesCreateParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. 
+func NewObjectsReferencesCreateParams() *ObjectsReferencesCreateParams { + return &ObjectsReferencesCreateParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewObjectsReferencesCreateParamsWithTimeout creates a new ObjectsReferencesCreateParams object +// with the ability to set a timeout on a request. +func NewObjectsReferencesCreateParamsWithTimeout(timeout time.Duration) *ObjectsReferencesCreateParams { + return &ObjectsReferencesCreateParams{ + timeout: timeout, + } +} + +// NewObjectsReferencesCreateParamsWithContext creates a new ObjectsReferencesCreateParams object +// with the ability to set a context for a request. +func NewObjectsReferencesCreateParamsWithContext(ctx context.Context) *ObjectsReferencesCreateParams { + return &ObjectsReferencesCreateParams{ + Context: ctx, + } +} + +// NewObjectsReferencesCreateParamsWithHTTPClient creates a new ObjectsReferencesCreateParams object +// with the ability to set a custom HTTPClient for a request. +func NewObjectsReferencesCreateParamsWithHTTPClient(client *http.Client) *ObjectsReferencesCreateParams { + return &ObjectsReferencesCreateParams{ + HTTPClient: client, + } +} + +/* +ObjectsReferencesCreateParams contains all the parameters to send to the API endpoint + + for the objects references create operation. + + Typically these are written to a http.Request. +*/ +type ObjectsReferencesCreateParams struct { + + // Body. + Body *models.SingleRef + + /* ID. + + Unique ID of the Object. + + Format: uuid + */ + ID strfmt.UUID + + /* PropertyName. + + Unique name of the property related to the Object. + */ + PropertyName string + + /* Tenant. + + Specifies the tenant in a request targeting a multi-tenant class + */ + Tenant *string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the objects references create params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *ObjectsReferencesCreateParams) WithDefaults() *ObjectsReferencesCreateParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the objects references create params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *ObjectsReferencesCreateParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the objects references create params +func (o *ObjectsReferencesCreateParams) WithTimeout(timeout time.Duration) *ObjectsReferencesCreateParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the objects references create params +func (o *ObjectsReferencesCreateParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the objects references create params +func (o *ObjectsReferencesCreateParams) WithContext(ctx context.Context) *ObjectsReferencesCreateParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the objects references create params +func (o *ObjectsReferencesCreateParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the objects references create params +func (o *ObjectsReferencesCreateParams) WithHTTPClient(client *http.Client) *ObjectsReferencesCreateParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the objects references create params +func (o *ObjectsReferencesCreateParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the objects references create params +func (o *ObjectsReferencesCreateParams) WithBody(body *models.SingleRef) *ObjectsReferencesCreateParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the objects references create params +func (o *ObjectsReferencesCreateParams) SetBody(body *models.SingleRef) { + o.Body = body +} + +// WithID adds the id to 
the objects references create params +func (o *ObjectsReferencesCreateParams) WithID(id strfmt.UUID) *ObjectsReferencesCreateParams { + o.SetID(id) + return o +} + +// SetID adds the id to the objects references create params +func (o *ObjectsReferencesCreateParams) SetID(id strfmt.UUID) { + o.ID = id +} + +// WithPropertyName adds the propertyName to the objects references create params +func (o *ObjectsReferencesCreateParams) WithPropertyName(propertyName string) *ObjectsReferencesCreateParams { + o.SetPropertyName(propertyName) + return o +} + +// SetPropertyName adds the propertyName to the objects references create params +func (o *ObjectsReferencesCreateParams) SetPropertyName(propertyName string) { + o.PropertyName = propertyName +} + +// WithTenant adds the tenant to the objects references create params +func (o *ObjectsReferencesCreateParams) WithTenant(tenant *string) *ObjectsReferencesCreateParams { + o.SetTenant(tenant) + return o +} + +// SetTenant adds the tenant to the objects references create params +func (o *ObjectsReferencesCreateParams) SetTenant(tenant *string) { + o.Tenant = tenant +} + +// WriteToRequest writes these params to a swagger request +func (o *ObjectsReferencesCreateParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + // path param id + if err := r.SetPathParam("id", o.ID.String()); err != nil { + return err + } + + // path param propertyName + if err := r.SetPathParam("propertyName", o.PropertyName); err != nil { + return err + } + + if o.Tenant != nil { + + // query param tenant + var qrTenant string + + if o.Tenant != nil { + qrTenant = *o.Tenant + } + qTenant := qrTenant + if qTenant != "" { + + if err := r.SetQueryParam("tenant", qTenant); err != nil { + return err + } + } + } + + if len(res) > 0 { + return 
errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/objects/objects_references_create_responses.go b/platform/dbops/binaries/weaviate-src/client/objects/objects_references_create_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..3fe2c0f09b0d593fdc494b12b75c6db7164ef23f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/objects/objects_references_create_responses.go @@ -0,0 +1,386 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// ObjectsReferencesCreateReader is a Reader for the ObjectsReferencesCreate structure. +type ObjectsReferencesCreateReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *ObjectsReferencesCreateReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewObjectsReferencesCreateOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 401: + result := NewObjectsReferencesCreateUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewObjectsReferencesCreateForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewObjectsReferencesCreateUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewObjectsReferencesCreateInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewObjectsReferencesCreateOK creates a ObjectsReferencesCreateOK with default headers values +func NewObjectsReferencesCreateOK() *ObjectsReferencesCreateOK { + return &ObjectsReferencesCreateOK{} +} + +/* +ObjectsReferencesCreateOK describes a response with status code 200, with default header values. + +Successfully added the reference. 
+*/ +type ObjectsReferencesCreateOK struct { +} + +// IsSuccess returns true when this objects references create o k response has a 2xx status code +func (o *ObjectsReferencesCreateOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this objects references create o k response has a 3xx status code +func (o *ObjectsReferencesCreateOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects references create o k response has a 4xx status code +func (o *ObjectsReferencesCreateOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this objects references create o k response has a 5xx status code +func (o *ObjectsReferencesCreateOK) IsServerError() bool { + return false +} + +// IsCode returns true when this objects references create o k response a status code equal to that given +func (o *ObjectsReferencesCreateOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the objects references create o k response +func (o *ObjectsReferencesCreateOK) Code() int { + return 200 +} + +func (o *ObjectsReferencesCreateOK) Error() string { + return fmt.Sprintf("[POST /objects/{id}/references/{propertyName}][%d] objectsReferencesCreateOK ", 200) +} + +func (o *ObjectsReferencesCreateOK) String() string { + return fmt.Sprintf("[POST /objects/{id}/references/{propertyName}][%d] objectsReferencesCreateOK ", 200) +} + +func (o *ObjectsReferencesCreateOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewObjectsReferencesCreateUnauthorized creates a ObjectsReferencesCreateUnauthorized with default headers values +func NewObjectsReferencesCreateUnauthorized() *ObjectsReferencesCreateUnauthorized { + return &ObjectsReferencesCreateUnauthorized{} +} + +/* +ObjectsReferencesCreateUnauthorized describes a response with status code 401, with default header values. 
+ +Unauthorized or invalid credentials. +*/ +type ObjectsReferencesCreateUnauthorized struct { +} + +// IsSuccess returns true when this objects references create unauthorized response has a 2xx status code +func (o *ObjectsReferencesCreateUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects references create unauthorized response has a 3xx status code +func (o *ObjectsReferencesCreateUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects references create unauthorized response has a 4xx status code +func (o *ObjectsReferencesCreateUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects references create unauthorized response has a 5xx status code +func (o *ObjectsReferencesCreateUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this objects references create unauthorized response a status code equal to that given +func (o *ObjectsReferencesCreateUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the objects references create unauthorized response +func (o *ObjectsReferencesCreateUnauthorized) Code() int { + return 401 +} + +func (o *ObjectsReferencesCreateUnauthorized) Error() string { + return fmt.Sprintf("[POST /objects/{id}/references/{propertyName}][%d] objectsReferencesCreateUnauthorized ", 401) +} + +func (o *ObjectsReferencesCreateUnauthorized) String() string { + return fmt.Sprintf("[POST /objects/{id}/references/{propertyName}][%d] objectsReferencesCreateUnauthorized ", 401) +} + +func (o *ObjectsReferencesCreateUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewObjectsReferencesCreateForbidden creates a ObjectsReferencesCreateForbidden with default headers values +func NewObjectsReferencesCreateForbidden() 
*ObjectsReferencesCreateForbidden { + return &ObjectsReferencesCreateForbidden{} +} + +/* +ObjectsReferencesCreateForbidden describes a response with status code 403, with default header values. + +Forbidden +*/ +type ObjectsReferencesCreateForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this objects references create forbidden response has a 2xx status code +func (o *ObjectsReferencesCreateForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects references create forbidden response has a 3xx status code +func (o *ObjectsReferencesCreateForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects references create forbidden response has a 4xx status code +func (o *ObjectsReferencesCreateForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects references create forbidden response has a 5xx status code +func (o *ObjectsReferencesCreateForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this objects references create forbidden response a status code equal to that given +func (o *ObjectsReferencesCreateForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the objects references create forbidden response +func (o *ObjectsReferencesCreateForbidden) Code() int { + return 403 +} + +func (o *ObjectsReferencesCreateForbidden) Error() string { + return fmt.Sprintf("[POST /objects/{id}/references/{propertyName}][%d] objectsReferencesCreateForbidden %+v", 403, o.Payload) +} + +func (o *ObjectsReferencesCreateForbidden) String() string { + return fmt.Sprintf("[POST /objects/{id}/references/{propertyName}][%d] objectsReferencesCreateForbidden %+v", 403, o.Payload) +} + +func (o *ObjectsReferencesCreateForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ObjectsReferencesCreateForbidden) readResponse(response 
runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewObjectsReferencesCreateUnprocessableEntity creates a ObjectsReferencesCreateUnprocessableEntity with default headers values +func NewObjectsReferencesCreateUnprocessableEntity() *ObjectsReferencesCreateUnprocessableEntity { + return &ObjectsReferencesCreateUnprocessableEntity{} +} + +/* +ObjectsReferencesCreateUnprocessableEntity describes a response with status code 422, with default header values. + +Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the property exists or that it is a class? +*/ +type ObjectsReferencesCreateUnprocessableEntity struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this objects references create unprocessable entity response has a 2xx status code +func (o *ObjectsReferencesCreateUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects references create unprocessable entity response has a 3xx status code +func (o *ObjectsReferencesCreateUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects references create unprocessable entity response has a 4xx status code +func (o *ObjectsReferencesCreateUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects references create unprocessable entity response has a 5xx status code +func (o *ObjectsReferencesCreateUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this objects references create unprocessable entity response a status code equal to that given +func (o *ObjectsReferencesCreateUnprocessableEntity) IsCode(code int) bool { + return code == 
422 +} + +// Code gets the status code for the objects references create unprocessable entity response +func (o *ObjectsReferencesCreateUnprocessableEntity) Code() int { + return 422 +} + +func (o *ObjectsReferencesCreateUnprocessableEntity) Error() string { + return fmt.Sprintf("[POST /objects/{id}/references/{propertyName}][%d] objectsReferencesCreateUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *ObjectsReferencesCreateUnprocessableEntity) String() string { + return fmt.Sprintf("[POST /objects/{id}/references/{propertyName}][%d] objectsReferencesCreateUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *ObjectsReferencesCreateUnprocessableEntity) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ObjectsReferencesCreateUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewObjectsReferencesCreateInternalServerError creates a ObjectsReferencesCreateInternalServerError with default headers values +func NewObjectsReferencesCreateInternalServerError() *ObjectsReferencesCreateInternalServerError { + return &ObjectsReferencesCreateInternalServerError{} +} + +/* +ObjectsReferencesCreateInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+*/ +type ObjectsReferencesCreateInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this objects references create internal server error response has a 2xx status code +func (o *ObjectsReferencesCreateInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects references create internal server error response has a 3xx status code +func (o *ObjectsReferencesCreateInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects references create internal server error response has a 4xx status code +func (o *ObjectsReferencesCreateInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this objects references create internal server error response has a 5xx status code +func (o *ObjectsReferencesCreateInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this objects references create internal server error response a status code equal to that given +func (o *ObjectsReferencesCreateInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the objects references create internal server error response +func (o *ObjectsReferencesCreateInternalServerError) Code() int { + return 500 +} + +func (o *ObjectsReferencesCreateInternalServerError) Error() string { + return fmt.Sprintf("[POST /objects/{id}/references/{propertyName}][%d] objectsReferencesCreateInternalServerError %+v", 500, o.Payload) +} + +func (o *ObjectsReferencesCreateInternalServerError) String() string { + return fmt.Sprintf("[POST /objects/{id}/references/{propertyName}][%d] objectsReferencesCreateInternalServerError %+v", 500, o.Payload) +} + +func (o *ObjectsReferencesCreateInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ObjectsReferencesCreateInternalServerError) readResponse(response 
runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/objects/objects_references_delete_parameters.go b/platform/dbops/binaries/weaviate-src/client/objects/objects_references_delete_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..279996aff30a22ffc3960f68b3b8296b0dd78272 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/objects/objects_references_delete_parameters.go @@ -0,0 +1,241 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// NewObjectsReferencesDeleteParams creates a new ObjectsReferencesDeleteParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. 
+func NewObjectsReferencesDeleteParams() *ObjectsReferencesDeleteParams { + return &ObjectsReferencesDeleteParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewObjectsReferencesDeleteParamsWithTimeout creates a new ObjectsReferencesDeleteParams object +// with the ability to set a timeout on a request. +func NewObjectsReferencesDeleteParamsWithTimeout(timeout time.Duration) *ObjectsReferencesDeleteParams { + return &ObjectsReferencesDeleteParams{ + timeout: timeout, + } +} + +// NewObjectsReferencesDeleteParamsWithContext creates a new ObjectsReferencesDeleteParams object +// with the ability to set a context for a request. +func NewObjectsReferencesDeleteParamsWithContext(ctx context.Context) *ObjectsReferencesDeleteParams { + return &ObjectsReferencesDeleteParams{ + Context: ctx, + } +} + +// NewObjectsReferencesDeleteParamsWithHTTPClient creates a new ObjectsReferencesDeleteParams object +// with the ability to set a custom HTTPClient for a request. +func NewObjectsReferencesDeleteParamsWithHTTPClient(client *http.Client) *ObjectsReferencesDeleteParams { + return &ObjectsReferencesDeleteParams{ + HTTPClient: client, + } +} + +/* +ObjectsReferencesDeleteParams contains all the parameters to send to the API endpoint + + for the objects references delete operation. + + Typically these are written to a http.Request. +*/ +type ObjectsReferencesDeleteParams struct { + + // Body. + Body *models.SingleRef + + /* ID. + + Unique ID of the Object. + + Format: uuid + */ + ID strfmt.UUID + + /* PropertyName. + + Unique name of the property related to the Object. + */ + PropertyName string + + /* Tenant. + + Specifies the tenant in a request targeting a multi-tenant class + */ + Tenant *string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the objects references delete params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *ObjectsReferencesDeleteParams) WithDefaults() *ObjectsReferencesDeleteParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the objects references delete params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *ObjectsReferencesDeleteParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the objects references delete params +func (o *ObjectsReferencesDeleteParams) WithTimeout(timeout time.Duration) *ObjectsReferencesDeleteParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the objects references delete params +func (o *ObjectsReferencesDeleteParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the objects references delete params +func (o *ObjectsReferencesDeleteParams) WithContext(ctx context.Context) *ObjectsReferencesDeleteParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the objects references delete params +func (o *ObjectsReferencesDeleteParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the objects references delete params +func (o *ObjectsReferencesDeleteParams) WithHTTPClient(client *http.Client) *ObjectsReferencesDeleteParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the objects references delete params +func (o *ObjectsReferencesDeleteParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the objects references delete params +func (o *ObjectsReferencesDeleteParams) WithBody(body *models.SingleRef) *ObjectsReferencesDeleteParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the objects references delete params +func (o *ObjectsReferencesDeleteParams) SetBody(body *models.SingleRef) { + o.Body = body +} + +// WithID adds the id to 
the objects references delete params +func (o *ObjectsReferencesDeleteParams) WithID(id strfmt.UUID) *ObjectsReferencesDeleteParams { + o.SetID(id) + return o +} + +// SetID adds the id to the objects references delete params +func (o *ObjectsReferencesDeleteParams) SetID(id strfmt.UUID) { + o.ID = id +} + +// WithPropertyName adds the propertyName to the objects references delete params +func (o *ObjectsReferencesDeleteParams) WithPropertyName(propertyName string) *ObjectsReferencesDeleteParams { + o.SetPropertyName(propertyName) + return o +} + +// SetPropertyName adds the propertyName to the objects references delete params +func (o *ObjectsReferencesDeleteParams) SetPropertyName(propertyName string) { + o.PropertyName = propertyName +} + +// WithTenant adds the tenant to the objects references delete params +func (o *ObjectsReferencesDeleteParams) WithTenant(tenant *string) *ObjectsReferencesDeleteParams { + o.SetTenant(tenant) + return o +} + +// SetTenant adds the tenant to the objects references delete params +func (o *ObjectsReferencesDeleteParams) SetTenant(tenant *string) { + o.Tenant = tenant +} + +// WriteToRequest writes these params to a swagger request +func (o *ObjectsReferencesDeleteParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + // path param id + if err := r.SetPathParam("id", o.ID.String()); err != nil { + return err + } + + // path param propertyName + if err := r.SetPathParam("propertyName", o.PropertyName); err != nil { + return err + } + + if o.Tenant != nil { + + // query param tenant + var qrTenant string + + if o.Tenant != nil { + qrTenant = *o.Tenant + } + qTenant := qrTenant + if qTenant != "" { + + if err := r.SetQueryParam("tenant", qTenant); err != nil { + return err + } + } + } + + if len(res) > 0 { + return 
errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/objects/objects_references_delete_responses.go b/platform/dbops/binaries/weaviate-src/client/objects/objects_references_delete_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..034664c5c6f5cd3f66f2e44665b3dc74ab70edd1 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/objects/objects_references_delete_responses.go @@ -0,0 +1,386 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// ObjectsReferencesDeleteReader is a Reader for the ObjectsReferencesDelete structure. +type ObjectsReferencesDeleteReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *ObjectsReferencesDeleteReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 204: + result := NewObjectsReferencesDeleteNoContent() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 401: + result := NewObjectsReferencesDeleteUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewObjectsReferencesDeleteForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 404: + result := NewObjectsReferencesDeleteNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewObjectsReferencesDeleteInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewObjectsReferencesDeleteNoContent creates a ObjectsReferencesDeleteNoContent with default headers values +func NewObjectsReferencesDeleteNoContent() *ObjectsReferencesDeleteNoContent { + return &ObjectsReferencesDeleteNoContent{} +} + +/* +ObjectsReferencesDeleteNoContent describes a response with status code 204, with default header values. + +Successfully deleted. 
+*/ +type ObjectsReferencesDeleteNoContent struct { +} + +// IsSuccess returns true when this objects references delete no content response has a 2xx status code +func (o *ObjectsReferencesDeleteNoContent) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this objects references delete no content response has a 3xx status code +func (o *ObjectsReferencesDeleteNoContent) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects references delete no content response has a 4xx status code +func (o *ObjectsReferencesDeleteNoContent) IsClientError() bool { + return false +} + +// IsServerError returns true when this objects references delete no content response has a 5xx status code +func (o *ObjectsReferencesDeleteNoContent) IsServerError() bool { + return false +} + +// IsCode returns true when this objects references delete no content response a status code equal to that given +func (o *ObjectsReferencesDeleteNoContent) IsCode(code int) bool { + return code == 204 +} + +// Code gets the status code for the objects references delete no content response +func (o *ObjectsReferencesDeleteNoContent) Code() int { + return 204 +} + +func (o *ObjectsReferencesDeleteNoContent) Error() string { + return fmt.Sprintf("[DELETE /objects/{id}/references/{propertyName}][%d] objectsReferencesDeleteNoContent ", 204) +} + +func (o *ObjectsReferencesDeleteNoContent) String() string { + return fmt.Sprintf("[DELETE /objects/{id}/references/{propertyName}][%d] objectsReferencesDeleteNoContent ", 204) +} + +func (o *ObjectsReferencesDeleteNoContent) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewObjectsReferencesDeleteUnauthorized creates a ObjectsReferencesDeleteUnauthorized with default headers values +func NewObjectsReferencesDeleteUnauthorized() *ObjectsReferencesDeleteUnauthorized { + return &ObjectsReferencesDeleteUnauthorized{} +} + +/* 
+ObjectsReferencesDeleteUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. +*/ +type ObjectsReferencesDeleteUnauthorized struct { +} + +// IsSuccess returns true when this objects references delete unauthorized response has a 2xx status code +func (o *ObjectsReferencesDeleteUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects references delete unauthorized response has a 3xx status code +func (o *ObjectsReferencesDeleteUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects references delete unauthorized response has a 4xx status code +func (o *ObjectsReferencesDeleteUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects references delete unauthorized response has a 5xx status code +func (o *ObjectsReferencesDeleteUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this objects references delete unauthorized response a status code equal to that given +func (o *ObjectsReferencesDeleteUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the objects references delete unauthorized response +func (o *ObjectsReferencesDeleteUnauthorized) Code() int { + return 401 +} + +func (o *ObjectsReferencesDeleteUnauthorized) Error() string { + return fmt.Sprintf("[DELETE /objects/{id}/references/{propertyName}][%d] objectsReferencesDeleteUnauthorized ", 401) +} + +func (o *ObjectsReferencesDeleteUnauthorized) String() string { + return fmt.Sprintf("[DELETE /objects/{id}/references/{propertyName}][%d] objectsReferencesDeleteUnauthorized ", 401) +} + +func (o *ObjectsReferencesDeleteUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewObjectsReferencesDeleteForbidden creates a 
ObjectsReferencesDeleteForbidden with default headers values +func NewObjectsReferencesDeleteForbidden() *ObjectsReferencesDeleteForbidden { + return &ObjectsReferencesDeleteForbidden{} +} + +/* +ObjectsReferencesDeleteForbidden describes a response with status code 403, with default header values. + +Forbidden +*/ +type ObjectsReferencesDeleteForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this objects references delete forbidden response has a 2xx status code +func (o *ObjectsReferencesDeleteForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects references delete forbidden response has a 3xx status code +func (o *ObjectsReferencesDeleteForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects references delete forbidden response has a 4xx status code +func (o *ObjectsReferencesDeleteForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects references delete forbidden response has a 5xx status code +func (o *ObjectsReferencesDeleteForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this objects references delete forbidden response a status code equal to that given +func (o *ObjectsReferencesDeleteForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the objects references delete forbidden response +func (o *ObjectsReferencesDeleteForbidden) Code() int { + return 403 +} + +func (o *ObjectsReferencesDeleteForbidden) Error() string { + return fmt.Sprintf("[DELETE /objects/{id}/references/{propertyName}][%d] objectsReferencesDeleteForbidden %+v", 403, o.Payload) +} + +func (o *ObjectsReferencesDeleteForbidden) String() string { + return fmt.Sprintf("[DELETE /objects/{id}/references/{propertyName}][%d] objectsReferencesDeleteForbidden %+v", 403, o.Payload) +} + +func (o *ObjectsReferencesDeleteForbidden) GetPayload() 
*models.ErrorResponse { + return o.Payload +} + +func (o *ObjectsReferencesDeleteForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewObjectsReferencesDeleteNotFound creates a ObjectsReferencesDeleteNotFound with default headers values +func NewObjectsReferencesDeleteNotFound() *ObjectsReferencesDeleteNotFound { + return &ObjectsReferencesDeleteNotFound{} +} + +/* +ObjectsReferencesDeleteNotFound describes a response with status code 404, with default header values. + +Successful query result but no resource was found. +*/ +type ObjectsReferencesDeleteNotFound struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this objects references delete not found response has a 2xx status code +func (o *ObjectsReferencesDeleteNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects references delete not found response has a 3xx status code +func (o *ObjectsReferencesDeleteNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects references delete not found response has a 4xx status code +func (o *ObjectsReferencesDeleteNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects references delete not found response has a 5xx status code +func (o *ObjectsReferencesDeleteNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this objects references delete not found response a status code equal to that given +func (o *ObjectsReferencesDeleteNotFound) IsCode(code int) bool { + return code == 404 +} + +// Code gets the status code for the objects references delete not found response +func (o *ObjectsReferencesDeleteNotFound) Code() int { + return 404 +} + 
+func (o *ObjectsReferencesDeleteNotFound) Error() string { + return fmt.Sprintf("[DELETE /objects/{id}/references/{propertyName}][%d] objectsReferencesDeleteNotFound %+v", 404, o.Payload) +} + +func (o *ObjectsReferencesDeleteNotFound) String() string { + return fmt.Sprintf("[DELETE /objects/{id}/references/{propertyName}][%d] objectsReferencesDeleteNotFound %+v", 404, o.Payload) +} + +func (o *ObjectsReferencesDeleteNotFound) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ObjectsReferencesDeleteNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewObjectsReferencesDeleteInternalServerError creates a ObjectsReferencesDeleteInternalServerError with default headers values +func NewObjectsReferencesDeleteInternalServerError() *ObjectsReferencesDeleteInternalServerError { + return &ObjectsReferencesDeleteInternalServerError{} +} + +/* +ObjectsReferencesDeleteInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+*/ +type ObjectsReferencesDeleteInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this objects references delete internal server error response has a 2xx status code +func (o *ObjectsReferencesDeleteInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects references delete internal server error response has a 3xx status code +func (o *ObjectsReferencesDeleteInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects references delete internal server error response has a 4xx status code +func (o *ObjectsReferencesDeleteInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this objects references delete internal server error response has a 5xx status code +func (o *ObjectsReferencesDeleteInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this objects references delete internal server error response a status code equal to that given +func (o *ObjectsReferencesDeleteInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the objects references delete internal server error response +func (o *ObjectsReferencesDeleteInternalServerError) Code() int { + return 500 +} + +func (o *ObjectsReferencesDeleteInternalServerError) Error() string { + return fmt.Sprintf("[DELETE /objects/{id}/references/{propertyName}][%d] objectsReferencesDeleteInternalServerError %+v", 500, o.Payload) +} + +func (o *ObjectsReferencesDeleteInternalServerError) String() string { + return fmt.Sprintf("[DELETE /objects/{id}/references/{propertyName}][%d] objectsReferencesDeleteInternalServerError %+v", 500, o.Payload) +} + +func (o *ObjectsReferencesDeleteInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ObjectsReferencesDeleteInternalServerError) readResponse(response 
runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/objects/objects_references_update_parameters.go b/platform/dbops/binaries/weaviate-src/client/objects/objects_references_update_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..180dacc0919835e65326c019705719e1244f39fb --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/objects/objects_references_update_parameters.go @@ -0,0 +1,241 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// NewObjectsReferencesUpdateParams creates a new ObjectsReferencesUpdateParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. 
+func NewObjectsReferencesUpdateParams() *ObjectsReferencesUpdateParams { + return &ObjectsReferencesUpdateParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewObjectsReferencesUpdateParamsWithTimeout creates a new ObjectsReferencesUpdateParams object +// with the ability to set a timeout on a request. +func NewObjectsReferencesUpdateParamsWithTimeout(timeout time.Duration) *ObjectsReferencesUpdateParams { + return &ObjectsReferencesUpdateParams{ + timeout: timeout, + } +} + +// NewObjectsReferencesUpdateParamsWithContext creates a new ObjectsReferencesUpdateParams object +// with the ability to set a context for a request. +func NewObjectsReferencesUpdateParamsWithContext(ctx context.Context) *ObjectsReferencesUpdateParams { + return &ObjectsReferencesUpdateParams{ + Context: ctx, + } +} + +// NewObjectsReferencesUpdateParamsWithHTTPClient creates a new ObjectsReferencesUpdateParams object +// with the ability to set a custom HTTPClient for a request. +func NewObjectsReferencesUpdateParamsWithHTTPClient(client *http.Client) *ObjectsReferencesUpdateParams { + return &ObjectsReferencesUpdateParams{ + HTTPClient: client, + } +} + +/* +ObjectsReferencesUpdateParams contains all the parameters to send to the API endpoint + + for the objects references update operation. + + Typically these are written to a http.Request. +*/ +type ObjectsReferencesUpdateParams struct { + + // Body. + Body models.MultipleRef + + /* ID. + + Unique ID of the Object. + + Format: uuid + */ + ID strfmt.UUID + + /* PropertyName. + + Unique name of the property related to the Object. + */ + PropertyName string + + /* Tenant. + + Specifies the tenant in a request targeting a multi-tenant class + */ + Tenant *string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the objects references update params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *ObjectsReferencesUpdateParams) WithDefaults() *ObjectsReferencesUpdateParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the objects references update params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *ObjectsReferencesUpdateParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the objects references update params +func (o *ObjectsReferencesUpdateParams) WithTimeout(timeout time.Duration) *ObjectsReferencesUpdateParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the objects references update params +func (o *ObjectsReferencesUpdateParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the objects references update params +func (o *ObjectsReferencesUpdateParams) WithContext(ctx context.Context) *ObjectsReferencesUpdateParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the objects references update params +func (o *ObjectsReferencesUpdateParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the objects references update params +func (o *ObjectsReferencesUpdateParams) WithHTTPClient(client *http.Client) *ObjectsReferencesUpdateParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the objects references update params +func (o *ObjectsReferencesUpdateParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the objects references update params +func (o *ObjectsReferencesUpdateParams) WithBody(body models.MultipleRef) *ObjectsReferencesUpdateParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the objects references update params +func (o *ObjectsReferencesUpdateParams) SetBody(body models.MultipleRef) { + o.Body = body +} + +// WithID adds the id 
to the objects references update params +func (o *ObjectsReferencesUpdateParams) WithID(id strfmt.UUID) *ObjectsReferencesUpdateParams { + o.SetID(id) + return o +} + +// SetID adds the id to the objects references update params +func (o *ObjectsReferencesUpdateParams) SetID(id strfmt.UUID) { + o.ID = id +} + +// WithPropertyName adds the propertyName to the objects references update params +func (o *ObjectsReferencesUpdateParams) WithPropertyName(propertyName string) *ObjectsReferencesUpdateParams { + o.SetPropertyName(propertyName) + return o +} + +// SetPropertyName adds the propertyName to the objects references update params +func (o *ObjectsReferencesUpdateParams) SetPropertyName(propertyName string) { + o.PropertyName = propertyName +} + +// WithTenant adds the tenant to the objects references update params +func (o *ObjectsReferencesUpdateParams) WithTenant(tenant *string) *ObjectsReferencesUpdateParams { + o.SetTenant(tenant) + return o +} + +// SetTenant adds the tenant to the objects references update params +func (o *ObjectsReferencesUpdateParams) SetTenant(tenant *string) { + o.Tenant = tenant +} + +// WriteToRequest writes these params to a swagger request +func (o *ObjectsReferencesUpdateParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + // path param id + if err := r.SetPathParam("id", o.ID.String()); err != nil { + return err + } + + // path param propertyName + if err := r.SetPathParam("propertyName", o.PropertyName); err != nil { + return err + } + + if o.Tenant != nil { + + // query param tenant + var qrTenant string + + if o.Tenant != nil { + qrTenant = *o.Tenant + } + qTenant := qrTenant + if qTenant != "" { + + if err := r.SetQueryParam("tenant", qTenant); err != nil { + return err + } + } + } + + if len(res) > 0 { + return 
errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/objects/objects_references_update_responses.go b/platform/dbops/binaries/weaviate-src/client/objects/objects_references_update_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..92caedb1fc65ad162b2ee0c067ba56a15ae211e5 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/objects/objects_references_update_responses.go @@ -0,0 +1,386 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// ObjectsReferencesUpdateReader is a Reader for the ObjectsReferencesUpdate structure. +type ObjectsReferencesUpdateReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *ObjectsReferencesUpdateReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewObjectsReferencesUpdateOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 401: + result := NewObjectsReferencesUpdateUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewObjectsReferencesUpdateForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewObjectsReferencesUpdateUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewObjectsReferencesUpdateInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewObjectsReferencesUpdateOK creates a ObjectsReferencesUpdateOK with default headers values +func NewObjectsReferencesUpdateOK() *ObjectsReferencesUpdateOK { + return &ObjectsReferencesUpdateOK{} +} + +/* +ObjectsReferencesUpdateOK describes a response with status code 200, with default header values. + +Successfully replaced all the references. 
+*/ +type ObjectsReferencesUpdateOK struct { +} + +// IsSuccess returns true when this objects references update o k response has a 2xx status code +func (o *ObjectsReferencesUpdateOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this objects references update o k response has a 3xx status code +func (o *ObjectsReferencesUpdateOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects references update o k response has a 4xx status code +func (o *ObjectsReferencesUpdateOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this objects references update o k response has a 5xx status code +func (o *ObjectsReferencesUpdateOK) IsServerError() bool { + return false +} + +// IsCode returns true when this objects references update o k response a status code equal to that given +func (o *ObjectsReferencesUpdateOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the objects references update o k response +func (o *ObjectsReferencesUpdateOK) Code() int { + return 200 +} + +func (o *ObjectsReferencesUpdateOK) Error() string { + return fmt.Sprintf("[PUT /objects/{id}/references/{propertyName}][%d] objectsReferencesUpdateOK ", 200) +} + +func (o *ObjectsReferencesUpdateOK) String() string { + return fmt.Sprintf("[PUT /objects/{id}/references/{propertyName}][%d] objectsReferencesUpdateOK ", 200) +} + +func (o *ObjectsReferencesUpdateOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewObjectsReferencesUpdateUnauthorized creates a ObjectsReferencesUpdateUnauthorized with default headers values +func NewObjectsReferencesUpdateUnauthorized() *ObjectsReferencesUpdateUnauthorized { + return &ObjectsReferencesUpdateUnauthorized{} +} + +/* +ObjectsReferencesUpdateUnauthorized describes a response with status code 401, with default header values. 
+ +Unauthorized or invalid credentials. +*/ +type ObjectsReferencesUpdateUnauthorized struct { +} + +// IsSuccess returns true when this objects references update unauthorized response has a 2xx status code +func (o *ObjectsReferencesUpdateUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects references update unauthorized response has a 3xx status code +func (o *ObjectsReferencesUpdateUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects references update unauthorized response has a 4xx status code +func (o *ObjectsReferencesUpdateUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects references update unauthorized response has a 5xx status code +func (o *ObjectsReferencesUpdateUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this objects references update unauthorized response a status code equal to that given +func (o *ObjectsReferencesUpdateUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the objects references update unauthorized response +func (o *ObjectsReferencesUpdateUnauthorized) Code() int { + return 401 +} + +func (o *ObjectsReferencesUpdateUnauthorized) Error() string { + return fmt.Sprintf("[PUT /objects/{id}/references/{propertyName}][%d] objectsReferencesUpdateUnauthorized ", 401) +} + +func (o *ObjectsReferencesUpdateUnauthorized) String() string { + return fmt.Sprintf("[PUT /objects/{id}/references/{propertyName}][%d] objectsReferencesUpdateUnauthorized ", 401) +} + +func (o *ObjectsReferencesUpdateUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewObjectsReferencesUpdateForbidden creates a ObjectsReferencesUpdateForbidden with default headers values +func NewObjectsReferencesUpdateForbidden() *ObjectsReferencesUpdateForbidden 
{ + return &ObjectsReferencesUpdateForbidden{} +} + +/* +ObjectsReferencesUpdateForbidden describes a response with status code 403, with default header values. + +Forbidden +*/ +type ObjectsReferencesUpdateForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this objects references update forbidden response has a 2xx status code +func (o *ObjectsReferencesUpdateForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects references update forbidden response has a 3xx status code +func (o *ObjectsReferencesUpdateForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects references update forbidden response has a 4xx status code +func (o *ObjectsReferencesUpdateForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects references update forbidden response has a 5xx status code +func (o *ObjectsReferencesUpdateForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this objects references update forbidden response a status code equal to that given +func (o *ObjectsReferencesUpdateForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the objects references update forbidden response +func (o *ObjectsReferencesUpdateForbidden) Code() int { + return 403 +} + +func (o *ObjectsReferencesUpdateForbidden) Error() string { + return fmt.Sprintf("[PUT /objects/{id}/references/{propertyName}][%d] objectsReferencesUpdateForbidden %+v", 403, o.Payload) +} + +func (o *ObjectsReferencesUpdateForbidden) String() string { + return fmt.Sprintf("[PUT /objects/{id}/references/{propertyName}][%d] objectsReferencesUpdateForbidden %+v", 403, o.Payload) +} + +func (o *ObjectsReferencesUpdateForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ObjectsReferencesUpdateForbidden) readResponse(response runtime.ClientResponse, consumer 
runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewObjectsReferencesUpdateUnprocessableEntity creates a ObjectsReferencesUpdateUnprocessableEntity with default headers values +func NewObjectsReferencesUpdateUnprocessableEntity() *ObjectsReferencesUpdateUnprocessableEntity { + return &ObjectsReferencesUpdateUnprocessableEntity{} +} + +/* +ObjectsReferencesUpdateUnprocessableEntity describes a response with status code 422, with default header values. + +Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the property exists or that it is a class? +*/ +type ObjectsReferencesUpdateUnprocessableEntity struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this objects references update unprocessable entity response has a 2xx status code +func (o *ObjectsReferencesUpdateUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects references update unprocessable entity response has a 3xx status code +func (o *ObjectsReferencesUpdateUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects references update unprocessable entity response has a 4xx status code +func (o *ObjectsReferencesUpdateUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects references update unprocessable entity response has a 5xx status code +func (o *ObjectsReferencesUpdateUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this objects references update unprocessable entity response a status code equal to that given +func (o *ObjectsReferencesUpdateUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + +// Code gets the status 
code for the objects references update unprocessable entity response +func (o *ObjectsReferencesUpdateUnprocessableEntity) Code() int { + return 422 +} + +func (o *ObjectsReferencesUpdateUnprocessableEntity) Error() string { + return fmt.Sprintf("[PUT /objects/{id}/references/{propertyName}][%d] objectsReferencesUpdateUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *ObjectsReferencesUpdateUnprocessableEntity) String() string { + return fmt.Sprintf("[PUT /objects/{id}/references/{propertyName}][%d] objectsReferencesUpdateUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *ObjectsReferencesUpdateUnprocessableEntity) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ObjectsReferencesUpdateUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewObjectsReferencesUpdateInternalServerError creates a ObjectsReferencesUpdateInternalServerError with default headers values +func NewObjectsReferencesUpdateInternalServerError() *ObjectsReferencesUpdateInternalServerError { + return &ObjectsReferencesUpdateInternalServerError{} +} + +/* +ObjectsReferencesUpdateInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+*/ +type ObjectsReferencesUpdateInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this objects references update internal server error response has a 2xx status code +func (o *ObjectsReferencesUpdateInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects references update internal server error response has a 3xx status code +func (o *ObjectsReferencesUpdateInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects references update internal server error response has a 4xx status code +func (o *ObjectsReferencesUpdateInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this objects references update internal server error response has a 5xx status code +func (o *ObjectsReferencesUpdateInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this objects references update internal server error response a status code equal to that given +func (o *ObjectsReferencesUpdateInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the objects references update internal server error response +func (o *ObjectsReferencesUpdateInternalServerError) Code() int { + return 500 +} + +func (o *ObjectsReferencesUpdateInternalServerError) Error() string { + return fmt.Sprintf("[PUT /objects/{id}/references/{propertyName}][%d] objectsReferencesUpdateInternalServerError %+v", 500, o.Payload) +} + +func (o *ObjectsReferencesUpdateInternalServerError) String() string { + return fmt.Sprintf("[PUT /objects/{id}/references/{propertyName}][%d] objectsReferencesUpdateInternalServerError %+v", 500, o.Payload) +} + +func (o *ObjectsReferencesUpdateInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ObjectsReferencesUpdateInternalServerError) readResponse(response 
runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/objects/objects_update_parameters.go b/platform/dbops/binaries/weaviate-src/client/objects/objects_update_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..7c9b32e13cc6dbc929e20151db2b36fdf0caf86c --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/objects/objects_update_parameters.go @@ -0,0 +1,219 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// NewObjectsUpdateParams creates a new ObjectsUpdateParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewObjectsUpdateParams() *ObjectsUpdateParams { + return &ObjectsUpdateParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewObjectsUpdateParamsWithTimeout creates a new ObjectsUpdateParams object +// with the ability to set a timeout on a request. 
+func NewObjectsUpdateParamsWithTimeout(timeout time.Duration) *ObjectsUpdateParams { + return &ObjectsUpdateParams{ + timeout: timeout, + } +} + +// NewObjectsUpdateParamsWithContext creates a new ObjectsUpdateParams object +// with the ability to set a context for a request. +func NewObjectsUpdateParamsWithContext(ctx context.Context) *ObjectsUpdateParams { + return &ObjectsUpdateParams{ + Context: ctx, + } +} + +// NewObjectsUpdateParamsWithHTTPClient creates a new ObjectsUpdateParams object +// with the ability to set a custom HTTPClient for a request. +func NewObjectsUpdateParamsWithHTTPClient(client *http.Client) *ObjectsUpdateParams { + return &ObjectsUpdateParams{ + HTTPClient: client, + } +} + +/* +ObjectsUpdateParams contains all the parameters to send to the API endpoint + + for the objects update operation. + + Typically these are written to a http.Request. +*/ +type ObjectsUpdateParams struct { + + // Body. + Body *models.Object + + /* ConsistencyLevel. + + Determines how many replicas must acknowledge a request before it is considered successful + */ + ConsistencyLevel *string + + /* ID. + + Unique ID of the Object. + + Format: uuid + */ + ID strfmt.UUID + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the objects update params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *ObjectsUpdateParams) WithDefaults() *ObjectsUpdateParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the objects update params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *ObjectsUpdateParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the objects update params +func (o *ObjectsUpdateParams) WithTimeout(timeout time.Duration) *ObjectsUpdateParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the objects update params +func (o *ObjectsUpdateParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the objects update params +func (o *ObjectsUpdateParams) WithContext(ctx context.Context) *ObjectsUpdateParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the objects update params +func (o *ObjectsUpdateParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the objects update params +func (o *ObjectsUpdateParams) WithHTTPClient(client *http.Client) *ObjectsUpdateParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the objects update params +func (o *ObjectsUpdateParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the objects update params +func (o *ObjectsUpdateParams) WithBody(body *models.Object) *ObjectsUpdateParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the objects update params +func (o *ObjectsUpdateParams) SetBody(body *models.Object) { + o.Body = body +} + +// WithConsistencyLevel adds the consistencyLevel to the objects update params +func (o *ObjectsUpdateParams) WithConsistencyLevel(consistencyLevel *string) *ObjectsUpdateParams { + o.SetConsistencyLevel(consistencyLevel) + return o +} + +// SetConsistencyLevel adds the consistencyLevel to the objects update params +func (o *ObjectsUpdateParams) SetConsistencyLevel(consistencyLevel *string) { + o.ConsistencyLevel = consistencyLevel +} + +// WithID adds the id to the objects update params +func (o *ObjectsUpdateParams) WithID(id 
strfmt.UUID) *ObjectsUpdateParams { + o.SetID(id) + return o +} + +// SetID adds the id to the objects update params +func (o *ObjectsUpdateParams) SetID(id strfmt.UUID) { + o.ID = id +} + +// WriteToRequest writes these params to a swagger request +func (o *ObjectsUpdateParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + if o.ConsistencyLevel != nil { + + // query param consistency_level + var qrConsistencyLevel string + + if o.ConsistencyLevel != nil { + qrConsistencyLevel = *o.ConsistencyLevel + } + qConsistencyLevel := qrConsistencyLevel + if qConsistencyLevel != "" { + + if err := r.SetQueryParam("consistency_level", qConsistencyLevel); err != nil { + return err + } + } + } + + // path param id + if err := r.SetPathParam("id", o.ID.String()); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/objects/objects_update_responses.go b/platform/dbops/binaries/weaviate-src/client/objects/objects_update_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..a1b51317cc576cbd85cd7f66599e4d5d2135f5ea --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/objects/objects_update_responses.go @@ -0,0 +1,460 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// ObjectsUpdateReader is a Reader for the ObjectsUpdate structure. +type ObjectsUpdateReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ObjectsUpdateReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewObjectsUpdateOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 401: + result := NewObjectsUpdateUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewObjectsUpdateForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 404: + result := NewObjectsUpdateNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewObjectsUpdateUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewObjectsUpdateInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewObjectsUpdateOK creates a ObjectsUpdateOK with default headers values +func NewObjectsUpdateOK() *ObjectsUpdateOK { + return &ObjectsUpdateOK{} +} + +/* 
+ObjectsUpdateOK describes a response with status code 200, with default header values. + +Successfully received. +*/ +type ObjectsUpdateOK struct { + Payload *models.Object +} + +// IsSuccess returns true when this objects update o k response has a 2xx status code +func (o *ObjectsUpdateOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this objects update o k response has a 3xx status code +func (o *ObjectsUpdateOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects update o k response has a 4xx status code +func (o *ObjectsUpdateOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this objects update o k response has a 5xx status code +func (o *ObjectsUpdateOK) IsServerError() bool { + return false +} + +// IsCode returns true when this objects update o k response a status code equal to that given +func (o *ObjectsUpdateOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the objects update o k response +func (o *ObjectsUpdateOK) Code() int { + return 200 +} + +func (o *ObjectsUpdateOK) Error() string { + return fmt.Sprintf("[PUT /objects/{id}][%d] objectsUpdateOK %+v", 200, o.Payload) +} + +func (o *ObjectsUpdateOK) String() string { + return fmt.Sprintf("[PUT /objects/{id}][%d] objectsUpdateOK %+v", 200, o.Payload) +} + +func (o *ObjectsUpdateOK) GetPayload() *models.Object { + return o.Payload +} + +func (o *ObjectsUpdateOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Object) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewObjectsUpdateUnauthorized creates a ObjectsUpdateUnauthorized with default headers values +func NewObjectsUpdateUnauthorized() *ObjectsUpdateUnauthorized { + return &ObjectsUpdateUnauthorized{} +} + +/* 
+ObjectsUpdateUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. +*/ +type ObjectsUpdateUnauthorized struct { +} + +// IsSuccess returns true when this objects update unauthorized response has a 2xx status code +func (o *ObjectsUpdateUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects update unauthorized response has a 3xx status code +func (o *ObjectsUpdateUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects update unauthorized response has a 4xx status code +func (o *ObjectsUpdateUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects update unauthorized response has a 5xx status code +func (o *ObjectsUpdateUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this objects update unauthorized response a status code equal to that given +func (o *ObjectsUpdateUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the objects update unauthorized response +func (o *ObjectsUpdateUnauthorized) Code() int { + return 401 +} + +func (o *ObjectsUpdateUnauthorized) Error() string { + return fmt.Sprintf("[PUT /objects/{id}][%d] objectsUpdateUnauthorized ", 401) +} + +func (o *ObjectsUpdateUnauthorized) String() string { + return fmt.Sprintf("[PUT /objects/{id}][%d] objectsUpdateUnauthorized ", 401) +} + +func (o *ObjectsUpdateUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewObjectsUpdateForbidden creates a ObjectsUpdateForbidden with default headers values +func NewObjectsUpdateForbidden() *ObjectsUpdateForbidden { + return &ObjectsUpdateForbidden{} +} + +/* +ObjectsUpdateForbidden describes a response with status code 403, with default header values. 
+ +Forbidden +*/ +type ObjectsUpdateForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this objects update forbidden response has a 2xx status code +func (o *ObjectsUpdateForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects update forbidden response has a 3xx status code +func (o *ObjectsUpdateForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects update forbidden response has a 4xx status code +func (o *ObjectsUpdateForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects update forbidden response has a 5xx status code +func (o *ObjectsUpdateForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this objects update forbidden response a status code equal to that given +func (o *ObjectsUpdateForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the objects update forbidden response +func (o *ObjectsUpdateForbidden) Code() int { + return 403 +} + +func (o *ObjectsUpdateForbidden) Error() string { + return fmt.Sprintf("[PUT /objects/{id}][%d] objectsUpdateForbidden %+v", 403, o.Payload) +} + +func (o *ObjectsUpdateForbidden) String() string { + return fmt.Sprintf("[PUT /objects/{id}][%d] objectsUpdateForbidden %+v", 403, o.Payload) +} + +func (o *ObjectsUpdateForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ObjectsUpdateForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewObjectsUpdateNotFound creates a ObjectsUpdateNotFound with default headers values +func NewObjectsUpdateNotFound() *ObjectsUpdateNotFound { + return &ObjectsUpdateNotFound{} 
+} + +/* +ObjectsUpdateNotFound describes a response with status code 404, with default header values. + +Successful query result but no resource was found. +*/ +type ObjectsUpdateNotFound struct { +} + +// IsSuccess returns true when this objects update not found response has a 2xx status code +func (o *ObjectsUpdateNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects update not found response has a 3xx status code +func (o *ObjectsUpdateNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects update not found response has a 4xx status code +func (o *ObjectsUpdateNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects update not found response has a 5xx status code +func (o *ObjectsUpdateNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this objects update not found response a status code equal to that given +func (o *ObjectsUpdateNotFound) IsCode(code int) bool { + return code == 404 +} + +// Code gets the status code for the objects update not found response +func (o *ObjectsUpdateNotFound) Code() int { + return 404 +} + +func (o *ObjectsUpdateNotFound) Error() string { + return fmt.Sprintf("[PUT /objects/{id}][%d] objectsUpdateNotFound ", 404) +} + +func (o *ObjectsUpdateNotFound) String() string { + return fmt.Sprintf("[PUT /objects/{id}][%d] objectsUpdateNotFound ", 404) +} + +func (o *ObjectsUpdateNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewObjectsUpdateUnprocessableEntity creates a ObjectsUpdateUnprocessableEntity with default headers values +func NewObjectsUpdateUnprocessableEntity() *ObjectsUpdateUnprocessableEntity { + return &ObjectsUpdateUnprocessableEntity{} +} + +/* +ObjectsUpdateUnprocessableEntity describes a response with status code 422, with default header values. 
+ +Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file? +*/ +type ObjectsUpdateUnprocessableEntity struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this objects update unprocessable entity response has a 2xx status code +func (o *ObjectsUpdateUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects update unprocessable entity response has a 3xx status code +func (o *ObjectsUpdateUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects update unprocessable entity response has a 4xx status code +func (o *ObjectsUpdateUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects update unprocessable entity response has a 5xx status code +func (o *ObjectsUpdateUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this objects update unprocessable entity response a status code equal to that given +func (o *ObjectsUpdateUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + +// Code gets the status code for the objects update unprocessable entity response +func (o *ObjectsUpdateUnprocessableEntity) Code() int { + return 422 +} + +func (o *ObjectsUpdateUnprocessableEntity) Error() string { + return fmt.Sprintf("[PUT /objects/{id}][%d] objectsUpdateUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *ObjectsUpdateUnprocessableEntity) String() string { + return fmt.Sprintf("[PUT /objects/{id}][%d] objectsUpdateUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *ObjectsUpdateUnprocessableEntity) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ObjectsUpdateUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = 
new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewObjectsUpdateInternalServerError creates a ObjectsUpdateInternalServerError with default headers values +func NewObjectsUpdateInternalServerError() *ObjectsUpdateInternalServerError { + return &ObjectsUpdateInternalServerError{} +} + +/* +ObjectsUpdateInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. +*/ +type ObjectsUpdateInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this objects update internal server error response has a 2xx status code +func (o *ObjectsUpdateInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects update internal server error response has a 3xx status code +func (o *ObjectsUpdateInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects update internal server error response has a 4xx status code +func (o *ObjectsUpdateInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this objects update internal server error response has a 5xx status code +func (o *ObjectsUpdateInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this objects update internal server error response a status code equal to that given +func (o *ObjectsUpdateInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the objects update internal server error response +func (o *ObjectsUpdateInternalServerError) Code() int { + return 500 +} + +func (o *ObjectsUpdateInternalServerError) Error() string { + return fmt.Sprintf("[PUT /objects/{id}][%d] 
objectsUpdateInternalServerError %+v", 500, o.Payload) +} + +func (o *ObjectsUpdateInternalServerError) String() string { + return fmt.Sprintf("[PUT /objects/{id}][%d] objectsUpdateInternalServerError %+v", 500, o.Payload) +} + +func (o *ObjectsUpdateInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ObjectsUpdateInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/objects/objects_validate_parameters.go b/platform/dbops/binaries/weaviate-src/client/objects/objects_validate_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..4ee782eaf83c392e0d81afda2170179c5a684991 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/objects/objects_validate_parameters.go @@ -0,0 +1,161 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// NewObjectsValidateParams creates a new ObjectsValidateParams object, +// with the default timeout for this client. 
+// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewObjectsValidateParams() *ObjectsValidateParams { + return &ObjectsValidateParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewObjectsValidateParamsWithTimeout creates a new ObjectsValidateParams object +// with the ability to set a timeout on a request. +func NewObjectsValidateParamsWithTimeout(timeout time.Duration) *ObjectsValidateParams { + return &ObjectsValidateParams{ + timeout: timeout, + } +} + +// NewObjectsValidateParamsWithContext creates a new ObjectsValidateParams object +// with the ability to set a context for a request. +func NewObjectsValidateParamsWithContext(ctx context.Context) *ObjectsValidateParams { + return &ObjectsValidateParams{ + Context: ctx, + } +} + +// NewObjectsValidateParamsWithHTTPClient creates a new ObjectsValidateParams object +// with the ability to set a custom HTTPClient for a request. +func NewObjectsValidateParamsWithHTTPClient(client *http.Client) *ObjectsValidateParams { + return &ObjectsValidateParams{ + HTTPClient: client, + } +} + +/* +ObjectsValidateParams contains all the parameters to send to the API endpoint + + for the objects validate operation. + + Typically these are written to a http.Request. +*/ +type ObjectsValidateParams struct { + + // Body. + Body *models.Object + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the objects validate params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *ObjectsValidateParams) WithDefaults() *ObjectsValidateParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the objects validate params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *ObjectsValidateParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the objects validate params +func (o *ObjectsValidateParams) WithTimeout(timeout time.Duration) *ObjectsValidateParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the objects validate params +func (o *ObjectsValidateParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the objects validate params +func (o *ObjectsValidateParams) WithContext(ctx context.Context) *ObjectsValidateParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the objects validate params +func (o *ObjectsValidateParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the objects validate params +func (o *ObjectsValidateParams) WithHTTPClient(client *http.Client) *ObjectsValidateParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the objects validate params +func (o *ObjectsValidateParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the objects validate params +func (o *ObjectsValidateParams) WithBody(body *models.Object) *ObjectsValidateParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the objects validate params +func (o *ObjectsValidateParams) SetBody(body *models.Object) { + o.Body = body +} + +// WriteToRequest writes these params to a swagger request +func (o *ObjectsValidateParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/objects/objects_validate_responses.go b/platform/dbops/binaries/weaviate-src/client/objects/objects_validate_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..76846970b4dfad8a1655203978b7cf37a366c1dc --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/objects/objects_validate_responses.go @@ -0,0 +1,386 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// ObjectsValidateReader is a Reader for the ObjectsValidate structure. +type ObjectsValidateReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *ObjectsValidateReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewObjectsValidateOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 401: + result := NewObjectsValidateUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewObjectsValidateForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewObjectsValidateUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewObjectsValidateInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewObjectsValidateOK creates a ObjectsValidateOK with default headers values +func NewObjectsValidateOK() *ObjectsValidateOK { + return &ObjectsValidateOK{} +} + +/* +ObjectsValidateOK describes a response with status code 200, with default header values. + +Successfully validated. 
+*/ +type ObjectsValidateOK struct { +} + +// IsSuccess returns true when this objects validate o k response has a 2xx status code +func (o *ObjectsValidateOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this objects validate o k response has a 3xx status code +func (o *ObjectsValidateOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects validate o k response has a 4xx status code +func (o *ObjectsValidateOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this objects validate o k response has a 5xx status code +func (o *ObjectsValidateOK) IsServerError() bool { + return false +} + +// IsCode returns true when this objects validate o k response a status code equal to that given +func (o *ObjectsValidateOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the objects validate o k response +func (o *ObjectsValidateOK) Code() int { + return 200 +} + +func (o *ObjectsValidateOK) Error() string { + return fmt.Sprintf("[POST /objects/validate][%d] objectsValidateOK ", 200) +} + +func (o *ObjectsValidateOK) String() string { + return fmt.Sprintf("[POST /objects/validate][%d] objectsValidateOK ", 200) +} + +func (o *ObjectsValidateOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewObjectsValidateUnauthorized creates a ObjectsValidateUnauthorized with default headers values +func NewObjectsValidateUnauthorized() *ObjectsValidateUnauthorized { + return &ObjectsValidateUnauthorized{} +} + +/* +ObjectsValidateUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. 
+*/ +type ObjectsValidateUnauthorized struct { +} + +// IsSuccess returns true when this objects validate unauthorized response has a 2xx status code +func (o *ObjectsValidateUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects validate unauthorized response has a 3xx status code +func (o *ObjectsValidateUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects validate unauthorized response has a 4xx status code +func (o *ObjectsValidateUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects validate unauthorized response has a 5xx status code +func (o *ObjectsValidateUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this objects validate unauthorized response a status code equal to that given +func (o *ObjectsValidateUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the objects validate unauthorized response +func (o *ObjectsValidateUnauthorized) Code() int { + return 401 +} + +func (o *ObjectsValidateUnauthorized) Error() string { + return fmt.Sprintf("[POST /objects/validate][%d] objectsValidateUnauthorized ", 401) +} + +func (o *ObjectsValidateUnauthorized) String() string { + return fmt.Sprintf("[POST /objects/validate][%d] objectsValidateUnauthorized ", 401) +} + +func (o *ObjectsValidateUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewObjectsValidateForbidden creates a ObjectsValidateForbidden with default headers values +func NewObjectsValidateForbidden() *ObjectsValidateForbidden { + return &ObjectsValidateForbidden{} +} + +/* +ObjectsValidateForbidden describes a response with status code 403, with default header values. 
+ +Forbidden +*/ +type ObjectsValidateForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this objects validate forbidden response has a 2xx status code +func (o *ObjectsValidateForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects validate forbidden response has a 3xx status code +func (o *ObjectsValidateForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects validate forbidden response has a 4xx status code +func (o *ObjectsValidateForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects validate forbidden response has a 5xx status code +func (o *ObjectsValidateForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this objects validate forbidden response a status code equal to that given +func (o *ObjectsValidateForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the objects validate forbidden response +func (o *ObjectsValidateForbidden) Code() int { + return 403 +} + +func (o *ObjectsValidateForbidden) Error() string { + return fmt.Sprintf("[POST /objects/validate][%d] objectsValidateForbidden %+v", 403, o.Payload) +} + +func (o *ObjectsValidateForbidden) String() string { + return fmt.Sprintf("[POST /objects/validate][%d] objectsValidateForbidden %+v", 403, o.Payload) +} + +func (o *ObjectsValidateForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ObjectsValidateForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewObjectsValidateUnprocessableEntity creates a ObjectsValidateUnprocessableEntity with default headers values +func 
NewObjectsValidateUnprocessableEntity() *ObjectsValidateUnprocessableEntity { + return &ObjectsValidateUnprocessableEntity{} +} + +/* +ObjectsValidateUnprocessableEntity describes a response with status code 422, with default header values. + +Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file? +*/ +type ObjectsValidateUnprocessableEntity struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this objects validate unprocessable entity response has a 2xx status code +func (o *ObjectsValidateUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects validate unprocessable entity response has a 3xx status code +func (o *ObjectsValidateUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects validate unprocessable entity response has a 4xx status code +func (o *ObjectsValidateUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this objects validate unprocessable entity response has a 5xx status code +func (o *ObjectsValidateUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this objects validate unprocessable entity response a status code equal to that given +func (o *ObjectsValidateUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + +// Code gets the status code for the objects validate unprocessable entity response +func (o *ObjectsValidateUnprocessableEntity) Code() int { + return 422 +} + +func (o *ObjectsValidateUnprocessableEntity) Error() string { + return fmt.Sprintf("[POST /objects/validate][%d] objectsValidateUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *ObjectsValidateUnprocessableEntity) String() string { + return fmt.Sprintf("[POST /objects/validate][%d] objectsValidateUnprocessableEntity %+v", 422, o.Payload) +} + +func (o 
*ObjectsValidateUnprocessableEntity) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ObjectsValidateUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewObjectsValidateInternalServerError creates a ObjectsValidateInternalServerError with default headers values +func NewObjectsValidateInternalServerError() *ObjectsValidateInternalServerError { + return &ObjectsValidateInternalServerError{} +} + +/* +ObjectsValidateInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. +*/ +type ObjectsValidateInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this objects validate internal server error response has a 2xx status code +func (o *ObjectsValidateInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this objects validate internal server error response has a 3xx status code +func (o *ObjectsValidateInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this objects validate internal server error response has a 4xx status code +func (o *ObjectsValidateInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this objects validate internal server error response has a 5xx status code +func (o *ObjectsValidateInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this objects validate internal server error response a status code equal to that given +func (o *ObjectsValidateInternalServerError) IsCode(code int) 
bool { + return code == 500 +} + +// Code gets the status code for the objects validate internal server error response +func (o *ObjectsValidateInternalServerError) Code() int { + return 500 +} + +func (o *ObjectsValidateInternalServerError) Error() string { + return fmt.Sprintf("[POST /objects/validate][%d] objectsValidateInternalServerError %+v", 500, o.Payload) +} + +func (o *ObjectsValidateInternalServerError) String() string { + return fmt.Sprintf("[POST /objects/validate][%d] objectsValidateInternalServerError %+v", 500, o.Payload) +} + +func (o *ObjectsValidateInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ObjectsValidateInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/operations/operations_client.go b/platform/dbops/binaries/weaviate-src/client/operations/operations_client.go new file mode 100644 index 0000000000000000000000000000000000000000..e893958b87ad1425fcfad837554f781cb1c12c14 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/operations/operations_client.go @@ -0,0 +1,179 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" +) + +// New creates a new operations API client. +func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService { + return &Client{transport: transport, formats: formats} +} + +/* +Client for operations API +*/ +type Client struct { + transport runtime.ClientTransport + formats strfmt.Registry +} + +// ClientOption is the option for Client methods +type ClientOption func(*runtime.ClientOperation) + +// ClientService is the interface for Client methods +type ClientService interface { + WeaviateRoot(params *WeaviateRootParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*WeaviateRootOK, error) + + WeaviateWellknownLiveness(params *WeaviateWellknownLivenessParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*WeaviateWellknownLivenessOK, error) + + WeaviateWellknownReadiness(params *WeaviateWellknownReadinessParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*WeaviateWellknownReadinessOK, error) + + SetTransport(transport runtime.ClientTransport) +} + +/* +WeaviateRoot lists available endpoints + +Get links to other endpoints to help discover the REST API +*/ +func (a *Client) WeaviateRoot(params *WeaviateRootParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*WeaviateRootOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewWeaviateRootParams() + } + op := &runtime.ClientOperation{ + ID: "weaviate.root", + Method: "GET", + PathPattern: "/", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &WeaviateRootReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range 
opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*WeaviateRootOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for weaviate.root: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +WeaviateWellknownLiveness gets application liveness + +Determines whether the application is alive. Can be used for kubernetes liveness probe +*/ +func (a *Client) WeaviateWellknownLiveness(params *WeaviateWellknownLivenessParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*WeaviateWellknownLivenessOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewWeaviateWellknownLivenessParams() + } + op := &runtime.ClientOperation{ + ID: "weaviate.wellknown.liveness", + Method: "GET", + PathPattern: "/.well-known/live", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &WeaviateWellknownLivenessReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*WeaviateWellknownLivenessOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for weaviate.wellknown.liveness: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +WeaviateWellknownReadiness gets application readiness + +Determines whether the application is ready to receive traffic. Can be used for kubernetes readiness probe. +*/ +func (a *Client) WeaviateWellknownReadiness(params *WeaviateWellknownReadinessParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*WeaviateWellknownReadinessOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewWeaviateWellknownReadinessParams() + } + op := &runtime.ClientOperation{ + ID: "weaviate.wellknown.readiness", + Method: "GET", + PathPattern: "/.well-known/ready", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &WeaviateWellknownReadinessReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*WeaviateWellknownReadinessOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for weaviate.wellknown.readiness: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + panic(msg) +} + +// SetTransport changes the transport on the client +func (a *Client) SetTransport(transport runtime.ClientTransport) { + a.transport = transport +} diff --git a/platform/dbops/binaries/weaviate-src/client/operations/weaviate_root_parameters.go b/platform/dbops/binaries/weaviate-src/client/operations/weaviate_root_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..705a86205246cb31e6b9c434044f5cc2dad9bb0d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/operations/weaviate_root_parameters.go @@ -0,0 +1,139 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewWeaviateRootParams creates a new WeaviateRootParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewWeaviateRootParams() *WeaviateRootParams { + return &WeaviateRootParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewWeaviateRootParamsWithTimeout creates a new WeaviateRootParams object +// with the ability to set a timeout on a request. 
+func NewWeaviateRootParamsWithTimeout(timeout time.Duration) *WeaviateRootParams { + return &WeaviateRootParams{ + timeout: timeout, + } +} + +// NewWeaviateRootParamsWithContext creates a new WeaviateRootParams object +// with the ability to set a context for a request. +func NewWeaviateRootParamsWithContext(ctx context.Context) *WeaviateRootParams { + return &WeaviateRootParams{ + Context: ctx, + } +} + +// NewWeaviateRootParamsWithHTTPClient creates a new WeaviateRootParams object +// with the ability to set a custom HTTPClient for a request. +func NewWeaviateRootParamsWithHTTPClient(client *http.Client) *WeaviateRootParams { + return &WeaviateRootParams{ + HTTPClient: client, + } +} + +/* +WeaviateRootParams contains all the parameters to send to the API endpoint + + for the weaviate root operation. + + Typically these are written to a http.Request. +*/ +type WeaviateRootParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the weaviate root params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *WeaviateRootParams) WithDefaults() *WeaviateRootParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the weaviate root params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *WeaviateRootParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the weaviate root params +func (o *WeaviateRootParams) WithTimeout(timeout time.Duration) *WeaviateRootParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the weaviate root params +func (o *WeaviateRootParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the weaviate root params +func (o *WeaviateRootParams) WithContext(ctx context.Context) *WeaviateRootParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the weaviate root params +func (o *WeaviateRootParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the weaviate root params +func (o *WeaviateRootParams) WithHTTPClient(client *http.Client) *WeaviateRootParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the weaviate root params +func (o *WeaviateRootParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *WeaviateRootParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/operations/weaviate_root_responses.go b/platform/dbops/binaries/weaviate-src/client/operations/weaviate_root_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..b3edc10e5dd0f8ee15b3c7aa761e23e569c9b88f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/operations/weaviate_root_responses.go @@ -0,0 +1,220 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "fmt" + "io" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + + "github.com/weaviate/weaviate/entities/models" +) + +// WeaviateRootReader is a Reader for the WeaviateRoot structure. +type WeaviateRootReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *WeaviateRootReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewWeaviateRootOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewWeaviateRootOK creates a WeaviateRootOK with default headers values +func NewWeaviateRootOK() *WeaviateRootOK { + return &WeaviateRootOK{} +} + +/* +WeaviateRootOK describes a response with status code 200, with default header values. + +Weaviate is alive and ready to serve content +*/ +type WeaviateRootOK struct { + Payload *WeaviateRootOKBody +} + +// IsSuccess returns true when this weaviate root o k response has a 2xx status code +func (o *WeaviateRootOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this weaviate root o k response has a 3xx status code +func (o *WeaviateRootOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this weaviate root o k response has a 4xx status code +func (o *WeaviateRootOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this weaviate root o k response has a 5xx status code +func (o *WeaviateRootOK) IsServerError() bool { + return false +} + +// IsCode returns true when this weaviate root o k response a status code equal to that given +func (o *WeaviateRootOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the weaviate root o k response +func (o *WeaviateRootOK) Code() int { + return 200 +} + +func (o *WeaviateRootOK) Error() string { + return fmt.Sprintf("[GET /][%d] weaviateRootOK %+v", 200, o.Payload) +} + +func (o *WeaviateRootOK) String() string { + return fmt.Sprintf("[GET /][%d] weaviateRootOK %+v", 
200, o.Payload) +} + +func (o *WeaviateRootOK) GetPayload() *WeaviateRootOKBody { + return o.Payload +} + +func (o *WeaviateRootOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(WeaviateRootOKBody) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +/* +WeaviateRootOKBody weaviate root o k body +swagger:model WeaviateRootOKBody +*/ +type WeaviateRootOKBody struct { + + // links + Links []*models.Link `json:"links"` +} + +// Validate validates this weaviate root o k body +func (o *WeaviateRootOKBody) Validate(formats strfmt.Registry) error { + var res []error + + if err := o.validateLinks(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (o *WeaviateRootOKBody) validateLinks(formats strfmt.Registry) error { + if swag.IsZero(o.Links) { // not required + return nil + } + + for i := 0; i < len(o.Links); i++ { + if swag.IsZero(o.Links[i]) { // not required + continue + } + + if o.Links[i] != nil { + if err := o.Links[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("weaviateRootOK" + "." + "links" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("weaviateRootOK" + "." + "links" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// ContextValidate validate this weaviate root o k body based on the context it is used +func (o *WeaviateRootOKBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := o.contextValidateLinks(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (o *WeaviateRootOKBody) contextValidateLinks(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(o.Links); i++ { + + if o.Links[i] != nil { + if err := o.Links[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("weaviateRootOK" + "." + "links" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("weaviateRootOK" + "." + "links" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (o *WeaviateRootOKBody) MarshalBinary() ([]byte, error) { + if o == nil { + return nil, nil + } + return swag.WriteJSON(o) +} + +// UnmarshalBinary interface implementation +func (o *WeaviateRootOKBody) UnmarshalBinary(b []byte) error { + var res WeaviateRootOKBody + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *o = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/operations/weaviate_wellknown_liveness_parameters.go b/platform/dbops/binaries/weaviate-src/client/operations/weaviate_wellknown_liveness_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..2d3071628c8dd05832ac7422211786c825977033 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/operations/weaviate_wellknown_liveness_parameters.go @@ -0,0 +1,139 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewWeaviateWellknownLivenessParams creates a new WeaviateWellknownLivenessParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewWeaviateWellknownLivenessParams() *WeaviateWellknownLivenessParams { + return &WeaviateWellknownLivenessParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewWeaviateWellknownLivenessParamsWithTimeout creates a new WeaviateWellknownLivenessParams object +// with the ability to set a timeout on a request. +func NewWeaviateWellknownLivenessParamsWithTimeout(timeout time.Duration) *WeaviateWellknownLivenessParams { + return &WeaviateWellknownLivenessParams{ + timeout: timeout, + } +} + +// NewWeaviateWellknownLivenessParamsWithContext creates a new WeaviateWellknownLivenessParams object +// with the ability to set a context for a request. +func NewWeaviateWellknownLivenessParamsWithContext(ctx context.Context) *WeaviateWellknownLivenessParams { + return &WeaviateWellknownLivenessParams{ + Context: ctx, + } +} + +// NewWeaviateWellknownLivenessParamsWithHTTPClient creates a new WeaviateWellknownLivenessParams object +// with the ability to set a custom HTTPClient for a request. +func NewWeaviateWellknownLivenessParamsWithHTTPClient(client *http.Client) *WeaviateWellknownLivenessParams { + return &WeaviateWellknownLivenessParams{ + HTTPClient: client, + } +} + +/* +WeaviateWellknownLivenessParams contains all the parameters to send to the API endpoint + + for the weaviate wellknown liveness operation. + + Typically these are written to a http.Request. 
+*/ +type WeaviateWellknownLivenessParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the weaviate wellknown liveness params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *WeaviateWellknownLivenessParams) WithDefaults() *WeaviateWellknownLivenessParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the weaviate wellknown liveness params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *WeaviateWellknownLivenessParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the weaviate wellknown liveness params +func (o *WeaviateWellknownLivenessParams) WithTimeout(timeout time.Duration) *WeaviateWellknownLivenessParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the weaviate wellknown liveness params +func (o *WeaviateWellknownLivenessParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the weaviate wellknown liveness params +func (o *WeaviateWellknownLivenessParams) WithContext(ctx context.Context) *WeaviateWellknownLivenessParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the weaviate wellknown liveness params +func (o *WeaviateWellknownLivenessParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the weaviate wellknown liveness params +func (o *WeaviateWellknownLivenessParams) WithHTTPClient(client *http.Client) *WeaviateWellknownLivenessParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the weaviate wellknown liveness params +func (o *WeaviateWellknownLivenessParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger 
request +func (o *WeaviateWellknownLivenessParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/operations/weaviate_wellknown_liveness_responses.go b/platform/dbops/binaries/weaviate-src/client/operations/weaviate_wellknown_liveness_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..ddd7fd4813ddb847e5f2154a38f358c3e02f5e6d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/operations/weaviate_wellknown_liveness_responses.go @@ -0,0 +1,99 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" +) + +// WeaviateWellknownLivenessReader is a Reader for the WeaviateWellknownLiveness structure. +type WeaviateWellknownLivenessReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *WeaviateWellknownLivenessReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewWeaviateWellknownLivenessOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewWeaviateWellknownLivenessOK creates a WeaviateWellknownLivenessOK with default headers values +func NewWeaviateWellknownLivenessOK() *WeaviateWellknownLivenessOK { + return &WeaviateWellknownLivenessOK{} +} + +/* +WeaviateWellknownLivenessOK describes a response with status code 200, with default header values. + +The application is able to respond to HTTP requests +*/ +type WeaviateWellknownLivenessOK struct { +} + +// IsSuccess returns true when this weaviate wellknown liveness o k response has a 2xx status code +func (o *WeaviateWellknownLivenessOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this weaviate wellknown liveness o k response has a 3xx status code +func (o *WeaviateWellknownLivenessOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this weaviate wellknown liveness o k response has a 4xx status code +func (o *WeaviateWellknownLivenessOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this weaviate wellknown liveness o k response has a 5xx status code +func (o *WeaviateWellknownLivenessOK) IsServerError() bool { + return false +} + +// IsCode returns true when this weaviate wellknown liveness o k response a status code equal to that given +func (o *WeaviateWellknownLivenessOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the weaviate wellknown liveness o k response +func (o 
*WeaviateWellknownLivenessOK) Code() int { + return 200 +} + +func (o *WeaviateWellknownLivenessOK) Error() string { + return fmt.Sprintf("[GET /.well-known/live][%d] weaviateWellknownLivenessOK ", 200) +} + +func (o *WeaviateWellknownLivenessOK) String() string { + return fmt.Sprintf("[GET /.well-known/live][%d] weaviateWellknownLivenessOK ", 200) +} + +func (o *WeaviateWellknownLivenessOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/operations/weaviate_wellknown_readiness_parameters.go b/platform/dbops/binaries/weaviate-src/client/operations/weaviate_wellknown_readiness_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..23dea64281651727a9723ea91c4aa2f13eed6756 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/operations/weaviate_wellknown_readiness_parameters.go @@ -0,0 +1,139 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewWeaviateWellknownReadinessParams creates a new WeaviateWellknownReadinessParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. 
+func NewWeaviateWellknownReadinessParams() *WeaviateWellknownReadinessParams { + return &WeaviateWellknownReadinessParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewWeaviateWellknownReadinessParamsWithTimeout creates a new WeaviateWellknownReadinessParams object +// with the ability to set a timeout on a request. +func NewWeaviateWellknownReadinessParamsWithTimeout(timeout time.Duration) *WeaviateWellknownReadinessParams { + return &WeaviateWellknownReadinessParams{ + timeout: timeout, + } +} + +// NewWeaviateWellknownReadinessParamsWithContext creates a new WeaviateWellknownReadinessParams object +// with the ability to set a context for a request. +func NewWeaviateWellknownReadinessParamsWithContext(ctx context.Context) *WeaviateWellknownReadinessParams { + return &WeaviateWellknownReadinessParams{ + Context: ctx, + } +} + +// NewWeaviateWellknownReadinessParamsWithHTTPClient creates a new WeaviateWellknownReadinessParams object +// with the ability to set a custom HTTPClient for a request. +func NewWeaviateWellknownReadinessParamsWithHTTPClient(client *http.Client) *WeaviateWellknownReadinessParams { + return &WeaviateWellknownReadinessParams{ + HTTPClient: client, + } +} + +/* +WeaviateWellknownReadinessParams contains all the parameters to send to the API endpoint + + for the weaviate wellknown readiness operation. + + Typically these are written to a http.Request. +*/ +type WeaviateWellknownReadinessParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the weaviate wellknown readiness params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *WeaviateWellknownReadinessParams) WithDefaults() *WeaviateWellknownReadinessParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the weaviate wellknown readiness params (not the query body). 
+// +// All values with no default are reset to their zero value. +func (o *WeaviateWellknownReadinessParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the weaviate wellknown readiness params +func (o *WeaviateWellknownReadinessParams) WithTimeout(timeout time.Duration) *WeaviateWellknownReadinessParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the weaviate wellknown readiness params +func (o *WeaviateWellknownReadinessParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the weaviate wellknown readiness params +func (o *WeaviateWellknownReadinessParams) WithContext(ctx context.Context) *WeaviateWellknownReadinessParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the weaviate wellknown readiness params +func (o *WeaviateWellknownReadinessParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the weaviate wellknown readiness params +func (o *WeaviateWellknownReadinessParams) WithHTTPClient(client *http.Client) *WeaviateWellknownReadinessParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the weaviate wellknown readiness params +func (o *WeaviateWellknownReadinessParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *WeaviateWellknownReadinessParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/operations/weaviate_wellknown_readiness_responses.go b/platform/dbops/binaries/weaviate-src/client/operations/weaviate_wellknown_readiness_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..2e567698486ae074fd2c6d36d1af1aca430f0995 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/operations/weaviate_wellknown_readiness_responses.go @@ -0,0 +1,161 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" +) + +// WeaviateWellknownReadinessReader is a Reader for the WeaviateWellknownReadiness structure. +type WeaviateWellknownReadinessReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *WeaviateWellknownReadinessReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewWeaviateWellknownReadinessOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 503: + result := NewWeaviateWellknownReadinessServiceUnavailable() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewWeaviateWellknownReadinessOK creates a WeaviateWellknownReadinessOK with default headers values +func NewWeaviateWellknownReadinessOK() *WeaviateWellknownReadinessOK { + return &WeaviateWellknownReadinessOK{} +} + +/* +WeaviateWellknownReadinessOK describes a response with status code 200, with default header values. + +The application has completed its start-up routine and is ready to accept traffic. 
+*/ +type WeaviateWellknownReadinessOK struct { +} + +// IsSuccess returns true when this weaviate wellknown readiness o k response has a 2xx status code +func (o *WeaviateWellknownReadinessOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this weaviate wellknown readiness o k response has a 3xx status code +func (o *WeaviateWellknownReadinessOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this weaviate wellknown readiness o k response has a 4xx status code +func (o *WeaviateWellknownReadinessOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this weaviate wellknown readiness o k response has a 5xx status code +func (o *WeaviateWellknownReadinessOK) IsServerError() bool { + return false +} + +// IsCode returns true when this weaviate wellknown readiness o k response a status code equal to that given +func (o *WeaviateWellknownReadinessOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the weaviate wellknown readiness o k response +func (o *WeaviateWellknownReadinessOK) Code() int { + return 200 +} + +func (o *WeaviateWellknownReadinessOK) Error() string { + return fmt.Sprintf("[GET /.well-known/ready][%d] weaviateWellknownReadinessOK ", 200) +} + +func (o *WeaviateWellknownReadinessOK) String() string { + return fmt.Sprintf("[GET /.well-known/ready][%d] weaviateWellknownReadinessOK ", 200) +} + +func (o *WeaviateWellknownReadinessOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewWeaviateWellknownReadinessServiceUnavailable creates a WeaviateWellknownReadinessServiceUnavailable with default headers values +func NewWeaviateWellknownReadinessServiceUnavailable() *WeaviateWellknownReadinessServiceUnavailable { + return &WeaviateWellknownReadinessServiceUnavailable{} +} + +/* +WeaviateWellknownReadinessServiceUnavailable describes a response with status code 
503, with default header values. + +The application is currently not able to serve traffic. If other horizontal replicas of weaviate are available and they are capable of receiving traffic, all traffic should be redirected there instead. +*/ +type WeaviateWellknownReadinessServiceUnavailable struct { +} + +// IsSuccess returns true when this weaviate wellknown readiness service unavailable response has a 2xx status code +func (o *WeaviateWellknownReadinessServiceUnavailable) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this weaviate wellknown readiness service unavailable response has a 3xx status code +func (o *WeaviateWellknownReadinessServiceUnavailable) IsRedirect() bool { + return false +} + +// IsClientError returns true when this weaviate wellknown readiness service unavailable response has a 4xx status code +func (o *WeaviateWellknownReadinessServiceUnavailable) IsClientError() bool { + return false +} + +// IsServerError returns true when this weaviate wellknown readiness service unavailable response has a 5xx status code +func (o *WeaviateWellknownReadinessServiceUnavailable) IsServerError() bool { + return true +} + +// IsCode returns true when this weaviate wellknown readiness service unavailable response a status code equal to that given +func (o *WeaviateWellknownReadinessServiceUnavailable) IsCode(code int) bool { + return code == 503 +} + +// Code gets the status code for the weaviate wellknown readiness service unavailable response +func (o *WeaviateWellknownReadinessServiceUnavailable) Code() int { + return 503 +} + +func (o *WeaviateWellknownReadinessServiceUnavailable) Error() string { + return fmt.Sprintf("[GET /.well-known/ready][%d] weaviateWellknownReadinessServiceUnavailable ", 503) +} + +func (o *WeaviateWellknownReadinessServiceUnavailable) String() string { + return fmt.Sprintf("[GET /.well-known/ready][%d] weaviateWellknownReadinessServiceUnavailable ", 503) +} + +func (o 
*WeaviateWellknownReadinessServiceUnavailable) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/replication/cancel_replication_parameters.go b/platform/dbops/binaries/weaviate-src/client/replication/cancel_replication_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..f7de98d3b509fa73081b2487d5f8a81ec25573bb --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/replication/cancel_replication_parameters.go @@ -0,0 +1,164 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package replication + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewCancelReplicationParams creates a new CancelReplicationParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewCancelReplicationParams() *CancelReplicationParams { + return &CancelReplicationParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewCancelReplicationParamsWithTimeout creates a new CancelReplicationParams object +// with the ability to set a timeout on a request. 
+func NewCancelReplicationParamsWithTimeout(timeout time.Duration) *CancelReplicationParams { + return &CancelReplicationParams{ + timeout: timeout, + } +} + +// NewCancelReplicationParamsWithContext creates a new CancelReplicationParams object +// with the ability to set a context for a request. +func NewCancelReplicationParamsWithContext(ctx context.Context) *CancelReplicationParams { + return &CancelReplicationParams{ + Context: ctx, + } +} + +// NewCancelReplicationParamsWithHTTPClient creates a new CancelReplicationParams object +// with the ability to set a custom HTTPClient for a request. +func NewCancelReplicationParamsWithHTTPClient(client *http.Client) *CancelReplicationParams { + return &CancelReplicationParams{ + HTTPClient: client, + } +} + +/* +CancelReplicationParams contains all the parameters to send to the API endpoint + + for the cancel replication operation. + + Typically these are written to a http.Request. +*/ +type CancelReplicationParams struct { + + /* ID. + + The ID of the replication operation to cancel. + + Format: uuid + */ + ID strfmt.UUID + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the cancel replication params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *CancelReplicationParams) WithDefaults() *CancelReplicationParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the cancel replication params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *CancelReplicationParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the cancel replication params +func (o *CancelReplicationParams) WithTimeout(timeout time.Duration) *CancelReplicationParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the cancel replication params +func (o *CancelReplicationParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the cancel replication params +func (o *CancelReplicationParams) WithContext(ctx context.Context) *CancelReplicationParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the cancel replication params +func (o *CancelReplicationParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the cancel replication params +func (o *CancelReplicationParams) WithHTTPClient(client *http.Client) *CancelReplicationParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the cancel replication params +func (o *CancelReplicationParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithID adds the id to the cancel replication params +func (o *CancelReplicationParams) WithID(id strfmt.UUID) *CancelReplicationParams { + o.SetID(id) + return o +} + +// SetID adds the id to the cancel replication params +func (o *CancelReplicationParams) SetID(id strfmt.UUID) { + o.ID = id +} + +// WriteToRequest writes these params to a swagger request +func (o *CancelReplicationParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param id + if err := r.SetPathParam("id", o.ID.String()); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/replication/cancel_replication_responses.go b/platform/dbops/binaries/weaviate-src/client/replication/cancel_replication_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..04abf32e3766dabb6420d9dfa8c1df07d31c9d8c --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/replication/cancel_replication_responses.go @@ -0,0 +1,596 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package replication + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// CancelReplicationReader is a Reader for the CancelReplication structure. +type CancelReplicationReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *CancelReplicationReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 204: + result := NewCancelReplicationNoContent() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 401: + result := NewCancelReplicationUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewCancelReplicationForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 404: + result := NewCancelReplicationNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 409: + result := NewCancelReplicationConflict() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewCancelReplicationUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewCancelReplicationInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 501: + result := NewCancelReplicationNotImplemented() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewCancelReplicationNoContent creates a CancelReplicationNoContent with default headers values +func NewCancelReplicationNoContent() *CancelReplicationNoContent { + return &CancelReplicationNoContent{} +} + +/* 
+CancelReplicationNoContent describes a response with status code 204, with default header values. + +Successfully cancelled. +*/ +type CancelReplicationNoContent struct { +} + +// IsSuccess returns true when this cancel replication no content response has a 2xx status code +func (o *CancelReplicationNoContent) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this cancel replication no content response has a 3xx status code +func (o *CancelReplicationNoContent) IsRedirect() bool { + return false +} + +// IsClientError returns true when this cancel replication no content response has a 4xx status code +func (o *CancelReplicationNoContent) IsClientError() bool { + return false +} + +// IsServerError returns true when this cancel replication no content response has a 5xx status code +func (o *CancelReplicationNoContent) IsServerError() bool { + return false +} + +// IsCode returns true when this cancel replication no content response a status code equal to that given +func (o *CancelReplicationNoContent) IsCode(code int) bool { + return code == 204 +} + +// Code gets the status code for the cancel replication no content response +func (o *CancelReplicationNoContent) Code() int { + return 204 +} + +func (o *CancelReplicationNoContent) Error() string { + return fmt.Sprintf("[POST /replication/replicate/{id}/cancel][%d] cancelReplicationNoContent ", 204) +} + +func (o *CancelReplicationNoContent) String() string { + return fmt.Sprintf("[POST /replication/replicate/{id}/cancel][%d] cancelReplicationNoContent ", 204) +} + +func (o *CancelReplicationNoContent) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewCancelReplicationUnauthorized creates a CancelReplicationUnauthorized with default headers values +func NewCancelReplicationUnauthorized() *CancelReplicationUnauthorized { + return &CancelReplicationUnauthorized{} +} + +/* +CancelReplicationUnauthorized describes a 
response with status code 401, with default header values. + +Unauthorized or invalid credentials. +*/ +type CancelReplicationUnauthorized struct { +} + +// IsSuccess returns true when this cancel replication unauthorized response has a 2xx status code +func (o *CancelReplicationUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this cancel replication unauthorized response has a 3xx status code +func (o *CancelReplicationUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this cancel replication unauthorized response has a 4xx status code +func (o *CancelReplicationUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this cancel replication unauthorized response has a 5xx status code +func (o *CancelReplicationUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this cancel replication unauthorized response a status code equal to that given +func (o *CancelReplicationUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the cancel replication unauthorized response +func (o *CancelReplicationUnauthorized) Code() int { + return 401 +} + +func (o *CancelReplicationUnauthorized) Error() string { + return fmt.Sprintf("[POST /replication/replicate/{id}/cancel][%d] cancelReplicationUnauthorized ", 401) +} + +func (o *CancelReplicationUnauthorized) String() string { + return fmt.Sprintf("[POST /replication/replicate/{id}/cancel][%d] cancelReplicationUnauthorized ", 401) +} + +func (o *CancelReplicationUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewCancelReplicationForbidden creates a CancelReplicationForbidden with default headers values +func NewCancelReplicationForbidden() *CancelReplicationForbidden { + return &CancelReplicationForbidden{} +} + +/* +CancelReplicationForbidden describes 
a response with status code 403, with default header values. + +Forbidden +*/ +type CancelReplicationForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this cancel replication forbidden response has a 2xx status code +func (o *CancelReplicationForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this cancel replication forbidden response has a 3xx status code +func (o *CancelReplicationForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this cancel replication forbidden response has a 4xx status code +func (o *CancelReplicationForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this cancel replication forbidden response has a 5xx status code +func (o *CancelReplicationForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this cancel replication forbidden response a status code equal to that given +func (o *CancelReplicationForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the cancel replication forbidden response +func (o *CancelReplicationForbidden) Code() int { + return 403 +} + +func (o *CancelReplicationForbidden) Error() string { + return fmt.Sprintf("[POST /replication/replicate/{id}/cancel][%d] cancelReplicationForbidden %+v", 403, o.Payload) +} + +func (o *CancelReplicationForbidden) String() string { + return fmt.Sprintf("[POST /replication/replicate/{id}/cancel][%d] cancelReplicationForbidden %+v", 403, o.Payload) +} + +func (o *CancelReplicationForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *CancelReplicationForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + 
+// NewCancelReplicationNotFound creates a CancelReplicationNotFound with default headers values +func NewCancelReplicationNotFound() *CancelReplicationNotFound { + return &CancelReplicationNotFound{} +} + +/* +CancelReplicationNotFound describes a response with status code 404, with default header values. + +Shard replica operation not found. +*/ +type CancelReplicationNotFound struct { +} + +// IsSuccess returns true when this cancel replication not found response has a 2xx status code +func (o *CancelReplicationNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this cancel replication not found response has a 3xx status code +func (o *CancelReplicationNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this cancel replication not found response has a 4xx status code +func (o *CancelReplicationNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this cancel replication not found response has a 5xx status code +func (o *CancelReplicationNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this cancel replication not found response a status code equal to that given +func (o *CancelReplicationNotFound) IsCode(code int) bool { + return code == 404 +} + +// Code gets the status code for the cancel replication not found response +func (o *CancelReplicationNotFound) Code() int { + return 404 +} + +func (o *CancelReplicationNotFound) Error() string { + return fmt.Sprintf("[POST /replication/replicate/{id}/cancel][%d] cancelReplicationNotFound ", 404) +} + +func (o *CancelReplicationNotFound) String() string { + return fmt.Sprintf("[POST /replication/replicate/{id}/cancel][%d] cancelReplicationNotFound ", 404) +} + +func (o *CancelReplicationNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewCancelReplicationConflict creates a CancelReplicationConflict 
with default headers values +func NewCancelReplicationConflict() *CancelReplicationConflict { + return &CancelReplicationConflict{} +} + +/* +CancelReplicationConflict describes a response with status code 409, with default header values. + +The operation is not in a cancellable state, e.g. it is READY or is a MOVE op in the DEHYDRATING state. +*/ +type CancelReplicationConflict struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this cancel replication conflict response has a 2xx status code +func (o *CancelReplicationConflict) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this cancel replication conflict response has a 3xx status code +func (o *CancelReplicationConflict) IsRedirect() bool { + return false +} + +// IsClientError returns true when this cancel replication conflict response has a 4xx status code +func (o *CancelReplicationConflict) IsClientError() bool { + return true +} + +// IsServerError returns true when this cancel replication conflict response has a 5xx status code +func (o *CancelReplicationConflict) IsServerError() bool { + return false +} + +// IsCode returns true when this cancel replication conflict response a status code equal to that given +func (o *CancelReplicationConflict) IsCode(code int) bool { + return code == 409 +} + +// Code gets the status code for the cancel replication conflict response +func (o *CancelReplicationConflict) Code() int { + return 409 +} + +func (o *CancelReplicationConflict) Error() string { + return fmt.Sprintf("[POST /replication/replicate/{id}/cancel][%d] cancelReplicationConflict %+v", 409, o.Payload) +} + +func (o *CancelReplicationConflict) String() string { + return fmt.Sprintf("[POST /replication/replicate/{id}/cancel][%d] cancelReplicationConflict %+v", 409, o.Payload) +} + +func (o *CancelReplicationConflict) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *CancelReplicationConflict) readResponse(response runtime.ClientResponse, 
consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCancelReplicationUnprocessableEntity creates a CancelReplicationUnprocessableEntity with default headers values +func NewCancelReplicationUnprocessableEntity() *CancelReplicationUnprocessableEntity { + return &CancelReplicationUnprocessableEntity{} +} + +/* +CancelReplicationUnprocessableEntity describes a response with status code 422, with default header values. + +Request body is well-formed (i.e., syntactically correct), but semantically erroneous. +*/ +type CancelReplicationUnprocessableEntity struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this cancel replication unprocessable entity response has a 2xx status code +func (o *CancelReplicationUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this cancel replication unprocessable entity response has a 3xx status code +func (o *CancelReplicationUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this cancel replication unprocessable entity response has a 4xx status code +func (o *CancelReplicationUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this cancel replication unprocessable entity response has a 5xx status code +func (o *CancelReplicationUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this cancel replication unprocessable entity response a status code equal to that given +func (o *CancelReplicationUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + +// Code gets the status code for the cancel replication unprocessable entity response +func (o *CancelReplicationUnprocessableEntity) Code() int { + return 422 +} + +func (o 
*CancelReplicationUnprocessableEntity) Error() string { + return fmt.Sprintf("[POST /replication/replicate/{id}/cancel][%d] cancelReplicationUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *CancelReplicationUnprocessableEntity) String() string { + return fmt.Sprintf("[POST /replication/replicate/{id}/cancel][%d] cancelReplicationUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *CancelReplicationUnprocessableEntity) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *CancelReplicationUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCancelReplicationInternalServerError creates a CancelReplicationInternalServerError with default headers values +func NewCancelReplicationInternalServerError() *CancelReplicationInternalServerError { + return &CancelReplicationInternalServerError{} +} + +/* +CancelReplicationInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+*/ +type CancelReplicationInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this cancel replication internal server error response has a 2xx status code +func (o *CancelReplicationInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this cancel replication internal server error response has a 3xx status code +func (o *CancelReplicationInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this cancel replication internal server error response has a 4xx status code +func (o *CancelReplicationInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this cancel replication internal server error response has a 5xx status code +func (o *CancelReplicationInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this cancel replication internal server error response a status code equal to that given +func (o *CancelReplicationInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the cancel replication internal server error response +func (o *CancelReplicationInternalServerError) Code() int { + return 500 +} + +func (o *CancelReplicationInternalServerError) Error() string { + return fmt.Sprintf("[POST /replication/replicate/{id}/cancel][%d] cancelReplicationInternalServerError %+v", 500, o.Payload) +} + +func (o *CancelReplicationInternalServerError) String() string { + return fmt.Sprintf("[POST /replication/replicate/{id}/cancel][%d] cancelReplicationInternalServerError %+v", 500, o.Payload) +} + +func (o *CancelReplicationInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *CancelReplicationInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response 
payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCancelReplicationNotImplemented creates a CancelReplicationNotImplemented with default headers values +func NewCancelReplicationNotImplemented() *CancelReplicationNotImplemented { + return &CancelReplicationNotImplemented{} +} + +/* +CancelReplicationNotImplemented describes a response with status code 501, with default header values. + +Replica movement operations are disabled. +*/ +type CancelReplicationNotImplemented struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this cancel replication not implemented response has a 2xx status code +func (o *CancelReplicationNotImplemented) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this cancel replication not implemented response has a 3xx status code +func (o *CancelReplicationNotImplemented) IsRedirect() bool { + return false +} + +// IsClientError returns true when this cancel replication not implemented response has a 4xx status code +func (o *CancelReplicationNotImplemented) IsClientError() bool { + return false +} + +// IsServerError returns true when this cancel replication not implemented response has a 5xx status code +func (o *CancelReplicationNotImplemented) IsServerError() bool { + return true +} + +// IsCode returns true when this cancel replication not implemented response a status code equal to that given +func (o *CancelReplicationNotImplemented) IsCode(code int) bool { + return code == 501 +} + +// Code gets the status code for the cancel replication not implemented response +func (o *CancelReplicationNotImplemented) Code() int { + return 501 +} + +func (o *CancelReplicationNotImplemented) Error() string { + return fmt.Sprintf("[POST /replication/replicate/{id}/cancel][%d] cancelReplicationNotImplemented %+v", 501, o.Payload) +} + +func (o *CancelReplicationNotImplemented) String() string { + return 
fmt.Sprintf("[POST /replication/replicate/{id}/cancel][%d] cancelReplicationNotImplemented %+v", 501, o.Payload) +} + +func (o *CancelReplicationNotImplemented) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *CancelReplicationNotImplemented) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/replication/delete_all_replications_parameters.go b/platform/dbops/binaries/weaviate-src/client/replication/delete_all_replications_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..709592e3790f50294026b83298a42e687be76177 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/replication/delete_all_replications_parameters.go @@ -0,0 +1,139 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package replication + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewDeleteAllReplicationsParams creates a new DeleteAllReplicationsParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. 
+// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewDeleteAllReplicationsParams() *DeleteAllReplicationsParams { + return &DeleteAllReplicationsParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewDeleteAllReplicationsParamsWithTimeout creates a new DeleteAllReplicationsParams object +// with the ability to set a timeout on a request. +func NewDeleteAllReplicationsParamsWithTimeout(timeout time.Duration) *DeleteAllReplicationsParams { + return &DeleteAllReplicationsParams{ + timeout: timeout, + } +} + +// NewDeleteAllReplicationsParamsWithContext creates a new DeleteAllReplicationsParams object +// with the ability to set a context for a request. +func NewDeleteAllReplicationsParamsWithContext(ctx context.Context) *DeleteAllReplicationsParams { + return &DeleteAllReplicationsParams{ + Context: ctx, + } +} + +// NewDeleteAllReplicationsParamsWithHTTPClient creates a new DeleteAllReplicationsParams object +// with the ability to set a custom HTTPClient for a request. +func NewDeleteAllReplicationsParamsWithHTTPClient(client *http.Client) *DeleteAllReplicationsParams { + return &DeleteAllReplicationsParams{ + HTTPClient: client, + } +} + +/* +DeleteAllReplicationsParams contains all the parameters to send to the API endpoint + + for the delete all replications operation. + + Typically these are written to a http.Request. +*/ +type DeleteAllReplicationsParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the delete all replications params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *DeleteAllReplicationsParams) WithDefaults() *DeleteAllReplicationsParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the delete all replications params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *DeleteAllReplicationsParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the delete all replications params +func (o *DeleteAllReplicationsParams) WithTimeout(timeout time.Duration) *DeleteAllReplicationsParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the delete all replications params +func (o *DeleteAllReplicationsParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the delete all replications params +func (o *DeleteAllReplicationsParams) WithContext(ctx context.Context) *DeleteAllReplicationsParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the delete all replications params +func (o *DeleteAllReplicationsParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the delete all replications params +func (o *DeleteAllReplicationsParams) WithHTTPClient(client *http.Client) *DeleteAllReplicationsParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the delete all replications params +func (o *DeleteAllReplicationsParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *DeleteAllReplicationsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/replication/delete_all_replications_responses.go b/platform/dbops/binaries/weaviate-src/client/replication/delete_all_replications_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..1ba2ed6523b3ad591deff9159c0bcfdf5717d28f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/replication/delete_all_replications_responses.go @@ -0,0 +1,534 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package replication + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// DeleteAllReplicationsReader is a Reader for the DeleteAllReplications structure. +type DeleteAllReplicationsReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *DeleteAllReplicationsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 204: + result := NewDeleteAllReplicationsNoContent() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewDeleteAllReplicationsBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 401: + result := NewDeleteAllReplicationsUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewDeleteAllReplicationsForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewDeleteAllReplicationsUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewDeleteAllReplicationsInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 501: + result := NewDeleteAllReplicationsNotImplemented() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewDeleteAllReplicationsNoContent creates a DeleteAllReplicationsNoContent with default headers values +func NewDeleteAllReplicationsNoContent() *DeleteAllReplicationsNoContent { + return &DeleteAllReplicationsNoContent{} +} + +/* +DeleteAllReplicationsNoContent describes a response with status code 204, with default header values. 
+ +Replication operation registered successfully +*/ +type DeleteAllReplicationsNoContent struct { +} + +// IsSuccess returns true when this delete all replications no content response has a 2xx status code +func (o *DeleteAllReplicationsNoContent) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this delete all replications no content response has a 3xx status code +func (o *DeleteAllReplicationsNoContent) IsRedirect() bool { + return false +} + +// IsClientError returns true when this delete all replications no content response has a 4xx status code +func (o *DeleteAllReplicationsNoContent) IsClientError() bool { + return false +} + +// IsServerError returns true when this delete all replications no content response has a 5xx status code +func (o *DeleteAllReplicationsNoContent) IsServerError() bool { + return false +} + +// IsCode returns true when this delete all replications no content response a status code equal to that given +func (o *DeleteAllReplicationsNoContent) IsCode(code int) bool { + return code == 204 +} + +// Code gets the status code for the delete all replications no content response +func (o *DeleteAllReplicationsNoContent) Code() int { + return 204 +} + +func (o *DeleteAllReplicationsNoContent) Error() string { + return fmt.Sprintf("[DELETE /replication/replicate][%d] deleteAllReplicationsNoContent ", 204) +} + +func (o *DeleteAllReplicationsNoContent) String() string { + return fmt.Sprintf("[DELETE /replication/replicate][%d] deleteAllReplicationsNoContent ", 204) +} + +func (o *DeleteAllReplicationsNoContent) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewDeleteAllReplicationsBadRequest creates a DeleteAllReplicationsBadRequest with default headers values +func NewDeleteAllReplicationsBadRequest() *DeleteAllReplicationsBadRequest { + return &DeleteAllReplicationsBadRequest{} +} + +/* +DeleteAllReplicationsBadRequest describes a 
response with status code 400, with default header values. + +Malformed request. +*/ +type DeleteAllReplicationsBadRequest struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this delete all replications bad request response has a 2xx status code +func (o *DeleteAllReplicationsBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this delete all replications bad request response has a 3xx status code +func (o *DeleteAllReplicationsBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this delete all replications bad request response has a 4xx status code +func (o *DeleteAllReplicationsBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this delete all replications bad request response has a 5xx status code +func (o *DeleteAllReplicationsBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this delete all replications bad request response a status code equal to that given +func (o *DeleteAllReplicationsBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the delete all replications bad request response +func (o *DeleteAllReplicationsBadRequest) Code() int { + return 400 +} + +func (o *DeleteAllReplicationsBadRequest) Error() string { + return fmt.Sprintf("[DELETE /replication/replicate][%d] deleteAllReplicationsBadRequest %+v", 400, o.Payload) +} + +func (o *DeleteAllReplicationsBadRequest) String() string { + return fmt.Sprintf("[DELETE /replication/replicate][%d] deleteAllReplicationsBadRequest %+v", 400, o.Payload) +} + +func (o *DeleteAllReplicationsBadRequest) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *DeleteAllReplicationsBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := 
consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewDeleteAllReplicationsUnauthorized creates a DeleteAllReplicationsUnauthorized with default headers values +func NewDeleteAllReplicationsUnauthorized() *DeleteAllReplicationsUnauthorized { + return &DeleteAllReplicationsUnauthorized{} +} + +/* +DeleteAllReplicationsUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. +*/ +type DeleteAllReplicationsUnauthorized struct { +} + +// IsSuccess returns true when this delete all replications unauthorized response has a 2xx status code +func (o *DeleteAllReplicationsUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this delete all replications unauthorized response has a 3xx status code +func (o *DeleteAllReplicationsUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this delete all replications unauthorized response has a 4xx status code +func (o *DeleteAllReplicationsUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this delete all replications unauthorized response has a 5xx status code +func (o *DeleteAllReplicationsUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this delete all replications unauthorized response a status code equal to that given +func (o *DeleteAllReplicationsUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the delete all replications unauthorized response +func (o *DeleteAllReplicationsUnauthorized) Code() int { + return 401 +} + +func (o *DeleteAllReplicationsUnauthorized) Error() string { + return fmt.Sprintf("[DELETE /replication/replicate][%d] deleteAllReplicationsUnauthorized ", 401) +} + +func (o *DeleteAllReplicationsUnauthorized) String() string { + return fmt.Sprintf("[DELETE /replication/replicate][%d] 
deleteAllReplicationsUnauthorized ", 401) +} + +func (o *DeleteAllReplicationsUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewDeleteAllReplicationsForbidden creates a DeleteAllReplicationsForbidden with default headers values +func NewDeleteAllReplicationsForbidden() *DeleteAllReplicationsForbidden { + return &DeleteAllReplicationsForbidden{} +} + +/* +DeleteAllReplicationsForbidden describes a response with status code 403, with default header values. + +Forbidden +*/ +type DeleteAllReplicationsForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this delete all replications forbidden response has a 2xx status code +func (o *DeleteAllReplicationsForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this delete all replications forbidden response has a 3xx status code +func (o *DeleteAllReplicationsForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this delete all replications forbidden response has a 4xx status code +func (o *DeleteAllReplicationsForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this delete all replications forbidden response has a 5xx status code +func (o *DeleteAllReplicationsForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this delete all replications forbidden response a status code equal to that given +func (o *DeleteAllReplicationsForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the delete all replications forbidden response +func (o *DeleteAllReplicationsForbidden) Code() int { + return 403 +} + +func (o *DeleteAllReplicationsForbidden) Error() string { + return fmt.Sprintf("[DELETE /replication/replicate][%d] deleteAllReplicationsForbidden %+v", 403, o.Payload) +} + +func (o *DeleteAllReplicationsForbidden) String() string { + 
return fmt.Sprintf("[DELETE /replication/replicate][%d] deleteAllReplicationsForbidden %+v", 403, o.Payload) +} + +func (o *DeleteAllReplicationsForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *DeleteAllReplicationsForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewDeleteAllReplicationsUnprocessableEntity creates a DeleteAllReplicationsUnprocessableEntity with default headers values +func NewDeleteAllReplicationsUnprocessableEntity() *DeleteAllReplicationsUnprocessableEntity { + return &DeleteAllReplicationsUnprocessableEntity{} +} + +/* +DeleteAllReplicationsUnprocessableEntity describes a response with status code 422, with default header values. + +Request body is well-formed (i.e., syntactically correct), but semantically erroneous. 
+*/ +type DeleteAllReplicationsUnprocessableEntity struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this delete all replications unprocessable entity response has a 2xx status code +func (o *DeleteAllReplicationsUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this delete all replications unprocessable entity response has a 3xx status code +func (o *DeleteAllReplicationsUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this delete all replications unprocessable entity response has a 4xx status code +func (o *DeleteAllReplicationsUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this delete all replications unprocessable entity response has a 5xx status code +func (o *DeleteAllReplicationsUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this delete all replications unprocessable entity response a status code equal to that given +func (o *DeleteAllReplicationsUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + +// Code gets the status code for the delete all replications unprocessable entity response +func (o *DeleteAllReplicationsUnprocessableEntity) Code() int { + return 422 +} + +func (o *DeleteAllReplicationsUnprocessableEntity) Error() string { + return fmt.Sprintf("[DELETE /replication/replicate][%d] deleteAllReplicationsUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *DeleteAllReplicationsUnprocessableEntity) String() string { + return fmt.Sprintf("[DELETE /replication/replicate][%d] deleteAllReplicationsUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *DeleteAllReplicationsUnprocessableEntity) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *DeleteAllReplicationsUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + 
o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewDeleteAllReplicationsInternalServerError creates a DeleteAllReplicationsInternalServerError with default headers values +func NewDeleteAllReplicationsInternalServerError() *DeleteAllReplicationsInternalServerError { + return &DeleteAllReplicationsInternalServerError{} +} + +/* +DeleteAllReplicationsInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. +*/ +type DeleteAllReplicationsInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this delete all replications internal server error response has a 2xx status code +func (o *DeleteAllReplicationsInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this delete all replications internal server error response has a 3xx status code +func (o *DeleteAllReplicationsInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this delete all replications internal server error response has a 4xx status code +func (o *DeleteAllReplicationsInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this delete all replications internal server error response has a 5xx status code +func (o *DeleteAllReplicationsInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this delete all replications internal server error response a status code equal to that given +func (o *DeleteAllReplicationsInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the delete all replications internal server error response +func (o 
*DeleteAllReplicationsInternalServerError) Code() int { + return 500 +} + +func (o *DeleteAllReplicationsInternalServerError) Error() string { + return fmt.Sprintf("[DELETE /replication/replicate][%d] deleteAllReplicationsInternalServerError %+v", 500, o.Payload) +} + +func (o *DeleteAllReplicationsInternalServerError) String() string { + return fmt.Sprintf("[DELETE /replication/replicate][%d] deleteAllReplicationsInternalServerError %+v", 500, o.Payload) +} + +func (o *DeleteAllReplicationsInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *DeleteAllReplicationsInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewDeleteAllReplicationsNotImplemented creates a DeleteAllReplicationsNotImplemented with default headers values +func NewDeleteAllReplicationsNotImplemented() *DeleteAllReplicationsNotImplemented { + return &DeleteAllReplicationsNotImplemented{} +} + +/* +DeleteAllReplicationsNotImplemented describes a response with status code 501, with default header values. + +Replica movement operations are disabled. 
+*/ +type DeleteAllReplicationsNotImplemented struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this delete all replications not implemented response has a 2xx status code +func (o *DeleteAllReplicationsNotImplemented) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this delete all replications not implemented response has a 3xx status code +func (o *DeleteAllReplicationsNotImplemented) IsRedirect() bool { + return false +} + +// IsClientError returns true when this delete all replications not implemented response has a 4xx status code +func (o *DeleteAllReplicationsNotImplemented) IsClientError() bool { + return false +} + +// IsServerError returns true when this delete all replications not implemented response has a 5xx status code +func (o *DeleteAllReplicationsNotImplemented) IsServerError() bool { + return true +} + +// IsCode returns true when this delete all replications not implemented response a status code equal to that given +func (o *DeleteAllReplicationsNotImplemented) IsCode(code int) bool { + return code == 501 +} + +// Code gets the status code for the delete all replications not implemented response +func (o *DeleteAllReplicationsNotImplemented) Code() int { + return 501 +} + +func (o *DeleteAllReplicationsNotImplemented) Error() string { + return fmt.Sprintf("[DELETE /replication/replicate][%d] deleteAllReplicationsNotImplemented %+v", 501, o.Payload) +} + +func (o *DeleteAllReplicationsNotImplemented) String() string { + return fmt.Sprintf("[DELETE /replication/replicate][%d] deleteAllReplicationsNotImplemented %+v", 501, o.Payload) +} + +func (o *DeleteAllReplicationsNotImplemented) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *DeleteAllReplicationsNotImplemented) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := 
consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/replication/delete_replication_parameters.go b/platform/dbops/binaries/weaviate-src/client/replication/delete_replication_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..50f530709b0f7f08658f4ab6993d4f35b2901219 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/replication/delete_replication_parameters.go @@ -0,0 +1,164 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package replication + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewDeleteReplicationParams creates a new DeleteReplicationParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewDeleteReplicationParams() *DeleteReplicationParams { + return &DeleteReplicationParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewDeleteReplicationParamsWithTimeout creates a new DeleteReplicationParams object +// with the ability to set a timeout on a request. 
+func NewDeleteReplicationParamsWithTimeout(timeout time.Duration) *DeleteReplicationParams { + return &DeleteReplicationParams{ + timeout: timeout, + } +} + +// NewDeleteReplicationParamsWithContext creates a new DeleteReplicationParams object +// with the ability to set a context for a request. +func NewDeleteReplicationParamsWithContext(ctx context.Context) *DeleteReplicationParams { + return &DeleteReplicationParams{ + Context: ctx, + } +} + +// NewDeleteReplicationParamsWithHTTPClient creates a new DeleteReplicationParams object +// with the ability to set a custom HTTPClient for a request. +func NewDeleteReplicationParamsWithHTTPClient(client *http.Client) *DeleteReplicationParams { + return &DeleteReplicationParams{ + HTTPClient: client, + } +} + +/* +DeleteReplicationParams contains all the parameters to send to the API endpoint + + for the delete replication operation. + + Typically these are written to a http.Request. +*/ +type DeleteReplicationParams struct { + + /* ID. + + The ID of the replication operation to delete. + + Format: uuid + */ + ID strfmt.UUID + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the delete replication params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *DeleteReplicationParams) WithDefaults() *DeleteReplicationParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the delete replication params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *DeleteReplicationParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the delete replication params +func (o *DeleteReplicationParams) WithTimeout(timeout time.Duration) *DeleteReplicationParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the delete replication params +func (o *DeleteReplicationParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the delete replication params +func (o *DeleteReplicationParams) WithContext(ctx context.Context) *DeleteReplicationParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the delete replication params +func (o *DeleteReplicationParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the delete replication params +func (o *DeleteReplicationParams) WithHTTPClient(client *http.Client) *DeleteReplicationParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the delete replication params +func (o *DeleteReplicationParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithID adds the id to the delete replication params +func (o *DeleteReplicationParams) WithID(id strfmt.UUID) *DeleteReplicationParams { + o.SetID(id) + return o +} + +// SetID adds the id to the delete replication params +func (o *DeleteReplicationParams) SetID(id strfmt.UUID) { + o.ID = id +} + +// WriteToRequest writes these params to a swagger request +func (o *DeleteReplicationParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param id + if err := r.SetPathParam("id", o.ID.String()); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/replication/delete_replication_responses.go b/platform/dbops/binaries/weaviate-src/client/replication/delete_replication_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..c472eb7606fa35ad1af2b37432aa5ce7f59b8b02 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/replication/delete_replication_responses.go @@ -0,0 +1,596 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package replication + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// DeleteReplicationReader is a Reader for the DeleteReplication structure. +type DeleteReplicationReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *DeleteReplicationReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 204: + result := NewDeleteReplicationNoContent() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 401: + result := NewDeleteReplicationUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewDeleteReplicationForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 404: + result := NewDeleteReplicationNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 409: + result := NewDeleteReplicationConflict() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewDeleteReplicationUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewDeleteReplicationInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 501: + result := NewDeleteReplicationNotImplemented() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewDeleteReplicationNoContent creates a DeleteReplicationNoContent with default headers values +func NewDeleteReplicationNoContent() *DeleteReplicationNoContent { + return &DeleteReplicationNoContent{} +} + +/* 
+DeleteReplicationNoContent describes a response with status code 204, with default header values. + +Successfully deleted. +*/ +type DeleteReplicationNoContent struct { +} + +// IsSuccess returns true when this delete replication no content response has a 2xx status code +func (o *DeleteReplicationNoContent) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this delete replication no content response has a 3xx status code +func (o *DeleteReplicationNoContent) IsRedirect() bool { + return false +} + +// IsClientError returns true when this delete replication no content response has a 4xx status code +func (o *DeleteReplicationNoContent) IsClientError() bool { + return false +} + +// IsServerError returns true when this delete replication no content response has a 5xx status code +func (o *DeleteReplicationNoContent) IsServerError() bool { + return false +} + +// IsCode returns true when this delete replication no content response a status code equal to that given +func (o *DeleteReplicationNoContent) IsCode(code int) bool { + return code == 204 +} + +// Code gets the status code for the delete replication no content response +func (o *DeleteReplicationNoContent) Code() int { + return 204 +} + +func (o *DeleteReplicationNoContent) Error() string { + return fmt.Sprintf("[DELETE /replication/replicate/{id}][%d] deleteReplicationNoContent ", 204) +} + +func (o *DeleteReplicationNoContent) String() string { + return fmt.Sprintf("[DELETE /replication/replicate/{id}][%d] deleteReplicationNoContent ", 204) +} + +func (o *DeleteReplicationNoContent) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewDeleteReplicationUnauthorized creates a DeleteReplicationUnauthorized with default headers values +func NewDeleteReplicationUnauthorized() *DeleteReplicationUnauthorized { + return &DeleteReplicationUnauthorized{} +} + +/* +DeleteReplicationUnauthorized describes a response 
with status code 401, with default header values. + +Unauthorized or invalid credentials. +*/ +type DeleteReplicationUnauthorized struct { +} + +// IsSuccess returns true when this delete replication unauthorized response has a 2xx status code +func (o *DeleteReplicationUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this delete replication unauthorized response has a 3xx status code +func (o *DeleteReplicationUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this delete replication unauthorized response has a 4xx status code +func (o *DeleteReplicationUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this delete replication unauthorized response has a 5xx status code +func (o *DeleteReplicationUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this delete replication unauthorized response a status code equal to that given +func (o *DeleteReplicationUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the delete replication unauthorized response +func (o *DeleteReplicationUnauthorized) Code() int { + return 401 +} + +func (o *DeleteReplicationUnauthorized) Error() string { + return fmt.Sprintf("[DELETE /replication/replicate/{id}][%d] deleteReplicationUnauthorized ", 401) +} + +func (o *DeleteReplicationUnauthorized) String() string { + return fmt.Sprintf("[DELETE /replication/replicate/{id}][%d] deleteReplicationUnauthorized ", 401) +} + +func (o *DeleteReplicationUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewDeleteReplicationForbidden creates a DeleteReplicationForbidden with default headers values +func NewDeleteReplicationForbidden() *DeleteReplicationForbidden { + return &DeleteReplicationForbidden{} +} + +/* +DeleteReplicationForbidden describes a response with 
status code 403, with default header values. + +Forbidden. +*/ +type DeleteReplicationForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this delete replication forbidden response has a 2xx status code +func (o *DeleteReplicationForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this delete replication forbidden response has a 3xx status code +func (o *DeleteReplicationForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this delete replication forbidden response has a 4xx status code +func (o *DeleteReplicationForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this delete replication forbidden response has a 5xx status code +func (o *DeleteReplicationForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this delete replication forbidden response a status code equal to that given +func (o *DeleteReplicationForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the delete replication forbidden response +func (o *DeleteReplicationForbidden) Code() int { + return 403 +} + +func (o *DeleteReplicationForbidden) Error() string { + return fmt.Sprintf("[DELETE /replication/replicate/{id}][%d] deleteReplicationForbidden %+v", 403, o.Payload) +} + +func (o *DeleteReplicationForbidden) String() string { + return fmt.Sprintf("[DELETE /replication/replicate/{id}][%d] deleteReplicationForbidden %+v", 403, o.Payload) +} + +func (o *DeleteReplicationForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *DeleteReplicationForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// 
NewDeleteReplicationNotFound creates a DeleteReplicationNotFound with default headers values +func NewDeleteReplicationNotFound() *DeleteReplicationNotFound { + return &DeleteReplicationNotFound{} +} + +/* +DeleteReplicationNotFound describes a response with status code 404, with default header values. + +Shard replica operation not found. +*/ +type DeleteReplicationNotFound struct { +} + +// IsSuccess returns true when this delete replication not found response has a 2xx status code +func (o *DeleteReplicationNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this delete replication not found response has a 3xx status code +func (o *DeleteReplicationNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this delete replication not found response has a 4xx status code +func (o *DeleteReplicationNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this delete replication not found response has a 5xx status code +func (o *DeleteReplicationNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this delete replication not found response a status code equal to that given +func (o *DeleteReplicationNotFound) IsCode(code int) bool { + return code == 404 +} + +// Code gets the status code for the delete replication not found response +func (o *DeleteReplicationNotFound) Code() int { + return 404 +} + +func (o *DeleteReplicationNotFound) Error() string { + return fmt.Sprintf("[DELETE /replication/replicate/{id}][%d] deleteReplicationNotFound ", 404) +} + +func (o *DeleteReplicationNotFound) String() string { + return fmt.Sprintf("[DELETE /replication/replicate/{id}][%d] deleteReplicationNotFound ", 404) +} + +func (o *DeleteReplicationNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewDeleteReplicationConflict creates a DeleteReplicationConflict with default 
headers values +func NewDeleteReplicationConflict() *DeleteReplicationConflict { + return &DeleteReplicationConflict{} +} + +/* +DeleteReplicationConflict describes a response with status code 409, with default header values. + +The operation is not in a deletable state, e.g. it is a MOVE op in the DEHYDRATING state. +*/ +type DeleteReplicationConflict struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this delete replication conflict response has a 2xx status code +func (o *DeleteReplicationConflict) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this delete replication conflict response has a 3xx status code +func (o *DeleteReplicationConflict) IsRedirect() bool { + return false +} + +// IsClientError returns true when this delete replication conflict response has a 4xx status code +func (o *DeleteReplicationConflict) IsClientError() bool { + return true +} + +// IsServerError returns true when this delete replication conflict response has a 5xx status code +func (o *DeleteReplicationConflict) IsServerError() bool { + return false +} + +// IsCode returns true when this delete replication conflict response a status code equal to that given +func (o *DeleteReplicationConflict) IsCode(code int) bool { + return code == 409 +} + +// Code gets the status code for the delete replication conflict response +func (o *DeleteReplicationConflict) Code() int { + return 409 +} + +func (o *DeleteReplicationConflict) Error() string { + return fmt.Sprintf("[DELETE /replication/replicate/{id}][%d] deleteReplicationConflict %+v", 409, o.Payload) +} + +func (o *DeleteReplicationConflict) String() string { + return fmt.Sprintf("[DELETE /replication/replicate/{id}][%d] deleteReplicationConflict %+v", 409, o.Payload) +} + +func (o *DeleteReplicationConflict) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *DeleteReplicationConflict) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats 
strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewDeleteReplicationUnprocessableEntity creates a DeleteReplicationUnprocessableEntity with default headers values +func NewDeleteReplicationUnprocessableEntity() *DeleteReplicationUnprocessableEntity { + return &DeleteReplicationUnprocessableEntity{} +} + +/* +DeleteReplicationUnprocessableEntity describes a response with status code 422, with default header values. + +Request body is well-formed (i.e., syntactically correct), but semantically erroneous. +*/ +type DeleteReplicationUnprocessableEntity struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this delete replication unprocessable entity response has a 2xx status code +func (o *DeleteReplicationUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this delete replication unprocessable entity response has a 3xx status code +func (o *DeleteReplicationUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this delete replication unprocessable entity response has a 4xx status code +func (o *DeleteReplicationUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this delete replication unprocessable entity response has a 5xx status code +func (o *DeleteReplicationUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this delete replication unprocessable entity response a status code equal to that given +func (o *DeleteReplicationUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + +// Code gets the status code for the delete replication unprocessable entity response +func (o *DeleteReplicationUnprocessableEntity) Code() int { + return 422 +} + +func (o *DeleteReplicationUnprocessableEntity) 
Error() string { + return fmt.Sprintf("[DELETE /replication/replicate/{id}][%d] deleteReplicationUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *DeleteReplicationUnprocessableEntity) String() string { + return fmt.Sprintf("[DELETE /replication/replicate/{id}][%d] deleteReplicationUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *DeleteReplicationUnprocessableEntity) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *DeleteReplicationUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewDeleteReplicationInternalServerError creates a DeleteReplicationInternalServerError with default headers values +func NewDeleteReplicationInternalServerError() *DeleteReplicationInternalServerError { + return &DeleteReplicationInternalServerError{} +} + +/* +DeleteReplicationInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+*/ +type DeleteReplicationInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this delete replication internal server error response has a 2xx status code +func (o *DeleteReplicationInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this delete replication internal server error response has a 3xx status code +func (o *DeleteReplicationInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this delete replication internal server error response has a 4xx status code +func (o *DeleteReplicationInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this delete replication internal server error response has a 5xx status code +func (o *DeleteReplicationInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this delete replication internal server error response a status code equal to that given +func (o *DeleteReplicationInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the delete replication internal server error response +func (o *DeleteReplicationInternalServerError) Code() int { + return 500 +} + +func (o *DeleteReplicationInternalServerError) Error() string { + return fmt.Sprintf("[DELETE /replication/replicate/{id}][%d] deleteReplicationInternalServerError %+v", 500, o.Payload) +} + +func (o *DeleteReplicationInternalServerError) String() string { + return fmt.Sprintf("[DELETE /replication/replicate/{id}][%d] deleteReplicationInternalServerError %+v", 500, o.Payload) +} + +func (o *DeleteReplicationInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *DeleteReplicationInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err 
:= consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewDeleteReplicationNotImplemented creates a DeleteReplicationNotImplemented with default headers values +func NewDeleteReplicationNotImplemented() *DeleteReplicationNotImplemented { + return &DeleteReplicationNotImplemented{} +} + +/* +DeleteReplicationNotImplemented describes a response with status code 501, with default header values. + +Replica movement operations are disabled. +*/ +type DeleteReplicationNotImplemented struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this delete replication not implemented response has a 2xx status code +func (o *DeleteReplicationNotImplemented) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this delete replication not implemented response has a 3xx status code +func (o *DeleteReplicationNotImplemented) IsRedirect() bool { + return false +} + +// IsClientError returns true when this delete replication not implemented response has a 4xx status code +func (o *DeleteReplicationNotImplemented) IsClientError() bool { + return false +} + +// IsServerError returns true when this delete replication not implemented response has a 5xx status code +func (o *DeleteReplicationNotImplemented) IsServerError() bool { + return true +} + +// IsCode returns true when this delete replication not implemented response a status code equal to that given +func (o *DeleteReplicationNotImplemented) IsCode(code int) bool { + return code == 501 +} + +// Code gets the status code for the delete replication not implemented response +func (o *DeleteReplicationNotImplemented) Code() int { + return 501 +} + +func (o *DeleteReplicationNotImplemented) Error() string { + return fmt.Sprintf("[DELETE /replication/replicate/{id}][%d] deleteReplicationNotImplemented %+v", 501, o.Payload) +} + +func (o *DeleteReplicationNotImplemented) String() string { + return fmt.Sprintf("[DELETE 
/replication/replicate/{id}][%d] deleteReplicationNotImplemented %+v", 501, o.Payload) +} + +func (o *DeleteReplicationNotImplemented) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *DeleteReplicationNotImplemented) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/replication/force_delete_replications_parameters.go b/platform/dbops/binaries/weaviate-src/client/replication/force_delete_replications_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..e5eeb3e19e62fcdf95890b84fbc6d06d6b7a0456 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/replication/force_delete_replications_parameters.go @@ -0,0 +1,161 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package replication + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// NewForceDeleteReplicationsParams creates a new ForceDeleteReplicationsParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. 
+// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewForceDeleteReplicationsParams() *ForceDeleteReplicationsParams { + return &ForceDeleteReplicationsParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewForceDeleteReplicationsParamsWithTimeout creates a new ForceDeleteReplicationsParams object +// with the ability to set a timeout on a request. +func NewForceDeleteReplicationsParamsWithTimeout(timeout time.Duration) *ForceDeleteReplicationsParams { + return &ForceDeleteReplicationsParams{ + timeout: timeout, + } +} + +// NewForceDeleteReplicationsParamsWithContext creates a new ForceDeleteReplicationsParams object +// with the ability to set a context for a request. +func NewForceDeleteReplicationsParamsWithContext(ctx context.Context) *ForceDeleteReplicationsParams { + return &ForceDeleteReplicationsParams{ + Context: ctx, + } +} + +// NewForceDeleteReplicationsParamsWithHTTPClient creates a new ForceDeleteReplicationsParams object +// with the ability to set a custom HTTPClient for a request. +func NewForceDeleteReplicationsParamsWithHTTPClient(client *http.Client) *ForceDeleteReplicationsParams { + return &ForceDeleteReplicationsParams{ + HTTPClient: client, + } +} + +/* +ForceDeleteReplicationsParams contains all the parameters to send to the API endpoint + + for the force delete replications operation. + + Typically these are written to a http.Request. +*/ +type ForceDeleteReplicationsParams struct { + + // Body. + Body *models.ReplicationReplicateForceDeleteRequest + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the force delete replications params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *ForceDeleteReplicationsParams) WithDefaults() *ForceDeleteReplicationsParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the force delete replications params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *ForceDeleteReplicationsParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the force delete replications params +func (o *ForceDeleteReplicationsParams) WithTimeout(timeout time.Duration) *ForceDeleteReplicationsParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the force delete replications params +func (o *ForceDeleteReplicationsParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the force delete replications params +func (o *ForceDeleteReplicationsParams) WithContext(ctx context.Context) *ForceDeleteReplicationsParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the force delete replications params +func (o *ForceDeleteReplicationsParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the force delete replications params +func (o *ForceDeleteReplicationsParams) WithHTTPClient(client *http.Client) *ForceDeleteReplicationsParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the force delete replications params +func (o *ForceDeleteReplicationsParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the force delete replications params +func (o *ForceDeleteReplicationsParams) WithBody(body *models.ReplicationReplicateForceDeleteRequest) *ForceDeleteReplicationsParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the force delete replications params +func (o *ForceDeleteReplicationsParams) SetBody(body 
*models.ReplicationReplicateForceDeleteRequest) { + o.Body = body +} + +// WriteToRequest writes these params to a swagger request +func (o *ForceDeleteReplicationsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/replication/force_delete_replications_responses.go b/platform/dbops/binaries/weaviate-src/client/replication/force_delete_replications_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..ba7877920253d84bd30578c83e5c4a5874357435 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/replication/force_delete_replications_responses.go @@ -0,0 +1,472 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package replication + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// ForceDeleteReplicationsReader is a Reader for the ForceDeleteReplications structure. +type ForceDeleteReplicationsReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *ForceDeleteReplicationsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewForceDeleteReplicationsOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewForceDeleteReplicationsBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 401: + result := NewForceDeleteReplicationsUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewForceDeleteReplicationsForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewForceDeleteReplicationsUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewForceDeleteReplicationsInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewForceDeleteReplicationsOK creates a ForceDeleteReplicationsOK with default headers values +func NewForceDeleteReplicationsOK() *ForceDeleteReplicationsOK { + return &ForceDeleteReplicationsOK{} +} + +/* +ForceDeleteReplicationsOK describes a response with status code 200, with default header values. + +Replication operations force deleted successfully. 
+*/ +type ForceDeleteReplicationsOK struct { + Payload *models.ReplicationReplicateForceDeleteResponse +} + +// IsSuccess returns true when this force delete replications o k response has a 2xx status code +func (o *ForceDeleteReplicationsOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this force delete replications o k response has a 3xx status code +func (o *ForceDeleteReplicationsOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this force delete replications o k response has a 4xx status code +func (o *ForceDeleteReplicationsOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this force delete replications o k response has a 5xx status code +func (o *ForceDeleteReplicationsOK) IsServerError() bool { + return false +} + +// IsCode returns true when this force delete replications o k response a status code equal to that given +func (o *ForceDeleteReplicationsOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the force delete replications o k response +func (o *ForceDeleteReplicationsOK) Code() int { + return 200 +} + +func (o *ForceDeleteReplicationsOK) Error() string { + return fmt.Sprintf("[POST /replication/replicate/force-delete][%d] forceDeleteReplicationsOK %+v", 200, o.Payload) +} + +func (o *ForceDeleteReplicationsOK) String() string { + return fmt.Sprintf("[POST /replication/replicate/force-delete][%d] forceDeleteReplicationsOK %+v", 200, o.Payload) +} + +func (o *ForceDeleteReplicationsOK) GetPayload() *models.ReplicationReplicateForceDeleteResponse { + return o.Payload +} + +func (o *ForceDeleteReplicationsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ReplicationReplicateForceDeleteResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + 
+// NewForceDeleteReplicationsBadRequest creates a ForceDeleteReplicationsBadRequest with default headers values +func NewForceDeleteReplicationsBadRequest() *ForceDeleteReplicationsBadRequest { + return &ForceDeleteReplicationsBadRequest{} +} + +/* +ForceDeleteReplicationsBadRequest describes a response with status code 400, with default header values. + +Malformed request. +*/ +type ForceDeleteReplicationsBadRequest struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this force delete replications bad request response has a 2xx status code +func (o *ForceDeleteReplicationsBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this force delete replications bad request response has a 3xx status code +func (o *ForceDeleteReplicationsBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this force delete replications bad request response has a 4xx status code +func (o *ForceDeleteReplicationsBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this force delete replications bad request response has a 5xx status code +func (o *ForceDeleteReplicationsBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this force delete replications bad request response a status code equal to that given +func (o *ForceDeleteReplicationsBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the force delete replications bad request response +func (o *ForceDeleteReplicationsBadRequest) Code() int { + return 400 +} + +func (o *ForceDeleteReplicationsBadRequest) Error() string { + return fmt.Sprintf("[POST /replication/replicate/force-delete][%d] forceDeleteReplicationsBadRequest %+v", 400, o.Payload) +} + +func (o *ForceDeleteReplicationsBadRequest) String() string { + return fmt.Sprintf("[POST /replication/replicate/force-delete][%d] forceDeleteReplicationsBadRequest %+v", 400, o.Payload) +} 
+ +func (o *ForceDeleteReplicationsBadRequest) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ForceDeleteReplicationsBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewForceDeleteReplicationsUnauthorized creates a ForceDeleteReplicationsUnauthorized with default headers values +func NewForceDeleteReplicationsUnauthorized() *ForceDeleteReplicationsUnauthorized { + return &ForceDeleteReplicationsUnauthorized{} +} + +/* +ForceDeleteReplicationsUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. +*/ +type ForceDeleteReplicationsUnauthorized struct { +} + +// IsSuccess returns true when this force delete replications unauthorized response has a 2xx status code +func (o *ForceDeleteReplicationsUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this force delete replications unauthorized response has a 3xx status code +func (o *ForceDeleteReplicationsUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this force delete replications unauthorized response has a 4xx status code +func (o *ForceDeleteReplicationsUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this force delete replications unauthorized response has a 5xx status code +func (o *ForceDeleteReplicationsUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this force delete replications unauthorized response a status code equal to that given +func (o *ForceDeleteReplicationsUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the force delete replications unauthorized 
response +func (o *ForceDeleteReplicationsUnauthorized) Code() int { + return 401 +} + +func (o *ForceDeleteReplicationsUnauthorized) Error() string { + return fmt.Sprintf("[POST /replication/replicate/force-delete][%d] forceDeleteReplicationsUnauthorized ", 401) +} + +func (o *ForceDeleteReplicationsUnauthorized) String() string { + return fmt.Sprintf("[POST /replication/replicate/force-delete][%d] forceDeleteReplicationsUnauthorized ", 401) +} + +func (o *ForceDeleteReplicationsUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewForceDeleteReplicationsForbidden creates a ForceDeleteReplicationsForbidden with default headers values +func NewForceDeleteReplicationsForbidden() *ForceDeleteReplicationsForbidden { + return &ForceDeleteReplicationsForbidden{} +} + +/* +ForceDeleteReplicationsForbidden describes a response with status code 403, with default header values. + +Forbidden +*/ +type ForceDeleteReplicationsForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this force delete replications forbidden response has a 2xx status code +func (o *ForceDeleteReplicationsForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this force delete replications forbidden response has a 3xx status code +func (o *ForceDeleteReplicationsForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this force delete replications forbidden response has a 4xx status code +func (o *ForceDeleteReplicationsForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this force delete replications forbidden response has a 5xx status code +func (o *ForceDeleteReplicationsForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this force delete replications forbidden response a status code equal to that given +func (o *ForceDeleteReplicationsForbidden) 
IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the force delete replications forbidden response +func (o *ForceDeleteReplicationsForbidden) Code() int { + return 403 +} + +func (o *ForceDeleteReplicationsForbidden) Error() string { + return fmt.Sprintf("[POST /replication/replicate/force-delete][%d] forceDeleteReplicationsForbidden %+v", 403, o.Payload) +} + +func (o *ForceDeleteReplicationsForbidden) String() string { + return fmt.Sprintf("[POST /replication/replicate/force-delete][%d] forceDeleteReplicationsForbidden %+v", 403, o.Payload) +} + +func (o *ForceDeleteReplicationsForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ForceDeleteReplicationsForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewForceDeleteReplicationsUnprocessableEntity creates a ForceDeleteReplicationsUnprocessableEntity with default headers values +func NewForceDeleteReplicationsUnprocessableEntity() *ForceDeleteReplicationsUnprocessableEntity { + return &ForceDeleteReplicationsUnprocessableEntity{} +} + +/* +ForceDeleteReplicationsUnprocessableEntity describes a response with status code 422, with default header values. + +Request body is well-formed (i.e., syntactically correct), but semantically erroneous. 
+*/ +type ForceDeleteReplicationsUnprocessableEntity struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this force delete replications unprocessable entity response has a 2xx status code +func (o *ForceDeleteReplicationsUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this force delete replications unprocessable entity response has a 3xx status code +func (o *ForceDeleteReplicationsUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this force delete replications unprocessable entity response has a 4xx status code +func (o *ForceDeleteReplicationsUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this force delete replications unprocessable entity response has a 5xx status code +func (o *ForceDeleteReplicationsUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this force delete replications unprocessable entity response a status code equal to that given +func (o *ForceDeleteReplicationsUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + +// Code gets the status code for the force delete replications unprocessable entity response +func (o *ForceDeleteReplicationsUnprocessableEntity) Code() int { + return 422 +} + +func (o *ForceDeleteReplicationsUnprocessableEntity) Error() string { + return fmt.Sprintf("[POST /replication/replicate/force-delete][%d] forceDeleteReplicationsUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *ForceDeleteReplicationsUnprocessableEntity) String() string { + return fmt.Sprintf("[POST /replication/replicate/force-delete][%d] forceDeleteReplicationsUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *ForceDeleteReplicationsUnprocessableEntity) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ForceDeleteReplicationsUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer 
runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewForceDeleteReplicationsInternalServerError creates a ForceDeleteReplicationsInternalServerError with default headers values +func NewForceDeleteReplicationsInternalServerError() *ForceDeleteReplicationsInternalServerError { + return &ForceDeleteReplicationsInternalServerError{} +} + +/* +ForceDeleteReplicationsInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. +*/ +type ForceDeleteReplicationsInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this force delete replications internal server error response has a 2xx status code +func (o *ForceDeleteReplicationsInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this force delete replications internal server error response has a 3xx status code +func (o *ForceDeleteReplicationsInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this force delete replications internal server error response has a 4xx status code +func (o *ForceDeleteReplicationsInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this force delete replications internal server error response has a 5xx status code +func (o *ForceDeleteReplicationsInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this force delete replications internal server error response a status code equal to that given +func (o *ForceDeleteReplicationsInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status 
code for the force delete replications internal server error response +func (o *ForceDeleteReplicationsInternalServerError) Code() int { + return 500 +} + +func (o *ForceDeleteReplicationsInternalServerError) Error() string { + return fmt.Sprintf("[POST /replication/replicate/force-delete][%d] forceDeleteReplicationsInternalServerError %+v", 500, o.Payload) +} + +func (o *ForceDeleteReplicationsInternalServerError) String() string { + return fmt.Sprintf("[POST /replication/replicate/force-delete][%d] forceDeleteReplicationsInternalServerError %+v", 500, o.Payload) +} + +func (o *ForceDeleteReplicationsInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ForceDeleteReplicationsInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/replication/get_collection_sharding_state_parameters.go b/platform/dbops/binaries/weaviate-src/client/replication/get_collection_sharding_state_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..43f0f07e3b1f631bc0309f71e4615c0f4266eeb7 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/replication/get_collection_sharding_state_parameters.go @@ -0,0 +1,208 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package replication + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewGetCollectionShardingStateParams creates a new GetCollectionShardingStateParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewGetCollectionShardingStateParams() *GetCollectionShardingStateParams { + return &GetCollectionShardingStateParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewGetCollectionShardingStateParamsWithTimeout creates a new GetCollectionShardingStateParams object +// with the ability to set a timeout on a request. +func NewGetCollectionShardingStateParamsWithTimeout(timeout time.Duration) *GetCollectionShardingStateParams { + return &GetCollectionShardingStateParams{ + timeout: timeout, + } +} + +// NewGetCollectionShardingStateParamsWithContext creates a new GetCollectionShardingStateParams object +// with the ability to set a context for a request. +func NewGetCollectionShardingStateParamsWithContext(ctx context.Context) *GetCollectionShardingStateParams { + return &GetCollectionShardingStateParams{ + Context: ctx, + } +} + +// NewGetCollectionShardingStateParamsWithHTTPClient creates a new GetCollectionShardingStateParams object +// with the ability to set a custom HTTPClient for a request. +func NewGetCollectionShardingStateParamsWithHTTPClient(client *http.Client) *GetCollectionShardingStateParams { + return &GetCollectionShardingStateParams{ + HTTPClient: client, + } +} + +/* +GetCollectionShardingStateParams contains all the parameters to send to the API endpoint + + for the get collection sharding state operation. 
+ + Typically these are written to a http.Request. +*/ +type GetCollectionShardingStateParams struct { + + /* Collection. + + The collection name to get the sharding state for. + */ + Collection *string + + /* Shard. + + The shard to get the sharding state for. + */ + Shard *string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the get collection sharding state params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *GetCollectionShardingStateParams) WithDefaults() *GetCollectionShardingStateParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the get collection sharding state params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *GetCollectionShardingStateParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the get collection sharding state params +func (o *GetCollectionShardingStateParams) WithTimeout(timeout time.Duration) *GetCollectionShardingStateParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the get collection sharding state params +func (o *GetCollectionShardingStateParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the get collection sharding state params +func (o *GetCollectionShardingStateParams) WithContext(ctx context.Context) *GetCollectionShardingStateParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the get collection sharding state params +func (o *GetCollectionShardingStateParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the get collection sharding state params +func (o *GetCollectionShardingStateParams) WithHTTPClient(client *http.Client) *GetCollectionShardingStateParams { + o.SetHTTPClient(client) + 
return o +} + +// SetHTTPClient adds the HTTPClient to the get collection sharding state params +func (o *GetCollectionShardingStateParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCollection adds the collection to the get collection sharding state params +func (o *GetCollectionShardingStateParams) WithCollection(collection *string) *GetCollectionShardingStateParams { + o.SetCollection(collection) + return o +} + +// SetCollection adds the collection to the get collection sharding state params +func (o *GetCollectionShardingStateParams) SetCollection(collection *string) { + o.Collection = collection +} + +// WithShard adds the shard to the get collection sharding state params +func (o *GetCollectionShardingStateParams) WithShard(shard *string) *GetCollectionShardingStateParams { + o.SetShard(shard) + return o +} + +// SetShard adds the shard to the get collection sharding state params +func (o *GetCollectionShardingStateParams) SetShard(shard *string) { + o.Shard = shard +} + +// WriteToRequest writes these params to a swagger request +func (o *GetCollectionShardingStateParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Collection != nil { + + // query param collection + var qrCollection string + + if o.Collection != nil { + qrCollection = *o.Collection + } + qCollection := qrCollection + if qCollection != "" { + + if err := r.SetQueryParam("collection", qCollection); err != nil { + return err + } + } + } + + if o.Shard != nil { + + // query param shard + var qrShard string + + if o.Shard != nil { + qrShard = *o.Shard + } + qShard := qrShard + if qShard != "" { + + if err := r.SetQueryParam("shard", qShard); err != nil { + return err + } + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/replication/get_collection_sharding_state_responses.go b/platform/dbops/binaries/weaviate-src/client/replication/get_collection_sharding_state_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..f9b1e459a5afe279826906b50314cccd90d07eac --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/replication/get_collection_sharding_state_responses.go @@ -0,0 +1,546 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package replication + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// GetCollectionShardingStateReader is a Reader for the GetCollectionShardingState structure. +type GetCollectionShardingStateReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *GetCollectionShardingStateReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewGetCollectionShardingStateOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewGetCollectionShardingStateBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 401: + result := NewGetCollectionShardingStateUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewGetCollectionShardingStateForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 404: + result := NewGetCollectionShardingStateNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewGetCollectionShardingStateInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 501: + result := NewGetCollectionShardingStateNotImplemented() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewGetCollectionShardingStateOK creates a GetCollectionShardingStateOK with default headers values +func NewGetCollectionShardingStateOK() *GetCollectionShardingStateOK { + return &GetCollectionShardingStateOK{} +} + +/* +GetCollectionShardingStateOK describes a response with status code 200, with default header values. 
+ +Successfully retrieved sharding state. +*/ +type GetCollectionShardingStateOK struct { + Payload *models.ReplicationShardingStateResponse +} + +// IsSuccess returns true when this get collection sharding state o k response has a 2xx status code +func (o *GetCollectionShardingStateOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this get collection sharding state o k response has a 3xx status code +func (o *GetCollectionShardingStateOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get collection sharding state o k response has a 4xx status code +func (o *GetCollectionShardingStateOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this get collection sharding state o k response has a 5xx status code +func (o *GetCollectionShardingStateOK) IsServerError() bool { + return false +} + +// IsCode returns true when this get collection sharding state o k response a status code equal to that given +func (o *GetCollectionShardingStateOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the get collection sharding state o k response +func (o *GetCollectionShardingStateOK) Code() int { + return 200 +} + +func (o *GetCollectionShardingStateOK) Error() string { + return fmt.Sprintf("[GET /replication/sharding-state][%d] getCollectionShardingStateOK %+v", 200, o.Payload) +} + +func (o *GetCollectionShardingStateOK) String() string { + return fmt.Sprintf("[GET /replication/sharding-state][%d] getCollectionShardingStateOK %+v", 200, o.Payload) +} + +func (o *GetCollectionShardingStateOK) GetPayload() *models.ReplicationShardingStateResponse { + return o.Payload +} + +func (o *GetCollectionShardingStateOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ReplicationShardingStateResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err 
!= nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetCollectionShardingStateBadRequest creates a GetCollectionShardingStateBadRequest with default headers values +func NewGetCollectionShardingStateBadRequest() *GetCollectionShardingStateBadRequest { + return &GetCollectionShardingStateBadRequest{} +} + +/* +GetCollectionShardingStateBadRequest describes a response with status code 400, with default header values. + +Bad request. +*/ +type GetCollectionShardingStateBadRequest struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this get collection sharding state bad request response has a 2xx status code +func (o *GetCollectionShardingStateBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get collection sharding state bad request response has a 3xx status code +func (o *GetCollectionShardingStateBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get collection sharding state bad request response has a 4xx status code +func (o *GetCollectionShardingStateBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this get collection sharding state bad request response has a 5xx status code +func (o *GetCollectionShardingStateBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this get collection sharding state bad request response a status code equal to that given +func (o *GetCollectionShardingStateBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the get collection sharding state bad request response +func (o *GetCollectionShardingStateBadRequest) Code() int { + return 400 +} + +func (o *GetCollectionShardingStateBadRequest) Error() string { + return fmt.Sprintf("[GET /replication/sharding-state][%d] getCollectionShardingStateBadRequest %+v", 400, o.Payload) +} + +func (o *GetCollectionShardingStateBadRequest) String() string { + return 
fmt.Sprintf("[GET /replication/sharding-state][%d] getCollectionShardingStateBadRequest %+v", 400, o.Payload) +} + +func (o *GetCollectionShardingStateBadRequest) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *GetCollectionShardingStateBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetCollectionShardingStateUnauthorized creates a GetCollectionShardingStateUnauthorized with default headers values +func NewGetCollectionShardingStateUnauthorized() *GetCollectionShardingStateUnauthorized { + return &GetCollectionShardingStateUnauthorized{} +} + +/* +GetCollectionShardingStateUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. +*/ +type GetCollectionShardingStateUnauthorized struct { +} + +// IsSuccess returns true when this get collection sharding state unauthorized response has a 2xx status code +func (o *GetCollectionShardingStateUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get collection sharding state unauthorized response has a 3xx status code +func (o *GetCollectionShardingStateUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get collection sharding state unauthorized response has a 4xx status code +func (o *GetCollectionShardingStateUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this get collection sharding state unauthorized response has a 5xx status code +func (o *GetCollectionShardingStateUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this get collection sharding state unauthorized response a status code equal to that given 
+func (o *GetCollectionShardingStateUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the get collection sharding state unauthorized response +func (o *GetCollectionShardingStateUnauthorized) Code() int { + return 401 +} + +func (o *GetCollectionShardingStateUnauthorized) Error() string { + return fmt.Sprintf("[GET /replication/sharding-state][%d] getCollectionShardingStateUnauthorized ", 401) +} + +func (o *GetCollectionShardingStateUnauthorized) String() string { + return fmt.Sprintf("[GET /replication/sharding-state][%d] getCollectionShardingStateUnauthorized ", 401) +} + +func (o *GetCollectionShardingStateUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewGetCollectionShardingStateForbidden creates a GetCollectionShardingStateForbidden with default headers values +func NewGetCollectionShardingStateForbidden() *GetCollectionShardingStateForbidden { + return &GetCollectionShardingStateForbidden{} +} + +/* +GetCollectionShardingStateForbidden describes a response with status code 403, with default header values. 
+ +Forbidden +*/ +type GetCollectionShardingStateForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this get collection sharding state forbidden response has a 2xx status code +func (o *GetCollectionShardingStateForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get collection sharding state forbidden response has a 3xx status code +func (o *GetCollectionShardingStateForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get collection sharding state forbidden response has a 4xx status code +func (o *GetCollectionShardingStateForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this get collection sharding state forbidden response has a 5xx status code +func (o *GetCollectionShardingStateForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this get collection sharding state forbidden response a status code equal to that given +func (o *GetCollectionShardingStateForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the get collection sharding state forbidden response +func (o *GetCollectionShardingStateForbidden) Code() int { + return 403 +} + +func (o *GetCollectionShardingStateForbidden) Error() string { + return fmt.Sprintf("[GET /replication/sharding-state][%d] getCollectionShardingStateForbidden %+v", 403, o.Payload) +} + +func (o *GetCollectionShardingStateForbidden) String() string { + return fmt.Sprintf("[GET /replication/sharding-state][%d] getCollectionShardingStateForbidden %+v", 403, o.Payload) +} + +func (o *GetCollectionShardingStateForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *GetCollectionShardingStateForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := 
consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetCollectionShardingStateNotFound creates a GetCollectionShardingStateNotFound with default headers values +func NewGetCollectionShardingStateNotFound() *GetCollectionShardingStateNotFound { + return &GetCollectionShardingStateNotFound{} +} + +/* +GetCollectionShardingStateNotFound describes a response with status code 404, with default header values. + +Collection or shard not found. +*/ +type GetCollectionShardingStateNotFound struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this get collection sharding state not found response has a 2xx status code +func (o *GetCollectionShardingStateNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get collection sharding state not found response has a 3xx status code +func (o *GetCollectionShardingStateNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get collection sharding state not found response has a 4xx status code +func (o *GetCollectionShardingStateNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this get collection sharding state not found response has a 5xx status code +func (o *GetCollectionShardingStateNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this get collection sharding state not found response a status code equal to that given +func (o *GetCollectionShardingStateNotFound) IsCode(code int) bool { + return code == 404 +} + +// Code gets the status code for the get collection sharding state not found response +func (o *GetCollectionShardingStateNotFound) Code() int { + return 404 +} + +func (o *GetCollectionShardingStateNotFound) Error() string { + return fmt.Sprintf("[GET /replication/sharding-state][%d] getCollectionShardingStateNotFound %+v", 404, o.Payload) +} + +func (o *GetCollectionShardingStateNotFound) 
String() string { + return fmt.Sprintf("[GET /replication/sharding-state][%d] getCollectionShardingStateNotFound %+v", 404, o.Payload) +} + +func (o *GetCollectionShardingStateNotFound) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *GetCollectionShardingStateNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetCollectionShardingStateInternalServerError creates a GetCollectionShardingStateInternalServerError with default headers values +func NewGetCollectionShardingStateInternalServerError() *GetCollectionShardingStateInternalServerError { + return &GetCollectionShardingStateInternalServerError{} +} + +/* +GetCollectionShardingStateInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+*/ +type GetCollectionShardingStateInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this get collection sharding state internal server error response has a 2xx status code +func (o *GetCollectionShardingStateInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get collection sharding state internal server error response has a 3xx status code +func (o *GetCollectionShardingStateInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get collection sharding state internal server error response has a 4xx status code +func (o *GetCollectionShardingStateInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this get collection sharding state internal server error response has a 5xx status code +func (o *GetCollectionShardingStateInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this get collection sharding state internal server error response a status code equal to that given +func (o *GetCollectionShardingStateInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the get collection sharding state internal server error response +func (o *GetCollectionShardingStateInternalServerError) Code() int { + return 500 +} + +func (o *GetCollectionShardingStateInternalServerError) Error() string { + return fmt.Sprintf("[GET /replication/sharding-state][%d] getCollectionShardingStateInternalServerError %+v", 500, o.Payload) +} + +func (o *GetCollectionShardingStateInternalServerError) String() string { + return fmt.Sprintf("[GET /replication/sharding-state][%d] getCollectionShardingStateInternalServerError %+v", 500, o.Payload) +} + +func (o *GetCollectionShardingStateInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *GetCollectionShardingStateInternalServerError) 
readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetCollectionShardingStateNotImplemented creates a GetCollectionShardingStateNotImplemented with default headers values +func NewGetCollectionShardingStateNotImplemented() *GetCollectionShardingStateNotImplemented { + return &GetCollectionShardingStateNotImplemented{} +} + +/* +GetCollectionShardingStateNotImplemented describes a response with status code 501, with default header values. + +Replica movement operations are disabled. +*/ +type GetCollectionShardingStateNotImplemented struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this get collection sharding state not implemented response has a 2xx status code +func (o *GetCollectionShardingStateNotImplemented) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get collection sharding state not implemented response has a 3xx status code +func (o *GetCollectionShardingStateNotImplemented) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get collection sharding state not implemented response has a 4xx status code +func (o *GetCollectionShardingStateNotImplemented) IsClientError() bool { + return false +} + +// IsServerError returns true when this get collection sharding state not implemented response has a 5xx status code +func (o *GetCollectionShardingStateNotImplemented) IsServerError() bool { + return true +} + +// IsCode returns true when this get collection sharding state not implemented response a status code equal to that given +func (o *GetCollectionShardingStateNotImplemented) IsCode(code int) bool { + return code == 501 +} + +// Code gets the status code for the get collection sharding state not implemented response +func 
(o *GetCollectionShardingStateNotImplemented) Code() int { + return 501 +} + +func (o *GetCollectionShardingStateNotImplemented) Error() string { + return fmt.Sprintf("[GET /replication/sharding-state][%d] getCollectionShardingStateNotImplemented %+v", 501, o.Payload) +} + +func (o *GetCollectionShardingStateNotImplemented) String() string { + return fmt.Sprintf("[GET /replication/sharding-state][%d] getCollectionShardingStateNotImplemented %+v", 501, o.Payload) +} + +func (o *GetCollectionShardingStateNotImplemented) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *GetCollectionShardingStateNotImplemented) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/replication/list_replication_parameters.go b/platform/dbops/binaries/weaviate-src/client/replication/list_replication_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..6a07c81b3f7ae2130cc60de692aed2004510de9a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/replication/list_replication_parameters.go @@ -0,0 +1,277 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package replication + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewListReplicationParams creates a new ListReplicationParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewListReplicationParams() *ListReplicationParams { + return &ListReplicationParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewListReplicationParamsWithTimeout creates a new ListReplicationParams object +// with the ability to set a timeout on a request. +func NewListReplicationParamsWithTimeout(timeout time.Duration) *ListReplicationParams { + return &ListReplicationParams{ + timeout: timeout, + } +} + +// NewListReplicationParamsWithContext creates a new ListReplicationParams object +// with the ability to set a context for a request. +func NewListReplicationParamsWithContext(ctx context.Context) *ListReplicationParams { + return &ListReplicationParams{ + Context: ctx, + } +} + +// NewListReplicationParamsWithHTTPClient creates a new ListReplicationParams object +// with the ability to set a custom HTTPClient for a request. +func NewListReplicationParamsWithHTTPClient(client *http.Client) *ListReplicationParams { + return &ListReplicationParams{ + HTTPClient: client, + } +} + +/* +ListReplicationParams contains all the parameters to send to the API endpoint + + for the list replication operation. + + Typically these are written to a http.Request. +*/ +type ListReplicationParams struct { + + /* Collection. + + The name of the collection to get details for. + */ + Collection *string + + /* IncludeHistory. 
+ + Whether to include the history of the replication operation. + */ + IncludeHistory *bool + + /* Shard. + + The shard to get details for. + */ + Shard *string + + /* TargetNode. + + The name of the target node to get details for. + */ + TargetNode *string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the list replication params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *ListReplicationParams) WithDefaults() *ListReplicationParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the list replication params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *ListReplicationParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the list replication params +func (o *ListReplicationParams) WithTimeout(timeout time.Duration) *ListReplicationParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the list replication params +func (o *ListReplicationParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the list replication params +func (o *ListReplicationParams) WithContext(ctx context.Context) *ListReplicationParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the list replication params +func (o *ListReplicationParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the list replication params +func (o *ListReplicationParams) WithHTTPClient(client *http.Client) *ListReplicationParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the list replication params +func (o *ListReplicationParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCollection adds the collection to the list 
replication params +func (o *ListReplicationParams) WithCollection(collection *string) *ListReplicationParams { + o.SetCollection(collection) + return o +} + +// SetCollection adds the collection to the list replication params +func (o *ListReplicationParams) SetCollection(collection *string) { + o.Collection = collection +} + +// WithIncludeHistory adds the includeHistory to the list replication params +func (o *ListReplicationParams) WithIncludeHistory(includeHistory *bool) *ListReplicationParams { + o.SetIncludeHistory(includeHistory) + return o +} + +// SetIncludeHistory adds the includeHistory to the list replication params +func (o *ListReplicationParams) SetIncludeHistory(includeHistory *bool) { + o.IncludeHistory = includeHistory +} + +// WithShard adds the shard to the list replication params +func (o *ListReplicationParams) WithShard(shard *string) *ListReplicationParams { + o.SetShard(shard) + return o +} + +// SetShard adds the shard to the list replication params +func (o *ListReplicationParams) SetShard(shard *string) { + o.Shard = shard +} + +// WithTargetNode adds the targetNode to the list replication params +func (o *ListReplicationParams) WithTargetNode(targetNode *string) *ListReplicationParams { + o.SetTargetNode(targetNode) + return o +} + +// SetTargetNode adds the targetNode to the list replication params +func (o *ListReplicationParams) SetTargetNode(targetNode *string) { + o.TargetNode = targetNode +} + +// WriteToRequest writes these params to a swagger request +func (o *ListReplicationParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Collection != nil { + + // query param collection + var qrCollection string + + if o.Collection != nil { + qrCollection = *o.Collection + } + qCollection := qrCollection + if qCollection != "" { + + if err := r.SetQueryParam("collection", qCollection); err != nil { + return err + } + } + 
} + + if o.IncludeHistory != nil { + + // query param includeHistory + var qrIncludeHistory bool + + if o.IncludeHistory != nil { + qrIncludeHistory = *o.IncludeHistory + } + qIncludeHistory := swag.FormatBool(qrIncludeHistory) + if qIncludeHistory != "" { + + if err := r.SetQueryParam("includeHistory", qIncludeHistory); err != nil { + return err + } + } + } + + if o.Shard != nil { + + // query param shard + var qrShard string + + if o.Shard != nil { + qrShard = *o.Shard + } + qShard := qrShard + if qShard != "" { + + if err := r.SetQueryParam("shard", qShard); err != nil { + return err + } + } + } + + if o.TargetNode != nil { + + // query param targetNode + var qrTargetNode string + + if o.TargetNode != nil { + qrTargetNode = *o.TargetNode + } + qTargetNode := qrTargetNode + if qTargetNode != "" { + + if err := r.SetQueryParam("targetNode", qTargetNode); err != nil { + return err + } + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/replication/list_replication_responses.go b/platform/dbops/binaries/weaviate-src/client/replication/list_replication_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..3e4d39019b1c8d2753e620d4c366e8ea2bf5d749 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/replication/list_replication_responses.go @@ -0,0 +1,470 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package replication + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// ListReplicationReader is a Reader for the ListReplication structure. +type ListReplicationReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ListReplicationReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewListReplicationOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewListReplicationBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 401: + result := NewListReplicationUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewListReplicationForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewListReplicationInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 501: + result := NewListReplicationNotImplemented() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewListReplicationOK creates a ListReplicationOK with default headers values +func NewListReplicationOK() *ListReplicationOK { + return 
&ListReplicationOK{} +} + +/* +ListReplicationOK describes a response with status code 200, with default header values. + +The details of the replication operations. +*/ +type ListReplicationOK struct { + Payload []*models.ReplicationReplicateDetailsReplicaResponse +} + +// IsSuccess returns true when this list replication o k response has a 2xx status code +func (o *ListReplicationOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this list replication o k response has a 3xx status code +func (o *ListReplicationOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this list replication o k response has a 4xx status code +func (o *ListReplicationOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this list replication o k response has a 5xx status code +func (o *ListReplicationOK) IsServerError() bool { + return false +} + +// IsCode returns true when this list replication o k response a status code equal to that given +func (o *ListReplicationOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the list replication o k response +func (o *ListReplicationOK) Code() int { + return 200 +} + +func (o *ListReplicationOK) Error() string { + return fmt.Sprintf("[GET /replication/replicate/list][%d] listReplicationOK %+v", 200, o.Payload) +} + +func (o *ListReplicationOK) String() string { + return fmt.Sprintf("[GET /replication/replicate/list][%d] listReplicationOK %+v", 200, o.Payload) +} + +func (o *ListReplicationOK) GetPayload() []*models.ReplicationReplicateDetailsReplicaResponse { + return o.Payload +} + +func (o *ListReplicationOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewListReplicationBadRequest creates a ListReplicationBadRequest 
with default headers values +func NewListReplicationBadRequest() *ListReplicationBadRequest { + return &ListReplicationBadRequest{} +} + +/* +ListReplicationBadRequest describes a response with status code 400, with default header values. + +Bad request. +*/ +type ListReplicationBadRequest struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this list replication bad request response has a 2xx status code +func (o *ListReplicationBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this list replication bad request response has a 3xx status code +func (o *ListReplicationBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this list replication bad request response has a 4xx status code +func (o *ListReplicationBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this list replication bad request response has a 5xx status code +func (o *ListReplicationBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this list replication bad request response a status code equal to that given +func (o *ListReplicationBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the list replication bad request response +func (o *ListReplicationBadRequest) Code() int { + return 400 +} + +func (o *ListReplicationBadRequest) Error() string { + return fmt.Sprintf("[GET /replication/replicate/list][%d] listReplicationBadRequest %+v", 400, o.Payload) +} + +func (o *ListReplicationBadRequest) String() string { + return fmt.Sprintf("[GET /replication/replicate/list][%d] listReplicationBadRequest %+v", 400, o.Payload) +} + +func (o *ListReplicationBadRequest) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ListReplicationBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = 
new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewListReplicationUnauthorized creates a ListReplicationUnauthorized with default headers values +func NewListReplicationUnauthorized() *ListReplicationUnauthorized { + return &ListReplicationUnauthorized{} +} + +/* +ListReplicationUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. +*/ +type ListReplicationUnauthorized struct { +} + +// IsSuccess returns true when this list replication unauthorized response has a 2xx status code +func (o *ListReplicationUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this list replication unauthorized response has a 3xx status code +func (o *ListReplicationUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this list replication unauthorized response has a 4xx status code +func (o *ListReplicationUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this list replication unauthorized response has a 5xx status code +func (o *ListReplicationUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this list replication unauthorized response a status code equal to that given +func (o *ListReplicationUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the list replication unauthorized response +func (o *ListReplicationUnauthorized) Code() int { + return 401 +} + +func (o *ListReplicationUnauthorized) Error() string { + return fmt.Sprintf("[GET /replication/replicate/list][%d] listReplicationUnauthorized ", 401) +} + +func (o *ListReplicationUnauthorized) String() string { + return fmt.Sprintf("[GET /replication/replicate/list][%d] listReplicationUnauthorized ", 401) +} + +func (o 
*ListReplicationUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewListReplicationForbidden creates a ListReplicationForbidden with default headers values +func NewListReplicationForbidden() *ListReplicationForbidden { + return &ListReplicationForbidden{} +} + +/* +ListReplicationForbidden describes a response with status code 403, with default header values. + +Forbidden +*/ +type ListReplicationForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this list replication forbidden response has a 2xx status code +func (o *ListReplicationForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this list replication forbidden response has a 3xx status code +func (o *ListReplicationForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this list replication forbidden response has a 4xx status code +func (o *ListReplicationForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this list replication forbidden response has a 5xx status code +func (o *ListReplicationForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this list replication forbidden response a status code equal to that given +func (o *ListReplicationForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the list replication forbidden response +func (o *ListReplicationForbidden) Code() int { + return 403 +} + +func (o *ListReplicationForbidden) Error() string { + return fmt.Sprintf("[GET /replication/replicate/list][%d] listReplicationForbidden %+v", 403, o.Payload) +} + +func (o *ListReplicationForbidden) String() string { + return fmt.Sprintf("[GET /replication/replicate/list][%d] listReplicationForbidden %+v", 403, o.Payload) +} + +func (o *ListReplicationForbidden) GetPayload() *models.ErrorResponse { + return 
o.Payload +} + +func (o *ListReplicationForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewListReplicationInternalServerError creates a ListReplicationInternalServerError with default headers values +func NewListReplicationInternalServerError() *ListReplicationInternalServerError { + return &ListReplicationInternalServerError{} +} + +/* +ListReplicationInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. +*/ +type ListReplicationInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this list replication internal server error response has a 2xx status code +func (o *ListReplicationInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this list replication internal server error response has a 3xx status code +func (o *ListReplicationInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this list replication internal server error response has a 4xx status code +func (o *ListReplicationInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this list replication internal server error response has a 5xx status code +func (o *ListReplicationInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this list replication internal server error response a status code equal to that given +func (o *ListReplicationInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the list replication 
internal server error response +func (o *ListReplicationInternalServerError) Code() int { + return 500 +} + +func (o *ListReplicationInternalServerError) Error() string { + return fmt.Sprintf("[GET /replication/replicate/list][%d] listReplicationInternalServerError %+v", 500, o.Payload) +} + +func (o *ListReplicationInternalServerError) String() string { + return fmt.Sprintf("[GET /replication/replicate/list][%d] listReplicationInternalServerError %+v", 500, o.Payload) +} + +func (o *ListReplicationInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ListReplicationInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewListReplicationNotImplemented creates a ListReplicationNotImplemented with default headers values +func NewListReplicationNotImplemented() *ListReplicationNotImplemented { + return &ListReplicationNotImplemented{} +} + +/* +ListReplicationNotImplemented describes a response with status code 501, with default header values. + +Replica movement operations are disabled. 
+*/ +type ListReplicationNotImplemented struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this list replication not implemented response has a 2xx status code +func (o *ListReplicationNotImplemented) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this list replication not implemented response has a 3xx status code +func (o *ListReplicationNotImplemented) IsRedirect() bool { + return false +} + +// IsClientError returns true when this list replication not implemented response has a 4xx status code +func (o *ListReplicationNotImplemented) IsClientError() bool { + return false +} + +// IsServerError returns true when this list replication not implemented response has a 5xx status code +func (o *ListReplicationNotImplemented) IsServerError() bool { + return true +} + +// IsCode returns true when this list replication not implemented response a status code equal to that given +func (o *ListReplicationNotImplemented) IsCode(code int) bool { + return code == 501 +} + +// Code gets the status code for the list replication not implemented response +func (o *ListReplicationNotImplemented) Code() int { + return 501 +} + +func (o *ListReplicationNotImplemented) Error() string { + return fmt.Sprintf("[GET /replication/replicate/list][%d] listReplicationNotImplemented %+v", 501, o.Payload) +} + +func (o *ListReplicationNotImplemented) String() string { + return fmt.Sprintf("[GET /replication/replicate/list][%d] listReplicationNotImplemented %+v", 501, o.Payload) +} + +func (o *ListReplicationNotImplemented) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ListReplicationNotImplemented) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git 
a/platform/dbops/binaries/weaviate-src/client/replication/replicate_parameters.go b/platform/dbops/binaries/weaviate-src/client/replication/replicate_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..223f14e968e65b9bc9ace6213cb5a51102668082 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/replication/replicate_parameters.go @@ -0,0 +1,161 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package replication + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// NewReplicateParams creates a new ReplicateParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewReplicateParams() *ReplicateParams { + return &ReplicateParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewReplicateParamsWithTimeout creates a new ReplicateParams object +// with the ability to set a timeout on a request. +func NewReplicateParamsWithTimeout(timeout time.Duration) *ReplicateParams { + return &ReplicateParams{ + timeout: timeout, + } +} + +// NewReplicateParamsWithContext creates a new ReplicateParams object +// with the ability to set a context for a request. 
+func NewReplicateParamsWithContext(ctx context.Context) *ReplicateParams { + return &ReplicateParams{ + Context: ctx, + } +} + +// NewReplicateParamsWithHTTPClient creates a new ReplicateParams object +// with the ability to set a custom HTTPClient for a request. +func NewReplicateParamsWithHTTPClient(client *http.Client) *ReplicateParams { + return &ReplicateParams{ + HTTPClient: client, + } +} + +/* +ReplicateParams contains all the parameters to send to the API endpoint + + for the replicate operation. + + Typically these are written to a http.Request. +*/ +type ReplicateParams struct { + + // Body. + Body *models.ReplicationReplicateReplicaRequest + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the replicate params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *ReplicateParams) WithDefaults() *ReplicateParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the replicate params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *ReplicateParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the replicate params +func (o *ReplicateParams) WithTimeout(timeout time.Duration) *ReplicateParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the replicate params +func (o *ReplicateParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the replicate params +func (o *ReplicateParams) WithContext(ctx context.Context) *ReplicateParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the replicate params +func (o *ReplicateParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the replicate params +func (o *ReplicateParams) WithHTTPClient(client *http.Client) *ReplicateParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the replicate params +func (o *ReplicateParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the replicate params +func (o *ReplicateParams) WithBody(body *models.ReplicationReplicateReplicaRequest) *ReplicateParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the replicate params +func (o *ReplicateParams) SetBody(body *models.ReplicationReplicateReplicaRequest) { + o.Body = body +} + +// WriteToRequest writes these params to a swagger request +func (o *ReplicateParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/replication/replicate_responses.go b/platform/dbops/binaries/weaviate-src/client/replication/replicate_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..c6be21b7eca7e914e07611adfdbee5f5d26473e1 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/replication/replicate_responses.go @@ -0,0 +1,546 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package replication + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// ReplicateReader is a Reader for the Replicate structure. +type ReplicateReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *ReplicateReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewReplicateOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewReplicateBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 401: + result := NewReplicateUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewReplicateForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewReplicateUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewReplicateInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 501: + result := NewReplicateNotImplemented() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewReplicateOK creates a ReplicateOK with default headers values +func NewReplicateOK() *ReplicateOK { + return &ReplicateOK{} +} + +/* +ReplicateOK describes a response with status code 200, with default header values. + +Replication operation registered successfully. ID of the operation is returned. 
+*/ +type ReplicateOK struct { + Payload *models.ReplicationReplicateReplicaResponse +} + +// IsSuccess returns true when this replicate o k response has a 2xx status code +func (o *ReplicateOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this replicate o k response has a 3xx status code +func (o *ReplicateOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this replicate o k response has a 4xx status code +func (o *ReplicateOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this replicate o k response has a 5xx status code +func (o *ReplicateOK) IsServerError() bool { + return false +} + +// IsCode returns true when this replicate o k response a status code equal to that given +func (o *ReplicateOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the replicate o k response +func (o *ReplicateOK) Code() int { + return 200 +} + +func (o *ReplicateOK) Error() string { + return fmt.Sprintf("[POST /replication/replicate][%d] replicateOK %+v", 200, o.Payload) +} + +func (o *ReplicateOK) String() string { + return fmt.Sprintf("[POST /replication/replicate][%d] replicateOK %+v", 200, o.Payload) +} + +func (o *ReplicateOK) GetPayload() *models.ReplicationReplicateReplicaResponse { + return o.Payload +} + +func (o *ReplicateOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ReplicationReplicateReplicaResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewReplicateBadRequest creates a ReplicateBadRequest with default headers values +func NewReplicateBadRequest() *ReplicateBadRequest { + return &ReplicateBadRequest{} +} + +/* +ReplicateBadRequest describes a response with status code 400, with default header values. + +Malformed request. 
+*/ +type ReplicateBadRequest struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this replicate bad request response has a 2xx status code +func (o *ReplicateBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this replicate bad request response has a 3xx status code +func (o *ReplicateBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this replicate bad request response has a 4xx status code +func (o *ReplicateBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this replicate bad request response has a 5xx status code +func (o *ReplicateBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this replicate bad request response a status code equal to that given +func (o *ReplicateBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the replicate bad request response +func (o *ReplicateBadRequest) Code() int { + return 400 +} + +func (o *ReplicateBadRequest) Error() string { + return fmt.Sprintf("[POST /replication/replicate][%d] replicateBadRequest %+v", 400, o.Payload) +} + +func (o *ReplicateBadRequest) String() string { + return fmt.Sprintf("[POST /replication/replicate][%d] replicateBadRequest %+v", 400, o.Payload) +} + +func (o *ReplicateBadRequest) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ReplicateBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewReplicateUnauthorized creates a ReplicateUnauthorized with default headers values +func NewReplicateUnauthorized() *ReplicateUnauthorized { + return &ReplicateUnauthorized{} +} + +/* +ReplicateUnauthorized describes a 
response with status code 401, with default header values. + +Unauthorized or invalid credentials. +*/ +type ReplicateUnauthorized struct { +} + +// IsSuccess returns true when this replicate unauthorized response has a 2xx status code +func (o *ReplicateUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this replicate unauthorized response has a 3xx status code +func (o *ReplicateUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this replicate unauthorized response has a 4xx status code +func (o *ReplicateUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this replicate unauthorized response has a 5xx status code +func (o *ReplicateUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this replicate unauthorized response a status code equal to that given +func (o *ReplicateUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the replicate unauthorized response +func (o *ReplicateUnauthorized) Code() int { + return 401 +} + +func (o *ReplicateUnauthorized) Error() string { + return fmt.Sprintf("[POST /replication/replicate][%d] replicateUnauthorized ", 401) +} + +func (o *ReplicateUnauthorized) String() string { + return fmt.Sprintf("[POST /replication/replicate][%d] replicateUnauthorized ", 401) +} + +func (o *ReplicateUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewReplicateForbidden creates a ReplicateForbidden with default headers values +func NewReplicateForbidden() *ReplicateForbidden { + return &ReplicateForbidden{} +} + +/* +ReplicateForbidden describes a response with status code 403, with default header values. 
+ +Forbidden +*/ +type ReplicateForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this replicate forbidden response has a 2xx status code +func (o *ReplicateForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this replicate forbidden response has a 3xx status code +func (o *ReplicateForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this replicate forbidden response has a 4xx status code +func (o *ReplicateForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this replicate forbidden response has a 5xx status code +func (o *ReplicateForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this replicate forbidden response a status code equal to that given +func (o *ReplicateForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the replicate forbidden response +func (o *ReplicateForbidden) Code() int { + return 403 +} + +func (o *ReplicateForbidden) Error() string { + return fmt.Sprintf("[POST /replication/replicate][%d] replicateForbidden %+v", 403, o.Payload) +} + +func (o *ReplicateForbidden) String() string { + return fmt.Sprintf("[POST /replication/replicate][%d] replicateForbidden %+v", 403, o.Payload) +} + +func (o *ReplicateForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ReplicateForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewReplicateUnprocessableEntity creates a ReplicateUnprocessableEntity with default headers values +func NewReplicateUnprocessableEntity() *ReplicateUnprocessableEntity { + return &ReplicateUnprocessableEntity{} +} + +/* 
+ReplicateUnprocessableEntity describes a response with status code 422, with default header values. + +Request body is well-formed (i.e., syntactically correct), but semantically erroneous. +*/ +type ReplicateUnprocessableEntity struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this replicate unprocessable entity response has a 2xx status code +func (o *ReplicateUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this replicate unprocessable entity response has a 3xx status code +func (o *ReplicateUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this replicate unprocessable entity response has a 4xx status code +func (o *ReplicateUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this replicate unprocessable entity response has a 5xx status code +func (o *ReplicateUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this replicate unprocessable entity response a status code equal to that given +func (o *ReplicateUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + +// Code gets the status code for the replicate unprocessable entity response +func (o *ReplicateUnprocessableEntity) Code() int { + return 422 +} + +func (o *ReplicateUnprocessableEntity) Error() string { + return fmt.Sprintf("[POST /replication/replicate][%d] replicateUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *ReplicateUnprocessableEntity) String() string { + return fmt.Sprintf("[POST /replication/replicate][%d] replicateUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *ReplicateUnprocessableEntity) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ReplicateUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response 
payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewReplicateInternalServerError creates a ReplicateInternalServerError with default headers values +func NewReplicateInternalServerError() *ReplicateInternalServerError { + return &ReplicateInternalServerError{} +} + +/* +ReplicateInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. +*/ +type ReplicateInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this replicate internal server error response has a 2xx status code +func (o *ReplicateInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this replicate internal server error response has a 3xx status code +func (o *ReplicateInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this replicate internal server error response has a 4xx status code +func (o *ReplicateInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this replicate internal server error response has a 5xx status code +func (o *ReplicateInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this replicate internal server error response a status code equal to that given +func (o *ReplicateInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the replicate internal server error response +func (o *ReplicateInternalServerError) Code() int { + return 500 +} + +func (o *ReplicateInternalServerError) Error() string { + return fmt.Sprintf("[POST /replication/replicate][%d] replicateInternalServerError %+v", 500, o.Payload) +} + +func (o *ReplicateInternalServerError) String() string { + 
return fmt.Sprintf("[POST /replication/replicate][%d] replicateInternalServerError %+v", 500, o.Payload) +} + +func (o *ReplicateInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ReplicateInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewReplicateNotImplemented creates a ReplicateNotImplemented with default headers values +func NewReplicateNotImplemented() *ReplicateNotImplemented { + return &ReplicateNotImplemented{} +} + +/* +ReplicateNotImplemented describes a response with status code 501, with default header values. + +Replica movement operations are disabled. +*/ +type ReplicateNotImplemented struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this replicate not implemented response has a 2xx status code +func (o *ReplicateNotImplemented) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this replicate not implemented response has a 3xx status code +func (o *ReplicateNotImplemented) IsRedirect() bool { + return false +} + +// IsClientError returns true when this replicate not implemented response has a 4xx status code +func (o *ReplicateNotImplemented) IsClientError() bool { + return false +} + +// IsServerError returns true when this replicate not implemented response has a 5xx status code +func (o *ReplicateNotImplemented) IsServerError() bool { + return true +} + +// IsCode returns true when this replicate not implemented response a status code equal to that given +func (o *ReplicateNotImplemented) IsCode(code int) bool { + return code == 501 +} + +// Code gets the status code for the replicate not implemented response +func (o *ReplicateNotImplemented) Code() int { + return 501 +} + +func (o 
*ReplicateNotImplemented) Error() string { + return fmt.Sprintf("[POST /replication/replicate][%d] replicateNotImplemented %+v", 501, o.Payload) +} + +func (o *ReplicateNotImplemented) String() string { + return fmt.Sprintf("[POST /replication/replicate][%d] replicateNotImplemented %+v", 501, o.Payload) +} + +func (o *ReplicateNotImplemented) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ReplicateNotImplemented) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/replication/replication_client.go b/platform/dbops/binaries/weaviate-src/client/replication/replication_client.go new file mode 100644 index 0000000000000000000000000000000000000000..1a5938d1c5ca696e3b77d966dcf4e8e0fa8fd2ff --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/replication/replication_client.go @@ -0,0 +1,392 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package replication + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" +) + +// New creates a new replication API client. 
+func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService { + return &Client{transport: transport, formats: formats} +} + +/* +Client for replication API +*/ +type Client struct { + transport runtime.ClientTransport + formats strfmt.Registry +} + +// ClientOption is the option for Client methods +type ClientOption func(*runtime.ClientOperation) + +// ClientService is the interface for Client methods +type ClientService interface { + CancelReplication(params *CancelReplicationParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*CancelReplicationNoContent, error) + + DeleteAllReplications(params *DeleteAllReplicationsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*DeleteAllReplicationsNoContent, error) + + DeleteReplication(params *DeleteReplicationParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*DeleteReplicationNoContent, error) + + ForceDeleteReplications(params *ForceDeleteReplicationsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ForceDeleteReplicationsOK, error) + + GetCollectionShardingState(params *GetCollectionShardingStateParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetCollectionShardingStateOK, error) + + ListReplication(params *ListReplicationParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListReplicationOK, error) + + Replicate(params *ReplicateParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ReplicateOK, error) + + ReplicationDetails(params *ReplicationDetailsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ReplicationDetailsOK, error) + + SetTransport(transport runtime.ClientTransport) +} + +/* +CancelReplication cancels a replication operation + +Requests the cancellation of an active replication operation identified by its ID. 
The operation will be stopped, but its record will remain in the 'CANCELLED' state (can't be resumed) and will not be automatically deleted. +*/ +func (a *Client) CancelReplication(params *CancelReplicationParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*CancelReplicationNoContent, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCancelReplicationParams() + } + op := &runtime.ClientOperation{ + ID: "cancelReplication", + Method: "POST", + PathPattern: "/replication/replicate/{id}/cancel", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &CancelReplicationReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*CancelReplicationNoContent) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for cancelReplication: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +DeleteAllReplications schedules all replication operations for deletion across all collections shards and nodes +*/ +func (a *Client) DeleteAllReplications(params *DeleteAllReplicationsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*DeleteAllReplicationsNoContent, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewDeleteAllReplicationsParams() + } + op := &runtime.ClientOperation{ + ID: "deleteAllReplications", + Method: "DELETE", + PathPattern: "/replication/replicate", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &DeleteAllReplicationsReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*DeleteAllReplicationsNoContent) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for deleteAllReplications: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +DeleteReplication deletes a replication operation + +Removes a specific replication operation. If the operation is currently active, it will be cancelled and its resources cleaned up before the operation is deleted. 
+*/ +func (a *Client) DeleteReplication(params *DeleteReplicationParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*DeleteReplicationNoContent, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewDeleteReplicationParams() + } + op := &runtime.ClientOperation{ + ID: "deleteReplication", + Method: "DELETE", + PathPattern: "/replication/replicate/{id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &DeleteReplicationReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*DeleteReplicationNoContent) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for deleteReplication: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +ForceDeleteReplications forces delete replication operations + +USE AT OWN RISK! Synchronously force delete operations from the FSM. This will not perform any checks on which state the operation is in so may lead to data corruption or loss. It is recommended to first scale the number of replication engine workers to 0 before calling this endpoint to ensure no operations are in-flight. 
+*/ +func (a *Client) ForceDeleteReplications(params *ForceDeleteReplicationsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ForceDeleteReplicationsOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewForceDeleteReplicationsParams() + } + op := &runtime.ClientOperation{ + ID: "forceDeleteReplications", + Method: "POST", + PathPattern: "/replication/replicate/force-delete", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &ForceDeleteReplicationsReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*ForceDeleteReplicationsOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for forceDeleteReplications: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +GetCollectionShardingState gets sharding state + +Fetches the current sharding state, including replica locations and statuses, for all collections or a specified collection. If a shard name is provided along with a collection, the state for that specific shard is returned. 
+*/ +func (a *Client) GetCollectionShardingState(params *GetCollectionShardingStateParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetCollectionShardingStateOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewGetCollectionShardingStateParams() + } + op := &runtime.ClientOperation{ + ID: "getCollectionShardingState", + Method: "GET", + PathPattern: "/replication/sharding-state", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &GetCollectionShardingStateReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*GetCollectionShardingStateOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for getCollectionShardingState: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +ListReplication lists replication operations + +Retrieves a list of currently registered replication operations, optionally filtered by collection, shard, or node ID. 
+*/ +func (a *Client) ListReplication(params *ListReplicationParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListReplicationOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewListReplicationParams() + } + op := &runtime.ClientOperation{ + ID: "listReplication", + Method: "GET", + PathPattern: "/replication/replicate/list", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &ListReplicationReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*ListReplicationOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for listReplication: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +Replicate initiates a replica movement + +Begins an asynchronous operation to move or copy a specific shard replica from its current node to a designated target node. The operation involves copying data, synchronizing, and potentially decommissioning the source replica. 
+*/ +func (a *Client) Replicate(params *ReplicateParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ReplicateOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewReplicateParams() + } + op := &runtime.ClientOperation{ + ID: "replicate", + Method: "POST", + PathPattern: "/replication/replicate", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &ReplicateReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*ReplicateOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for replicate: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +ReplicationDetails retrieves a replication operation + +Fetches the current status and detailed information for a specific replication operation, identified by its unique ID. Optionally includes historical data of the operation's progress if requested. 
+*/ +func (a *Client) ReplicationDetails(params *ReplicationDetailsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ReplicationDetailsOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewReplicationDetailsParams() + } + op := &runtime.ClientOperation{ + ID: "replicationDetails", + Method: "GET", + PathPattern: "/replication/replicate/{id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &ReplicationDetailsReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*ReplicationDetailsOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for replicationDetails: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + panic(msg) +} + +// SetTransport changes the transport on the client +func (a *Client) SetTransport(transport runtime.ClientTransport) { + a.transport = transport +} diff --git a/platform/dbops/binaries/weaviate-src/client/replication/replication_details_parameters.go b/platform/dbops/binaries/weaviate-src/client/replication/replication_details_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..454b7d40cb3f5c66f1d1545cb30b0c760a7ea845 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/replication/replication_details_parameters.go @@ -0,0 +1,199 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package replication + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewReplicationDetailsParams creates a new ReplicationDetailsParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewReplicationDetailsParams() *ReplicationDetailsParams { + return &ReplicationDetailsParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewReplicationDetailsParamsWithTimeout creates a new ReplicationDetailsParams object +// with the ability to set a timeout on a request. 
+func NewReplicationDetailsParamsWithTimeout(timeout time.Duration) *ReplicationDetailsParams { + return &ReplicationDetailsParams{ + timeout: timeout, + } +} + +// NewReplicationDetailsParamsWithContext creates a new ReplicationDetailsParams object +// with the ability to set a context for a request. +func NewReplicationDetailsParamsWithContext(ctx context.Context) *ReplicationDetailsParams { + return &ReplicationDetailsParams{ + Context: ctx, + } +} + +// NewReplicationDetailsParamsWithHTTPClient creates a new ReplicationDetailsParams object +// with the ability to set a custom HTTPClient for a request. +func NewReplicationDetailsParamsWithHTTPClient(client *http.Client) *ReplicationDetailsParams { + return &ReplicationDetailsParams{ + HTTPClient: client, + } +} + +/* +ReplicationDetailsParams contains all the parameters to send to the API endpoint + + for the replication details operation. + + Typically these are written to a http.Request. +*/ +type ReplicationDetailsParams struct { + + /* ID. + + The ID of the replication operation to get details for. + + Format: uuid + */ + ID strfmt.UUID + + /* IncludeHistory. + + Whether to include the history of the replication operation. + */ + IncludeHistory *bool + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the replication details params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *ReplicationDetailsParams) WithDefaults() *ReplicationDetailsParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the replication details params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *ReplicationDetailsParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the replication details params +func (o *ReplicationDetailsParams) WithTimeout(timeout time.Duration) *ReplicationDetailsParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the replication details params +func (o *ReplicationDetailsParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the replication details params +func (o *ReplicationDetailsParams) WithContext(ctx context.Context) *ReplicationDetailsParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the replication details params +func (o *ReplicationDetailsParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the replication details params +func (o *ReplicationDetailsParams) WithHTTPClient(client *http.Client) *ReplicationDetailsParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the replication details params +func (o *ReplicationDetailsParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithID adds the id to the replication details params +func (o *ReplicationDetailsParams) WithID(id strfmt.UUID) *ReplicationDetailsParams { + o.SetID(id) + return o +} + +// SetID adds the id to the replication details params +func (o *ReplicationDetailsParams) SetID(id strfmt.UUID) { + o.ID = id +} + +// WithIncludeHistory adds the includeHistory to the replication details params +func (o *ReplicationDetailsParams) WithIncludeHistory(includeHistory *bool) *ReplicationDetailsParams { + o.SetIncludeHistory(includeHistory) + return o +} + +// SetIncludeHistory adds the includeHistory to the replication details params +func (o *ReplicationDetailsParams) SetIncludeHistory(includeHistory *bool) { + o.IncludeHistory = includeHistory +} + +// WriteToRequest writes 
these params to a swagger request +func (o *ReplicationDetailsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param id + if err := r.SetPathParam("id", o.ID.String()); err != nil { + return err + } + + if o.IncludeHistory != nil { + + // query param includeHistory + var qrIncludeHistory bool + + if o.IncludeHistory != nil { + qrIncludeHistory = *o.IncludeHistory + } + qIncludeHistory := swag.FormatBool(qrIncludeHistory) + if qIncludeHistory != "" { + + if err := r.SetQueryParam("includeHistory", qIncludeHistory); err != nil { + return err + } + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/replication/replication_details_responses.go b/platform/dbops/binaries/weaviate-src/client/replication/replication_details_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..f52e294a16d5e3f3b62e88218690d895bf2d9e88 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/replication/replication_details_responses.go @@ -0,0 +1,534 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package replication + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// ReplicationDetailsReader is a Reader for the ReplicationDetails structure. 
+type ReplicationDetailsReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ReplicationDetailsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewReplicationDetailsOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 401: + result := NewReplicationDetailsUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewReplicationDetailsForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 404: + result := NewReplicationDetailsNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewReplicationDetailsUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewReplicationDetailsInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 501: + result := NewReplicationDetailsNotImplemented() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewReplicationDetailsOK creates a ReplicationDetailsOK with default headers values +func NewReplicationDetailsOK() *ReplicationDetailsOK { + return &ReplicationDetailsOK{} +} + +/* +ReplicationDetailsOK describes a response with status code 200, with 
default header values. + +The details of the replication operation. +*/ +type ReplicationDetailsOK struct { + Payload *models.ReplicationReplicateDetailsReplicaResponse +} + +// IsSuccess returns true when this replication details o k response has a 2xx status code +func (o *ReplicationDetailsOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this replication details o k response has a 3xx status code +func (o *ReplicationDetailsOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this replication details o k response has a 4xx status code +func (o *ReplicationDetailsOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this replication details o k response has a 5xx status code +func (o *ReplicationDetailsOK) IsServerError() bool { + return false +} + +// IsCode returns true when this replication details o k response a status code equal to that given +func (o *ReplicationDetailsOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the replication details o k response +func (o *ReplicationDetailsOK) Code() int { + return 200 +} + +func (o *ReplicationDetailsOK) Error() string { + return fmt.Sprintf("[GET /replication/replicate/{id}][%d] replicationDetailsOK %+v", 200, o.Payload) +} + +func (o *ReplicationDetailsOK) String() string { + return fmt.Sprintf("[GET /replication/replicate/{id}][%d] replicationDetailsOK %+v", 200, o.Payload) +} + +func (o *ReplicationDetailsOK) GetPayload() *models.ReplicationReplicateDetailsReplicaResponse { + return o.Payload +} + +func (o *ReplicationDetailsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ReplicationReplicateDetailsReplicaResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewReplicationDetailsUnauthorized 
creates a ReplicationDetailsUnauthorized with default headers values +func NewReplicationDetailsUnauthorized() *ReplicationDetailsUnauthorized { + return &ReplicationDetailsUnauthorized{} +} + +/* +ReplicationDetailsUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. +*/ +type ReplicationDetailsUnauthorized struct { +} + +// IsSuccess returns true when this replication details unauthorized response has a 2xx status code +func (o *ReplicationDetailsUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this replication details unauthorized response has a 3xx status code +func (o *ReplicationDetailsUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this replication details unauthorized response has a 4xx status code +func (o *ReplicationDetailsUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this replication details unauthorized response has a 5xx status code +func (o *ReplicationDetailsUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this replication details unauthorized response a status code equal to that given +func (o *ReplicationDetailsUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the replication details unauthorized response +func (o *ReplicationDetailsUnauthorized) Code() int { + return 401 +} + +func (o *ReplicationDetailsUnauthorized) Error() string { + return fmt.Sprintf("[GET /replication/replicate/{id}][%d] replicationDetailsUnauthorized ", 401) +} + +func (o *ReplicationDetailsUnauthorized) String() string { + return fmt.Sprintf("[GET /replication/replicate/{id}][%d] replicationDetailsUnauthorized ", 401) +} + +func (o *ReplicationDetailsUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// 
NewReplicationDetailsForbidden creates a ReplicationDetailsForbidden with default headers values +func NewReplicationDetailsForbidden() *ReplicationDetailsForbidden { + return &ReplicationDetailsForbidden{} +} + +/* +ReplicationDetailsForbidden describes a response with status code 403, with default header values. + +Forbidden. +*/ +type ReplicationDetailsForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this replication details forbidden response has a 2xx status code +func (o *ReplicationDetailsForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this replication details forbidden response has a 3xx status code +func (o *ReplicationDetailsForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this replication details forbidden response has a 4xx status code +func (o *ReplicationDetailsForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this replication details forbidden response has a 5xx status code +func (o *ReplicationDetailsForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this replication details forbidden response a status code equal to that given +func (o *ReplicationDetailsForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the replication details forbidden response +func (o *ReplicationDetailsForbidden) Code() int { + return 403 +} + +func (o *ReplicationDetailsForbidden) Error() string { + return fmt.Sprintf("[GET /replication/replicate/{id}][%d] replicationDetailsForbidden %+v", 403, o.Payload) +} + +func (o *ReplicationDetailsForbidden) String() string { + return fmt.Sprintf("[GET /replication/replicate/{id}][%d] replicationDetailsForbidden %+v", 403, o.Payload) +} + +func (o *ReplicationDetailsForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ReplicationDetailsForbidden) readResponse(response 
runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewReplicationDetailsNotFound creates a ReplicationDetailsNotFound with default headers values +func NewReplicationDetailsNotFound() *ReplicationDetailsNotFound { + return &ReplicationDetailsNotFound{} +} + +/* +ReplicationDetailsNotFound describes a response with status code 404, with default header values. + +Shard replica operation not found. +*/ +type ReplicationDetailsNotFound struct { +} + +// IsSuccess returns true when this replication details not found response has a 2xx status code +func (o *ReplicationDetailsNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this replication details not found response has a 3xx status code +func (o *ReplicationDetailsNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this replication details not found response has a 4xx status code +func (o *ReplicationDetailsNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this replication details not found response has a 5xx status code +func (o *ReplicationDetailsNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this replication details not found response a status code equal to that given +func (o *ReplicationDetailsNotFound) IsCode(code int) bool { + return code == 404 +} + +// Code gets the status code for the replication details not found response +func (o *ReplicationDetailsNotFound) Code() int { + return 404 +} + +func (o *ReplicationDetailsNotFound) Error() string { + return fmt.Sprintf("[GET /replication/replicate/{id}][%d] replicationDetailsNotFound ", 404) +} + +func (o *ReplicationDetailsNotFound) String() string { + return fmt.Sprintf("[GET 
/replication/replicate/{id}][%d] replicationDetailsNotFound ", 404) +} + +func (o *ReplicationDetailsNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewReplicationDetailsUnprocessableEntity creates a ReplicationDetailsUnprocessableEntity with default headers values +func NewReplicationDetailsUnprocessableEntity() *ReplicationDetailsUnprocessableEntity { + return &ReplicationDetailsUnprocessableEntity{} +} + +/* +ReplicationDetailsUnprocessableEntity describes a response with status code 422, with default header values. + +Request body is well-formed (i.e., syntactically correct), but semantically erroneous. +*/ +type ReplicationDetailsUnprocessableEntity struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this replication details unprocessable entity response has a 2xx status code +func (o *ReplicationDetailsUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this replication details unprocessable entity response has a 3xx status code +func (o *ReplicationDetailsUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this replication details unprocessable entity response has a 4xx status code +func (o *ReplicationDetailsUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this replication details unprocessable entity response has a 5xx status code +func (o *ReplicationDetailsUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this replication details unprocessable entity response a status code equal to that given +func (o *ReplicationDetailsUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + +// Code gets the status code for the replication details unprocessable entity response +func (o *ReplicationDetailsUnprocessableEntity) Code() int { + return 422 +} + +func (o 
*ReplicationDetailsUnprocessableEntity) Error() string { + return fmt.Sprintf("[GET /replication/replicate/{id}][%d] replicationDetailsUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *ReplicationDetailsUnprocessableEntity) String() string { + return fmt.Sprintf("[GET /replication/replicate/{id}][%d] replicationDetailsUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *ReplicationDetailsUnprocessableEntity) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ReplicationDetailsUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewReplicationDetailsInternalServerError creates a ReplicationDetailsInternalServerError with default headers values +func NewReplicationDetailsInternalServerError() *ReplicationDetailsInternalServerError { + return &ReplicationDetailsInternalServerError{} +} + +/* +ReplicationDetailsInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+*/ +type ReplicationDetailsInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this replication details internal server error response has a 2xx status code +func (o *ReplicationDetailsInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this replication details internal server error response has a 3xx status code +func (o *ReplicationDetailsInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this replication details internal server error response has a 4xx status code +func (o *ReplicationDetailsInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this replication details internal server error response has a 5xx status code +func (o *ReplicationDetailsInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this replication details internal server error response a status code equal to that given +func (o *ReplicationDetailsInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the replication details internal server error response +func (o *ReplicationDetailsInternalServerError) Code() int { + return 500 +} + +func (o *ReplicationDetailsInternalServerError) Error() string { + return fmt.Sprintf("[GET /replication/replicate/{id}][%d] replicationDetailsInternalServerError %+v", 500, o.Payload) +} + +func (o *ReplicationDetailsInternalServerError) String() string { + return fmt.Sprintf("[GET /replication/replicate/{id}][%d] replicationDetailsInternalServerError %+v", 500, o.Payload) +} + +func (o *ReplicationDetailsInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ReplicationDetailsInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response 
payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewReplicationDetailsNotImplemented creates a ReplicationDetailsNotImplemented with default headers values +func NewReplicationDetailsNotImplemented() *ReplicationDetailsNotImplemented { + return &ReplicationDetailsNotImplemented{} +} + +/* +ReplicationDetailsNotImplemented describes a response with status code 501, with default header values. + +Replica movement operations are disabled. +*/ +type ReplicationDetailsNotImplemented struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this replication details not implemented response has a 2xx status code +func (o *ReplicationDetailsNotImplemented) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this replication details not implemented response has a 3xx status code +func (o *ReplicationDetailsNotImplemented) IsRedirect() bool { + return false +} + +// IsClientError returns true when this replication details not implemented response has a 4xx status code +func (o *ReplicationDetailsNotImplemented) IsClientError() bool { + return false +} + +// IsServerError returns true when this replication details not implemented response has a 5xx status code +func (o *ReplicationDetailsNotImplemented) IsServerError() bool { + return true +} + +// IsCode returns true when this replication details not implemented response a status code equal to that given +func (o *ReplicationDetailsNotImplemented) IsCode(code int) bool { + return code == 501 +} + +// Code gets the status code for the replication details not implemented response +func (o *ReplicationDetailsNotImplemented) Code() int { + return 501 +} + +func (o *ReplicationDetailsNotImplemented) Error() string { + return fmt.Sprintf("[GET /replication/replicate/{id}][%d] replicationDetailsNotImplemented %+v", 501, o.Payload) +} + +func (o *ReplicationDetailsNotImplemented) String() string { + return 
fmt.Sprintf("[GET /replication/replicate/{id}][%d] replicationDetailsNotImplemented %+v", 501, o.Payload) +} + +func (o *ReplicationDetailsNotImplemented) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ReplicationDetailsNotImplemented) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/schema/aliases_create_parameters.go b/platform/dbops/binaries/weaviate-src/client/schema/aliases_create_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..2bda4ab7cb319e5b289ec4202f4d2cebcc58dc1e --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/schema/aliases_create_parameters.go @@ -0,0 +1,161 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// NewAliasesCreateParams creates a new AliasesCreateParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. 
+func NewAliasesCreateParams() *AliasesCreateParams { + return &AliasesCreateParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewAliasesCreateParamsWithTimeout creates a new AliasesCreateParams object +// with the ability to set a timeout on a request. +func NewAliasesCreateParamsWithTimeout(timeout time.Duration) *AliasesCreateParams { + return &AliasesCreateParams{ + timeout: timeout, + } +} + +// NewAliasesCreateParamsWithContext creates a new AliasesCreateParams object +// with the ability to set a context for a request. +func NewAliasesCreateParamsWithContext(ctx context.Context) *AliasesCreateParams { + return &AliasesCreateParams{ + Context: ctx, + } +} + +// NewAliasesCreateParamsWithHTTPClient creates a new AliasesCreateParams object +// with the ability to set a custom HTTPClient for a request. +func NewAliasesCreateParamsWithHTTPClient(client *http.Client) *AliasesCreateParams { + return &AliasesCreateParams{ + HTTPClient: client, + } +} + +/* +AliasesCreateParams contains all the parameters to send to the API endpoint + + for the aliases create operation. + + Typically these are written to a http.Request. +*/ +type AliasesCreateParams struct { + + // Body. + Body *models.Alias + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the aliases create params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *AliasesCreateParams) WithDefaults() *AliasesCreateParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the aliases create params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *AliasesCreateParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the aliases create params +func (o *AliasesCreateParams) WithTimeout(timeout time.Duration) *AliasesCreateParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the aliases create params +func (o *AliasesCreateParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the aliases create params +func (o *AliasesCreateParams) WithContext(ctx context.Context) *AliasesCreateParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the aliases create params +func (o *AliasesCreateParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the aliases create params +func (o *AliasesCreateParams) WithHTTPClient(client *http.Client) *AliasesCreateParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the aliases create params +func (o *AliasesCreateParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the aliases create params +func (o *AliasesCreateParams) WithBody(body *models.Alias) *AliasesCreateParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the aliases create params +func (o *AliasesCreateParams) SetBody(body *models.Alias) { + o.Body = body +} + +// WriteToRequest writes these params to a swagger request +func (o *AliasesCreateParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/schema/aliases_create_responses.go b/platform/dbops/binaries/weaviate-src/client/schema/aliases_create_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..ac12d844a634be4618569f2f6c418a03f22908ca --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/schema/aliases_create_responses.go @@ -0,0 +1,398 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// AliasesCreateReader is a Reader for the AliasesCreate structure. +type AliasesCreateReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *AliasesCreateReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewAliasesCreateOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 401: + result := NewAliasesCreateUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewAliasesCreateForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewAliasesCreateUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewAliasesCreateInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewAliasesCreateOK creates a AliasesCreateOK with default headers values +func NewAliasesCreateOK() *AliasesCreateOK { + return &AliasesCreateOK{} +} + +/* +AliasesCreateOK describes a response with status code 200, with default header values. 
+ +Successfully created a new alias for the specified collection (class) +*/ +type AliasesCreateOK struct { + Payload *models.Alias +} + +// IsSuccess returns true when this aliases create o k response has a 2xx status code +func (o *AliasesCreateOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this aliases create o k response has a 3xx status code +func (o *AliasesCreateOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this aliases create o k response has a 4xx status code +func (o *AliasesCreateOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this aliases create o k response has a 5xx status code +func (o *AliasesCreateOK) IsServerError() bool { + return false +} + +// IsCode returns true when this aliases create o k response a status code equal to that given +func (o *AliasesCreateOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the aliases create o k response +func (o *AliasesCreateOK) Code() int { + return 200 +} + +func (o *AliasesCreateOK) Error() string { + return fmt.Sprintf("[POST /aliases][%d] aliasesCreateOK %+v", 200, o.Payload) +} + +func (o *AliasesCreateOK) String() string { + return fmt.Sprintf("[POST /aliases][%d] aliasesCreateOK %+v", 200, o.Payload) +} + +func (o *AliasesCreateOK) GetPayload() *models.Alias { + return o.Payload +} + +func (o *AliasesCreateOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Alias) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewAliasesCreateUnauthorized creates a AliasesCreateUnauthorized with default headers values +func NewAliasesCreateUnauthorized() *AliasesCreateUnauthorized { + return &AliasesCreateUnauthorized{} +} + +/* +AliasesCreateUnauthorized describes a response with status code 401, 
with default header values. + +Unauthorized or invalid credentials. +*/ +type AliasesCreateUnauthorized struct { +} + +// IsSuccess returns true when this aliases create unauthorized response has a 2xx status code +func (o *AliasesCreateUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this aliases create unauthorized response has a 3xx status code +func (o *AliasesCreateUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this aliases create unauthorized response has a 4xx status code +func (o *AliasesCreateUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this aliases create unauthorized response has a 5xx status code +func (o *AliasesCreateUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this aliases create unauthorized response a status code equal to that given +func (o *AliasesCreateUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the aliases create unauthorized response +func (o *AliasesCreateUnauthorized) Code() int { + return 401 +} + +func (o *AliasesCreateUnauthorized) Error() string { + return fmt.Sprintf("[POST /aliases][%d] aliasesCreateUnauthorized ", 401) +} + +func (o *AliasesCreateUnauthorized) String() string { + return fmt.Sprintf("[POST /aliases][%d] aliasesCreateUnauthorized ", 401) +} + +func (o *AliasesCreateUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewAliasesCreateForbidden creates a AliasesCreateForbidden with default headers values +func NewAliasesCreateForbidden() *AliasesCreateForbidden { + return &AliasesCreateForbidden{} +} + +/* +AliasesCreateForbidden describes a response with status code 403, with default header values. 
+ +Forbidden +*/ +type AliasesCreateForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this aliases create forbidden response has a 2xx status code +func (o *AliasesCreateForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this aliases create forbidden response has a 3xx status code +func (o *AliasesCreateForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this aliases create forbidden response has a 4xx status code +func (o *AliasesCreateForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this aliases create forbidden response has a 5xx status code +func (o *AliasesCreateForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this aliases create forbidden response a status code equal to that given +func (o *AliasesCreateForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the aliases create forbidden response +func (o *AliasesCreateForbidden) Code() int { + return 403 +} + +func (o *AliasesCreateForbidden) Error() string { + return fmt.Sprintf("[POST /aliases][%d] aliasesCreateForbidden %+v", 403, o.Payload) +} + +func (o *AliasesCreateForbidden) String() string { + return fmt.Sprintf("[POST /aliases][%d] aliasesCreateForbidden %+v", 403, o.Payload) +} + +func (o *AliasesCreateForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *AliasesCreateForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewAliasesCreateUnprocessableEntity creates a AliasesCreateUnprocessableEntity with default headers values +func NewAliasesCreateUnprocessableEntity() *AliasesCreateUnprocessableEntity { 
+ return &AliasesCreateUnprocessableEntity{} +} + +/* +AliasesCreateUnprocessableEntity describes a response with status code 422, with default header values. + +Invalid create alias request. +*/ +type AliasesCreateUnprocessableEntity struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this aliases create unprocessable entity response has a 2xx status code +func (o *AliasesCreateUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this aliases create unprocessable entity response has a 3xx status code +func (o *AliasesCreateUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this aliases create unprocessable entity response has a 4xx status code +func (o *AliasesCreateUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this aliases create unprocessable entity response has a 5xx status code +func (o *AliasesCreateUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this aliases create unprocessable entity response a status code equal to that given +func (o *AliasesCreateUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + +// Code gets the status code for the aliases create unprocessable entity response +func (o *AliasesCreateUnprocessableEntity) Code() int { + return 422 +} + +func (o *AliasesCreateUnprocessableEntity) Error() string { + return fmt.Sprintf("[POST /aliases][%d] aliasesCreateUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *AliasesCreateUnprocessableEntity) String() string { + return fmt.Sprintf("[POST /aliases][%d] aliasesCreateUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *AliasesCreateUnprocessableEntity) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *AliasesCreateUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + 
o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewAliasesCreateInternalServerError creates a AliasesCreateInternalServerError with default headers values +func NewAliasesCreateInternalServerError() *AliasesCreateInternalServerError { + return &AliasesCreateInternalServerError{} +} + +/* +AliasesCreateInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. +*/ +type AliasesCreateInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this aliases create internal server error response has a 2xx status code +func (o *AliasesCreateInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this aliases create internal server error response has a 3xx status code +func (o *AliasesCreateInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this aliases create internal server error response has a 4xx status code +func (o *AliasesCreateInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this aliases create internal server error response has a 5xx status code +func (o *AliasesCreateInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this aliases create internal server error response a status code equal to that given +func (o *AliasesCreateInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the aliases create internal server error response +func (o *AliasesCreateInternalServerError) Code() int { + return 500 +} + +func (o *AliasesCreateInternalServerError) Error() string { + return fmt.Sprintf("[POST 
/aliases][%d] aliasesCreateInternalServerError %+v", 500, o.Payload) +} + +func (o *AliasesCreateInternalServerError) String() string { + return fmt.Sprintf("[POST /aliases][%d] aliasesCreateInternalServerError %+v", 500, o.Payload) +} + +func (o *AliasesCreateInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *AliasesCreateInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/schema/aliases_delete_parameters.go b/platform/dbops/binaries/weaviate-src/client/schema/aliases_delete_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..f5b933dc99119967cc5ccf17c0482a96f8b286aa --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/schema/aliases_delete_parameters.go @@ -0,0 +1,159 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewAliasesDeleteParams creates a new AliasesDeleteParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. 
+// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewAliasesDeleteParams() *AliasesDeleteParams { + return &AliasesDeleteParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewAliasesDeleteParamsWithTimeout creates a new AliasesDeleteParams object +// with the ability to set a timeout on a request. +func NewAliasesDeleteParamsWithTimeout(timeout time.Duration) *AliasesDeleteParams { + return &AliasesDeleteParams{ + timeout: timeout, + } +} + +// NewAliasesDeleteParamsWithContext creates a new AliasesDeleteParams object +// with the ability to set a context for a request. +func NewAliasesDeleteParamsWithContext(ctx context.Context) *AliasesDeleteParams { + return &AliasesDeleteParams{ + Context: ctx, + } +} + +// NewAliasesDeleteParamsWithHTTPClient creates a new AliasesDeleteParams object +// with the ability to set a custom HTTPClient for a request. +func NewAliasesDeleteParamsWithHTTPClient(client *http.Client) *AliasesDeleteParams { + return &AliasesDeleteParams{ + HTTPClient: client, + } +} + +/* +AliasesDeleteParams contains all the parameters to send to the API endpoint + + for the aliases delete operation. + + Typically these are written to a http.Request. +*/ +type AliasesDeleteParams struct { + + // AliasName. + AliasName string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the aliases delete params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *AliasesDeleteParams) WithDefaults() *AliasesDeleteParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the aliases delete params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *AliasesDeleteParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the aliases delete params +func (o *AliasesDeleteParams) WithTimeout(timeout time.Duration) *AliasesDeleteParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the aliases delete params +func (o *AliasesDeleteParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the aliases delete params +func (o *AliasesDeleteParams) WithContext(ctx context.Context) *AliasesDeleteParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the aliases delete params +func (o *AliasesDeleteParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the aliases delete params +func (o *AliasesDeleteParams) WithHTTPClient(client *http.Client) *AliasesDeleteParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the aliases delete params +func (o *AliasesDeleteParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithAliasName adds the aliasName to the aliases delete params +func (o *AliasesDeleteParams) WithAliasName(aliasName string) *AliasesDeleteParams { + o.SetAliasName(aliasName) + return o +} + +// SetAliasName adds the aliasName to the aliases delete params +func (o *AliasesDeleteParams) SetAliasName(aliasName string) { + o.AliasName = aliasName +} + +// WriteToRequest writes these params to a swagger request +func (o *AliasesDeleteParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param aliasName + if err := r.SetPathParam("aliasName", o.AliasName); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/schema/aliases_delete_responses.go b/platform/dbops/binaries/weaviate-src/client/schema/aliases_delete_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..ec0f6c122063b23a55d41f2d853cca84c83cd629 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/schema/aliases_delete_responses.go @@ -0,0 +1,460 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// AliasesDeleteReader is a Reader for the AliasesDelete structure. +type AliasesDeleteReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *AliasesDeleteReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 204: + result := NewAliasesDeleteNoContent() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 401: + result := NewAliasesDeleteUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewAliasesDeleteForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 404: + result := NewAliasesDeleteNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewAliasesDeleteUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewAliasesDeleteInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewAliasesDeleteNoContent creates a AliasesDeleteNoContent with default headers values +func NewAliasesDeleteNoContent() *AliasesDeleteNoContent { + return &AliasesDeleteNoContent{} +} + +/* +AliasesDeleteNoContent describes a response with status code 204, with default header values. + +Successfully deleted the alias. 
+*/ +type AliasesDeleteNoContent struct { +} + +// IsSuccess returns true when this aliases delete no content response has a 2xx status code +func (o *AliasesDeleteNoContent) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this aliases delete no content response has a 3xx status code +func (o *AliasesDeleteNoContent) IsRedirect() bool { + return false +} + +// IsClientError returns true when this aliases delete no content response has a 4xx status code +func (o *AliasesDeleteNoContent) IsClientError() bool { + return false +} + +// IsServerError returns true when this aliases delete no content response has a 5xx status code +func (o *AliasesDeleteNoContent) IsServerError() bool { + return false +} + +// IsCode returns true when this aliases delete no content response a status code equal to that given +func (o *AliasesDeleteNoContent) IsCode(code int) bool { + return code == 204 +} + +// Code gets the status code for the aliases delete no content response +func (o *AliasesDeleteNoContent) Code() int { + return 204 +} + +func (o *AliasesDeleteNoContent) Error() string { + return fmt.Sprintf("[DELETE /aliases/{aliasName}][%d] aliasesDeleteNoContent ", 204) +} + +func (o *AliasesDeleteNoContent) String() string { + return fmt.Sprintf("[DELETE /aliases/{aliasName}][%d] aliasesDeleteNoContent ", 204) +} + +func (o *AliasesDeleteNoContent) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewAliasesDeleteUnauthorized creates a AliasesDeleteUnauthorized with default headers values +func NewAliasesDeleteUnauthorized() *AliasesDeleteUnauthorized { + return &AliasesDeleteUnauthorized{} +} + +/* +AliasesDeleteUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. 
+*/ +type AliasesDeleteUnauthorized struct { +} + +// IsSuccess returns true when this aliases delete unauthorized response has a 2xx status code +func (o *AliasesDeleteUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this aliases delete unauthorized response has a 3xx status code +func (o *AliasesDeleteUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this aliases delete unauthorized response has a 4xx status code +func (o *AliasesDeleteUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this aliases delete unauthorized response has a 5xx status code +func (o *AliasesDeleteUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this aliases delete unauthorized response a status code equal to that given +func (o *AliasesDeleteUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the aliases delete unauthorized response +func (o *AliasesDeleteUnauthorized) Code() int { + return 401 +} + +func (o *AliasesDeleteUnauthorized) Error() string { + return fmt.Sprintf("[DELETE /aliases/{aliasName}][%d] aliasesDeleteUnauthorized ", 401) +} + +func (o *AliasesDeleteUnauthorized) String() string { + return fmt.Sprintf("[DELETE /aliases/{aliasName}][%d] aliasesDeleteUnauthorized ", 401) +} + +func (o *AliasesDeleteUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewAliasesDeleteForbidden creates a AliasesDeleteForbidden with default headers values +func NewAliasesDeleteForbidden() *AliasesDeleteForbidden { + return &AliasesDeleteForbidden{} +} + +/* +AliasesDeleteForbidden describes a response with status code 403, with default header values. 
+ +Forbidden +*/ +type AliasesDeleteForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this aliases delete forbidden response has a 2xx status code +func (o *AliasesDeleteForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this aliases delete forbidden response has a 3xx status code +func (o *AliasesDeleteForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this aliases delete forbidden response has a 4xx status code +func (o *AliasesDeleteForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this aliases delete forbidden response has a 5xx status code +func (o *AliasesDeleteForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this aliases delete forbidden response a status code equal to that given +func (o *AliasesDeleteForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the aliases delete forbidden response +func (o *AliasesDeleteForbidden) Code() int { + return 403 +} + +func (o *AliasesDeleteForbidden) Error() string { + return fmt.Sprintf("[DELETE /aliases/{aliasName}][%d] aliasesDeleteForbidden %+v", 403, o.Payload) +} + +func (o *AliasesDeleteForbidden) String() string { + return fmt.Sprintf("[DELETE /aliases/{aliasName}][%d] aliasesDeleteForbidden %+v", 403, o.Payload) +} + +func (o *AliasesDeleteForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *AliasesDeleteForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewAliasesDeleteNotFound creates a AliasesDeleteNotFound with default headers values +func NewAliasesDeleteNotFound() *AliasesDeleteNotFound { + return 
&AliasesDeleteNotFound{} +} + +/* +AliasesDeleteNotFound describes a response with status code 404, with default header values. + +Not Found - Alias does not exist +*/ +type AliasesDeleteNotFound struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this aliases delete not found response has a 2xx status code +func (o *AliasesDeleteNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this aliases delete not found response has a 3xx status code +func (o *AliasesDeleteNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this aliases delete not found response has a 4xx status code +func (o *AliasesDeleteNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this aliases delete not found response has a 5xx status code +func (o *AliasesDeleteNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this aliases delete not found response a status code equal to that given +func (o *AliasesDeleteNotFound) IsCode(code int) bool { + return code == 404 +} + +// Code gets the status code for the aliases delete not found response +func (o *AliasesDeleteNotFound) Code() int { + return 404 +} + +func (o *AliasesDeleteNotFound) Error() string { + return fmt.Sprintf("[DELETE /aliases/{aliasName}][%d] aliasesDeleteNotFound %+v", 404, o.Payload) +} + +func (o *AliasesDeleteNotFound) String() string { + return fmt.Sprintf("[DELETE /aliases/{aliasName}][%d] aliasesDeleteNotFound %+v", 404, o.Payload) +} + +func (o *AliasesDeleteNotFound) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *AliasesDeleteNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// 
NewAliasesDeleteUnprocessableEntity creates a AliasesDeleteUnprocessableEntity with default headers values +func NewAliasesDeleteUnprocessableEntity() *AliasesDeleteUnprocessableEntity { + return &AliasesDeleteUnprocessableEntity{} +} + +/* +AliasesDeleteUnprocessableEntity describes a response with status code 422, with default header values. + +Invalid delete alias request. +*/ +type AliasesDeleteUnprocessableEntity struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this aliases delete unprocessable entity response has a 2xx status code +func (o *AliasesDeleteUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this aliases delete unprocessable entity response has a 3xx status code +func (o *AliasesDeleteUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this aliases delete unprocessable entity response has a 4xx status code +func (o *AliasesDeleteUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this aliases delete unprocessable entity response has a 5xx status code +func (o *AliasesDeleteUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this aliases delete unprocessable entity response a status code equal to that given +func (o *AliasesDeleteUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + +// Code gets the status code for the aliases delete unprocessable entity response +func (o *AliasesDeleteUnprocessableEntity) Code() int { + return 422 +} + +func (o *AliasesDeleteUnprocessableEntity) Error() string { + return fmt.Sprintf("[DELETE /aliases/{aliasName}][%d] aliasesDeleteUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *AliasesDeleteUnprocessableEntity) String() string { + return fmt.Sprintf("[DELETE /aliases/{aliasName}][%d] aliasesDeleteUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *AliasesDeleteUnprocessableEntity) 
GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *AliasesDeleteUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewAliasesDeleteInternalServerError creates a AliasesDeleteInternalServerError with default headers values +func NewAliasesDeleteInternalServerError() *AliasesDeleteInternalServerError { + return &AliasesDeleteInternalServerError{} +} + +/* +AliasesDeleteInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. +*/ +type AliasesDeleteInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this aliases delete internal server error response has a 2xx status code +func (o *AliasesDeleteInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this aliases delete internal server error response has a 3xx status code +func (o *AliasesDeleteInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this aliases delete internal server error response has a 4xx status code +func (o *AliasesDeleteInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this aliases delete internal server error response has a 5xx status code +func (o *AliasesDeleteInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this aliases delete internal server error response a status code equal to that given +func (o *AliasesDeleteInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the 
aliases delete internal server error response +func (o *AliasesDeleteInternalServerError) Code() int { + return 500 +} + +func (o *AliasesDeleteInternalServerError) Error() string { + return fmt.Sprintf("[DELETE /aliases/{aliasName}][%d] aliasesDeleteInternalServerError %+v", 500, o.Payload) +} + +func (o *AliasesDeleteInternalServerError) String() string { + return fmt.Sprintf("[DELETE /aliases/{aliasName}][%d] aliasesDeleteInternalServerError %+v", 500, o.Payload) +} + +func (o *AliasesDeleteInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *AliasesDeleteInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/schema/aliases_get_alias_parameters.go b/platform/dbops/binaries/weaviate-src/client/schema/aliases_get_alias_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..6bc7ce392a50aa8b91d86ecf65cba6da220e45f6 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/schema/aliases_get_alias_parameters.go @@ -0,0 +1,159 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewAliasesGetAliasParams creates a new AliasesGetAliasParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewAliasesGetAliasParams() *AliasesGetAliasParams { + return &AliasesGetAliasParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewAliasesGetAliasParamsWithTimeout creates a new AliasesGetAliasParams object +// with the ability to set a timeout on a request. +func NewAliasesGetAliasParamsWithTimeout(timeout time.Duration) *AliasesGetAliasParams { + return &AliasesGetAliasParams{ + timeout: timeout, + } +} + +// NewAliasesGetAliasParamsWithContext creates a new AliasesGetAliasParams object +// with the ability to set a context for a request. +func NewAliasesGetAliasParamsWithContext(ctx context.Context) *AliasesGetAliasParams { + return &AliasesGetAliasParams{ + Context: ctx, + } +} + +// NewAliasesGetAliasParamsWithHTTPClient creates a new AliasesGetAliasParams object +// with the ability to set a custom HTTPClient for a request. +func NewAliasesGetAliasParamsWithHTTPClient(client *http.Client) *AliasesGetAliasParams { + return &AliasesGetAliasParams{ + HTTPClient: client, + } +} + +/* +AliasesGetAliasParams contains all the parameters to send to the API endpoint + + for the aliases get alias operation. + + Typically these are written to a http.Request. +*/ +type AliasesGetAliasParams struct { + + // AliasName. 
+ AliasName string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the aliases get alias params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *AliasesGetAliasParams) WithDefaults() *AliasesGetAliasParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the aliases get alias params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *AliasesGetAliasParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the aliases get alias params +func (o *AliasesGetAliasParams) WithTimeout(timeout time.Duration) *AliasesGetAliasParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the aliases get alias params +func (o *AliasesGetAliasParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the aliases get alias params +func (o *AliasesGetAliasParams) WithContext(ctx context.Context) *AliasesGetAliasParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the aliases get alias params +func (o *AliasesGetAliasParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the aliases get alias params +func (o *AliasesGetAliasParams) WithHTTPClient(client *http.Client) *AliasesGetAliasParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the aliases get alias params +func (o *AliasesGetAliasParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithAliasName adds the aliasName to the aliases get alias params +func (o *AliasesGetAliasParams) WithAliasName(aliasName string) *AliasesGetAliasParams { + o.SetAliasName(aliasName) + return o +} + +// SetAliasName adds the aliasName to the aliases get alias params +func (o 
*AliasesGetAliasParams) SetAliasName(aliasName string) { + o.AliasName = aliasName +} + +// WriteToRequest writes these params to a swagger request +func (o *AliasesGetAliasParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param aliasName + if err := r.SetPathParam("aliasName", o.AliasName); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/schema/aliases_get_alias_responses.go b/platform/dbops/binaries/weaviate-src/client/schema/aliases_get_alias_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..3b13ccf66dea48af08ae1a131ab9976dcea4ce89 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/schema/aliases_get_alias_responses.go @@ -0,0 +1,472 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// AliasesGetAliasReader is a Reader for the AliasesGetAlias structure. +type AliasesGetAliasReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *AliasesGetAliasReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewAliasesGetAliasOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 401: + result := NewAliasesGetAliasUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewAliasesGetAliasForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 404: + result := NewAliasesGetAliasNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewAliasesGetAliasUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewAliasesGetAliasInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewAliasesGetAliasOK creates a AliasesGetAliasOK with default headers values +func NewAliasesGetAliasOK() *AliasesGetAliasOK { + return &AliasesGetAliasOK{} +} + +/* +AliasesGetAliasOK describes a response with status code 200, with default header values. + +Successfully retrieved the alias details. 
+*/ +type AliasesGetAliasOK struct { + Payload *models.Alias +} + +// IsSuccess returns true when this aliases get alias o k response has a 2xx status code +func (o *AliasesGetAliasOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this aliases get alias o k response has a 3xx status code +func (o *AliasesGetAliasOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this aliases get alias o k response has a 4xx status code +func (o *AliasesGetAliasOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this aliases get alias o k response has a 5xx status code +func (o *AliasesGetAliasOK) IsServerError() bool { + return false +} + +// IsCode returns true when this aliases get alias o k response a status code equal to that given +func (o *AliasesGetAliasOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the aliases get alias o k response +func (o *AliasesGetAliasOK) Code() int { + return 200 +} + +func (o *AliasesGetAliasOK) Error() string { + return fmt.Sprintf("[GET /aliases/{aliasName}][%d] aliasesGetAliasOK %+v", 200, o.Payload) +} + +func (o *AliasesGetAliasOK) String() string { + return fmt.Sprintf("[GET /aliases/{aliasName}][%d] aliasesGetAliasOK %+v", 200, o.Payload) +} + +func (o *AliasesGetAliasOK) GetPayload() *models.Alias { + return o.Payload +} + +func (o *AliasesGetAliasOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Alias) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewAliasesGetAliasUnauthorized creates a AliasesGetAliasUnauthorized with default headers values +func NewAliasesGetAliasUnauthorized() *AliasesGetAliasUnauthorized { + return &AliasesGetAliasUnauthorized{} +} + +/* +AliasesGetAliasUnauthorized describes a response with status code 
401, with default header values. + +Unauthorized or invalid credentials. +*/ +type AliasesGetAliasUnauthorized struct { +} + +// IsSuccess returns true when this aliases get alias unauthorized response has a 2xx status code +func (o *AliasesGetAliasUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this aliases get alias unauthorized response has a 3xx status code +func (o *AliasesGetAliasUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this aliases get alias unauthorized response has a 4xx status code +func (o *AliasesGetAliasUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this aliases get alias unauthorized response has a 5xx status code +func (o *AliasesGetAliasUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this aliases get alias unauthorized response a status code equal to that given +func (o *AliasesGetAliasUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the aliases get alias unauthorized response +func (o *AliasesGetAliasUnauthorized) Code() int { + return 401 +} + +func (o *AliasesGetAliasUnauthorized) Error() string { + return fmt.Sprintf("[GET /aliases/{aliasName}][%d] aliasesGetAliasUnauthorized ", 401) +} + +func (o *AliasesGetAliasUnauthorized) String() string { + return fmt.Sprintf("[GET /aliases/{aliasName}][%d] aliasesGetAliasUnauthorized ", 401) +} + +func (o *AliasesGetAliasUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewAliasesGetAliasForbidden creates a AliasesGetAliasForbidden with default headers values +func NewAliasesGetAliasForbidden() *AliasesGetAliasForbidden { + return &AliasesGetAliasForbidden{} +} + +/* +AliasesGetAliasForbidden describes a response with status code 403, with default header values. 
+ +Forbidden +*/ +type AliasesGetAliasForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this aliases get alias forbidden response has a 2xx status code +func (o *AliasesGetAliasForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this aliases get alias forbidden response has a 3xx status code +func (o *AliasesGetAliasForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this aliases get alias forbidden response has a 4xx status code +func (o *AliasesGetAliasForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this aliases get alias forbidden response has a 5xx status code +func (o *AliasesGetAliasForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this aliases get alias forbidden response a status code equal to that given +func (o *AliasesGetAliasForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the aliases get alias forbidden response +func (o *AliasesGetAliasForbidden) Code() int { + return 403 +} + +func (o *AliasesGetAliasForbidden) Error() string { + return fmt.Sprintf("[GET /aliases/{aliasName}][%d] aliasesGetAliasForbidden %+v", 403, o.Payload) +} + +func (o *AliasesGetAliasForbidden) String() string { + return fmt.Sprintf("[GET /aliases/{aliasName}][%d] aliasesGetAliasForbidden %+v", 403, o.Payload) +} + +func (o *AliasesGetAliasForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *AliasesGetAliasForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewAliasesGetAliasNotFound creates a AliasesGetAliasNotFound with default headers values +func 
NewAliasesGetAliasNotFound() *AliasesGetAliasNotFound { + return &AliasesGetAliasNotFound{} +} + +/* +AliasesGetAliasNotFound describes a response with status code 404, with default header values. + +Not Found - Alias does not exist +*/ +type AliasesGetAliasNotFound struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this aliases get alias not found response has a 2xx status code +func (o *AliasesGetAliasNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this aliases get alias not found response has a 3xx status code +func (o *AliasesGetAliasNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this aliases get alias not found response has a 4xx status code +func (o *AliasesGetAliasNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this aliases get alias not found response has a 5xx status code +func (o *AliasesGetAliasNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this aliases get alias not found response a status code equal to that given +func (o *AliasesGetAliasNotFound) IsCode(code int) bool { + return code == 404 +} + +// Code gets the status code for the aliases get alias not found response +func (o *AliasesGetAliasNotFound) Code() int { + return 404 +} + +func (o *AliasesGetAliasNotFound) Error() string { + return fmt.Sprintf("[GET /aliases/{aliasName}][%d] aliasesGetAliasNotFound %+v", 404, o.Payload) +} + +func (o *AliasesGetAliasNotFound) String() string { + return fmt.Sprintf("[GET /aliases/{aliasName}][%d] aliasesGetAliasNotFound %+v", 404, o.Payload) +} + +func (o *AliasesGetAliasNotFound) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *AliasesGetAliasNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := 
consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewAliasesGetAliasUnprocessableEntity creates a AliasesGetAliasUnprocessableEntity with default headers values +func NewAliasesGetAliasUnprocessableEntity() *AliasesGetAliasUnprocessableEntity { + return &AliasesGetAliasUnprocessableEntity{} +} + +/* +AliasesGetAliasUnprocessableEntity describes a response with status code 422, with default header values. + +Invalid alias name provided. +*/ +type AliasesGetAliasUnprocessableEntity struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this aliases get alias unprocessable entity response has a 2xx status code +func (o *AliasesGetAliasUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this aliases get alias unprocessable entity response has a 3xx status code +func (o *AliasesGetAliasUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this aliases get alias unprocessable entity response has a 4xx status code +func (o *AliasesGetAliasUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this aliases get alias unprocessable entity response has a 5xx status code +func (o *AliasesGetAliasUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this aliases get alias unprocessable entity response a status code equal to that given +func (o *AliasesGetAliasUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + +// Code gets the status code for the aliases get alias unprocessable entity response +func (o *AliasesGetAliasUnprocessableEntity) Code() int { + return 422 +} + +func (o *AliasesGetAliasUnprocessableEntity) Error() string { + return fmt.Sprintf("[GET /aliases/{aliasName}][%d] aliasesGetAliasUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *AliasesGetAliasUnprocessableEntity) String() string { 
+ return fmt.Sprintf("[GET /aliases/{aliasName}][%d] aliasesGetAliasUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *AliasesGetAliasUnprocessableEntity) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *AliasesGetAliasUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewAliasesGetAliasInternalServerError creates a AliasesGetAliasInternalServerError with default headers values +func NewAliasesGetAliasInternalServerError() *AliasesGetAliasInternalServerError { + return &AliasesGetAliasInternalServerError{} +} + +/* +AliasesGetAliasInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+*/ +type AliasesGetAliasInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this aliases get alias internal server error response has a 2xx status code +func (o *AliasesGetAliasInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this aliases get alias internal server error response has a 3xx status code +func (o *AliasesGetAliasInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this aliases get alias internal server error response has a 4xx status code +func (o *AliasesGetAliasInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this aliases get alias internal server error response has a 5xx status code +func (o *AliasesGetAliasInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this aliases get alias internal server error response a status code equal to that given +func (o *AliasesGetAliasInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the aliases get alias internal server error response +func (o *AliasesGetAliasInternalServerError) Code() int { + return 500 +} + +func (o *AliasesGetAliasInternalServerError) Error() string { + return fmt.Sprintf("[GET /aliases/{aliasName}][%d] aliasesGetAliasInternalServerError %+v", 500, o.Payload) +} + +func (o *AliasesGetAliasInternalServerError) String() string { + return fmt.Sprintf("[GET /aliases/{aliasName}][%d] aliasesGetAliasInternalServerError %+v", 500, o.Payload) +} + +func (o *AliasesGetAliasInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *AliasesGetAliasInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); 
err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/schema/aliases_get_parameters.go b/platform/dbops/binaries/weaviate-src/client/schema/aliases_get_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..471091b2a7c72cf8502e0e614fe06f446a26e543 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/schema/aliases_get_parameters.go @@ -0,0 +1,174 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewAliasesGetParams creates a new AliasesGetParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewAliasesGetParams() *AliasesGetParams { + return &AliasesGetParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewAliasesGetParamsWithTimeout creates a new AliasesGetParams object +// with the ability to set a timeout on a request. +func NewAliasesGetParamsWithTimeout(timeout time.Duration) *AliasesGetParams { + return &AliasesGetParams{ + timeout: timeout, + } +} + +// NewAliasesGetParamsWithContext creates a new AliasesGetParams object +// with the ability to set a context for a request. 
+func NewAliasesGetParamsWithContext(ctx context.Context) *AliasesGetParams { + return &AliasesGetParams{ + Context: ctx, + } +} + +// NewAliasesGetParamsWithHTTPClient creates a new AliasesGetParams object +// with the ability to set a custom HTTPClient for a request. +func NewAliasesGetParamsWithHTTPClient(client *http.Client) *AliasesGetParams { + return &AliasesGetParams{ + HTTPClient: client, + } +} + +/* +AliasesGetParams contains all the parameters to send to the API endpoint + + for the aliases get operation. + + Typically these are written to a http.Request. +*/ +type AliasesGetParams struct { + + /* Class. + + Optional filter to retrieve aliases for a specific collection (class) only. If not provided, returns all aliases. + */ + Class *string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the aliases get params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *AliasesGetParams) WithDefaults() *AliasesGetParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the aliases get params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *AliasesGetParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the aliases get params +func (o *AliasesGetParams) WithTimeout(timeout time.Duration) *AliasesGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the aliases get params +func (o *AliasesGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the aliases get params +func (o *AliasesGetParams) WithContext(ctx context.Context) *AliasesGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the aliases get params +func (o *AliasesGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the aliases get params +func (o *AliasesGetParams) WithHTTPClient(client *http.Client) *AliasesGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the aliases get params +func (o *AliasesGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithClass adds the class to the aliases get params +func (o *AliasesGetParams) WithClass(class *string) *AliasesGetParams { + o.SetClass(class) + return o +} + +// SetClass adds the class to the aliases get params +func (o *AliasesGetParams) SetClass(class *string) { + o.Class = class +} + +// WriteToRequest writes these params to a swagger request +func (o *AliasesGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Class != nil { + + // query param class + var qrClass string + + if o.Class != nil { + qrClass = *o.Class + } + qClass := qrClass + if qClass != "" { + + if err := r.SetQueryParam("class", qClass); err != nil { + return err + } + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/schema/aliases_get_responses.go b/platform/dbops/binaries/weaviate-src/client/schema/aliases_get_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..8310dd6e07d1c596f1286be19ec4917c2b9b7802 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/schema/aliases_get_responses.go @@ -0,0 +1,398 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// AliasesGetReader is a Reader for the AliasesGet structure. +type AliasesGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *AliasesGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewAliasesGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 401: + result := NewAliasesGetUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewAliasesGetForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewAliasesGetUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewAliasesGetInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewAliasesGetOK creates a AliasesGetOK with default headers values +func NewAliasesGetOK() *AliasesGetOK { + return &AliasesGetOK{} +} + +/* +AliasesGetOK describes a response with status code 200, with default header values. 
+ +Successfully retrieved the list of aliases +*/ +type AliasesGetOK struct { + Payload *models.AliasResponse +} + +// IsSuccess returns true when this aliases get o k response has a 2xx status code +func (o *AliasesGetOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this aliases get o k response has a 3xx status code +func (o *AliasesGetOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this aliases get o k response has a 4xx status code +func (o *AliasesGetOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this aliases get o k response has a 5xx status code +func (o *AliasesGetOK) IsServerError() bool { + return false +} + +// IsCode returns true when this aliases get o k response a status code equal to that given +func (o *AliasesGetOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the aliases get o k response +func (o *AliasesGetOK) Code() int { + return 200 +} + +func (o *AliasesGetOK) Error() string { + return fmt.Sprintf("[GET /aliases][%d] aliasesGetOK %+v", 200, o.Payload) +} + +func (o *AliasesGetOK) String() string { + return fmt.Sprintf("[GET /aliases][%d] aliasesGetOK %+v", 200, o.Payload) +} + +func (o *AliasesGetOK) GetPayload() *models.AliasResponse { + return o.Payload +} + +func (o *AliasesGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.AliasResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewAliasesGetUnauthorized creates a AliasesGetUnauthorized with default headers values +func NewAliasesGetUnauthorized() *AliasesGetUnauthorized { + return &AliasesGetUnauthorized{} +} + +/* +AliasesGetUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. 
+*/ +type AliasesGetUnauthorized struct { +} + +// IsSuccess returns true when this aliases get unauthorized response has a 2xx status code +func (o *AliasesGetUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this aliases get unauthorized response has a 3xx status code +func (o *AliasesGetUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this aliases get unauthorized response has a 4xx status code +func (o *AliasesGetUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this aliases get unauthorized response has a 5xx status code +func (o *AliasesGetUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this aliases get unauthorized response a status code equal to that given +func (o *AliasesGetUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the aliases get unauthorized response +func (o *AliasesGetUnauthorized) Code() int { + return 401 +} + +func (o *AliasesGetUnauthorized) Error() string { + return fmt.Sprintf("[GET /aliases][%d] aliasesGetUnauthorized ", 401) +} + +func (o *AliasesGetUnauthorized) String() string { + return fmt.Sprintf("[GET /aliases][%d] aliasesGetUnauthorized ", 401) +} + +func (o *AliasesGetUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewAliasesGetForbidden creates a AliasesGetForbidden with default headers values +func NewAliasesGetForbidden() *AliasesGetForbidden { + return &AliasesGetForbidden{} +} + +/* +AliasesGetForbidden describes a response with status code 403, with default header values. 
+ +Forbidden +*/ +type AliasesGetForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this aliases get forbidden response has a 2xx status code +func (o *AliasesGetForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this aliases get forbidden response has a 3xx status code +func (o *AliasesGetForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this aliases get forbidden response has a 4xx status code +func (o *AliasesGetForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this aliases get forbidden response has a 5xx status code +func (o *AliasesGetForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this aliases get forbidden response a status code equal to that given +func (o *AliasesGetForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the aliases get forbidden response +func (o *AliasesGetForbidden) Code() int { + return 403 +} + +func (o *AliasesGetForbidden) Error() string { + return fmt.Sprintf("[GET /aliases][%d] aliasesGetForbidden %+v", 403, o.Payload) +} + +func (o *AliasesGetForbidden) String() string { + return fmt.Sprintf("[GET /aliases][%d] aliasesGetForbidden %+v", 403, o.Payload) +} + +func (o *AliasesGetForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *AliasesGetForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewAliasesGetUnprocessableEntity creates a AliasesGetUnprocessableEntity with default headers values +func NewAliasesGetUnprocessableEntity() *AliasesGetUnprocessableEntity { + return &AliasesGetUnprocessableEntity{} +} + +/* 
+AliasesGetUnprocessableEntity describes a response with status code 422, with default header values. + +Invalid collection (class) parameter provided +*/ +type AliasesGetUnprocessableEntity struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this aliases get unprocessable entity response has a 2xx status code +func (o *AliasesGetUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this aliases get unprocessable entity response has a 3xx status code +func (o *AliasesGetUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this aliases get unprocessable entity response has a 4xx status code +func (o *AliasesGetUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this aliases get unprocessable entity response has a 5xx status code +func (o *AliasesGetUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this aliases get unprocessable entity response a status code equal to that given +func (o *AliasesGetUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + +// Code gets the status code for the aliases get unprocessable entity response +func (o *AliasesGetUnprocessableEntity) Code() int { + return 422 +} + +func (o *AliasesGetUnprocessableEntity) Error() string { + return fmt.Sprintf("[GET /aliases][%d] aliasesGetUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *AliasesGetUnprocessableEntity) String() string { + return fmt.Sprintf("[GET /aliases][%d] aliasesGetUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *AliasesGetUnprocessableEntity) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *AliasesGetUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := 
consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewAliasesGetInternalServerError creates a AliasesGetInternalServerError with default headers values +func NewAliasesGetInternalServerError() *AliasesGetInternalServerError { + return &AliasesGetInternalServerError{} +} + +/* +AliasesGetInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. +*/ +type AliasesGetInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this aliases get internal server error response has a 2xx status code +func (o *AliasesGetInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this aliases get internal server error response has a 3xx status code +func (o *AliasesGetInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this aliases get internal server error response has a 4xx status code +func (o *AliasesGetInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this aliases get internal server error response has a 5xx status code +func (o *AliasesGetInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this aliases get internal server error response a status code equal to that given +func (o *AliasesGetInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the aliases get internal server error response +func (o *AliasesGetInternalServerError) Code() int { + return 500 +} + +func (o *AliasesGetInternalServerError) Error() string { + return fmt.Sprintf("[GET /aliases][%d] aliasesGetInternalServerError %+v", 500, o.Payload) +} + +func (o *AliasesGetInternalServerError) String() string { + return 
fmt.Sprintf("[GET /aliases][%d] aliasesGetInternalServerError %+v", 500, o.Payload) +} + +func (o *AliasesGetInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *AliasesGetInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/schema/aliases_update_parameters.go b/platform/dbops/binaries/weaviate-src/client/schema/aliases_update_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..d095fa9dc3783af5afedf92aa9786a89f405592a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/schema/aliases_update_parameters.go @@ -0,0 +1,176 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewAliasesUpdateParams creates a new AliasesUpdateParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. 
+func NewAliasesUpdateParams() *AliasesUpdateParams { + return &AliasesUpdateParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewAliasesUpdateParamsWithTimeout creates a new AliasesUpdateParams object +// with the ability to set a timeout on a request. +func NewAliasesUpdateParamsWithTimeout(timeout time.Duration) *AliasesUpdateParams { + return &AliasesUpdateParams{ + timeout: timeout, + } +} + +// NewAliasesUpdateParamsWithContext creates a new AliasesUpdateParams object +// with the ability to set a context for a request. +func NewAliasesUpdateParamsWithContext(ctx context.Context) *AliasesUpdateParams { + return &AliasesUpdateParams{ + Context: ctx, + } +} + +// NewAliasesUpdateParamsWithHTTPClient creates a new AliasesUpdateParams object +// with the ability to set a custom HTTPClient for a request. +func NewAliasesUpdateParamsWithHTTPClient(client *http.Client) *AliasesUpdateParams { + return &AliasesUpdateParams{ + HTTPClient: client, + } +} + +/* +AliasesUpdateParams contains all the parameters to send to the API endpoint + + for the aliases update operation. + + Typically these are written to a http.Request. +*/ +type AliasesUpdateParams struct { + + // AliasName. + AliasName string + + // Body. + Body AliasesUpdateBody + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the aliases update params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *AliasesUpdateParams) WithDefaults() *AliasesUpdateParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the aliases update params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *AliasesUpdateParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the aliases update params +func (o *AliasesUpdateParams) WithTimeout(timeout time.Duration) *AliasesUpdateParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the aliases update params +func (o *AliasesUpdateParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the aliases update params +func (o *AliasesUpdateParams) WithContext(ctx context.Context) *AliasesUpdateParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the aliases update params +func (o *AliasesUpdateParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the aliases update params +func (o *AliasesUpdateParams) WithHTTPClient(client *http.Client) *AliasesUpdateParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the aliases update params +func (o *AliasesUpdateParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithAliasName adds the aliasName to the aliases update params +func (o *AliasesUpdateParams) WithAliasName(aliasName string) *AliasesUpdateParams { + o.SetAliasName(aliasName) + return o +} + +// SetAliasName adds the aliasName to the aliases update params +func (o *AliasesUpdateParams) SetAliasName(aliasName string) { + o.AliasName = aliasName +} + +// WithBody adds the body to the aliases update params +func (o *AliasesUpdateParams) WithBody(body AliasesUpdateBody) *AliasesUpdateParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the aliases update params +func (o *AliasesUpdateParams) SetBody(body AliasesUpdateBody) { + o.Body = body +} + +// WriteToRequest writes these params to a swagger request +func (o *AliasesUpdateParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := 
r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param aliasName + if err := r.SetPathParam("aliasName", o.AliasName); err != nil { + return err + } + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/schema/aliases_update_responses.go b/platform/dbops/binaries/weaviate-src/client/schema/aliases_update_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..c823744e1dd5e03a8e70070d5b1b02ba5c5ca4c0 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/schema/aliases_update_responses.go @@ -0,0 +1,512 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + + "github.com/weaviate/weaviate/entities/models" +) + +// AliasesUpdateReader is a Reader for the AliasesUpdate structure. +type AliasesUpdateReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *AliasesUpdateReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewAliasesUpdateOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 401: + result := NewAliasesUpdateUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewAliasesUpdateForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 404: + result := NewAliasesUpdateNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewAliasesUpdateUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewAliasesUpdateInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewAliasesUpdateOK creates a AliasesUpdateOK with default headers values +func NewAliasesUpdateOK() *AliasesUpdateOK { + return &AliasesUpdateOK{} +} + +/* +AliasesUpdateOK describes a response with status code 200, with default header values. + +Successfully updated the alias to point to the new collection (class). 
+*/ +type AliasesUpdateOK struct { + Payload *models.Alias +} + +// IsSuccess returns true when this aliases update o k response has a 2xx status code +func (o *AliasesUpdateOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this aliases update o k response has a 3xx status code +func (o *AliasesUpdateOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this aliases update o k response has a 4xx status code +func (o *AliasesUpdateOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this aliases update o k response has a 5xx status code +func (o *AliasesUpdateOK) IsServerError() bool { + return false +} + +// IsCode returns true when this aliases update o k response a status code equal to that given +func (o *AliasesUpdateOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the aliases update o k response +func (o *AliasesUpdateOK) Code() int { + return 200 +} + +func (o *AliasesUpdateOK) Error() string { + return fmt.Sprintf("[PUT /aliases/{aliasName}][%d] aliasesUpdateOK %+v", 200, o.Payload) +} + +func (o *AliasesUpdateOK) String() string { + return fmt.Sprintf("[PUT /aliases/{aliasName}][%d] aliasesUpdateOK %+v", 200, o.Payload) +} + +func (o *AliasesUpdateOK) GetPayload() *models.Alias { + return o.Payload +} + +func (o *AliasesUpdateOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Alias) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewAliasesUpdateUnauthorized creates a AliasesUpdateUnauthorized with default headers values +func NewAliasesUpdateUnauthorized() *AliasesUpdateUnauthorized { + return &AliasesUpdateUnauthorized{} +} + +/* +AliasesUpdateUnauthorized describes a response with status code 401, with default header values. 
+ +Unauthorized or invalid credentials. +*/ +type AliasesUpdateUnauthorized struct { +} + +// IsSuccess returns true when this aliases update unauthorized response has a 2xx status code +func (o *AliasesUpdateUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this aliases update unauthorized response has a 3xx status code +func (o *AliasesUpdateUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this aliases update unauthorized response has a 4xx status code +func (o *AliasesUpdateUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this aliases update unauthorized response has a 5xx status code +func (o *AliasesUpdateUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this aliases update unauthorized response a status code equal to that given +func (o *AliasesUpdateUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the aliases update unauthorized response +func (o *AliasesUpdateUnauthorized) Code() int { + return 401 +} + +func (o *AliasesUpdateUnauthorized) Error() string { + return fmt.Sprintf("[PUT /aliases/{aliasName}][%d] aliasesUpdateUnauthorized ", 401) +} + +func (o *AliasesUpdateUnauthorized) String() string { + return fmt.Sprintf("[PUT /aliases/{aliasName}][%d] aliasesUpdateUnauthorized ", 401) +} + +func (o *AliasesUpdateUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewAliasesUpdateForbidden creates a AliasesUpdateForbidden with default headers values +func NewAliasesUpdateForbidden() *AliasesUpdateForbidden { + return &AliasesUpdateForbidden{} +} + +/* +AliasesUpdateForbidden describes a response with status code 403, with default header values. 
+ +Forbidden +*/ +type AliasesUpdateForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this aliases update forbidden response has a 2xx status code +func (o *AliasesUpdateForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this aliases update forbidden response has a 3xx status code +func (o *AliasesUpdateForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this aliases update forbidden response has a 4xx status code +func (o *AliasesUpdateForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this aliases update forbidden response has a 5xx status code +func (o *AliasesUpdateForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this aliases update forbidden response a status code equal to that given +func (o *AliasesUpdateForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the aliases update forbidden response +func (o *AliasesUpdateForbidden) Code() int { + return 403 +} + +func (o *AliasesUpdateForbidden) Error() string { + return fmt.Sprintf("[PUT /aliases/{aliasName}][%d] aliasesUpdateForbidden %+v", 403, o.Payload) +} + +func (o *AliasesUpdateForbidden) String() string { + return fmt.Sprintf("[PUT /aliases/{aliasName}][%d] aliasesUpdateForbidden %+v", 403, o.Payload) +} + +func (o *AliasesUpdateForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *AliasesUpdateForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewAliasesUpdateNotFound creates a AliasesUpdateNotFound with default headers values +func NewAliasesUpdateNotFound() *AliasesUpdateNotFound { + return 
&AliasesUpdateNotFound{} +} + +/* +AliasesUpdateNotFound describes a response with status code 404, with default header values. + +Not Found - Alias does not exist +*/ +type AliasesUpdateNotFound struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this aliases update not found response has a 2xx status code +func (o *AliasesUpdateNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this aliases update not found response has a 3xx status code +func (o *AliasesUpdateNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this aliases update not found response has a 4xx status code +func (o *AliasesUpdateNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this aliases update not found response has a 5xx status code +func (o *AliasesUpdateNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this aliases update not found response a status code equal to that given +func (o *AliasesUpdateNotFound) IsCode(code int) bool { + return code == 404 +} + +// Code gets the status code for the aliases update not found response +func (o *AliasesUpdateNotFound) Code() int { + return 404 +} + +func (o *AliasesUpdateNotFound) Error() string { + return fmt.Sprintf("[PUT /aliases/{aliasName}][%d] aliasesUpdateNotFound %+v", 404, o.Payload) +} + +func (o *AliasesUpdateNotFound) String() string { + return fmt.Sprintf("[PUT /aliases/{aliasName}][%d] aliasesUpdateNotFound %+v", 404, o.Payload) +} + +func (o *AliasesUpdateNotFound) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *AliasesUpdateNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// 
NewAliasesUpdateUnprocessableEntity creates a AliasesUpdateUnprocessableEntity with default headers values +func NewAliasesUpdateUnprocessableEntity() *AliasesUpdateUnprocessableEntity { + return &AliasesUpdateUnprocessableEntity{} +} + +/* +AliasesUpdateUnprocessableEntity describes a response with status code 422, with default header values. + +Invalid update alias request. +*/ +type AliasesUpdateUnprocessableEntity struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this aliases update unprocessable entity response has a 2xx status code +func (o *AliasesUpdateUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this aliases update unprocessable entity response has a 3xx status code +func (o *AliasesUpdateUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this aliases update unprocessable entity response has a 4xx status code +func (o *AliasesUpdateUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this aliases update unprocessable entity response has a 5xx status code +func (o *AliasesUpdateUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this aliases update unprocessable entity response a status code equal to that given +func (o *AliasesUpdateUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + +// Code gets the status code for the aliases update unprocessable entity response +func (o *AliasesUpdateUnprocessableEntity) Code() int { + return 422 +} + +func (o *AliasesUpdateUnprocessableEntity) Error() string { + return fmt.Sprintf("[PUT /aliases/{aliasName}][%d] aliasesUpdateUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *AliasesUpdateUnprocessableEntity) String() string { + return fmt.Sprintf("[PUT /aliases/{aliasName}][%d] aliasesUpdateUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *AliasesUpdateUnprocessableEntity) 
GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *AliasesUpdateUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewAliasesUpdateInternalServerError creates a AliasesUpdateInternalServerError with default headers values +func NewAliasesUpdateInternalServerError() *AliasesUpdateInternalServerError { + return &AliasesUpdateInternalServerError{} +} + +/* +AliasesUpdateInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. +*/ +type AliasesUpdateInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this aliases update internal server error response has a 2xx status code +func (o *AliasesUpdateInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this aliases update internal server error response has a 3xx status code +func (o *AliasesUpdateInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this aliases update internal server error response has a 4xx status code +func (o *AliasesUpdateInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this aliases update internal server error response has a 5xx status code +func (o *AliasesUpdateInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this aliases update internal server error response a status code equal to that given +func (o *AliasesUpdateInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the 
aliases update internal server error response +func (o *AliasesUpdateInternalServerError) Code() int { + return 500 +} + +func (o *AliasesUpdateInternalServerError) Error() string { + return fmt.Sprintf("[PUT /aliases/{aliasName}][%d] aliasesUpdateInternalServerError %+v", 500, o.Payload) +} + +func (o *AliasesUpdateInternalServerError) String() string { + return fmt.Sprintf("[PUT /aliases/{aliasName}][%d] aliasesUpdateInternalServerError %+v", 500, o.Payload) +} + +func (o *AliasesUpdateInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *AliasesUpdateInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +/* +AliasesUpdateBody aliases update body +swagger:model AliasesUpdateBody +*/ +type AliasesUpdateBody struct { + + // The new collection (class) that the alias should point to. 
+ Class string `json:"class,omitempty"` +} + +// Validate validates this aliases update body +func (o *AliasesUpdateBody) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this aliases update body based on context it is used +func (o *AliasesUpdateBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (o *AliasesUpdateBody) MarshalBinary() ([]byte, error) { + if o == nil { + return nil, nil + } + return swag.WriteJSON(o) +} + +// UnmarshalBinary interface implementation +func (o *AliasesUpdateBody) UnmarshalBinary(b []byte) error { + var res AliasesUpdateBody + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *o = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/schema/schema_client.go b/platform/dbops/binaries/weaviate-src/client/schema/schema_client.go new file mode 100644 index 0000000000000000000000000000000000000000..010dc3ec580a1c756aa083a2610ef2f1409a5883 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/schema/schema_client.go @@ -0,0 +1,861 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" +) + +// New creates a new schema API client. 
+func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService { + return &Client{transport: transport, formats: formats} +} + +/* +Client for schema API +*/ +type Client struct { + transport runtime.ClientTransport + formats strfmt.Registry +} + +// ClientOption is the option for Client methods +type ClientOption func(*runtime.ClientOperation) + +// ClientService is the interface for Client methods +type ClientService interface { + AliasesCreate(params *AliasesCreateParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*AliasesCreateOK, error) + + AliasesDelete(params *AliasesDeleteParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*AliasesDeleteNoContent, error) + + AliasesGet(params *AliasesGetParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*AliasesGetOK, error) + + AliasesGetAlias(params *AliasesGetAliasParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*AliasesGetAliasOK, error) + + AliasesUpdate(params *AliasesUpdateParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*AliasesUpdateOK, error) + + SchemaDump(params *SchemaDumpParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*SchemaDumpOK, error) + + SchemaObjectsCreate(params *SchemaObjectsCreateParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*SchemaObjectsCreateOK, error) + + SchemaObjectsDelete(params *SchemaObjectsDeleteParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*SchemaObjectsDeleteOK, error) + + SchemaObjectsGet(params *SchemaObjectsGetParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*SchemaObjectsGetOK, error) + + SchemaObjectsPropertiesAdd(params *SchemaObjectsPropertiesAddParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*SchemaObjectsPropertiesAddOK, error) + + SchemaObjectsShardsGet(params *SchemaObjectsShardsGetParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) 
(*SchemaObjectsShardsGetOK, error) + + SchemaObjectsShardsUpdate(params *SchemaObjectsShardsUpdateParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*SchemaObjectsShardsUpdateOK, error) + + SchemaObjectsUpdate(params *SchemaObjectsUpdateParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*SchemaObjectsUpdateOK, error) + + TenantExists(params *TenantExistsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*TenantExistsOK, error) + + TenantsCreate(params *TenantsCreateParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*TenantsCreateOK, error) + + TenantsDelete(params *TenantsDeleteParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*TenantsDeleteOK, error) + + TenantsGet(params *TenantsGetParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*TenantsGetOK, error) + + TenantsGetOne(params *TenantsGetOneParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*TenantsGetOneOK, error) + + TenantsUpdate(params *TenantsUpdateParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*TenantsUpdateOK, error) + + SetTransport(transport runtime.ClientTransport) +} + +/* +AliasesCreate creates a new alias + +Create a new alias mapping between an alias name and a collection (class). The alias acts as an alternative name for accessing the collection. 
+*/
+func (a *Client) AliasesCreate(params *AliasesCreateParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*AliasesCreateOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewAliasesCreateParams()
+	}
+	// Operation descriptor consumed by the go-openapi runtime transport.
+	op := &runtime.ClientOperation{
+		ID:                 "aliases.create",
+		Method:             "POST",
+		PathPattern:        "/aliases",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json", "application/yaml"},
+		Schemes:            []string{"https"},
+		Params:             params,
+		Reader:             &AliasesCreateReader{formats: a.formats},
+		AuthInfo:           authInfo,
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	}
+	// Apply caller-supplied per-operation overrides.
+	for _, opt := range opts {
+		opt(op)
+	}
+
+	result, err := a.transport.Submit(op)
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*AliasesCreateOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+	msg := fmt.Sprintf("unexpected success response for aliases.create: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+	panic(msg)
+}
+
+/*
+AliasesDelete deletes an alias
+
+Remove an existing alias from the system. This will delete the alias mapping but will not affect the underlying collection (class). 
+*/
+func (a *Client) AliasesDelete(params *AliasesDeleteParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*AliasesDeleteNoContent, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewAliasesDeleteParams()
+	}
+	// Operation descriptor consumed by the go-openapi runtime transport.
+	op := &runtime.ClientOperation{
+		ID:                 "aliases.delete",
+		Method:             "DELETE",
+		PathPattern:        "/aliases/{aliasName}",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json", "application/yaml"},
+		Schemes:            []string{"https"},
+		Params:             params,
+		Reader:             &AliasesDeleteReader{formats: a.formats},
+		AuthInfo:           authInfo,
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	}
+	// Apply caller-supplied per-operation overrides.
+	for _, opt := range opts {
+		opt(op)
+	}
+
+	result, err := a.transport.Submit(op)
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*AliasesDeleteNoContent)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+	msg := fmt.Sprintf("unexpected success response for aliases.delete: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+	panic(msg)
+}
+
+/*
+AliasesGet lists aliases
+
+Retrieve a list of all aliases in the system. Results can be filtered by specifying a collection (class) name to get aliases for a specific collection only. 
+*/
+func (a *Client) AliasesGet(params *AliasesGetParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*AliasesGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewAliasesGetParams()
+	}
+	// Operation descriptor consumed by the go-openapi runtime transport.
+	op := &runtime.ClientOperation{
+		ID:                 "aliases.get",
+		Method:             "GET",
+		PathPattern:        "/aliases",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json", "application/yaml"},
+		Schemes:            []string{"https"},
+		Params:             params,
+		Reader:             &AliasesGetReader{formats: a.formats},
+		AuthInfo:           authInfo,
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	}
+	// Apply caller-supplied per-operation overrides.
+	for _, opt := range opts {
+		opt(op)
+	}
+
+	result, err := a.transport.Submit(op)
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*AliasesGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+	msg := fmt.Sprintf("unexpected success response for aliases.get: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+	panic(msg)
+}
+
+/*
+AliasesGetAlias gets an alias
+
+Retrieve details about a specific alias by its name, including which collection (class) it points to. 
+*/
+func (a *Client) AliasesGetAlias(params *AliasesGetAliasParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*AliasesGetAliasOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewAliasesGetAliasParams()
+	}
+	// Operation descriptor consumed by the go-openapi runtime transport.
+	op := &runtime.ClientOperation{
+		ID:                 "aliases.get.alias",
+		Method:             "GET",
+		PathPattern:        "/aliases/{aliasName}",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json", "application/yaml"},
+		Schemes:            []string{"https"},
+		Params:             params,
+		Reader:             &AliasesGetAliasReader{formats: a.formats},
+		AuthInfo:           authInfo,
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	}
+	// Apply caller-supplied per-operation overrides.
+	for _, opt := range opts {
+		opt(op)
+	}
+
+	result, err := a.transport.Submit(op)
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*AliasesGetAliasOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+	msg := fmt.Sprintf("unexpected success response for aliases.get.alias: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+	panic(msg)
+}
+
+/*
+AliasesUpdate updates an alias
+
+Update an existing alias to point to a different collection (class). This allows you to redirect an alias from one collection to another without changing the alias name. 
+*/
+func (a *Client) AliasesUpdate(params *AliasesUpdateParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*AliasesUpdateOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewAliasesUpdateParams()
+	}
+	// Operation descriptor consumed by the go-openapi runtime transport.
+	op := &runtime.ClientOperation{
+		ID:                 "aliases.update",
+		Method:             "PUT",
+		PathPattern:        "/aliases/{aliasName}",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json", "application/yaml"},
+		Schemes:            []string{"https"},
+		Params:             params,
+		Reader:             &AliasesUpdateReader{formats: a.formats},
+		AuthInfo:           authInfo,
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	}
+	// Apply caller-supplied per-operation overrides.
+	for _, opt := range opts {
+		opt(op)
+	}
+
+	result, err := a.transport.Submit(op)
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*AliasesUpdateOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+	msg := fmt.Sprintf("unexpected success response for aliases.update: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+	panic(msg)
+}
+
+/*
+SchemaDump dumps the current the database schema
+
+Fetch an array of all collection definitions from the schema. 
+*/
+func (a *Client) SchemaDump(params *SchemaDumpParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*SchemaDumpOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewSchemaDumpParams()
+	}
+	// Operation descriptor consumed by the go-openapi runtime transport.
+	op := &runtime.ClientOperation{
+		ID:                 "schema.dump",
+		Method:             "GET",
+		PathPattern:        "/schema",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json", "application/yaml"},
+		Schemes:            []string{"https"},
+		Params:             params,
+		Reader:             &SchemaDumpReader{formats: a.formats},
+		AuthInfo:           authInfo,
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	}
+	// Apply caller-supplied per-operation overrides.
+	for _, opt := range opts {
+		opt(op)
+	}
+
+	result, err := a.transport.Submit(op)
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*SchemaDumpOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+	msg := fmt.Sprintf("unexpected success response for schema.dump: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+	panic(msg)
+}
+
+/*
+SchemaObjectsCreate creates a new object class in the schema
+
+Create a new data object collection.

If AutoSchema is enabled, Weaviate will attempt to infer the schema from the data at import time. However, manual schema definition is recommended for production environments. +*/ +func (a *Client) SchemaObjectsCreate(params *SchemaObjectsCreateParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*SchemaObjectsCreateOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewSchemaObjectsCreateParams() + } + op := &runtime.ClientOperation{ + ID: "schema.objects.create", + Method: "POST", + PathPattern: "/schema", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &SchemaObjectsCreateReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*SchemaObjectsCreateOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for schema.objects.create: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +SchemaObjectsDelete removes an object class and all data in the instances from the schema + +Remove a collection from the schema. This will also delete all the objects in the collection. 
+*/ +func (a *Client) SchemaObjectsDelete(params *SchemaObjectsDeleteParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*SchemaObjectsDeleteOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewSchemaObjectsDeleteParams() + } + op := &runtime.ClientOperation{ + ID: "schema.objects.delete", + Method: "DELETE", + PathPattern: "/schema/{className}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &SchemaObjectsDeleteReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*SchemaObjectsDeleteOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for schema.objects.delete: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +SchemaObjectsGet gets a single class from the schema +*/ +func (a *Client) SchemaObjectsGet(params *SchemaObjectsGetParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*SchemaObjectsGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewSchemaObjectsGetParams() + } + op := &runtime.ClientOperation{ + ID: "schema.objects.get", + Method: "GET", + PathPattern: "/schema/{className}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &SchemaObjectsGetReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*SchemaObjectsGetOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for schema.objects.get: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +SchemaObjectsPropertiesAdd adds a property to an object class +*/ +func (a *Client) SchemaObjectsPropertiesAdd(params *SchemaObjectsPropertiesAddParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*SchemaObjectsPropertiesAddOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewSchemaObjectsPropertiesAddParams() + } + op := &runtime.ClientOperation{ + ID: "schema.objects.properties.add", + Method: "POST", + PathPattern: "/schema/{className}/properties", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &SchemaObjectsPropertiesAddReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*SchemaObjectsPropertiesAddOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for schema.objects.properties.add: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +SchemaObjectsShardsGet gets the shards status of an object class + +Get the status of every shard in the cluster. 
+*/
+func (a *Client) SchemaObjectsShardsGet(params *SchemaObjectsShardsGetParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*SchemaObjectsShardsGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewSchemaObjectsShardsGetParams()
+	}
+	// Operation descriptor consumed by the go-openapi runtime transport.
+	op := &runtime.ClientOperation{
+		ID:                 "schema.objects.shards.get",
+		Method:             "GET",
+		PathPattern:        "/schema/{className}/shards",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json", "application/yaml"},
+		Schemes:            []string{"https"},
+		Params:             params,
+		Reader:             &SchemaObjectsShardsGetReader{formats: a.formats},
+		AuthInfo:           authInfo,
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	}
+	// Apply caller-supplied per-operation overrides.
+	for _, opt := range opts {
+		opt(op)
+	}
+
+	result, err := a.transport.Submit(op)
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*SchemaObjectsShardsGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+	msg := fmt.Sprintf("unexpected success response for schema.objects.shards.get: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+	panic(msg)
+}
+
+/*
+SchemaObjectsShardsUpdate updates a shard status
+
+Update a shard status for a collection. For example, a shard may have been marked as `READONLY` because its disk was full. After providing more disk space, use this endpoint to set the shard status to `READY` again. There is also a convenience function in each client to set the status of all shards of a collection. 
+*/
+func (a *Client) SchemaObjectsShardsUpdate(params *SchemaObjectsShardsUpdateParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*SchemaObjectsShardsUpdateOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewSchemaObjectsShardsUpdateParams()
+	}
+	// Operation descriptor consumed by the go-openapi runtime transport.
+	op := &runtime.ClientOperation{
+		ID:                 "schema.objects.shards.update",
+		Method:             "PUT",
+		PathPattern:        "/schema/{className}/shards/{shardName}",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json", "application/yaml"},
+		Schemes:            []string{"https"},
+		Params:             params,
+		Reader:             &SchemaObjectsShardsUpdateReader{formats: a.formats},
+		AuthInfo:           authInfo,
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	}
+	// Apply caller-supplied per-operation overrides.
+	for _, opt := range opts {
+		opt(op)
+	}
+
+	result, err := a.transport.Submit(op)
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*SchemaObjectsShardsUpdateOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+	msg := fmt.Sprintf("unexpected success response for schema.objects.shards.update: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+	panic(msg)
+}
+
+/*
+SchemaObjectsUpdate updates settings of an existing schema class
+
+Add a property to an existing collection. 
+*/ +func (a *Client) SchemaObjectsUpdate(params *SchemaObjectsUpdateParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*SchemaObjectsUpdateOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewSchemaObjectsUpdateParams() + } + op := &runtime.ClientOperation{ + ID: "schema.objects.update", + Method: "PUT", + PathPattern: "/schema/{className}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &SchemaObjectsUpdateReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*SchemaObjectsUpdateOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for schema.objects.update: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +TenantExists checks whether a tenant exists + +Check if a tenant exists for a specific class +*/ +func (a *Client) TenantExists(params *TenantExistsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*TenantExistsOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewTenantExistsParams() + } + op := &runtime.ClientOperation{ + ID: "tenant.exists", + Method: "HEAD", + PathPattern: "/schema/{className}/tenants/{tenantName}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &TenantExistsReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*TenantExistsOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for tenant.exists: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +TenantsCreate creates a new tenant + +Create a new tenant for a collection. Multi-tenancy must be enabled in the collection definition. 
+*/ +func (a *Client) TenantsCreate(params *TenantsCreateParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*TenantsCreateOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewTenantsCreateParams() + } + op := &runtime.ClientOperation{ + ID: "tenants.create", + Method: "POST", + PathPattern: "/schema/{className}/tenants", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &TenantsCreateReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*TenantsCreateOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for tenants.create: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +TenantsDelete delete tenants from a specific class +*/ +func (a *Client) TenantsDelete(params *TenantsDeleteParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*TenantsDeleteOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewTenantsDeleteParams() + } + op := &runtime.ClientOperation{ + ID: "tenants.delete", + Method: "DELETE", + PathPattern: "/schema/{className}/tenants", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &TenantsDeleteReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*TenantsDeleteOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for tenants.delete: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +TenantsGet gets the list of tenants + +get all tenants from a specific class +*/ +func (a *Client) TenantsGet(params *TenantsGetParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*TenantsGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewTenantsGetParams() + } + op := &runtime.ClientOperation{ + ID: "tenants.get", + Method: "GET", + PathPattern: "/schema/{className}/tenants", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &TenantsGetReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*TenantsGetOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for tenants.get: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +TenantsGetOne gets a specific tenant + +get a specific tenant for the given class +*/ +func (a *Client) TenantsGetOne(params *TenantsGetOneParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*TenantsGetOneOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewTenantsGetOneParams() + } + op := &runtime.ClientOperation{ + ID: "tenants.get.one", + Method: "GET", + PathPattern: "/schema/{className}/tenants/{tenantName}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &TenantsGetOneReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*TenantsGetOneOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for tenants.get.one: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +TenantsUpdate updates a tenant + +Update tenant of a specific class +*/ +func (a *Client) TenantsUpdate(params *TenantsUpdateParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*TenantsUpdateOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewTenantsUpdateParams() + } + op := &runtime.ClientOperation{ + ID: "tenants.update", + Method: "PUT", + PathPattern: "/schema/{className}/tenants", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &TenantsUpdateReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*TenantsUpdateOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for tenants.update: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + panic(msg) +} + +// SetTransport changes the transport on the client +func (a *Client) SetTransport(transport runtime.ClientTransport) { + a.transport = transport +} diff --git a/platform/dbops/binaries/weaviate-src/client/schema/schema_dump_parameters.go b/platform/dbops/binaries/weaviate-src/client/schema/schema_dump_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..f15683c57566d7afeebd1c6d8ecb8c2a9a1cfd81 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/schema/schema_dump_parameters.go @@ -0,0 +1,179 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewSchemaDumpParams creates a new SchemaDumpParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewSchemaDumpParams() *SchemaDumpParams { + return &SchemaDumpParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewSchemaDumpParamsWithTimeout creates a new SchemaDumpParams object +// with the ability to set a timeout on a request. 
+func NewSchemaDumpParamsWithTimeout(timeout time.Duration) *SchemaDumpParams { + return &SchemaDumpParams{ + timeout: timeout, + } +} + +// NewSchemaDumpParamsWithContext creates a new SchemaDumpParams object +// with the ability to set a context for a request. +func NewSchemaDumpParamsWithContext(ctx context.Context) *SchemaDumpParams { + return &SchemaDumpParams{ + Context: ctx, + } +} + +// NewSchemaDumpParamsWithHTTPClient creates a new SchemaDumpParams object +// with the ability to set a custom HTTPClient for a request. +func NewSchemaDumpParamsWithHTTPClient(client *http.Client) *SchemaDumpParams { + return &SchemaDumpParams{ + HTTPClient: client, + } +} + +/* +SchemaDumpParams contains all the parameters to send to the API endpoint + + for the schema dump operation. + + Typically these are written to a http.Request. +*/ +type SchemaDumpParams struct { + + /* Consistency. + + If consistency is true, the request will be proxied to the leader to ensure strong schema consistency + + Default: true + */ + Consistency *bool + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the schema dump params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *SchemaDumpParams) WithDefaults() *SchemaDumpParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the schema dump params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *SchemaDumpParams) SetDefaults() { + var ( + consistencyDefault = bool(true) + ) + + val := SchemaDumpParams{ + Consistency: &consistencyDefault, + } + + val.timeout = o.timeout + val.Context = o.Context + val.HTTPClient = o.HTTPClient + *o = val +} + +// WithTimeout adds the timeout to the schema dump params +func (o *SchemaDumpParams) WithTimeout(timeout time.Duration) *SchemaDumpParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the schema dump params +func (o *SchemaDumpParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the schema dump params +func (o *SchemaDumpParams) WithContext(ctx context.Context) *SchemaDumpParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the schema dump params +func (o *SchemaDumpParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the schema dump params +func (o *SchemaDumpParams) WithHTTPClient(client *http.Client) *SchemaDumpParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the schema dump params +func (o *SchemaDumpParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithConsistency adds the consistency to the schema dump params +func (o *SchemaDumpParams) WithConsistency(consistency *bool) *SchemaDumpParams { + o.SetConsistency(consistency) + return o +} + +// SetConsistency adds the consistency to the schema dump params +func (o *SchemaDumpParams) SetConsistency(consistency *bool) { + o.Consistency = consistency +} + +// WriteToRequest writes these params to a swagger request +func (o *SchemaDumpParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Consistency != nil { + + // header param consistency + if err := r.SetHeaderParam("consistency", 
swag.FormatBool(*o.Consistency)); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/schema/schema_dump_responses.go b/platform/dbops/binaries/weaviate-src/client/schema/schema_dump_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..adc84d36360328ef2b4c2c647657db7b40390ff3 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/schema/schema_dump_responses.go @@ -0,0 +1,324 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// SchemaDumpReader is a Reader for the SchemaDump structure. +type SchemaDumpReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *SchemaDumpReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewSchemaDumpOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 401: + result := NewSchemaDumpUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewSchemaDumpForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewSchemaDumpInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewSchemaDumpOK creates a SchemaDumpOK with default headers values +func NewSchemaDumpOK() *SchemaDumpOK { + return &SchemaDumpOK{} +} + +/* +SchemaDumpOK describes a response with status code 200, with default header values. + +Successfully dumped the database schema. 
+*/ +type SchemaDumpOK struct { + Payload *models.Schema +} + +// IsSuccess returns true when this schema dump o k response has a 2xx status code +func (o *SchemaDumpOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this schema dump o k response has a 3xx status code +func (o *SchemaDumpOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this schema dump o k response has a 4xx status code +func (o *SchemaDumpOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this schema dump o k response has a 5xx status code +func (o *SchemaDumpOK) IsServerError() bool { + return false +} + +// IsCode returns true when this schema dump o k response a status code equal to that given +func (o *SchemaDumpOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the schema dump o k response +func (o *SchemaDumpOK) Code() int { + return 200 +} + +func (o *SchemaDumpOK) Error() string { + return fmt.Sprintf("[GET /schema][%d] schemaDumpOK %+v", 200, o.Payload) +} + +func (o *SchemaDumpOK) String() string { + return fmt.Sprintf("[GET /schema][%d] schemaDumpOK %+v", 200, o.Payload) +} + +func (o *SchemaDumpOK) GetPayload() *models.Schema { + return o.Payload +} + +func (o *SchemaDumpOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Schema) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewSchemaDumpUnauthorized creates a SchemaDumpUnauthorized with default headers values +func NewSchemaDumpUnauthorized() *SchemaDumpUnauthorized { + return &SchemaDumpUnauthorized{} +} + +/* +SchemaDumpUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. 
+*/ +type SchemaDumpUnauthorized struct { +} + +// IsSuccess returns true when this schema dump unauthorized response has a 2xx status code +func (o *SchemaDumpUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this schema dump unauthorized response has a 3xx status code +func (o *SchemaDumpUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this schema dump unauthorized response has a 4xx status code +func (o *SchemaDumpUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this schema dump unauthorized response has a 5xx status code +func (o *SchemaDumpUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this schema dump unauthorized response a status code equal to that given +func (o *SchemaDumpUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the schema dump unauthorized response +func (o *SchemaDumpUnauthorized) Code() int { + return 401 +} + +func (o *SchemaDumpUnauthorized) Error() string { + return fmt.Sprintf("[GET /schema][%d] schemaDumpUnauthorized ", 401) +} + +func (o *SchemaDumpUnauthorized) String() string { + return fmt.Sprintf("[GET /schema][%d] schemaDumpUnauthorized ", 401) +} + +func (o *SchemaDumpUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewSchemaDumpForbidden creates a SchemaDumpForbidden with default headers values +func NewSchemaDumpForbidden() *SchemaDumpForbidden { + return &SchemaDumpForbidden{} +} + +/* +SchemaDumpForbidden describes a response with status code 403, with default header values. 
+ +Forbidden +*/ +type SchemaDumpForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this schema dump forbidden response has a 2xx status code +func (o *SchemaDumpForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this schema dump forbidden response has a 3xx status code +func (o *SchemaDumpForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this schema dump forbidden response has a 4xx status code +func (o *SchemaDumpForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this schema dump forbidden response has a 5xx status code +func (o *SchemaDumpForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this schema dump forbidden response a status code equal to that given +func (o *SchemaDumpForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the schema dump forbidden response +func (o *SchemaDumpForbidden) Code() int { + return 403 +} + +func (o *SchemaDumpForbidden) Error() string { + return fmt.Sprintf("[GET /schema][%d] schemaDumpForbidden %+v", 403, o.Payload) +} + +func (o *SchemaDumpForbidden) String() string { + return fmt.Sprintf("[GET /schema][%d] schemaDumpForbidden %+v", 403, o.Payload) +} + +func (o *SchemaDumpForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *SchemaDumpForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewSchemaDumpInternalServerError creates a SchemaDumpInternalServerError with default headers values +func NewSchemaDumpInternalServerError() *SchemaDumpInternalServerError { + return &SchemaDumpInternalServerError{} +} + +/* 
+SchemaDumpInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. +*/ +type SchemaDumpInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this schema dump internal server error response has a 2xx status code +func (o *SchemaDumpInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this schema dump internal server error response has a 3xx status code +func (o *SchemaDumpInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this schema dump internal server error response has a 4xx status code +func (o *SchemaDumpInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this schema dump internal server error response has a 5xx status code +func (o *SchemaDumpInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this schema dump internal server error response a status code equal to that given +func (o *SchemaDumpInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the schema dump internal server error response +func (o *SchemaDumpInternalServerError) Code() int { + return 500 +} + +func (o *SchemaDumpInternalServerError) Error() string { + return fmt.Sprintf("[GET /schema][%d] schemaDumpInternalServerError %+v", 500, o.Payload) +} + +func (o *SchemaDumpInternalServerError) String() string { + return fmt.Sprintf("[GET /schema][%d] schemaDumpInternalServerError %+v", 500, o.Payload) +} + +func (o *SchemaDumpInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *SchemaDumpInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + 
o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/schema/schema_objects_create_parameters.go b/platform/dbops/binaries/weaviate-src/client/schema/schema_objects_create_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..5d0a264e04c8fb76d402ebf258f2e6b3e698e1f0 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/schema/schema_objects_create_parameters.go @@ -0,0 +1,161 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// NewSchemaObjectsCreateParams creates a new SchemaObjectsCreateParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewSchemaObjectsCreateParams() *SchemaObjectsCreateParams { + return &SchemaObjectsCreateParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewSchemaObjectsCreateParamsWithTimeout creates a new SchemaObjectsCreateParams object +// with the ability to set a timeout on a request. 
+func NewSchemaObjectsCreateParamsWithTimeout(timeout time.Duration) *SchemaObjectsCreateParams { + return &SchemaObjectsCreateParams{ + timeout: timeout, + } +} + +// NewSchemaObjectsCreateParamsWithContext creates a new SchemaObjectsCreateParams object +// with the ability to set a context for a request. +func NewSchemaObjectsCreateParamsWithContext(ctx context.Context) *SchemaObjectsCreateParams { + return &SchemaObjectsCreateParams{ + Context: ctx, + } +} + +// NewSchemaObjectsCreateParamsWithHTTPClient creates a new SchemaObjectsCreateParams object +// with the ability to set a custom HTTPClient for a request. +func NewSchemaObjectsCreateParamsWithHTTPClient(client *http.Client) *SchemaObjectsCreateParams { + return &SchemaObjectsCreateParams{ + HTTPClient: client, + } +} + +/* +SchemaObjectsCreateParams contains all the parameters to send to the API endpoint + + for the schema objects create operation. + + Typically these are written to a http.Request. +*/ +type SchemaObjectsCreateParams struct { + + // ObjectClass. + ObjectClass *models.Class + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the schema objects create params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *SchemaObjectsCreateParams) WithDefaults() *SchemaObjectsCreateParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the schema objects create params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *SchemaObjectsCreateParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the schema objects create params +func (o *SchemaObjectsCreateParams) WithTimeout(timeout time.Duration) *SchemaObjectsCreateParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the schema objects create params +func (o *SchemaObjectsCreateParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the schema objects create params +func (o *SchemaObjectsCreateParams) WithContext(ctx context.Context) *SchemaObjectsCreateParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the schema objects create params +func (o *SchemaObjectsCreateParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the schema objects create params +func (o *SchemaObjectsCreateParams) WithHTTPClient(client *http.Client) *SchemaObjectsCreateParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the schema objects create params +func (o *SchemaObjectsCreateParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithObjectClass adds the objectClass to the schema objects create params +func (o *SchemaObjectsCreateParams) WithObjectClass(objectClass *models.Class) *SchemaObjectsCreateParams { + o.SetObjectClass(objectClass) + return o +} + +// SetObjectClass adds the objectClass to the schema objects create params +func (o *SchemaObjectsCreateParams) SetObjectClass(objectClass *models.Class) { + o.ObjectClass = objectClass +} + +// WriteToRequest writes these params to a swagger request +func (o *SchemaObjectsCreateParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if o.ObjectClass != nil { + if err := r.SetBodyParam(o.ObjectClass); 
err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/schema/schema_objects_create_responses.go b/platform/dbops/binaries/weaviate-src/client/schema/schema_objects_create_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..9518d56096f5ed44abbcd0768496dde09a65681e --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/schema/schema_objects_create_responses.go @@ -0,0 +1,398 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// SchemaObjectsCreateReader is a Reader for the SchemaObjectsCreate structure. +type SchemaObjectsCreateReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *SchemaObjectsCreateReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewSchemaObjectsCreateOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 401: + result := NewSchemaObjectsCreateUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewSchemaObjectsCreateForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewSchemaObjectsCreateUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewSchemaObjectsCreateInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewSchemaObjectsCreateOK creates a SchemaObjectsCreateOK with default headers values +func NewSchemaObjectsCreateOK() *SchemaObjectsCreateOK { + return &SchemaObjectsCreateOK{} +} + +/* +SchemaObjectsCreateOK describes a response with status code 200, with default header values. + +Added the new Object class to the schema. 
+*/ +type SchemaObjectsCreateOK struct { + Payload *models.Class +} + +// IsSuccess returns true when this schema objects create o k response has a 2xx status code +func (o *SchemaObjectsCreateOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this schema objects create o k response has a 3xx status code +func (o *SchemaObjectsCreateOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this schema objects create o k response has a 4xx status code +func (o *SchemaObjectsCreateOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this schema objects create o k response has a 5xx status code +func (o *SchemaObjectsCreateOK) IsServerError() bool { + return false +} + +// IsCode returns true when this schema objects create o k response a status code equal to that given +func (o *SchemaObjectsCreateOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the schema objects create o k response +func (o *SchemaObjectsCreateOK) Code() int { + return 200 +} + +func (o *SchemaObjectsCreateOK) Error() string { + return fmt.Sprintf("[POST /schema][%d] schemaObjectsCreateOK %+v", 200, o.Payload) +} + +func (o *SchemaObjectsCreateOK) String() string { + return fmt.Sprintf("[POST /schema][%d] schemaObjectsCreateOK %+v", 200, o.Payload) +} + +func (o *SchemaObjectsCreateOK) GetPayload() *models.Class { + return o.Payload +} + +func (o *SchemaObjectsCreateOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Class) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewSchemaObjectsCreateUnauthorized creates a SchemaObjectsCreateUnauthorized with default headers values +func NewSchemaObjectsCreateUnauthorized() *SchemaObjectsCreateUnauthorized { + return &SchemaObjectsCreateUnauthorized{} +} + 
+/* +SchemaObjectsCreateUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. +*/ +type SchemaObjectsCreateUnauthorized struct { +} + +// IsSuccess returns true when this schema objects create unauthorized response has a 2xx status code +func (o *SchemaObjectsCreateUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this schema objects create unauthorized response has a 3xx status code +func (o *SchemaObjectsCreateUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this schema objects create unauthorized response has a 4xx status code +func (o *SchemaObjectsCreateUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this schema objects create unauthorized response has a 5xx status code +func (o *SchemaObjectsCreateUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this schema objects create unauthorized response a status code equal to that given +func (o *SchemaObjectsCreateUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the schema objects create unauthorized response +func (o *SchemaObjectsCreateUnauthorized) Code() int { + return 401 +} + +func (o *SchemaObjectsCreateUnauthorized) Error() string { + return fmt.Sprintf("[POST /schema][%d] schemaObjectsCreateUnauthorized ", 401) +} + +func (o *SchemaObjectsCreateUnauthorized) String() string { + return fmt.Sprintf("[POST /schema][%d] schemaObjectsCreateUnauthorized ", 401) +} + +func (o *SchemaObjectsCreateUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewSchemaObjectsCreateForbidden creates a SchemaObjectsCreateForbidden with default headers values +func NewSchemaObjectsCreateForbidden() *SchemaObjectsCreateForbidden { + return &SchemaObjectsCreateForbidden{} 
+} + +/* +SchemaObjectsCreateForbidden describes a response with status code 403, with default header values. + +Forbidden +*/ +type SchemaObjectsCreateForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this schema objects create forbidden response has a 2xx status code +func (o *SchemaObjectsCreateForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this schema objects create forbidden response has a 3xx status code +func (o *SchemaObjectsCreateForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this schema objects create forbidden response has a 4xx status code +func (o *SchemaObjectsCreateForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this schema objects create forbidden response has a 5xx status code +func (o *SchemaObjectsCreateForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this schema objects create forbidden response a status code equal to that given +func (o *SchemaObjectsCreateForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the schema objects create forbidden response +func (o *SchemaObjectsCreateForbidden) Code() int { + return 403 +} + +func (o *SchemaObjectsCreateForbidden) Error() string { + return fmt.Sprintf("[POST /schema][%d] schemaObjectsCreateForbidden %+v", 403, o.Payload) +} + +func (o *SchemaObjectsCreateForbidden) String() string { + return fmt.Sprintf("[POST /schema][%d] schemaObjectsCreateForbidden %+v", 403, o.Payload) +} + +func (o *SchemaObjectsCreateForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *SchemaObjectsCreateForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { 
+ return err + } + + return nil +} + +// NewSchemaObjectsCreateUnprocessableEntity creates a SchemaObjectsCreateUnprocessableEntity with default headers values +func NewSchemaObjectsCreateUnprocessableEntity() *SchemaObjectsCreateUnprocessableEntity { + return &SchemaObjectsCreateUnprocessableEntity{} +} + +/* +SchemaObjectsCreateUnprocessableEntity describes a response with status code 422, with default header values. + +Invalid Object class +*/ +type SchemaObjectsCreateUnprocessableEntity struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this schema objects create unprocessable entity response has a 2xx status code +func (o *SchemaObjectsCreateUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this schema objects create unprocessable entity response has a 3xx status code +func (o *SchemaObjectsCreateUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this schema objects create unprocessable entity response has a 4xx status code +func (o *SchemaObjectsCreateUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this schema objects create unprocessable entity response has a 5xx status code +func (o *SchemaObjectsCreateUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this schema objects create unprocessable entity response a status code equal to that given +func (o *SchemaObjectsCreateUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + +// Code gets the status code for the schema objects create unprocessable entity response +func (o *SchemaObjectsCreateUnprocessableEntity) Code() int { + return 422 +} + +func (o *SchemaObjectsCreateUnprocessableEntity) Error() string { + return fmt.Sprintf("[POST /schema][%d] schemaObjectsCreateUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *SchemaObjectsCreateUnprocessableEntity) String() string { + 
return fmt.Sprintf("[POST /schema][%d] schemaObjectsCreateUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *SchemaObjectsCreateUnprocessableEntity) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *SchemaObjectsCreateUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewSchemaObjectsCreateInternalServerError creates a SchemaObjectsCreateInternalServerError with default headers values +func NewSchemaObjectsCreateInternalServerError() *SchemaObjectsCreateInternalServerError { + return &SchemaObjectsCreateInternalServerError{} +} + +/* +SchemaObjectsCreateInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+*/ +type SchemaObjectsCreateInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this schema objects create internal server error response has a 2xx status code +func (o *SchemaObjectsCreateInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this schema objects create internal server error response has a 3xx status code +func (o *SchemaObjectsCreateInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this schema objects create internal server error response has a 4xx status code +func (o *SchemaObjectsCreateInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this schema objects create internal server error response has a 5xx status code +func (o *SchemaObjectsCreateInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this schema objects create internal server error response a status code equal to that given +func (o *SchemaObjectsCreateInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the schema objects create internal server error response +func (o *SchemaObjectsCreateInternalServerError) Code() int { + return 500 +} + +func (o *SchemaObjectsCreateInternalServerError) Error() string { + return fmt.Sprintf("[POST /schema][%d] schemaObjectsCreateInternalServerError %+v", 500, o.Payload) +} + +func (o *SchemaObjectsCreateInternalServerError) String() string { + return fmt.Sprintf("[POST /schema][%d] schemaObjectsCreateInternalServerError %+v", 500, o.Payload) +} + +func (o *SchemaObjectsCreateInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *SchemaObjectsCreateInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err 
:= consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/schema/schema_objects_delete_parameters.go b/platform/dbops/binaries/weaviate-src/client/schema/schema_objects_delete_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..ee2d302ffc5d1ff29694ab7398328a715a215fd1 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/schema/schema_objects_delete_parameters.go @@ -0,0 +1,159 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewSchemaObjectsDeleteParams creates a new SchemaObjectsDeleteParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewSchemaObjectsDeleteParams() *SchemaObjectsDeleteParams { + return &SchemaObjectsDeleteParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewSchemaObjectsDeleteParamsWithTimeout creates a new SchemaObjectsDeleteParams object +// with the ability to set a timeout on a request. 
+func NewSchemaObjectsDeleteParamsWithTimeout(timeout time.Duration) *SchemaObjectsDeleteParams { + return &SchemaObjectsDeleteParams{ + timeout: timeout, + } +} + +// NewSchemaObjectsDeleteParamsWithContext creates a new SchemaObjectsDeleteParams object +// with the ability to set a context for a request. +func NewSchemaObjectsDeleteParamsWithContext(ctx context.Context) *SchemaObjectsDeleteParams { + return &SchemaObjectsDeleteParams{ + Context: ctx, + } +} + +// NewSchemaObjectsDeleteParamsWithHTTPClient creates a new SchemaObjectsDeleteParams object +// with the ability to set a custom HTTPClient for a request. +func NewSchemaObjectsDeleteParamsWithHTTPClient(client *http.Client) *SchemaObjectsDeleteParams { + return &SchemaObjectsDeleteParams{ + HTTPClient: client, + } +} + +/* +SchemaObjectsDeleteParams contains all the parameters to send to the API endpoint + + for the schema objects delete operation. + + Typically these are written to a http.Request. +*/ +type SchemaObjectsDeleteParams struct { + + // ClassName. + ClassName string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the schema objects delete params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *SchemaObjectsDeleteParams) WithDefaults() *SchemaObjectsDeleteParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the schema objects delete params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *SchemaObjectsDeleteParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the schema objects delete params +func (o *SchemaObjectsDeleteParams) WithTimeout(timeout time.Duration) *SchemaObjectsDeleteParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the schema objects delete params +func (o *SchemaObjectsDeleteParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the schema objects delete params +func (o *SchemaObjectsDeleteParams) WithContext(ctx context.Context) *SchemaObjectsDeleteParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the schema objects delete params +func (o *SchemaObjectsDeleteParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the schema objects delete params +func (o *SchemaObjectsDeleteParams) WithHTTPClient(client *http.Client) *SchemaObjectsDeleteParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the schema objects delete params +func (o *SchemaObjectsDeleteParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithClassName adds the className to the schema objects delete params +func (o *SchemaObjectsDeleteParams) WithClassName(className string) *SchemaObjectsDeleteParams { + o.SetClassName(className) + return o +} + +// SetClassName adds the className to the schema objects delete params +func (o *SchemaObjectsDeleteParams) SetClassName(className string) { + o.ClassName = className +} + +// WriteToRequest writes these params to a swagger request +func (o *SchemaObjectsDeleteParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param className + if err := r.SetPathParam("className", o.ClassName); err != nil { + return err + 
} + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/schema/schema_objects_delete_responses.go b/platform/dbops/binaries/weaviate-src/client/schema/schema_objects_delete_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..ac341da1d83566626d601ed4ddcaa22d7e3ea343 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/schema/schema_objects_delete_responses.go @@ -0,0 +1,386 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// SchemaObjectsDeleteReader is a Reader for the SchemaObjectsDelete structure. +type SchemaObjectsDeleteReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *SchemaObjectsDeleteReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewSchemaObjectsDeleteOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewSchemaObjectsDeleteBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 401: + result := NewSchemaObjectsDeleteUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewSchemaObjectsDeleteForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewSchemaObjectsDeleteInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewSchemaObjectsDeleteOK creates a SchemaObjectsDeleteOK with default headers values +func NewSchemaObjectsDeleteOK() *SchemaObjectsDeleteOK { + return &SchemaObjectsDeleteOK{} +} + +/* +SchemaObjectsDeleteOK describes a response with status code 200, with default header values. + +Removed the Object class from the schema. 
+*/ +type SchemaObjectsDeleteOK struct { +} + +// IsSuccess returns true when this schema objects delete o k response has a 2xx status code +func (o *SchemaObjectsDeleteOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this schema objects delete o k response has a 3xx status code +func (o *SchemaObjectsDeleteOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this schema objects delete o k response has a 4xx status code +func (o *SchemaObjectsDeleteOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this schema objects delete o k response has a 5xx status code +func (o *SchemaObjectsDeleteOK) IsServerError() bool { + return false +} + +// IsCode returns true when this schema objects delete o k response a status code equal to that given +func (o *SchemaObjectsDeleteOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the schema objects delete o k response +func (o *SchemaObjectsDeleteOK) Code() int { + return 200 +} + +func (o *SchemaObjectsDeleteOK) Error() string { + return fmt.Sprintf("[DELETE /schema/{className}][%d] schemaObjectsDeleteOK ", 200) +} + +func (o *SchemaObjectsDeleteOK) String() string { + return fmt.Sprintf("[DELETE /schema/{className}][%d] schemaObjectsDeleteOK ", 200) +} + +func (o *SchemaObjectsDeleteOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewSchemaObjectsDeleteBadRequest creates a SchemaObjectsDeleteBadRequest with default headers values +func NewSchemaObjectsDeleteBadRequest() *SchemaObjectsDeleteBadRequest { + return &SchemaObjectsDeleteBadRequest{} +} + +/* +SchemaObjectsDeleteBadRequest describes a response with status code 400, with default header values. + +Could not delete the Object class. 
+*/ +type SchemaObjectsDeleteBadRequest struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this schema objects delete bad request response has a 2xx status code +func (o *SchemaObjectsDeleteBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this schema objects delete bad request response has a 3xx status code +func (o *SchemaObjectsDeleteBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this schema objects delete bad request response has a 4xx status code +func (o *SchemaObjectsDeleteBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this schema objects delete bad request response has a 5xx status code +func (o *SchemaObjectsDeleteBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this schema objects delete bad request response a status code equal to that given +func (o *SchemaObjectsDeleteBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the schema objects delete bad request response +func (o *SchemaObjectsDeleteBadRequest) Code() int { + return 400 +} + +func (o *SchemaObjectsDeleteBadRequest) Error() string { + return fmt.Sprintf("[DELETE /schema/{className}][%d] schemaObjectsDeleteBadRequest %+v", 400, o.Payload) +} + +func (o *SchemaObjectsDeleteBadRequest) String() string { + return fmt.Sprintf("[DELETE /schema/{className}][%d] schemaObjectsDeleteBadRequest %+v", 400, o.Payload) +} + +func (o *SchemaObjectsDeleteBadRequest) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *SchemaObjectsDeleteBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// 
NewSchemaObjectsDeleteUnauthorized creates a SchemaObjectsDeleteUnauthorized with default headers values +func NewSchemaObjectsDeleteUnauthorized() *SchemaObjectsDeleteUnauthorized { + return &SchemaObjectsDeleteUnauthorized{} +} + +/* +SchemaObjectsDeleteUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. +*/ +type SchemaObjectsDeleteUnauthorized struct { +} + +// IsSuccess returns true when this schema objects delete unauthorized response has a 2xx status code +func (o *SchemaObjectsDeleteUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this schema objects delete unauthorized response has a 3xx status code +func (o *SchemaObjectsDeleteUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this schema objects delete unauthorized response has a 4xx status code +func (o *SchemaObjectsDeleteUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this schema objects delete unauthorized response has a 5xx status code +func (o *SchemaObjectsDeleteUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this schema objects delete unauthorized response a status code equal to that given +func (o *SchemaObjectsDeleteUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the schema objects delete unauthorized response +func (o *SchemaObjectsDeleteUnauthorized) Code() int { + return 401 +} + +func (o *SchemaObjectsDeleteUnauthorized) Error() string { + return fmt.Sprintf("[DELETE /schema/{className}][%d] schemaObjectsDeleteUnauthorized ", 401) +} + +func (o *SchemaObjectsDeleteUnauthorized) String() string { + return fmt.Sprintf("[DELETE /schema/{className}][%d] schemaObjectsDeleteUnauthorized ", 401) +} + +func (o *SchemaObjectsDeleteUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats 
strfmt.Registry) error { + + return nil +} + +// NewSchemaObjectsDeleteForbidden creates a SchemaObjectsDeleteForbidden with default headers values +func NewSchemaObjectsDeleteForbidden() *SchemaObjectsDeleteForbidden { + return &SchemaObjectsDeleteForbidden{} +} + +/* +SchemaObjectsDeleteForbidden describes a response with status code 403, with default header values. + +Forbidden +*/ +type SchemaObjectsDeleteForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this schema objects delete forbidden response has a 2xx status code +func (o *SchemaObjectsDeleteForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this schema objects delete forbidden response has a 3xx status code +func (o *SchemaObjectsDeleteForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this schema objects delete forbidden response has a 4xx status code +func (o *SchemaObjectsDeleteForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this schema objects delete forbidden response has a 5xx status code +func (o *SchemaObjectsDeleteForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this schema objects delete forbidden response a status code equal to that given +func (o *SchemaObjectsDeleteForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the schema objects delete forbidden response +func (o *SchemaObjectsDeleteForbidden) Code() int { + return 403 +} + +func (o *SchemaObjectsDeleteForbidden) Error() string { + return fmt.Sprintf("[DELETE /schema/{className}][%d] schemaObjectsDeleteForbidden %+v", 403, o.Payload) +} + +func (o *SchemaObjectsDeleteForbidden) String() string { + return fmt.Sprintf("[DELETE /schema/{className}][%d] schemaObjectsDeleteForbidden %+v", 403, o.Payload) +} + +func (o *SchemaObjectsDeleteForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o 
*SchemaObjectsDeleteForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewSchemaObjectsDeleteInternalServerError creates a SchemaObjectsDeleteInternalServerError with default headers values +func NewSchemaObjectsDeleteInternalServerError() *SchemaObjectsDeleteInternalServerError { + return &SchemaObjectsDeleteInternalServerError{} +} + +/* +SchemaObjectsDeleteInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. +*/ +type SchemaObjectsDeleteInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this schema objects delete internal server error response has a 2xx status code +func (o *SchemaObjectsDeleteInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this schema objects delete internal server error response has a 3xx status code +func (o *SchemaObjectsDeleteInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this schema objects delete internal server error response has a 4xx status code +func (o *SchemaObjectsDeleteInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this schema objects delete internal server error response has a 5xx status code +func (o *SchemaObjectsDeleteInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this schema objects delete internal server error response a status code equal to that given +func (o *SchemaObjectsDeleteInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code 
gets the status code for the schema objects delete internal server error response +func (o *SchemaObjectsDeleteInternalServerError) Code() int { + return 500 +} + +func (o *SchemaObjectsDeleteInternalServerError) Error() string { + return fmt.Sprintf("[DELETE /schema/{className}][%d] schemaObjectsDeleteInternalServerError %+v", 500, o.Payload) +} + +func (o *SchemaObjectsDeleteInternalServerError) String() string { + return fmt.Sprintf("[DELETE /schema/{className}][%d] schemaObjectsDeleteInternalServerError %+v", 500, o.Payload) +} + +func (o *SchemaObjectsDeleteInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *SchemaObjectsDeleteInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/schema/schema_objects_get_parameters.go b/platform/dbops/binaries/weaviate-src/client/schema/schema_objects_get_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..fdfdf1e8b1aa7d693cf016f61271e8cac290b4c4 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/schema/schema_objects_get_parameters.go @@ -0,0 +1,198 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewSchemaObjectsGetParams creates a new SchemaObjectsGetParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewSchemaObjectsGetParams() *SchemaObjectsGetParams { + return &SchemaObjectsGetParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewSchemaObjectsGetParamsWithTimeout creates a new SchemaObjectsGetParams object +// with the ability to set a timeout on a request. +func NewSchemaObjectsGetParamsWithTimeout(timeout time.Duration) *SchemaObjectsGetParams { + return &SchemaObjectsGetParams{ + timeout: timeout, + } +} + +// NewSchemaObjectsGetParamsWithContext creates a new SchemaObjectsGetParams object +// with the ability to set a context for a request. +func NewSchemaObjectsGetParamsWithContext(ctx context.Context) *SchemaObjectsGetParams { + return &SchemaObjectsGetParams{ + Context: ctx, + } +} + +// NewSchemaObjectsGetParamsWithHTTPClient creates a new SchemaObjectsGetParams object +// with the ability to set a custom HTTPClient for a request. +func NewSchemaObjectsGetParamsWithHTTPClient(client *http.Client) *SchemaObjectsGetParams { + return &SchemaObjectsGetParams{ + HTTPClient: client, + } +} + +/* +SchemaObjectsGetParams contains all the parameters to send to the API endpoint + + for the schema objects get operation. + + Typically these are written to a http.Request. +*/ +type SchemaObjectsGetParams struct { + + // ClassName. + ClassName string + + /* Consistency. 
+ + If consistency is true, the request will be proxied to the leader to ensure strong schema consistency + + Default: true + */ + Consistency *bool + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the schema objects get params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *SchemaObjectsGetParams) WithDefaults() *SchemaObjectsGetParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the schema objects get params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *SchemaObjectsGetParams) SetDefaults() { + var ( + consistencyDefault = bool(true) + ) + + val := SchemaObjectsGetParams{ + Consistency: &consistencyDefault, + } + + val.timeout = o.timeout + val.Context = o.Context + val.HTTPClient = o.HTTPClient + *o = val +} + +// WithTimeout adds the timeout to the schema objects get params +func (o *SchemaObjectsGetParams) WithTimeout(timeout time.Duration) *SchemaObjectsGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the schema objects get params +func (o *SchemaObjectsGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the schema objects get params +func (o *SchemaObjectsGetParams) WithContext(ctx context.Context) *SchemaObjectsGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the schema objects get params +func (o *SchemaObjectsGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the schema objects get params +func (o *SchemaObjectsGetParams) WithHTTPClient(client *http.Client) *SchemaObjectsGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the schema objects get params +func (o *SchemaObjectsGetParams) SetHTTPClient(client 
*http.Client) { + o.HTTPClient = client +} + +// WithClassName adds the className to the schema objects get params +func (o *SchemaObjectsGetParams) WithClassName(className string) *SchemaObjectsGetParams { + o.SetClassName(className) + return o +} + +// SetClassName adds the className to the schema objects get params +func (o *SchemaObjectsGetParams) SetClassName(className string) { + o.ClassName = className +} + +// WithConsistency adds the consistency to the schema objects get params +func (o *SchemaObjectsGetParams) WithConsistency(consistency *bool) *SchemaObjectsGetParams { + o.SetConsistency(consistency) + return o +} + +// SetConsistency adds the consistency to the schema objects get params +func (o *SchemaObjectsGetParams) SetConsistency(consistency *bool) { + o.Consistency = consistency +} + +// WriteToRequest writes these params to a swagger request +func (o *SchemaObjectsGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param className + if err := r.SetPathParam("className", o.ClassName); err != nil { + return err + } + + if o.Consistency != nil { + + // header param consistency + if err := r.SetHeaderParam("consistency", swag.FormatBool(*o.Consistency)); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/schema/schema_objects_get_responses.go b/platform/dbops/binaries/weaviate-src/client/schema/schema_objects_get_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..3749ad2d70b6fdef37fbdb1e6b57192ad4fcfdda --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/schema/schema_objects_get_responses.go @@ -0,0 +1,386 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// SchemaObjectsGetReader is a Reader for the SchemaObjectsGet structure. +type SchemaObjectsGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *SchemaObjectsGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewSchemaObjectsGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 401: + result := NewSchemaObjectsGetUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewSchemaObjectsGetForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 404: + result := NewSchemaObjectsGetNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewSchemaObjectsGetInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewSchemaObjectsGetOK creates a SchemaObjectsGetOK with default headers values +func NewSchemaObjectsGetOK() *SchemaObjectsGetOK { + return &SchemaObjectsGetOK{} +} + +/* +SchemaObjectsGetOK describes a response with status code 200, with default header values. 
+ +Found the Class, returned as body +*/ +type SchemaObjectsGetOK struct { + Payload *models.Class +} + +// IsSuccess returns true when this schema objects get o k response has a 2xx status code +func (o *SchemaObjectsGetOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this schema objects get o k response has a 3xx status code +func (o *SchemaObjectsGetOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this schema objects get o k response has a 4xx status code +func (o *SchemaObjectsGetOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this schema objects get o k response has a 5xx status code +func (o *SchemaObjectsGetOK) IsServerError() bool { + return false +} + +// IsCode returns true when this schema objects get o k response a status code equal to that given +func (o *SchemaObjectsGetOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the schema objects get o k response +func (o *SchemaObjectsGetOK) Code() int { + return 200 +} + +func (o *SchemaObjectsGetOK) Error() string { + return fmt.Sprintf("[GET /schema/{className}][%d] schemaObjectsGetOK %+v", 200, o.Payload) +} + +func (o *SchemaObjectsGetOK) String() string { + return fmt.Sprintf("[GET /schema/{className}][%d] schemaObjectsGetOK %+v", 200, o.Payload) +} + +func (o *SchemaObjectsGetOK) GetPayload() *models.Class { + return o.Payload +} + +func (o *SchemaObjectsGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Class) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewSchemaObjectsGetUnauthorized creates a SchemaObjectsGetUnauthorized with default headers values +func NewSchemaObjectsGetUnauthorized() *SchemaObjectsGetUnauthorized { + return &SchemaObjectsGetUnauthorized{} +} + +/* 
+SchemaObjectsGetUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. +*/ +type SchemaObjectsGetUnauthorized struct { +} + +// IsSuccess returns true when this schema objects get unauthorized response has a 2xx status code +func (o *SchemaObjectsGetUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this schema objects get unauthorized response has a 3xx status code +func (o *SchemaObjectsGetUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this schema objects get unauthorized response has a 4xx status code +func (o *SchemaObjectsGetUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this schema objects get unauthorized response has a 5xx status code +func (o *SchemaObjectsGetUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this schema objects get unauthorized response a status code equal to that given +func (o *SchemaObjectsGetUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the schema objects get unauthorized response +func (o *SchemaObjectsGetUnauthorized) Code() int { + return 401 +} + +func (o *SchemaObjectsGetUnauthorized) Error() string { + return fmt.Sprintf("[GET /schema/{className}][%d] schemaObjectsGetUnauthorized ", 401) +} + +func (o *SchemaObjectsGetUnauthorized) String() string { + return fmt.Sprintf("[GET /schema/{className}][%d] schemaObjectsGetUnauthorized ", 401) +} + +func (o *SchemaObjectsGetUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewSchemaObjectsGetForbidden creates a SchemaObjectsGetForbidden with default headers values +func NewSchemaObjectsGetForbidden() *SchemaObjectsGetForbidden { + return &SchemaObjectsGetForbidden{} +} + +/* +SchemaObjectsGetForbidden describes a 
response with status code 403, with default header values. + +Forbidden +*/ +type SchemaObjectsGetForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this schema objects get forbidden response has a 2xx status code +func (o *SchemaObjectsGetForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this schema objects get forbidden response has a 3xx status code +func (o *SchemaObjectsGetForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this schema objects get forbidden response has a 4xx status code +func (o *SchemaObjectsGetForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this schema objects get forbidden response has a 5xx status code +func (o *SchemaObjectsGetForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this schema objects get forbidden response a status code equal to that given +func (o *SchemaObjectsGetForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the schema objects get forbidden response +func (o *SchemaObjectsGetForbidden) Code() int { + return 403 +} + +func (o *SchemaObjectsGetForbidden) Error() string { + return fmt.Sprintf("[GET /schema/{className}][%d] schemaObjectsGetForbidden %+v", 403, o.Payload) +} + +func (o *SchemaObjectsGetForbidden) String() string { + return fmt.Sprintf("[GET /schema/{className}][%d] schemaObjectsGetForbidden %+v", 403, o.Payload) +} + +func (o *SchemaObjectsGetForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *SchemaObjectsGetForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewSchemaObjectsGetNotFound creates a 
SchemaObjectsGetNotFound with default headers values +func NewSchemaObjectsGetNotFound() *SchemaObjectsGetNotFound { + return &SchemaObjectsGetNotFound{} +} + +/* +SchemaObjectsGetNotFound describes a response with status code 404, with default header values. + +This class does not exist +*/ +type SchemaObjectsGetNotFound struct { +} + +// IsSuccess returns true when this schema objects get not found response has a 2xx status code +func (o *SchemaObjectsGetNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this schema objects get not found response has a 3xx status code +func (o *SchemaObjectsGetNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this schema objects get not found response has a 4xx status code +func (o *SchemaObjectsGetNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this schema objects get not found response has a 5xx status code +func (o *SchemaObjectsGetNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this schema objects get not found response a status code equal to that given +func (o *SchemaObjectsGetNotFound) IsCode(code int) bool { + return code == 404 +} + +// Code gets the status code for the schema objects get not found response +func (o *SchemaObjectsGetNotFound) Code() int { + return 404 +} + +func (o *SchemaObjectsGetNotFound) Error() string { + return fmt.Sprintf("[GET /schema/{className}][%d] schemaObjectsGetNotFound ", 404) +} + +func (o *SchemaObjectsGetNotFound) String() string { + return fmt.Sprintf("[GET /schema/{className}][%d] schemaObjectsGetNotFound ", 404) +} + +func (o *SchemaObjectsGetNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewSchemaObjectsGetInternalServerError creates a SchemaObjectsGetInternalServerError with default headers values +func NewSchemaObjectsGetInternalServerError() 
*SchemaObjectsGetInternalServerError { + return &SchemaObjectsGetInternalServerError{} +} + +/* +SchemaObjectsGetInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. +*/ +type SchemaObjectsGetInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this schema objects get internal server error response has a 2xx status code +func (o *SchemaObjectsGetInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this schema objects get internal server error response has a 3xx status code +func (o *SchemaObjectsGetInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this schema objects get internal server error response has a 4xx status code +func (o *SchemaObjectsGetInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this schema objects get internal server error response has a 5xx status code +func (o *SchemaObjectsGetInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this schema objects get internal server error response a status code equal to that given +func (o *SchemaObjectsGetInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the schema objects get internal server error response +func (o *SchemaObjectsGetInternalServerError) Code() int { + return 500 +} + +func (o *SchemaObjectsGetInternalServerError) Error() string { + return fmt.Sprintf("[GET /schema/{className}][%d] schemaObjectsGetInternalServerError %+v", 500, o.Payload) +} + +func (o *SchemaObjectsGetInternalServerError) String() string { + return fmt.Sprintf("[GET /schema/{className}][%d] schemaObjectsGetInternalServerError %+v", 500, o.Payload) +} + +func (o 
*SchemaObjectsGetInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *SchemaObjectsGetInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/schema/schema_objects_properties_add_parameters.go b/platform/dbops/binaries/weaviate-src/client/schema/schema_objects_properties_add_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..81483cb4127f85eb3087283c35310b87598d294a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/schema/schema_objects_properties_add_parameters.go @@ -0,0 +1,180 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// NewSchemaObjectsPropertiesAddParams creates a new SchemaObjectsPropertiesAddParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. 
+func NewSchemaObjectsPropertiesAddParams() *SchemaObjectsPropertiesAddParams { + return &SchemaObjectsPropertiesAddParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewSchemaObjectsPropertiesAddParamsWithTimeout creates a new SchemaObjectsPropertiesAddParams object +// with the ability to set a timeout on a request. +func NewSchemaObjectsPropertiesAddParamsWithTimeout(timeout time.Duration) *SchemaObjectsPropertiesAddParams { + return &SchemaObjectsPropertiesAddParams{ + timeout: timeout, + } +} + +// NewSchemaObjectsPropertiesAddParamsWithContext creates a new SchemaObjectsPropertiesAddParams object +// with the ability to set a context for a request. +func NewSchemaObjectsPropertiesAddParamsWithContext(ctx context.Context) *SchemaObjectsPropertiesAddParams { + return &SchemaObjectsPropertiesAddParams{ + Context: ctx, + } +} + +// NewSchemaObjectsPropertiesAddParamsWithHTTPClient creates a new SchemaObjectsPropertiesAddParams object +// with the ability to set a custom HTTPClient for a request. +func NewSchemaObjectsPropertiesAddParamsWithHTTPClient(client *http.Client) *SchemaObjectsPropertiesAddParams { + return &SchemaObjectsPropertiesAddParams{ + HTTPClient: client, + } +} + +/* +SchemaObjectsPropertiesAddParams contains all the parameters to send to the API endpoint + + for the schema objects properties add operation. + + Typically these are written to a http.Request. +*/ +type SchemaObjectsPropertiesAddParams struct { + + // Body. + Body *models.Property + + // ClassName. + ClassName string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the schema objects properties add params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *SchemaObjectsPropertiesAddParams) WithDefaults() *SchemaObjectsPropertiesAddParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the schema objects properties add params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *SchemaObjectsPropertiesAddParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the schema objects properties add params +func (o *SchemaObjectsPropertiesAddParams) WithTimeout(timeout time.Duration) *SchemaObjectsPropertiesAddParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the schema objects properties add params +func (o *SchemaObjectsPropertiesAddParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the schema objects properties add params +func (o *SchemaObjectsPropertiesAddParams) WithContext(ctx context.Context) *SchemaObjectsPropertiesAddParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the schema objects properties add params +func (o *SchemaObjectsPropertiesAddParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the schema objects properties add params +func (o *SchemaObjectsPropertiesAddParams) WithHTTPClient(client *http.Client) *SchemaObjectsPropertiesAddParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the schema objects properties add params +func (o *SchemaObjectsPropertiesAddParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the schema objects properties add params +func (o *SchemaObjectsPropertiesAddParams) WithBody(body *models.Property) *SchemaObjectsPropertiesAddParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the schema objects properties add params +func (o *SchemaObjectsPropertiesAddParams) 
SetBody(body *models.Property) { + o.Body = body +} + +// WithClassName adds the className to the schema objects properties add params +func (o *SchemaObjectsPropertiesAddParams) WithClassName(className string) *SchemaObjectsPropertiesAddParams { + o.SetClassName(className) + return o +} + +// SetClassName adds the className to the schema objects properties add params +func (o *SchemaObjectsPropertiesAddParams) SetClassName(className string) { + o.ClassName = className +} + +// WriteToRequest writes these params to a swagger request +func (o *SchemaObjectsPropertiesAddParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + // path param className + if err := r.SetPathParam("className", o.ClassName); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/schema/schema_objects_properties_add_responses.go b/platform/dbops/binaries/weaviate-src/client/schema/schema_objects_properties_add_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..e8bc021a07e388094ed656713001525d0e1ea0bf --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/schema/schema_objects_properties_add_responses.go @@ -0,0 +1,398 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// SchemaObjectsPropertiesAddReader is a Reader for the SchemaObjectsPropertiesAdd structure. +type SchemaObjectsPropertiesAddReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *SchemaObjectsPropertiesAddReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewSchemaObjectsPropertiesAddOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 401: + result := NewSchemaObjectsPropertiesAddUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewSchemaObjectsPropertiesAddForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewSchemaObjectsPropertiesAddUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewSchemaObjectsPropertiesAddInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewSchemaObjectsPropertiesAddOK creates a SchemaObjectsPropertiesAddOK with default headers values +func NewSchemaObjectsPropertiesAddOK() *SchemaObjectsPropertiesAddOK { + return &SchemaObjectsPropertiesAddOK{} +} + +/* 
+SchemaObjectsPropertiesAddOK describes a response with status code 200, with default header values. + +Added the property. +*/ +type SchemaObjectsPropertiesAddOK struct { + Payload *models.Property +} + +// IsSuccess returns true when this schema objects properties add o k response has a 2xx status code +func (o *SchemaObjectsPropertiesAddOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this schema objects properties add o k response has a 3xx status code +func (o *SchemaObjectsPropertiesAddOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this schema objects properties add o k response has a 4xx status code +func (o *SchemaObjectsPropertiesAddOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this schema objects properties add o k response has a 5xx status code +func (o *SchemaObjectsPropertiesAddOK) IsServerError() bool { + return false +} + +// IsCode returns true when this schema objects properties add o k response a status code equal to that given +func (o *SchemaObjectsPropertiesAddOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the schema objects properties add o k response +func (o *SchemaObjectsPropertiesAddOK) Code() int { + return 200 +} + +func (o *SchemaObjectsPropertiesAddOK) Error() string { + return fmt.Sprintf("[POST /schema/{className}/properties][%d] schemaObjectsPropertiesAddOK %+v", 200, o.Payload) +} + +func (o *SchemaObjectsPropertiesAddOK) String() string { + return fmt.Sprintf("[POST /schema/{className}/properties][%d] schemaObjectsPropertiesAddOK %+v", 200, o.Payload) +} + +func (o *SchemaObjectsPropertiesAddOK) GetPayload() *models.Property { + return o.Payload +} + +func (o *SchemaObjectsPropertiesAddOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Property) + + // response payload + if err := 
consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewSchemaObjectsPropertiesAddUnauthorized creates a SchemaObjectsPropertiesAddUnauthorized with default headers values +func NewSchemaObjectsPropertiesAddUnauthorized() *SchemaObjectsPropertiesAddUnauthorized { + return &SchemaObjectsPropertiesAddUnauthorized{} +} + +/* +SchemaObjectsPropertiesAddUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. +*/ +type SchemaObjectsPropertiesAddUnauthorized struct { +} + +// IsSuccess returns true when this schema objects properties add unauthorized response has a 2xx status code +func (o *SchemaObjectsPropertiesAddUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this schema objects properties add unauthorized response has a 3xx status code +func (o *SchemaObjectsPropertiesAddUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this schema objects properties add unauthorized response has a 4xx status code +func (o *SchemaObjectsPropertiesAddUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this schema objects properties add unauthorized response has a 5xx status code +func (o *SchemaObjectsPropertiesAddUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this schema objects properties add unauthorized response a status code equal to that given +func (o *SchemaObjectsPropertiesAddUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the schema objects properties add unauthorized response +func (o *SchemaObjectsPropertiesAddUnauthorized) Code() int { + return 401 +} + +func (o *SchemaObjectsPropertiesAddUnauthorized) Error() string { + return fmt.Sprintf("[POST /schema/{className}/properties][%d] schemaObjectsPropertiesAddUnauthorized ", 401) +} + +func 
(o *SchemaObjectsPropertiesAddUnauthorized) String() string { + return fmt.Sprintf("[POST /schema/{className}/properties][%d] schemaObjectsPropertiesAddUnauthorized ", 401) +} + +func (o *SchemaObjectsPropertiesAddUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewSchemaObjectsPropertiesAddForbidden creates a SchemaObjectsPropertiesAddForbidden with default headers values +func NewSchemaObjectsPropertiesAddForbidden() *SchemaObjectsPropertiesAddForbidden { + return &SchemaObjectsPropertiesAddForbidden{} +} + +/* +SchemaObjectsPropertiesAddForbidden describes a response with status code 403, with default header values. + +Forbidden +*/ +type SchemaObjectsPropertiesAddForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this schema objects properties add forbidden response has a 2xx status code +func (o *SchemaObjectsPropertiesAddForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this schema objects properties add forbidden response has a 3xx status code +func (o *SchemaObjectsPropertiesAddForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this schema objects properties add forbidden response has a 4xx status code +func (o *SchemaObjectsPropertiesAddForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this schema objects properties add forbidden response has a 5xx status code +func (o *SchemaObjectsPropertiesAddForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this schema objects properties add forbidden response a status code equal to that given +func (o *SchemaObjectsPropertiesAddForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the schema objects properties add forbidden response +func (o *SchemaObjectsPropertiesAddForbidden) Code() int { + return 403 +} + 
+func (o *SchemaObjectsPropertiesAddForbidden) Error() string { + return fmt.Sprintf("[POST /schema/{className}/properties][%d] schemaObjectsPropertiesAddForbidden %+v", 403, o.Payload) +} + +func (o *SchemaObjectsPropertiesAddForbidden) String() string { + return fmt.Sprintf("[POST /schema/{className}/properties][%d] schemaObjectsPropertiesAddForbidden %+v", 403, o.Payload) +} + +func (o *SchemaObjectsPropertiesAddForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *SchemaObjectsPropertiesAddForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewSchemaObjectsPropertiesAddUnprocessableEntity creates a SchemaObjectsPropertiesAddUnprocessableEntity with default headers values +func NewSchemaObjectsPropertiesAddUnprocessableEntity() *SchemaObjectsPropertiesAddUnprocessableEntity { + return &SchemaObjectsPropertiesAddUnprocessableEntity{} +} + +/* +SchemaObjectsPropertiesAddUnprocessableEntity describes a response with status code 422, with default header values. + +Invalid property. 
+*/ +type SchemaObjectsPropertiesAddUnprocessableEntity struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this schema objects properties add unprocessable entity response has a 2xx status code +func (o *SchemaObjectsPropertiesAddUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this schema objects properties add unprocessable entity response has a 3xx status code +func (o *SchemaObjectsPropertiesAddUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this schema objects properties add unprocessable entity response has a 4xx status code +func (o *SchemaObjectsPropertiesAddUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this schema objects properties add unprocessable entity response has a 5xx status code +func (o *SchemaObjectsPropertiesAddUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this schema objects properties add unprocessable entity response a status code equal to that given +func (o *SchemaObjectsPropertiesAddUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + +// Code gets the status code for the schema objects properties add unprocessable entity response +func (o *SchemaObjectsPropertiesAddUnprocessableEntity) Code() int { + return 422 +} + +func (o *SchemaObjectsPropertiesAddUnprocessableEntity) Error() string { + return fmt.Sprintf("[POST /schema/{className}/properties][%d] schemaObjectsPropertiesAddUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *SchemaObjectsPropertiesAddUnprocessableEntity) String() string { + return fmt.Sprintf("[POST /schema/{className}/properties][%d] schemaObjectsPropertiesAddUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *SchemaObjectsPropertiesAddUnprocessableEntity) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *SchemaObjectsPropertiesAddUnprocessableEntity) 
readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewSchemaObjectsPropertiesAddInternalServerError creates a SchemaObjectsPropertiesAddInternalServerError with default headers values +func NewSchemaObjectsPropertiesAddInternalServerError() *SchemaObjectsPropertiesAddInternalServerError { + return &SchemaObjectsPropertiesAddInternalServerError{} +} + +/* +SchemaObjectsPropertiesAddInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. +*/ +type SchemaObjectsPropertiesAddInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this schema objects properties add internal server error response has a 2xx status code +func (o *SchemaObjectsPropertiesAddInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this schema objects properties add internal server error response has a 3xx status code +func (o *SchemaObjectsPropertiesAddInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this schema objects properties add internal server error response has a 4xx status code +func (o *SchemaObjectsPropertiesAddInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this schema objects properties add internal server error response has a 5xx status code +func (o *SchemaObjectsPropertiesAddInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this schema objects properties add internal server error response a status code equal to that given +func (o 
*SchemaObjectsPropertiesAddInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the schema objects properties add internal server error response +func (o *SchemaObjectsPropertiesAddInternalServerError) Code() int { + return 500 +} + +func (o *SchemaObjectsPropertiesAddInternalServerError) Error() string { + return fmt.Sprintf("[POST /schema/{className}/properties][%d] schemaObjectsPropertiesAddInternalServerError %+v", 500, o.Payload) +} + +func (o *SchemaObjectsPropertiesAddInternalServerError) String() string { + return fmt.Sprintf("[POST /schema/{className}/properties][%d] schemaObjectsPropertiesAddInternalServerError %+v", 500, o.Payload) +} + +func (o *SchemaObjectsPropertiesAddInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *SchemaObjectsPropertiesAddInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/schema/schema_objects_shards_get_parameters.go b/platform/dbops/binaries/weaviate-src/client/schema/schema_objects_shards_get_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..49f8047039aebc5f68292301a61f481c9fe3a2d0 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/schema/schema_objects_shards_get_parameters.go @@ -0,0 +1,190 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. 
+ +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewSchemaObjectsShardsGetParams creates a new SchemaObjectsShardsGetParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewSchemaObjectsShardsGetParams() *SchemaObjectsShardsGetParams { + return &SchemaObjectsShardsGetParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewSchemaObjectsShardsGetParamsWithTimeout creates a new SchemaObjectsShardsGetParams object +// with the ability to set a timeout on a request. +func NewSchemaObjectsShardsGetParamsWithTimeout(timeout time.Duration) *SchemaObjectsShardsGetParams { + return &SchemaObjectsShardsGetParams{ + timeout: timeout, + } +} + +// NewSchemaObjectsShardsGetParamsWithContext creates a new SchemaObjectsShardsGetParams object +// with the ability to set a context for a request. +func NewSchemaObjectsShardsGetParamsWithContext(ctx context.Context) *SchemaObjectsShardsGetParams { + return &SchemaObjectsShardsGetParams{ + Context: ctx, + } +} + +// NewSchemaObjectsShardsGetParamsWithHTTPClient creates a new SchemaObjectsShardsGetParams object +// with the ability to set a custom HTTPClient for a request. +func NewSchemaObjectsShardsGetParamsWithHTTPClient(client *http.Client) *SchemaObjectsShardsGetParams { + return &SchemaObjectsShardsGetParams{ + HTTPClient: client, + } +} + +/* +SchemaObjectsShardsGetParams contains all the parameters to send to the API endpoint + + for the schema objects shards get operation. + + Typically these are written to a http.Request. 
+*/ +type SchemaObjectsShardsGetParams struct { + + // ClassName. + ClassName string + + // Tenant. + Tenant *string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the schema objects shards get params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *SchemaObjectsShardsGetParams) WithDefaults() *SchemaObjectsShardsGetParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the schema objects shards get params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *SchemaObjectsShardsGetParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the schema objects shards get params +func (o *SchemaObjectsShardsGetParams) WithTimeout(timeout time.Duration) *SchemaObjectsShardsGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the schema objects shards get params +func (o *SchemaObjectsShardsGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the schema objects shards get params +func (o *SchemaObjectsShardsGetParams) WithContext(ctx context.Context) *SchemaObjectsShardsGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the schema objects shards get params +func (o *SchemaObjectsShardsGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the schema objects shards get params +func (o *SchemaObjectsShardsGetParams) WithHTTPClient(client *http.Client) *SchemaObjectsShardsGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the schema objects shards get params +func (o *SchemaObjectsShardsGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithClassName adds the className to 
the schema objects shards get params +func (o *SchemaObjectsShardsGetParams) WithClassName(className string) *SchemaObjectsShardsGetParams { + o.SetClassName(className) + return o +} + +// SetClassName adds the className to the schema objects shards get params +func (o *SchemaObjectsShardsGetParams) SetClassName(className string) { + o.ClassName = className +} + +// WithTenant adds the tenant to the schema objects shards get params +func (o *SchemaObjectsShardsGetParams) WithTenant(tenant *string) *SchemaObjectsShardsGetParams { + o.SetTenant(tenant) + return o +} + +// SetTenant adds the tenant to the schema objects shards get params +func (o *SchemaObjectsShardsGetParams) SetTenant(tenant *string) { + o.Tenant = tenant +} + +// WriteToRequest writes these params to a swagger request +func (o *SchemaObjectsShardsGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param className + if err := r.SetPathParam("className", o.ClassName); err != nil { + return err + } + + if o.Tenant != nil { + + // query param tenant + var qrTenant string + + if o.Tenant != nil { + qrTenant = *o.Tenant + } + qTenant := qrTenant + if qTenant != "" { + + if err := r.SetQueryParam("tenant", qTenant); err != nil { + return err + } + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/schema/schema_objects_shards_get_responses.go b/platform/dbops/binaries/weaviate-src/client/schema/schema_objects_shards_get_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..5839abbda0e8e6561289e309f559efb46c046564 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/schema/schema_objects_shards_get_responses.go @@ -0,0 +1,396 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// SchemaObjectsShardsGetReader is a Reader for the SchemaObjectsShardsGet structure. +type SchemaObjectsShardsGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *SchemaObjectsShardsGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewSchemaObjectsShardsGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 401: + result := NewSchemaObjectsShardsGetUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewSchemaObjectsShardsGetForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 404: + result := NewSchemaObjectsShardsGetNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewSchemaObjectsShardsGetInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewSchemaObjectsShardsGetOK creates a SchemaObjectsShardsGetOK with default headers values +func NewSchemaObjectsShardsGetOK() *SchemaObjectsShardsGetOK { + return &SchemaObjectsShardsGetOK{} +} + +/* +SchemaObjectsShardsGetOK describes a response with status code 200, with default header values. 
+ +Found the status of the shards, returned as body +*/ +type SchemaObjectsShardsGetOK struct { + Payload models.ShardStatusList +} + +// IsSuccess returns true when this schema objects shards get o k response has a 2xx status code +func (o *SchemaObjectsShardsGetOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this schema objects shards get o k response has a 3xx status code +func (o *SchemaObjectsShardsGetOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this schema objects shards get o k response has a 4xx status code +func (o *SchemaObjectsShardsGetOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this schema objects shards get o k response has a 5xx status code +func (o *SchemaObjectsShardsGetOK) IsServerError() bool { + return false +} + +// IsCode returns true when this schema objects shards get o k response a status code equal to that given +func (o *SchemaObjectsShardsGetOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the schema objects shards get o k response +func (o *SchemaObjectsShardsGetOK) Code() int { + return 200 +} + +func (o *SchemaObjectsShardsGetOK) Error() string { + return fmt.Sprintf("[GET /schema/{className}/shards][%d] schemaObjectsShardsGetOK %+v", 200, o.Payload) +} + +func (o *SchemaObjectsShardsGetOK) String() string { + return fmt.Sprintf("[GET /schema/{className}/shards][%d] schemaObjectsShardsGetOK %+v", 200, o.Payload) +} + +func (o *SchemaObjectsShardsGetOK) GetPayload() models.ShardStatusList { + return o.Payload +} + +func (o *SchemaObjectsShardsGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewSchemaObjectsShardsGetUnauthorized creates a SchemaObjectsShardsGetUnauthorized with default 
headers values +func NewSchemaObjectsShardsGetUnauthorized() *SchemaObjectsShardsGetUnauthorized { + return &SchemaObjectsShardsGetUnauthorized{} +} + +/* +SchemaObjectsShardsGetUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. +*/ +type SchemaObjectsShardsGetUnauthorized struct { +} + +// IsSuccess returns true when this schema objects shards get unauthorized response has a 2xx status code +func (o *SchemaObjectsShardsGetUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this schema objects shards get unauthorized response has a 3xx status code +func (o *SchemaObjectsShardsGetUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this schema objects shards get unauthorized response has a 4xx status code +func (o *SchemaObjectsShardsGetUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this schema objects shards get unauthorized response has a 5xx status code +func (o *SchemaObjectsShardsGetUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this schema objects shards get unauthorized response a status code equal to that given +func (o *SchemaObjectsShardsGetUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the schema objects shards get unauthorized response +func (o *SchemaObjectsShardsGetUnauthorized) Code() int { + return 401 +} + +func (o *SchemaObjectsShardsGetUnauthorized) Error() string { + return fmt.Sprintf("[GET /schema/{className}/shards][%d] schemaObjectsShardsGetUnauthorized ", 401) +} + +func (o *SchemaObjectsShardsGetUnauthorized) String() string { + return fmt.Sprintf("[GET /schema/{className}/shards][%d] schemaObjectsShardsGetUnauthorized ", 401) +} + +func (o *SchemaObjectsShardsGetUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats 
strfmt.Registry) error { + + return nil +} + +// NewSchemaObjectsShardsGetForbidden creates a SchemaObjectsShardsGetForbidden with default headers values +func NewSchemaObjectsShardsGetForbidden() *SchemaObjectsShardsGetForbidden { + return &SchemaObjectsShardsGetForbidden{} +} + +/* +SchemaObjectsShardsGetForbidden describes a response with status code 403, with default header values. + +Forbidden +*/ +type SchemaObjectsShardsGetForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this schema objects shards get forbidden response has a 2xx status code +func (o *SchemaObjectsShardsGetForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this schema objects shards get forbidden response has a 3xx status code +func (o *SchemaObjectsShardsGetForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this schema objects shards get forbidden response has a 4xx status code +func (o *SchemaObjectsShardsGetForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this schema objects shards get forbidden response has a 5xx status code +func (o *SchemaObjectsShardsGetForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this schema objects shards get forbidden response a status code equal to that given +func (o *SchemaObjectsShardsGetForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the schema objects shards get forbidden response +func (o *SchemaObjectsShardsGetForbidden) Code() int { + return 403 +} + +func (o *SchemaObjectsShardsGetForbidden) Error() string { + return fmt.Sprintf("[GET /schema/{className}/shards][%d] schemaObjectsShardsGetForbidden %+v", 403, o.Payload) +} + +func (o *SchemaObjectsShardsGetForbidden) String() string { + return fmt.Sprintf("[GET /schema/{className}/shards][%d] schemaObjectsShardsGetForbidden %+v", 403, o.Payload) +} + +func (o 
*SchemaObjectsShardsGetForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *SchemaObjectsShardsGetForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewSchemaObjectsShardsGetNotFound creates a SchemaObjectsShardsGetNotFound with default headers values +func NewSchemaObjectsShardsGetNotFound() *SchemaObjectsShardsGetNotFound { + return &SchemaObjectsShardsGetNotFound{} +} + +/* +SchemaObjectsShardsGetNotFound describes a response with status code 404, with default header values. + +This class does not exist +*/ +type SchemaObjectsShardsGetNotFound struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this schema objects shards get not found response has a 2xx status code +func (o *SchemaObjectsShardsGetNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this schema objects shards get not found response has a 3xx status code +func (o *SchemaObjectsShardsGetNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this schema objects shards get not found response has a 4xx status code +func (o *SchemaObjectsShardsGetNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this schema objects shards get not found response has a 5xx status code +func (o *SchemaObjectsShardsGetNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this schema objects shards get not found response a status code equal to that given +func (o *SchemaObjectsShardsGetNotFound) IsCode(code int) bool { + return code == 404 +} + +// Code gets the status code for the schema objects shards get not found response +func (o *SchemaObjectsShardsGetNotFound) Code() int { + return 
404 +} + +func (o *SchemaObjectsShardsGetNotFound) Error() string { + return fmt.Sprintf("[GET /schema/{className}/shards][%d] schemaObjectsShardsGetNotFound %+v", 404, o.Payload) +} + +func (o *SchemaObjectsShardsGetNotFound) String() string { + return fmt.Sprintf("[GET /schema/{className}/shards][%d] schemaObjectsShardsGetNotFound %+v", 404, o.Payload) +} + +func (o *SchemaObjectsShardsGetNotFound) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *SchemaObjectsShardsGetNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewSchemaObjectsShardsGetInternalServerError creates a SchemaObjectsShardsGetInternalServerError with default headers values +func NewSchemaObjectsShardsGetInternalServerError() *SchemaObjectsShardsGetInternalServerError { + return &SchemaObjectsShardsGetInternalServerError{} +} + +/* +SchemaObjectsShardsGetInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+*/ +type SchemaObjectsShardsGetInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this schema objects shards get internal server error response has a 2xx status code +func (o *SchemaObjectsShardsGetInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this schema objects shards get internal server error response has a 3xx status code +func (o *SchemaObjectsShardsGetInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this schema objects shards get internal server error response has a 4xx status code +func (o *SchemaObjectsShardsGetInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this schema objects shards get internal server error response has a 5xx status code +func (o *SchemaObjectsShardsGetInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this schema objects shards get internal server error response a status code equal to that given +func (o *SchemaObjectsShardsGetInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the schema objects shards get internal server error response +func (o *SchemaObjectsShardsGetInternalServerError) Code() int { + return 500 +} + +func (o *SchemaObjectsShardsGetInternalServerError) Error() string { + return fmt.Sprintf("[GET /schema/{className}/shards][%d] schemaObjectsShardsGetInternalServerError %+v", 500, o.Payload) +} + +func (o *SchemaObjectsShardsGetInternalServerError) String() string { + return fmt.Sprintf("[GET /schema/{className}/shards][%d] schemaObjectsShardsGetInternalServerError %+v", 500, o.Payload) +} + +func (o *SchemaObjectsShardsGetInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *SchemaObjectsShardsGetInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats 
strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/schema/schema_objects_shards_update_parameters.go b/platform/dbops/binaries/weaviate-src/client/schema/schema_objects_shards_update_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..711507d4621264c42a10ec390327d95e699fac9c --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/schema/schema_objects_shards_update_parameters.go @@ -0,0 +1,199 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// NewSchemaObjectsShardsUpdateParams creates a new SchemaObjectsShardsUpdateParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. 
+func NewSchemaObjectsShardsUpdateParams() *SchemaObjectsShardsUpdateParams { + return &SchemaObjectsShardsUpdateParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewSchemaObjectsShardsUpdateParamsWithTimeout creates a new SchemaObjectsShardsUpdateParams object +// with the ability to set a timeout on a request. +func NewSchemaObjectsShardsUpdateParamsWithTimeout(timeout time.Duration) *SchemaObjectsShardsUpdateParams { + return &SchemaObjectsShardsUpdateParams{ + timeout: timeout, + } +} + +// NewSchemaObjectsShardsUpdateParamsWithContext creates a new SchemaObjectsShardsUpdateParams object +// with the ability to set a context for a request. +func NewSchemaObjectsShardsUpdateParamsWithContext(ctx context.Context) *SchemaObjectsShardsUpdateParams { + return &SchemaObjectsShardsUpdateParams{ + Context: ctx, + } +} + +// NewSchemaObjectsShardsUpdateParamsWithHTTPClient creates a new SchemaObjectsShardsUpdateParams object +// with the ability to set a custom HTTPClient for a request. +func NewSchemaObjectsShardsUpdateParamsWithHTTPClient(client *http.Client) *SchemaObjectsShardsUpdateParams { + return &SchemaObjectsShardsUpdateParams{ + HTTPClient: client, + } +} + +/* +SchemaObjectsShardsUpdateParams contains all the parameters to send to the API endpoint + + for the schema objects shards update operation. + + Typically these are written to a http.Request. +*/ +type SchemaObjectsShardsUpdateParams struct { + + // Body. + Body *models.ShardStatus + + // ClassName. + ClassName string + + // ShardName. + ShardName string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the schema objects shards update params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *SchemaObjectsShardsUpdateParams) WithDefaults() *SchemaObjectsShardsUpdateParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the schema objects shards update params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *SchemaObjectsShardsUpdateParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the schema objects shards update params +func (o *SchemaObjectsShardsUpdateParams) WithTimeout(timeout time.Duration) *SchemaObjectsShardsUpdateParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the schema objects shards update params +func (o *SchemaObjectsShardsUpdateParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the schema objects shards update params +func (o *SchemaObjectsShardsUpdateParams) WithContext(ctx context.Context) *SchemaObjectsShardsUpdateParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the schema objects shards update params +func (o *SchemaObjectsShardsUpdateParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the schema objects shards update params +func (o *SchemaObjectsShardsUpdateParams) WithHTTPClient(client *http.Client) *SchemaObjectsShardsUpdateParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the schema objects shards update params +func (o *SchemaObjectsShardsUpdateParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the schema objects shards update params +func (o *SchemaObjectsShardsUpdateParams) WithBody(body *models.ShardStatus) *SchemaObjectsShardsUpdateParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the schema objects shards update params +func (o *SchemaObjectsShardsUpdateParams) SetBody(body 
*models.ShardStatus) { + o.Body = body +} + +// WithClassName adds the className to the schema objects shards update params +func (o *SchemaObjectsShardsUpdateParams) WithClassName(className string) *SchemaObjectsShardsUpdateParams { + o.SetClassName(className) + return o +} + +// SetClassName adds the className to the schema objects shards update params +func (o *SchemaObjectsShardsUpdateParams) SetClassName(className string) { + o.ClassName = className +} + +// WithShardName adds the shardName to the schema objects shards update params +func (o *SchemaObjectsShardsUpdateParams) WithShardName(shardName string) *SchemaObjectsShardsUpdateParams { + o.SetShardName(shardName) + return o +} + +// SetShardName adds the shardName to the schema objects shards update params +func (o *SchemaObjectsShardsUpdateParams) SetShardName(shardName string) { + o.ShardName = shardName +} + +// WriteToRequest writes these params to a swagger request +func (o *SchemaObjectsShardsUpdateParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + // path param className + if err := r.SetPathParam("className", o.ClassName); err != nil { + return err + } + + // path param shardName + if err := r.SetPathParam("shardName", o.ShardName); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/schema/schema_objects_shards_update_responses.go b/platform/dbops/binaries/weaviate-src/client/schema/schema_objects_shards_update_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..6d0c11919a9b7dfb325352447084d3a9661094d5 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/schema/schema_objects_shards_update_responses.go @@ -0,0 +1,472 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// SchemaObjectsShardsUpdateReader is a Reader for the SchemaObjectsShardsUpdate structure. +type SchemaObjectsShardsUpdateReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *SchemaObjectsShardsUpdateReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewSchemaObjectsShardsUpdateOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 401: + result := NewSchemaObjectsShardsUpdateUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewSchemaObjectsShardsUpdateForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 404: + result := NewSchemaObjectsShardsUpdateNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewSchemaObjectsShardsUpdateUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewSchemaObjectsShardsUpdateInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewSchemaObjectsShardsUpdateOK creates a SchemaObjectsShardsUpdateOK with default headers values +func NewSchemaObjectsShardsUpdateOK() *SchemaObjectsShardsUpdateOK { + return &SchemaObjectsShardsUpdateOK{} +} + +/* +SchemaObjectsShardsUpdateOK describes a response with status code 200, with default header values. 
+ +Shard status was updated successfully +*/ +type SchemaObjectsShardsUpdateOK struct { + Payload *models.ShardStatus +} + +// IsSuccess returns true when this schema objects shards update o k response has a 2xx status code +func (o *SchemaObjectsShardsUpdateOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this schema objects shards update o k response has a 3xx status code +func (o *SchemaObjectsShardsUpdateOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this schema objects shards update o k response has a 4xx status code +func (o *SchemaObjectsShardsUpdateOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this schema objects shards update o k response has a 5xx status code +func (o *SchemaObjectsShardsUpdateOK) IsServerError() bool { + return false +} + +// IsCode returns true when this schema objects shards update o k response a status code equal to that given +func (o *SchemaObjectsShardsUpdateOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the schema objects shards update o k response +func (o *SchemaObjectsShardsUpdateOK) Code() int { + return 200 +} + +func (o *SchemaObjectsShardsUpdateOK) Error() string { + return fmt.Sprintf("[PUT /schema/{className}/shards/{shardName}][%d] schemaObjectsShardsUpdateOK %+v", 200, o.Payload) +} + +func (o *SchemaObjectsShardsUpdateOK) String() string { + return fmt.Sprintf("[PUT /schema/{className}/shards/{shardName}][%d] schemaObjectsShardsUpdateOK %+v", 200, o.Payload) +} + +func (o *SchemaObjectsShardsUpdateOK) GetPayload() *models.ShardStatus { + return o.Payload +} + +func (o *SchemaObjectsShardsUpdateOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ShardStatus) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} 
+ +// NewSchemaObjectsShardsUpdateUnauthorized creates a SchemaObjectsShardsUpdateUnauthorized with default headers values +func NewSchemaObjectsShardsUpdateUnauthorized() *SchemaObjectsShardsUpdateUnauthorized { + return &SchemaObjectsShardsUpdateUnauthorized{} +} + +/* +SchemaObjectsShardsUpdateUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. +*/ +type SchemaObjectsShardsUpdateUnauthorized struct { +} + +// IsSuccess returns true when this schema objects shards update unauthorized response has a 2xx status code +func (o *SchemaObjectsShardsUpdateUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this schema objects shards update unauthorized response has a 3xx status code +func (o *SchemaObjectsShardsUpdateUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this schema objects shards update unauthorized response has a 4xx status code +func (o *SchemaObjectsShardsUpdateUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this schema objects shards update unauthorized response has a 5xx status code +func (o *SchemaObjectsShardsUpdateUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this schema objects shards update unauthorized response a status code equal to that given +func (o *SchemaObjectsShardsUpdateUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the schema objects shards update unauthorized response +func (o *SchemaObjectsShardsUpdateUnauthorized) Code() int { + return 401 +} + +func (o *SchemaObjectsShardsUpdateUnauthorized) Error() string { + return fmt.Sprintf("[PUT /schema/{className}/shards/{shardName}][%d] schemaObjectsShardsUpdateUnauthorized ", 401) +} + +func (o *SchemaObjectsShardsUpdateUnauthorized) String() string { + return fmt.Sprintf("[PUT 
/schema/{className}/shards/{shardName}][%d] schemaObjectsShardsUpdateUnauthorized ", 401) +} + +func (o *SchemaObjectsShardsUpdateUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewSchemaObjectsShardsUpdateForbidden creates a SchemaObjectsShardsUpdateForbidden with default headers values +func NewSchemaObjectsShardsUpdateForbidden() *SchemaObjectsShardsUpdateForbidden { + return &SchemaObjectsShardsUpdateForbidden{} +} + +/* +SchemaObjectsShardsUpdateForbidden describes a response with status code 403, with default header values. + +Forbidden +*/ +type SchemaObjectsShardsUpdateForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this schema objects shards update forbidden response has a 2xx status code +func (o *SchemaObjectsShardsUpdateForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this schema objects shards update forbidden response has a 3xx status code +func (o *SchemaObjectsShardsUpdateForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this schema objects shards update forbidden response has a 4xx status code +func (o *SchemaObjectsShardsUpdateForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this schema objects shards update forbidden response has a 5xx status code +func (o *SchemaObjectsShardsUpdateForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this schema objects shards update forbidden response a status code equal to that given +func (o *SchemaObjectsShardsUpdateForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the schema objects shards update forbidden response +func (o *SchemaObjectsShardsUpdateForbidden) Code() int { + return 403 +} + +func (o *SchemaObjectsShardsUpdateForbidden) Error() string { + return fmt.Sprintf("[PUT 
/schema/{className}/shards/{shardName}][%d] schemaObjectsShardsUpdateForbidden %+v", 403, o.Payload) +} + +func (o *SchemaObjectsShardsUpdateForbidden) String() string { + return fmt.Sprintf("[PUT /schema/{className}/shards/{shardName}][%d] schemaObjectsShardsUpdateForbidden %+v", 403, o.Payload) +} + +func (o *SchemaObjectsShardsUpdateForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *SchemaObjectsShardsUpdateForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewSchemaObjectsShardsUpdateNotFound creates a SchemaObjectsShardsUpdateNotFound with default headers values +func NewSchemaObjectsShardsUpdateNotFound() *SchemaObjectsShardsUpdateNotFound { + return &SchemaObjectsShardsUpdateNotFound{} +} + +/* +SchemaObjectsShardsUpdateNotFound describes a response with status code 404, with default header values. 
+ +Shard to be updated does not exist +*/ +type SchemaObjectsShardsUpdateNotFound struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this schema objects shards update not found response has a 2xx status code +func (o *SchemaObjectsShardsUpdateNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this schema objects shards update not found response has a 3xx status code +func (o *SchemaObjectsShardsUpdateNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this schema objects shards update not found response has a 4xx status code +func (o *SchemaObjectsShardsUpdateNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this schema objects shards update not found response has a 5xx status code +func (o *SchemaObjectsShardsUpdateNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this schema objects shards update not found response a status code equal to that given +func (o *SchemaObjectsShardsUpdateNotFound) IsCode(code int) bool { + return code == 404 +} + +// Code gets the status code for the schema objects shards update not found response +func (o *SchemaObjectsShardsUpdateNotFound) Code() int { + return 404 +} + +func (o *SchemaObjectsShardsUpdateNotFound) Error() string { + return fmt.Sprintf("[PUT /schema/{className}/shards/{shardName}][%d] schemaObjectsShardsUpdateNotFound %+v", 404, o.Payload) +} + +func (o *SchemaObjectsShardsUpdateNotFound) String() string { + return fmt.Sprintf("[PUT /schema/{className}/shards/{shardName}][%d] schemaObjectsShardsUpdateNotFound %+v", 404, o.Payload) +} + +func (o *SchemaObjectsShardsUpdateNotFound) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *SchemaObjectsShardsUpdateNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if 
err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewSchemaObjectsShardsUpdateUnprocessableEntity creates a SchemaObjectsShardsUpdateUnprocessableEntity with default headers values +func NewSchemaObjectsShardsUpdateUnprocessableEntity() *SchemaObjectsShardsUpdateUnprocessableEntity { + return &SchemaObjectsShardsUpdateUnprocessableEntity{} +} + +/* +SchemaObjectsShardsUpdateUnprocessableEntity describes a response with status code 422, with default header values. + +Invalid update attempt +*/ +type SchemaObjectsShardsUpdateUnprocessableEntity struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this schema objects shards update unprocessable entity response has a 2xx status code +func (o *SchemaObjectsShardsUpdateUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this schema objects shards update unprocessable entity response has a 3xx status code +func (o *SchemaObjectsShardsUpdateUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this schema objects shards update unprocessable entity response has a 4xx status code +func (o *SchemaObjectsShardsUpdateUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this schema objects shards update unprocessable entity response has a 5xx status code +func (o *SchemaObjectsShardsUpdateUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this schema objects shards update unprocessable entity response a status code equal to that given +func (o *SchemaObjectsShardsUpdateUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + +// Code gets the status code for the schema objects shards update unprocessable entity response +func (o *SchemaObjectsShardsUpdateUnprocessableEntity) Code() int { + return 422 +} + +func (o 
*SchemaObjectsShardsUpdateUnprocessableEntity) Error() string { + return fmt.Sprintf("[PUT /schema/{className}/shards/{shardName}][%d] schemaObjectsShardsUpdateUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *SchemaObjectsShardsUpdateUnprocessableEntity) String() string { + return fmt.Sprintf("[PUT /schema/{className}/shards/{shardName}][%d] schemaObjectsShardsUpdateUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *SchemaObjectsShardsUpdateUnprocessableEntity) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *SchemaObjectsShardsUpdateUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewSchemaObjectsShardsUpdateInternalServerError creates a SchemaObjectsShardsUpdateInternalServerError with default headers values +func NewSchemaObjectsShardsUpdateInternalServerError() *SchemaObjectsShardsUpdateInternalServerError { + return &SchemaObjectsShardsUpdateInternalServerError{} +} + +/* +SchemaObjectsShardsUpdateInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+*/ +type SchemaObjectsShardsUpdateInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this schema objects shards update internal server error response has a 2xx status code +func (o *SchemaObjectsShardsUpdateInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this schema objects shards update internal server error response has a 3xx status code +func (o *SchemaObjectsShardsUpdateInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this schema objects shards update internal server error response has a 4xx status code +func (o *SchemaObjectsShardsUpdateInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this schema objects shards update internal server error response has a 5xx status code +func (o *SchemaObjectsShardsUpdateInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this schema objects shards update internal server error response a status code equal to that given +func (o *SchemaObjectsShardsUpdateInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the schema objects shards update internal server error response +func (o *SchemaObjectsShardsUpdateInternalServerError) Code() int { + return 500 +} + +func (o *SchemaObjectsShardsUpdateInternalServerError) Error() string { + return fmt.Sprintf("[PUT /schema/{className}/shards/{shardName}][%d] schemaObjectsShardsUpdateInternalServerError %+v", 500, o.Payload) +} + +func (o *SchemaObjectsShardsUpdateInternalServerError) String() string { + return fmt.Sprintf("[PUT /schema/{className}/shards/{shardName}][%d] schemaObjectsShardsUpdateInternalServerError %+v", 500, o.Payload) +} + +func (o *SchemaObjectsShardsUpdateInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *SchemaObjectsShardsUpdateInternalServerError) 
readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/schema/schema_objects_update_parameters.go b/platform/dbops/binaries/weaviate-src/client/schema/schema_objects_update_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..fdc558dd72bf553a6f205088c9d57ce3f223519a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/schema/schema_objects_update_parameters.go @@ -0,0 +1,180 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// NewSchemaObjectsUpdateParams creates a new SchemaObjectsUpdateParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. 
+func NewSchemaObjectsUpdateParams() *SchemaObjectsUpdateParams { + return &SchemaObjectsUpdateParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewSchemaObjectsUpdateParamsWithTimeout creates a new SchemaObjectsUpdateParams object +// with the ability to set a timeout on a request. +func NewSchemaObjectsUpdateParamsWithTimeout(timeout time.Duration) *SchemaObjectsUpdateParams { + return &SchemaObjectsUpdateParams{ + timeout: timeout, + } +} + +// NewSchemaObjectsUpdateParamsWithContext creates a new SchemaObjectsUpdateParams object +// with the ability to set a context for a request. +func NewSchemaObjectsUpdateParamsWithContext(ctx context.Context) *SchemaObjectsUpdateParams { + return &SchemaObjectsUpdateParams{ + Context: ctx, + } +} + +// NewSchemaObjectsUpdateParamsWithHTTPClient creates a new SchemaObjectsUpdateParams object +// with the ability to set a custom HTTPClient for a request. +func NewSchemaObjectsUpdateParamsWithHTTPClient(client *http.Client) *SchemaObjectsUpdateParams { + return &SchemaObjectsUpdateParams{ + HTTPClient: client, + } +} + +/* +SchemaObjectsUpdateParams contains all the parameters to send to the API endpoint + + for the schema objects update operation. + + Typically these are written to a http.Request. +*/ +type SchemaObjectsUpdateParams struct { + + // ClassName. + ClassName string + + // ObjectClass. + ObjectClass *models.Class + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the schema objects update params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *SchemaObjectsUpdateParams) WithDefaults() *SchemaObjectsUpdateParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the schema objects update params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *SchemaObjectsUpdateParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the schema objects update params +func (o *SchemaObjectsUpdateParams) WithTimeout(timeout time.Duration) *SchemaObjectsUpdateParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the schema objects update params +func (o *SchemaObjectsUpdateParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the schema objects update params +func (o *SchemaObjectsUpdateParams) WithContext(ctx context.Context) *SchemaObjectsUpdateParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the schema objects update params +func (o *SchemaObjectsUpdateParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the schema objects update params +func (o *SchemaObjectsUpdateParams) WithHTTPClient(client *http.Client) *SchemaObjectsUpdateParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the schema objects update params +func (o *SchemaObjectsUpdateParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithClassName adds the className to the schema objects update params +func (o *SchemaObjectsUpdateParams) WithClassName(className string) *SchemaObjectsUpdateParams { + o.SetClassName(className) + return o +} + +// SetClassName adds the className to the schema objects update params +func (o *SchemaObjectsUpdateParams) SetClassName(className string) { + o.ClassName = className +} + +// WithObjectClass adds the objectClass to the schema objects update params +func (o *SchemaObjectsUpdateParams) WithObjectClass(objectClass *models.Class) *SchemaObjectsUpdateParams { + o.SetObjectClass(objectClass) + return o +} + +// SetObjectClass adds the objectClass to the schema objects update params +func (o *SchemaObjectsUpdateParams) 
SetObjectClass(objectClass *models.Class) { + o.ObjectClass = objectClass +} + +// WriteToRequest writes these params to a swagger request +func (o *SchemaObjectsUpdateParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param className + if err := r.SetPathParam("className", o.ClassName); err != nil { + return err + } + if o.ObjectClass != nil { + if err := r.SetBodyParam(o.ObjectClass); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/schema/schema_objects_update_responses.go b/platform/dbops/binaries/weaviate-src/client/schema/schema_objects_update_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..34809c04150be34b164abfba6b8f2d6cb12d6c89 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/schema/schema_objects_update_responses.go @@ -0,0 +1,472 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// SchemaObjectsUpdateReader is a Reader for the SchemaObjectsUpdate structure. +type SchemaObjectsUpdateReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *SchemaObjectsUpdateReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewSchemaObjectsUpdateOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 401: + result := NewSchemaObjectsUpdateUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewSchemaObjectsUpdateForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 404: + result := NewSchemaObjectsUpdateNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewSchemaObjectsUpdateUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewSchemaObjectsUpdateInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewSchemaObjectsUpdateOK creates a SchemaObjectsUpdateOK with default headers values +func NewSchemaObjectsUpdateOK() *SchemaObjectsUpdateOK { + return &SchemaObjectsUpdateOK{} +} + +/* +SchemaObjectsUpdateOK describes a response with status code 200, with default header values. 
+ +Class was updated successfully +*/ +type SchemaObjectsUpdateOK struct { + Payload *models.Class +} + +// IsSuccess returns true when this schema objects update o k response has a 2xx status code +func (o *SchemaObjectsUpdateOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this schema objects update o k response has a 3xx status code +func (o *SchemaObjectsUpdateOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this schema objects update o k response has a 4xx status code +func (o *SchemaObjectsUpdateOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this schema objects update o k response has a 5xx status code +func (o *SchemaObjectsUpdateOK) IsServerError() bool { + return false +} + +// IsCode returns true when this schema objects update o k response a status code equal to that given +func (o *SchemaObjectsUpdateOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the schema objects update o k response +func (o *SchemaObjectsUpdateOK) Code() int { + return 200 +} + +func (o *SchemaObjectsUpdateOK) Error() string { + return fmt.Sprintf("[PUT /schema/{className}][%d] schemaObjectsUpdateOK %+v", 200, o.Payload) +} + +func (o *SchemaObjectsUpdateOK) String() string { + return fmt.Sprintf("[PUT /schema/{className}][%d] schemaObjectsUpdateOK %+v", 200, o.Payload) +} + +func (o *SchemaObjectsUpdateOK) GetPayload() *models.Class { + return o.Payload +} + +func (o *SchemaObjectsUpdateOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Class) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewSchemaObjectsUpdateUnauthorized creates a SchemaObjectsUpdateUnauthorized with default headers values +func NewSchemaObjectsUpdateUnauthorized() 
*SchemaObjectsUpdateUnauthorized { + return &SchemaObjectsUpdateUnauthorized{} +} + +/* +SchemaObjectsUpdateUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. +*/ +type SchemaObjectsUpdateUnauthorized struct { +} + +// IsSuccess returns true when this schema objects update unauthorized response has a 2xx status code +func (o *SchemaObjectsUpdateUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this schema objects update unauthorized response has a 3xx status code +func (o *SchemaObjectsUpdateUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this schema objects update unauthorized response has a 4xx status code +func (o *SchemaObjectsUpdateUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this schema objects update unauthorized response has a 5xx status code +func (o *SchemaObjectsUpdateUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this schema objects update unauthorized response a status code equal to that given +func (o *SchemaObjectsUpdateUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the schema objects update unauthorized response +func (o *SchemaObjectsUpdateUnauthorized) Code() int { + return 401 +} + +func (o *SchemaObjectsUpdateUnauthorized) Error() string { + return fmt.Sprintf("[PUT /schema/{className}][%d] schemaObjectsUpdateUnauthorized ", 401) +} + +func (o *SchemaObjectsUpdateUnauthorized) String() string { + return fmt.Sprintf("[PUT /schema/{className}][%d] schemaObjectsUpdateUnauthorized ", 401) +} + +func (o *SchemaObjectsUpdateUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewSchemaObjectsUpdateForbidden creates a SchemaObjectsUpdateForbidden with default headers values +func 
NewSchemaObjectsUpdateForbidden() *SchemaObjectsUpdateForbidden { + return &SchemaObjectsUpdateForbidden{} +} + +/* +SchemaObjectsUpdateForbidden describes a response with status code 403, with default header values. + +Forbidden +*/ +type SchemaObjectsUpdateForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this schema objects update forbidden response has a 2xx status code +func (o *SchemaObjectsUpdateForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this schema objects update forbidden response has a 3xx status code +func (o *SchemaObjectsUpdateForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this schema objects update forbidden response has a 4xx status code +func (o *SchemaObjectsUpdateForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this schema objects update forbidden response has a 5xx status code +func (o *SchemaObjectsUpdateForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this schema objects update forbidden response a status code equal to that given +func (o *SchemaObjectsUpdateForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the schema objects update forbidden response +func (o *SchemaObjectsUpdateForbidden) Code() int { + return 403 +} + +func (o *SchemaObjectsUpdateForbidden) Error() string { + return fmt.Sprintf("[PUT /schema/{className}][%d] schemaObjectsUpdateForbidden %+v", 403, o.Payload) +} + +func (o *SchemaObjectsUpdateForbidden) String() string { + return fmt.Sprintf("[PUT /schema/{className}][%d] schemaObjectsUpdateForbidden %+v", 403, o.Payload) +} + +func (o *SchemaObjectsUpdateForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *SchemaObjectsUpdateForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = 
new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewSchemaObjectsUpdateNotFound creates a SchemaObjectsUpdateNotFound with default headers values +func NewSchemaObjectsUpdateNotFound() *SchemaObjectsUpdateNotFound { + return &SchemaObjectsUpdateNotFound{} +} + +/* +SchemaObjectsUpdateNotFound describes a response with status code 404, with default header values. + +Class to be updated does not exist +*/ +type SchemaObjectsUpdateNotFound struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this schema objects update not found response has a 2xx status code +func (o *SchemaObjectsUpdateNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this schema objects update not found response has a 3xx status code +func (o *SchemaObjectsUpdateNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this schema objects update not found response has a 4xx status code +func (o *SchemaObjectsUpdateNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this schema objects update not found response has a 5xx status code +func (o *SchemaObjectsUpdateNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this schema objects update not found response a status code equal to that given +func (o *SchemaObjectsUpdateNotFound) IsCode(code int) bool { + return code == 404 +} + +// Code gets the status code for the schema objects update not found response +func (o *SchemaObjectsUpdateNotFound) Code() int { + return 404 +} + +func (o *SchemaObjectsUpdateNotFound) Error() string { + return fmt.Sprintf("[PUT /schema/{className}][%d] schemaObjectsUpdateNotFound %+v", 404, o.Payload) +} + +func (o *SchemaObjectsUpdateNotFound) String() string { + return fmt.Sprintf("[PUT /schema/{className}][%d] schemaObjectsUpdateNotFound %+v", 
404, o.Payload) +} + +func (o *SchemaObjectsUpdateNotFound) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *SchemaObjectsUpdateNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewSchemaObjectsUpdateUnprocessableEntity creates a SchemaObjectsUpdateUnprocessableEntity with default headers values +func NewSchemaObjectsUpdateUnprocessableEntity() *SchemaObjectsUpdateUnprocessableEntity { + return &SchemaObjectsUpdateUnprocessableEntity{} +} + +/* +SchemaObjectsUpdateUnprocessableEntity describes a response with status code 422, with default header values. + +Invalid update attempt +*/ +type SchemaObjectsUpdateUnprocessableEntity struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this schema objects update unprocessable entity response has a 2xx status code +func (o *SchemaObjectsUpdateUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this schema objects update unprocessable entity response has a 3xx status code +func (o *SchemaObjectsUpdateUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this schema objects update unprocessable entity response has a 4xx status code +func (o *SchemaObjectsUpdateUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this schema objects update unprocessable entity response has a 5xx status code +func (o *SchemaObjectsUpdateUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this schema objects update unprocessable entity response a status code equal to that given +func (o *SchemaObjectsUpdateUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} 
+ +// Code gets the status code for the schema objects update unprocessable entity response +func (o *SchemaObjectsUpdateUnprocessableEntity) Code() int { + return 422 +} + +func (o *SchemaObjectsUpdateUnprocessableEntity) Error() string { + return fmt.Sprintf("[PUT /schema/{className}][%d] schemaObjectsUpdateUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *SchemaObjectsUpdateUnprocessableEntity) String() string { + return fmt.Sprintf("[PUT /schema/{className}][%d] schemaObjectsUpdateUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *SchemaObjectsUpdateUnprocessableEntity) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *SchemaObjectsUpdateUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewSchemaObjectsUpdateInternalServerError creates a SchemaObjectsUpdateInternalServerError with default headers values +func NewSchemaObjectsUpdateInternalServerError() *SchemaObjectsUpdateInternalServerError { + return &SchemaObjectsUpdateInternalServerError{} +} + +/* +SchemaObjectsUpdateInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+*/ +type SchemaObjectsUpdateInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this schema objects update internal server error response has a 2xx status code +func (o *SchemaObjectsUpdateInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this schema objects update internal server error response has a 3xx status code +func (o *SchemaObjectsUpdateInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this schema objects update internal server error response has a 4xx status code +func (o *SchemaObjectsUpdateInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this schema objects update internal server error response has a 5xx status code +func (o *SchemaObjectsUpdateInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this schema objects update internal server error response a status code equal to that given +func (o *SchemaObjectsUpdateInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the schema objects update internal server error response +func (o *SchemaObjectsUpdateInternalServerError) Code() int { + return 500 +} + +func (o *SchemaObjectsUpdateInternalServerError) Error() string { + return fmt.Sprintf("[PUT /schema/{className}][%d] schemaObjectsUpdateInternalServerError %+v", 500, o.Payload) +} + +func (o *SchemaObjectsUpdateInternalServerError) String() string { + return fmt.Sprintf("[PUT /schema/{className}][%d] schemaObjectsUpdateInternalServerError %+v", 500, o.Payload) +} + +func (o *SchemaObjectsUpdateInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *SchemaObjectsUpdateInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // 
response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/schema/tenant_exists_parameters.go b/platform/dbops/binaries/weaviate-src/client/schema/tenant_exists_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..2fe6217181f4b3abfd37e4cf3f37bd6212aa7c52 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/schema/tenant_exists_parameters.go @@ -0,0 +1,217 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewTenantExistsParams creates a new TenantExistsParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewTenantExistsParams() *TenantExistsParams { + return &TenantExistsParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewTenantExistsParamsWithTimeout creates a new TenantExistsParams object +// with the ability to set a timeout on a request. 
+func NewTenantExistsParamsWithTimeout(timeout time.Duration) *TenantExistsParams { + return &TenantExistsParams{ + timeout: timeout, + } +} + +// NewTenantExistsParamsWithContext creates a new TenantExistsParams object +// with the ability to set a context for a request. +func NewTenantExistsParamsWithContext(ctx context.Context) *TenantExistsParams { + return &TenantExistsParams{ + Context: ctx, + } +} + +// NewTenantExistsParamsWithHTTPClient creates a new TenantExistsParams object +// with the ability to set a custom HTTPClient for a request. +func NewTenantExistsParamsWithHTTPClient(client *http.Client) *TenantExistsParams { + return &TenantExistsParams{ + HTTPClient: client, + } +} + +/* +TenantExistsParams contains all the parameters to send to the API endpoint + + for the tenant exists operation. + + Typically these are written to a http.Request. +*/ +type TenantExistsParams struct { + + // ClassName. + ClassName string + + /* Consistency. + + If consistency is true, the request will be proxied to the leader to ensure strong schema consistency + + Default: true + */ + Consistency *bool + + // TenantName. + TenantName string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the tenant exists params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *TenantExistsParams) WithDefaults() *TenantExistsParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the tenant exists params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *TenantExistsParams) SetDefaults() { + var ( + consistencyDefault = bool(true) + ) + + val := TenantExistsParams{ + Consistency: &consistencyDefault, + } + + val.timeout = o.timeout + val.Context = o.Context + val.HTTPClient = o.HTTPClient + *o = val +} + +// WithTimeout adds the timeout to the tenant exists params +func (o *TenantExistsParams) WithTimeout(timeout time.Duration) *TenantExistsParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the tenant exists params +func (o *TenantExistsParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the tenant exists params +func (o *TenantExistsParams) WithContext(ctx context.Context) *TenantExistsParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the tenant exists params +func (o *TenantExistsParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the tenant exists params +func (o *TenantExistsParams) WithHTTPClient(client *http.Client) *TenantExistsParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the tenant exists params +func (o *TenantExistsParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithClassName adds the className to the tenant exists params +func (o *TenantExistsParams) WithClassName(className string) *TenantExistsParams { + o.SetClassName(className) + return o +} + +// SetClassName adds the className to the tenant exists params +func (o *TenantExistsParams) SetClassName(className string) { + o.ClassName = className +} + +// WithConsistency adds the consistency to the tenant exists params +func (o *TenantExistsParams) WithConsistency(consistency *bool) *TenantExistsParams { + o.SetConsistency(consistency) + return o +} + +// SetConsistency adds the consistency to the tenant exists params +func (o *TenantExistsParams) SetConsistency(consistency *bool) { + o.Consistency 
= consistency +} + +// WithTenantName adds the tenantName to the tenant exists params +func (o *TenantExistsParams) WithTenantName(tenantName string) *TenantExistsParams { + o.SetTenantName(tenantName) + return o +} + +// SetTenantName adds the tenantName to the tenant exists params +func (o *TenantExistsParams) SetTenantName(tenantName string) { + o.TenantName = tenantName +} + +// WriteToRequest writes these params to a swagger request +func (o *TenantExistsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param className + if err := r.SetPathParam("className", o.ClassName); err != nil { + return err + } + + if o.Consistency != nil { + + // header param consistency + if err := r.SetHeaderParam("consistency", swag.FormatBool(*o.Consistency)); err != nil { + return err + } + } + + // path param tenantName + if err := r.SetPathParam("tenantName", o.TenantName); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/schema/tenant_exists_responses.go b/platform/dbops/binaries/weaviate-src/client/schema/tenant_exists_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..e877139a32bdcb13894bafd5d3307c8a510e5790 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/schema/tenant_exists_responses.go @@ -0,0 +1,448 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// TenantExistsReader is a Reader for the TenantExists structure. +type TenantExistsReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *TenantExistsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewTenantExistsOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 401: + result := NewTenantExistsUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewTenantExistsForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 404: + result := NewTenantExistsNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewTenantExistsUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewTenantExistsInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewTenantExistsOK creates a TenantExistsOK with default headers values +func NewTenantExistsOK() *TenantExistsOK { + return &TenantExistsOK{} +} + +/* +TenantExistsOK describes a 
response with status code 200, with default header values. + +The tenant exists in the specified class +*/ +type TenantExistsOK struct { +} + +// IsSuccess returns true when this tenant exists o k response has a 2xx status code +func (o *TenantExistsOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this tenant exists o k response has a 3xx status code +func (o *TenantExistsOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this tenant exists o k response has a 4xx status code +func (o *TenantExistsOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this tenant exists o k response has a 5xx status code +func (o *TenantExistsOK) IsServerError() bool { + return false +} + +// IsCode returns true when this tenant exists o k response a status code equal to that given +func (o *TenantExistsOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the tenant exists o k response +func (o *TenantExistsOK) Code() int { + return 200 +} + +func (o *TenantExistsOK) Error() string { + return fmt.Sprintf("[HEAD /schema/{className}/tenants/{tenantName}][%d] tenantExistsOK ", 200) +} + +func (o *TenantExistsOK) String() string { + return fmt.Sprintf("[HEAD /schema/{className}/tenants/{tenantName}][%d] tenantExistsOK ", 200) +} + +func (o *TenantExistsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewTenantExistsUnauthorized creates a TenantExistsUnauthorized with default headers values +func NewTenantExistsUnauthorized() *TenantExistsUnauthorized { + return &TenantExistsUnauthorized{} +} + +/* +TenantExistsUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. 
+*/ +type TenantExistsUnauthorized struct { +} + +// IsSuccess returns true when this tenant exists unauthorized response has a 2xx status code +func (o *TenantExistsUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this tenant exists unauthorized response has a 3xx status code +func (o *TenantExistsUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this tenant exists unauthorized response has a 4xx status code +func (o *TenantExistsUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this tenant exists unauthorized response has a 5xx status code +func (o *TenantExistsUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this tenant exists unauthorized response a status code equal to that given +func (o *TenantExistsUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the tenant exists unauthorized response +func (o *TenantExistsUnauthorized) Code() int { + return 401 +} + +func (o *TenantExistsUnauthorized) Error() string { + return fmt.Sprintf("[HEAD /schema/{className}/tenants/{tenantName}][%d] tenantExistsUnauthorized ", 401) +} + +func (o *TenantExistsUnauthorized) String() string { + return fmt.Sprintf("[HEAD /schema/{className}/tenants/{tenantName}][%d] tenantExistsUnauthorized ", 401) +} + +func (o *TenantExistsUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewTenantExistsForbidden creates a TenantExistsForbidden with default headers values +func NewTenantExistsForbidden() *TenantExistsForbidden { + return &TenantExistsForbidden{} +} + +/* +TenantExistsForbidden describes a response with status code 403, with default header values. 
+ +Forbidden +*/ +type TenantExistsForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this tenant exists forbidden response has a 2xx status code +func (o *TenantExistsForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this tenant exists forbidden response has a 3xx status code +func (o *TenantExistsForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this tenant exists forbidden response has a 4xx status code +func (o *TenantExistsForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this tenant exists forbidden response has a 5xx status code +func (o *TenantExistsForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this tenant exists forbidden response a status code equal to that given +func (o *TenantExistsForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the tenant exists forbidden response +func (o *TenantExistsForbidden) Code() int { + return 403 +} + +func (o *TenantExistsForbidden) Error() string { + return fmt.Sprintf("[HEAD /schema/{className}/tenants/{tenantName}][%d] tenantExistsForbidden %+v", 403, o.Payload) +} + +func (o *TenantExistsForbidden) String() string { + return fmt.Sprintf("[HEAD /schema/{className}/tenants/{tenantName}][%d] tenantExistsForbidden %+v", 403, o.Payload) +} + +func (o *TenantExistsForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *TenantExistsForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewTenantExistsNotFound creates a TenantExistsNotFound with default headers values +func NewTenantExistsNotFound() *TenantExistsNotFound { + 
return &TenantExistsNotFound{} +} + +/* +TenantExistsNotFound describes a response with status code 404, with default header values. + +The tenant not found +*/ +type TenantExistsNotFound struct { +} + +// IsSuccess returns true when this tenant exists not found response has a 2xx status code +func (o *TenantExistsNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this tenant exists not found response has a 3xx status code +func (o *TenantExistsNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this tenant exists not found response has a 4xx status code +func (o *TenantExistsNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this tenant exists not found response has a 5xx status code +func (o *TenantExistsNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this tenant exists not found response a status code equal to that given +func (o *TenantExistsNotFound) IsCode(code int) bool { + return code == 404 +} + +// Code gets the status code for the tenant exists not found response +func (o *TenantExistsNotFound) Code() int { + return 404 +} + +func (o *TenantExistsNotFound) Error() string { + return fmt.Sprintf("[HEAD /schema/{className}/tenants/{tenantName}][%d] tenantExistsNotFound ", 404) +} + +func (o *TenantExistsNotFound) String() string { + return fmt.Sprintf("[HEAD /schema/{className}/tenants/{tenantName}][%d] tenantExistsNotFound ", 404) +} + +func (o *TenantExistsNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewTenantExistsUnprocessableEntity creates a TenantExistsUnprocessableEntity with default headers values +func NewTenantExistsUnprocessableEntity() *TenantExistsUnprocessableEntity { + return &TenantExistsUnprocessableEntity{} +} + +/* +TenantExistsUnprocessableEntity describes a response with status code 422, with default header 
values. + +Invalid Tenant class +*/ +type TenantExistsUnprocessableEntity struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this tenant exists unprocessable entity response has a 2xx status code +func (o *TenantExistsUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this tenant exists unprocessable entity response has a 3xx status code +func (o *TenantExistsUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this tenant exists unprocessable entity response has a 4xx status code +func (o *TenantExistsUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this tenant exists unprocessable entity response has a 5xx status code +func (o *TenantExistsUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this tenant exists unprocessable entity response a status code equal to that given +func (o *TenantExistsUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + +// Code gets the status code for the tenant exists unprocessable entity response +func (o *TenantExistsUnprocessableEntity) Code() int { + return 422 +} + +func (o *TenantExistsUnprocessableEntity) Error() string { + return fmt.Sprintf("[HEAD /schema/{className}/tenants/{tenantName}][%d] tenantExistsUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *TenantExistsUnprocessableEntity) String() string { + return fmt.Sprintf("[HEAD /schema/{className}/tenants/{tenantName}][%d] tenantExistsUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *TenantExistsUnprocessableEntity) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *TenantExistsUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), 
o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewTenantExistsInternalServerError creates a TenantExistsInternalServerError with default headers values +func NewTenantExistsInternalServerError() *TenantExistsInternalServerError { + return &TenantExistsInternalServerError{} +} + +/* +TenantExistsInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. +*/ +type TenantExistsInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this tenant exists internal server error response has a 2xx status code +func (o *TenantExistsInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this tenant exists internal server error response has a 3xx status code +func (o *TenantExistsInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this tenant exists internal server error response has a 4xx status code +func (o *TenantExistsInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this tenant exists internal server error response has a 5xx status code +func (o *TenantExistsInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this tenant exists internal server error response a status code equal to that given +func (o *TenantExistsInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the tenant exists internal server error response +func (o *TenantExistsInternalServerError) Code() int { + return 500 +} + +func (o *TenantExistsInternalServerError) Error() string { + return fmt.Sprintf("[HEAD /schema/{className}/tenants/{tenantName}][%d] tenantExistsInternalServerError %+v", 500, o.Payload) +} + +func (o 
*TenantExistsInternalServerError) String() string { + return fmt.Sprintf("[HEAD /schema/{className}/tenants/{tenantName}][%d] tenantExistsInternalServerError %+v", 500, o.Payload) +} + +func (o *TenantExistsInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *TenantExistsInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/schema/tenants_create_parameters.go b/platform/dbops/binaries/weaviate-src/client/schema/tenants_create_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..e0d13b3c3fea0d956c1a22eb24af1e65ec000052 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/schema/tenants_create_parameters.go @@ -0,0 +1,180 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// NewTenantsCreateParams creates a new TenantsCreateParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. 
+// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewTenantsCreateParams() *TenantsCreateParams { + return &TenantsCreateParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewTenantsCreateParamsWithTimeout creates a new TenantsCreateParams object +// with the ability to set a timeout on a request. +func NewTenantsCreateParamsWithTimeout(timeout time.Duration) *TenantsCreateParams { + return &TenantsCreateParams{ + timeout: timeout, + } +} + +// NewTenantsCreateParamsWithContext creates a new TenantsCreateParams object +// with the ability to set a context for a request. +func NewTenantsCreateParamsWithContext(ctx context.Context) *TenantsCreateParams { + return &TenantsCreateParams{ + Context: ctx, + } +} + +// NewTenantsCreateParamsWithHTTPClient creates a new TenantsCreateParams object +// with the ability to set a custom HTTPClient for a request. +func NewTenantsCreateParamsWithHTTPClient(client *http.Client) *TenantsCreateParams { + return &TenantsCreateParams{ + HTTPClient: client, + } +} + +/* +TenantsCreateParams contains all the parameters to send to the API endpoint + + for the tenants create operation. + + Typically these are written to a http.Request. +*/ +type TenantsCreateParams struct { + + // Body. + Body []*models.Tenant + + // ClassName. + ClassName string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the tenants create params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *TenantsCreateParams) WithDefaults() *TenantsCreateParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the tenants create params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *TenantsCreateParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the tenants create params +func (o *TenantsCreateParams) WithTimeout(timeout time.Duration) *TenantsCreateParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the tenants create params +func (o *TenantsCreateParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the tenants create params +func (o *TenantsCreateParams) WithContext(ctx context.Context) *TenantsCreateParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the tenants create params +func (o *TenantsCreateParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the tenants create params +func (o *TenantsCreateParams) WithHTTPClient(client *http.Client) *TenantsCreateParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the tenants create params +func (o *TenantsCreateParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the tenants create params +func (o *TenantsCreateParams) WithBody(body []*models.Tenant) *TenantsCreateParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the tenants create params +func (o *TenantsCreateParams) SetBody(body []*models.Tenant) { + o.Body = body +} + +// WithClassName adds the className to the tenants create params +func (o *TenantsCreateParams) WithClassName(className string) *TenantsCreateParams { + o.SetClassName(className) + return o +} + +// SetClassName adds the className to the tenants create params +func (o *TenantsCreateParams) SetClassName(className string) { + o.ClassName = className +} + +// WriteToRequest writes these params to a swagger request +func (o *TenantsCreateParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := 
r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + // path param className + if err := r.SetPathParam("className", o.ClassName); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/schema/tenants_create_responses.go b/platform/dbops/binaries/weaviate-src/client/schema/tenants_create_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..811c9ce39cca1c065ae039d6a1ac01199beefbff --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/schema/tenants_create_responses.go @@ -0,0 +1,396 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// TenantsCreateReader is a Reader for the TenantsCreate structure. +type TenantsCreateReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *TenantsCreateReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewTenantsCreateOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 401: + result := NewTenantsCreateUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewTenantsCreateForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewTenantsCreateUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewTenantsCreateInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewTenantsCreateOK creates a TenantsCreateOK with default headers values +func NewTenantsCreateOK() *TenantsCreateOK { + return &TenantsCreateOK{} +} + +/* +TenantsCreateOK describes a response with status code 200, with default header values. 
+ +Added new tenants to the specified class +*/ +type TenantsCreateOK struct { + Payload []*models.Tenant +} + +// IsSuccess returns true when this tenants create o k response has a 2xx status code +func (o *TenantsCreateOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this tenants create o k response has a 3xx status code +func (o *TenantsCreateOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this tenants create o k response has a 4xx status code +func (o *TenantsCreateOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this tenants create o k response has a 5xx status code +func (o *TenantsCreateOK) IsServerError() bool { + return false +} + +// IsCode returns true when this tenants create o k response a status code equal to that given +func (o *TenantsCreateOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the tenants create o k response +func (o *TenantsCreateOK) Code() int { + return 200 +} + +func (o *TenantsCreateOK) Error() string { + return fmt.Sprintf("[POST /schema/{className}/tenants][%d] tenantsCreateOK %+v", 200, o.Payload) +} + +func (o *TenantsCreateOK) String() string { + return fmt.Sprintf("[POST /schema/{className}/tenants][%d] tenantsCreateOK %+v", 200, o.Payload) +} + +func (o *TenantsCreateOK) GetPayload() []*models.Tenant { + return o.Payload +} + +func (o *TenantsCreateOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewTenantsCreateUnauthorized creates a TenantsCreateUnauthorized with default headers values +func NewTenantsCreateUnauthorized() *TenantsCreateUnauthorized { + return &TenantsCreateUnauthorized{} +} + +/* +TenantsCreateUnauthorized describes a response with status code 401, with default header 
values. + +Unauthorized or invalid credentials. +*/ +type TenantsCreateUnauthorized struct { +} + +// IsSuccess returns true when this tenants create unauthorized response has a 2xx status code +func (o *TenantsCreateUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this tenants create unauthorized response has a 3xx status code +func (o *TenantsCreateUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this tenants create unauthorized response has a 4xx status code +func (o *TenantsCreateUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this tenants create unauthorized response has a 5xx status code +func (o *TenantsCreateUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this tenants create unauthorized response a status code equal to that given +func (o *TenantsCreateUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the tenants create unauthorized response +func (o *TenantsCreateUnauthorized) Code() int { + return 401 +} + +func (o *TenantsCreateUnauthorized) Error() string { + return fmt.Sprintf("[POST /schema/{className}/tenants][%d] tenantsCreateUnauthorized ", 401) +} + +func (o *TenantsCreateUnauthorized) String() string { + return fmt.Sprintf("[POST /schema/{className}/tenants][%d] tenantsCreateUnauthorized ", 401) +} + +func (o *TenantsCreateUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewTenantsCreateForbidden creates a TenantsCreateForbidden with default headers values +func NewTenantsCreateForbidden() *TenantsCreateForbidden { + return &TenantsCreateForbidden{} +} + +/* +TenantsCreateForbidden describes a response with status code 403, with default header values. 
+ +Forbidden +*/ +type TenantsCreateForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this tenants create forbidden response has a 2xx status code +func (o *TenantsCreateForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this tenants create forbidden response has a 3xx status code +func (o *TenantsCreateForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this tenants create forbidden response has a 4xx status code +func (o *TenantsCreateForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this tenants create forbidden response has a 5xx status code +func (o *TenantsCreateForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this tenants create forbidden response a status code equal to that given +func (o *TenantsCreateForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the tenants create forbidden response +func (o *TenantsCreateForbidden) Code() int { + return 403 +} + +func (o *TenantsCreateForbidden) Error() string { + return fmt.Sprintf("[POST /schema/{className}/tenants][%d] tenantsCreateForbidden %+v", 403, o.Payload) +} + +func (o *TenantsCreateForbidden) String() string { + return fmt.Sprintf("[POST /schema/{className}/tenants][%d] tenantsCreateForbidden %+v", 403, o.Payload) +} + +func (o *TenantsCreateForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *TenantsCreateForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewTenantsCreateUnprocessableEntity creates a TenantsCreateUnprocessableEntity with default headers values +func 
NewTenantsCreateUnprocessableEntity() *TenantsCreateUnprocessableEntity { + return &TenantsCreateUnprocessableEntity{} +} + +/* +TenantsCreateUnprocessableEntity describes a response with status code 422, with default header values. + +Invalid Tenant class +*/ +type TenantsCreateUnprocessableEntity struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this tenants create unprocessable entity response has a 2xx status code +func (o *TenantsCreateUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this tenants create unprocessable entity response has a 3xx status code +func (o *TenantsCreateUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this tenants create unprocessable entity response has a 4xx status code +func (o *TenantsCreateUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this tenants create unprocessable entity response has a 5xx status code +func (o *TenantsCreateUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this tenants create unprocessable entity response a status code equal to that given +func (o *TenantsCreateUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + +// Code gets the status code for the tenants create unprocessable entity response +func (o *TenantsCreateUnprocessableEntity) Code() int { + return 422 +} + +func (o *TenantsCreateUnprocessableEntity) Error() string { + return fmt.Sprintf("[POST /schema/{className}/tenants][%d] tenantsCreateUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *TenantsCreateUnprocessableEntity) String() string { + return fmt.Sprintf("[POST /schema/{className}/tenants][%d] tenantsCreateUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *TenantsCreateUnprocessableEntity) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *TenantsCreateUnprocessableEntity) 
readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewTenantsCreateInternalServerError creates a TenantsCreateInternalServerError with default headers values +func NewTenantsCreateInternalServerError() *TenantsCreateInternalServerError { + return &TenantsCreateInternalServerError{} +} + +/* +TenantsCreateInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. +*/ +type TenantsCreateInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this tenants create internal server error response has a 2xx status code +func (o *TenantsCreateInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this tenants create internal server error response has a 3xx status code +func (o *TenantsCreateInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this tenants create internal server error response has a 4xx status code +func (o *TenantsCreateInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this tenants create internal server error response has a 5xx status code +func (o *TenantsCreateInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this tenants create internal server error response a status code equal to that given +func (o *TenantsCreateInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the tenants create internal server error response +func (o *TenantsCreateInternalServerError) Code() int { + 
return 500 +} + +func (o *TenantsCreateInternalServerError) Error() string { + return fmt.Sprintf("[POST /schema/{className}/tenants][%d] tenantsCreateInternalServerError %+v", 500, o.Payload) +} + +func (o *TenantsCreateInternalServerError) String() string { + return fmt.Sprintf("[POST /schema/{className}/tenants][%d] tenantsCreateInternalServerError %+v", 500, o.Payload) +} + +func (o *TenantsCreateInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *TenantsCreateInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/schema/tenants_delete_parameters.go b/platform/dbops/binaries/weaviate-src/client/schema/tenants_delete_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..bb1386d2fd35e563e388f6dde7805894d7e852b8 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/schema/tenants_delete_parameters.go @@ -0,0 +1,178 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewTenantsDeleteParams creates a new TenantsDeleteParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewTenantsDeleteParams() *TenantsDeleteParams { + return &TenantsDeleteParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewTenantsDeleteParamsWithTimeout creates a new TenantsDeleteParams object +// with the ability to set a timeout on a request. +func NewTenantsDeleteParamsWithTimeout(timeout time.Duration) *TenantsDeleteParams { + return &TenantsDeleteParams{ + timeout: timeout, + } +} + +// NewTenantsDeleteParamsWithContext creates a new TenantsDeleteParams object +// with the ability to set a context for a request. +func NewTenantsDeleteParamsWithContext(ctx context.Context) *TenantsDeleteParams { + return &TenantsDeleteParams{ + Context: ctx, + } +} + +// NewTenantsDeleteParamsWithHTTPClient creates a new TenantsDeleteParams object +// with the ability to set a custom HTTPClient for a request. +func NewTenantsDeleteParamsWithHTTPClient(client *http.Client) *TenantsDeleteParams { + return &TenantsDeleteParams{ + HTTPClient: client, + } +} + +/* +TenantsDeleteParams contains all the parameters to send to the API endpoint + + for the tenants delete operation. + + Typically these are written to a http.Request. +*/ +type TenantsDeleteParams struct { + + // ClassName. + ClassName string + + // Tenants. 
+ Tenants []string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the tenants delete params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *TenantsDeleteParams) WithDefaults() *TenantsDeleteParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the tenants delete params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *TenantsDeleteParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the tenants delete params +func (o *TenantsDeleteParams) WithTimeout(timeout time.Duration) *TenantsDeleteParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the tenants delete params +func (o *TenantsDeleteParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the tenants delete params +func (o *TenantsDeleteParams) WithContext(ctx context.Context) *TenantsDeleteParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the tenants delete params +func (o *TenantsDeleteParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the tenants delete params +func (o *TenantsDeleteParams) WithHTTPClient(client *http.Client) *TenantsDeleteParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the tenants delete params +func (o *TenantsDeleteParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithClassName adds the className to the tenants delete params +func (o *TenantsDeleteParams) WithClassName(className string) *TenantsDeleteParams { + o.SetClassName(className) + return o +} + +// SetClassName adds the className to the tenants delete params +func (o *TenantsDeleteParams) SetClassName(className string) { + 
o.ClassName = className +} + +// WithTenants adds the tenants to the tenants delete params +func (o *TenantsDeleteParams) WithTenants(tenants []string) *TenantsDeleteParams { + o.SetTenants(tenants) + return o +} + +// SetTenants adds the tenants to the tenants delete params +func (o *TenantsDeleteParams) SetTenants(tenants []string) { + o.Tenants = tenants +} + +// WriteToRequest writes these params to a swagger request +func (o *TenantsDeleteParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param className + if err := r.SetPathParam("className", o.ClassName); err != nil { + return err + } + if o.Tenants != nil { + if err := r.SetBodyParam(o.Tenants); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/schema/tenants_delete_responses.go b/platform/dbops/binaries/weaviate-src/client/schema/tenants_delete_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..a4db249e22bc909d4cef4797caf844fd4628ad34 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/schema/tenants_delete_responses.go @@ -0,0 +1,386 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// TenantsDeleteReader is a Reader for the TenantsDelete structure. +type TenantsDeleteReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *TenantsDeleteReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewTenantsDeleteOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 401: + result := NewTenantsDeleteUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewTenantsDeleteForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewTenantsDeleteUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewTenantsDeleteInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewTenantsDeleteOK creates a TenantsDeleteOK with default headers values +func NewTenantsDeleteOK() *TenantsDeleteOK { + return &TenantsDeleteOK{} +} + +/* +TenantsDeleteOK describes a response with status code 200, with default header values. + +Deleted tenants from specified class. 
+*/ +type TenantsDeleteOK struct { +} + +// IsSuccess returns true when this tenants delete o k response has a 2xx status code +func (o *TenantsDeleteOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this tenants delete o k response has a 3xx status code +func (o *TenantsDeleteOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this tenants delete o k response has a 4xx status code +func (o *TenantsDeleteOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this tenants delete o k response has a 5xx status code +func (o *TenantsDeleteOK) IsServerError() bool { + return false +} + +// IsCode returns true when this tenants delete o k response a status code equal to that given +func (o *TenantsDeleteOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the tenants delete o k response +func (o *TenantsDeleteOK) Code() int { + return 200 +} + +func (o *TenantsDeleteOK) Error() string { + return fmt.Sprintf("[DELETE /schema/{className}/tenants][%d] tenantsDeleteOK ", 200) +} + +func (o *TenantsDeleteOK) String() string { + return fmt.Sprintf("[DELETE /schema/{className}/tenants][%d] tenantsDeleteOK ", 200) +} + +func (o *TenantsDeleteOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewTenantsDeleteUnauthorized creates a TenantsDeleteUnauthorized with default headers values +func NewTenantsDeleteUnauthorized() *TenantsDeleteUnauthorized { + return &TenantsDeleteUnauthorized{} +} + +/* +TenantsDeleteUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. 
+*/ +type TenantsDeleteUnauthorized struct { +} + +// IsSuccess returns true when this tenants delete unauthorized response has a 2xx status code +func (o *TenantsDeleteUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this tenants delete unauthorized response has a 3xx status code +func (o *TenantsDeleteUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this tenants delete unauthorized response has a 4xx status code +func (o *TenantsDeleteUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this tenants delete unauthorized response has a 5xx status code +func (o *TenantsDeleteUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this tenants delete unauthorized response a status code equal to that given +func (o *TenantsDeleteUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the tenants delete unauthorized response +func (o *TenantsDeleteUnauthorized) Code() int { + return 401 +} + +func (o *TenantsDeleteUnauthorized) Error() string { + return fmt.Sprintf("[DELETE /schema/{className}/tenants][%d] tenantsDeleteUnauthorized ", 401) +} + +func (o *TenantsDeleteUnauthorized) String() string { + return fmt.Sprintf("[DELETE /schema/{className}/tenants][%d] tenantsDeleteUnauthorized ", 401) +} + +func (o *TenantsDeleteUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewTenantsDeleteForbidden creates a TenantsDeleteForbidden with default headers values +func NewTenantsDeleteForbidden() *TenantsDeleteForbidden { + return &TenantsDeleteForbidden{} +} + +/* +TenantsDeleteForbidden describes a response with status code 403, with default header values. 
+ +Forbidden +*/ +type TenantsDeleteForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this tenants delete forbidden response has a 2xx status code +func (o *TenantsDeleteForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this tenants delete forbidden response has a 3xx status code +func (o *TenantsDeleteForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this tenants delete forbidden response has a 4xx status code +func (o *TenantsDeleteForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this tenants delete forbidden response has a 5xx status code +func (o *TenantsDeleteForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this tenants delete forbidden response a status code equal to that given +func (o *TenantsDeleteForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the tenants delete forbidden response +func (o *TenantsDeleteForbidden) Code() int { + return 403 +} + +func (o *TenantsDeleteForbidden) Error() string { + return fmt.Sprintf("[DELETE /schema/{className}/tenants][%d] tenantsDeleteForbidden %+v", 403, o.Payload) +} + +func (o *TenantsDeleteForbidden) String() string { + return fmt.Sprintf("[DELETE /schema/{className}/tenants][%d] tenantsDeleteForbidden %+v", 403, o.Payload) +} + +func (o *TenantsDeleteForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *TenantsDeleteForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewTenantsDeleteUnprocessableEntity creates a TenantsDeleteUnprocessableEntity with default headers values +func 
NewTenantsDeleteUnprocessableEntity() *TenantsDeleteUnprocessableEntity { + return &TenantsDeleteUnprocessableEntity{} +} + +/* +TenantsDeleteUnprocessableEntity describes a response with status code 422, with default header values. + +Invalid Tenant class +*/ +type TenantsDeleteUnprocessableEntity struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this tenants delete unprocessable entity response has a 2xx status code +func (o *TenantsDeleteUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this tenants delete unprocessable entity response has a 3xx status code +func (o *TenantsDeleteUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this tenants delete unprocessable entity response has a 4xx status code +func (o *TenantsDeleteUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this tenants delete unprocessable entity response has a 5xx status code +func (o *TenantsDeleteUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this tenants delete unprocessable entity response a status code equal to that given +func (o *TenantsDeleteUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + +// Code gets the status code for the tenants delete unprocessable entity response +func (o *TenantsDeleteUnprocessableEntity) Code() int { + return 422 +} + +func (o *TenantsDeleteUnprocessableEntity) Error() string { + return fmt.Sprintf("[DELETE /schema/{className}/tenants][%d] tenantsDeleteUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *TenantsDeleteUnprocessableEntity) String() string { + return fmt.Sprintf("[DELETE /schema/{className}/tenants][%d] tenantsDeleteUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *TenantsDeleteUnprocessableEntity) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *TenantsDeleteUnprocessableEntity) 
readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewTenantsDeleteInternalServerError creates a TenantsDeleteInternalServerError with default headers values +func NewTenantsDeleteInternalServerError() *TenantsDeleteInternalServerError { + return &TenantsDeleteInternalServerError{} +} + +/* +TenantsDeleteInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. +*/ +type TenantsDeleteInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this tenants delete internal server error response has a 2xx status code +func (o *TenantsDeleteInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this tenants delete internal server error response has a 3xx status code +func (o *TenantsDeleteInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this tenants delete internal server error response has a 4xx status code +func (o *TenantsDeleteInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this tenants delete internal server error response has a 5xx status code +func (o *TenantsDeleteInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this tenants delete internal server error response a status code equal to that given +func (o *TenantsDeleteInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the tenants delete internal server error response +func (o *TenantsDeleteInternalServerError) Code() int { + 
return 500 +} + +func (o *TenantsDeleteInternalServerError) Error() string { + return fmt.Sprintf("[DELETE /schema/{className}/tenants][%d] tenantsDeleteInternalServerError %+v", 500, o.Payload) +} + +func (o *TenantsDeleteInternalServerError) String() string { + return fmt.Sprintf("[DELETE /schema/{className}/tenants][%d] tenantsDeleteInternalServerError %+v", 500, o.Payload) +} + +func (o *TenantsDeleteInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *TenantsDeleteInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/schema/tenants_get_one_parameters.go b/platform/dbops/binaries/weaviate-src/client/schema/tenants_get_one_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..a4a634a38cf0d20fd8659f2574ebae4470ea4040 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/schema/tenants_get_one_parameters.go @@ -0,0 +1,217 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewTenantsGetOneParams creates a new TenantsGetOneParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewTenantsGetOneParams() *TenantsGetOneParams { + return &TenantsGetOneParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewTenantsGetOneParamsWithTimeout creates a new TenantsGetOneParams object +// with the ability to set a timeout on a request. +func NewTenantsGetOneParamsWithTimeout(timeout time.Duration) *TenantsGetOneParams { + return &TenantsGetOneParams{ + timeout: timeout, + } +} + +// NewTenantsGetOneParamsWithContext creates a new TenantsGetOneParams object +// with the ability to set a context for a request. +func NewTenantsGetOneParamsWithContext(ctx context.Context) *TenantsGetOneParams { + return &TenantsGetOneParams{ + Context: ctx, + } +} + +// NewTenantsGetOneParamsWithHTTPClient creates a new TenantsGetOneParams object +// with the ability to set a custom HTTPClient for a request. +func NewTenantsGetOneParamsWithHTTPClient(client *http.Client) *TenantsGetOneParams { + return &TenantsGetOneParams{ + HTTPClient: client, + } +} + +/* +TenantsGetOneParams contains all the parameters to send to the API endpoint + + for the tenants get one operation. + + Typically these are written to a http.Request. +*/ +type TenantsGetOneParams struct { + + // ClassName. + ClassName string + + /* Consistency. 
+ + If consistency is true, the request will be proxied to the leader to ensure strong schema consistency + + Default: true + */ + Consistency *bool + + // TenantName. + TenantName string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the tenants get one params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *TenantsGetOneParams) WithDefaults() *TenantsGetOneParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the tenants get one params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *TenantsGetOneParams) SetDefaults() { + var ( + consistencyDefault = bool(true) + ) + + val := TenantsGetOneParams{ + Consistency: &consistencyDefault, + } + + val.timeout = o.timeout + val.Context = o.Context + val.HTTPClient = o.HTTPClient + *o = val +} + +// WithTimeout adds the timeout to the tenants get one params +func (o *TenantsGetOneParams) WithTimeout(timeout time.Duration) *TenantsGetOneParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the tenants get one params +func (o *TenantsGetOneParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the tenants get one params +func (o *TenantsGetOneParams) WithContext(ctx context.Context) *TenantsGetOneParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the tenants get one params +func (o *TenantsGetOneParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the tenants get one params +func (o *TenantsGetOneParams) WithHTTPClient(client *http.Client) *TenantsGetOneParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the tenants get one params +func (o *TenantsGetOneParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = 
client +} + +// WithClassName adds the className to the tenants get one params +func (o *TenantsGetOneParams) WithClassName(className string) *TenantsGetOneParams { + o.SetClassName(className) + return o +} + +// SetClassName adds the className to the tenants get one params +func (o *TenantsGetOneParams) SetClassName(className string) { + o.ClassName = className +} + +// WithConsistency adds the consistency to the tenants get one params +func (o *TenantsGetOneParams) WithConsistency(consistency *bool) *TenantsGetOneParams { + o.SetConsistency(consistency) + return o +} + +// SetConsistency adds the consistency to the tenants get one params +func (o *TenantsGetOneParams) SetConsistency(consistency *bool) { + o.Consistency = consistency +} + +// WithTenantName adds the tenantName to the tenants get one params +func (o *TenantsGetOneParams) WithTenantName(tenantName string) *TenantsGetOneParams { + o.SetTenantName(tenantName) + return o +} + +// SetTenantName adds the tenantName to the tenants get one params +func (o *TenantsGetOneParams) SetTenantName(tenantName string) { + o.TenantName = tenantName +} + +// WriteToRequest writes these params to a swagger request +func (o *TenantsGetOneParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param className + if err := r.SetPathParam("className", o.ClassName); err != nil { + return err + } + + if o.Consistency != nil { + + // header param consistency + if err := r.SetHeaderParam("consistency", swag.FormatBool(*o.Consistency)); err != nil { + return err + } + } + + // path param tenantName + if err := r.SetPathParam("tenantName", o.TenantName); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/schema/tenants_get_one_responses.go b/platform/dbops/binaries/weaviate-src/client/schema/tenants_get_one_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..5da1480c732ffb32691648297ae556b5d4f5e53d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/schema/tenants_get_one_responses.go @@ -0,0 +1,460 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// TenantsGetOneReader is a Reader for the TenantsGetOne structure. +type TenantsGetOneReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *TenantsGetOneReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewTenantsGetOneOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 401: + result := NewTenantsGetOneUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewTenantsGetOneForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 404: + result := NewTenantsGetOneNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewTenantsGetOneUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewTenantsGetOneInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewTenantsGetOneOK creates a TenantsGetOneOK with default headers values +func NewTenantsGetOneOK() *TenantsGetOneOK { + return &TenantsGetOneOK{} +} + +/* +TenantsGetOneOK describes a response with status code 200, with default header values. 
+ +load the tenant given the specified class +*/ +type TenantsGetOneOK struct { + Payload *models.Tenant +} + +// IsSuccess returns true when this tenants get one o k response has a 2xx status code +func (o *TenantsGetOneOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this tenants get one o k response has a 3xx status code +func (o *TenantsGetOneOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this tenants get one o k response has a 4xx status code +func (o *TenantsGetOneOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this tenants get one o k response has a 5xx status code +func (o *TenantsGetOneOK) IsServerError() bool { + return false +} + +// IsCode returns true when this tenants get one o k response a status code equal to that given +func (o *TenantsGetOneOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the tenants get one o k response +func (o *TenantsGetOneOK) Code() int { + return 200 +} + +func (o *TenantsGetOneOK) Error() string { + return fmt.Sprintf("[GET /schema/{className}/tenants/{tenantName}][%d] tenantsGetOneOK %+v", 200, o.Payload) +} + +func (o *TenantsGetOneOK) String() string { + return fmt.Sprintf("[GET /schema/{className}/tenants/{tenantName}][%d] tenantsGetOneOK %+v", 200, o.Payload) +} + +func (o *TenantsGetOneOK) GetPayload() *models.Tenant { + return o.Payload +} + +func (o *TenantsGetOneOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Tenant) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewTenantsGetOneUnauthorized creates a TenantsGetOneUnauthorized with default headers values +func NewTenantsGetOneUnauthorized() *TenantsGetOneUnauthorized { + return &TenantsGetOneUnauthorized{} +} + +/* +TenantsGetOneUnauthorized 
describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. +*/ +type TenantsGetOneUnauthorized struct { +} + +// IsSuccess returns true when this tenants get one unauthorized response has a 2xx status code +func (o *TenantsGetOneUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this tenants get one unauthorized response has a 3xx status code +func (o *TenantsGetOneUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this tenants get one unauthorized response has a 4xx status code +func (o *TenantsGetOneUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this tenants get one unauthorized response has a 5xx status code +func (o *TenantsGetOneUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this tenants get one unauthorized response a status code equal to that given +func (o *TenantsGetOneUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the tenants get one unauthorized response +func (o *TenantsGetOneUnauthorized) Code() int { + return 401 +} + +func (o *TenantsGetOneUnauthorized) Error() string { + return fmt.Sprintf("[GET /schema/{className}/tenants/{tenantName}][%d] tenantsGetOneUnauthorized ", 401) +} + +func (o *TenantsGetOneUnauthorized) String() string { + return fmt.Sprintf("[GET /schema/{className}/tenants/{tenantName}][%d] tenantsGetOneUnauthorized ", 401) +} + +func (o *TenantsGetOneUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewTenantsGetOneForbidden creates a TenantsGetOneForbidden with default headers values +func NewTenantsGetOneForbidden() *TenantsGetOneForbidden { + return &TenantsGetOneForbidden{} +} + +/* +TenantsGetOneForbidden describes a response with status code 403, with default header values. 
+ +Forbidden +*/ +type TenantsGetOneForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this tenants get one forbidden response has a 2xx status code +func (o *TenantsGetOneForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this tenants get one forbidden response has a 3xx status code +func (o *TenantsGetOneForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this tenants get one forbidden response has a 4xx status code +func (o *TenantsGetOneForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this tenants get one forbidden response has a 5xx status code +func (o *TenantsGetOneForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this tenants get one forbidden response a status code equal to that given +func (o *TenantsGetOneForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the tenants get one forbidden response +func (o *TenantsGetOneForbidden) Code() int { + return 403 +} + +func (o *TenantsGetOneForbidden) Error() string { + return fmt.Sprintf("[GET /schema/{className}/tenants/{tenantName}][%d] tenantsGetOneForbidden %+v", 403, o.Payload) +} + +func (o *TenantsGetOneForbidden) String() string { + return fmt.Sprintf("[GET /schema/{className}/tenants/{tenantName}][%d] tenantsGetOneForbidden %+v", 403, o.Payload) +} + +func (o *TenantsGetOneForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *TenantsGetOneForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewTenantsGetOneNotFound creates a TenantsGetOneNotFound with default headers values +func NewTenantsGetOneNotFound() 
*TenantsGetOneNotFound { + return &TenantsGetOneNotFound{} +} + +/* +TenantsGetOneNotFound describes a response with status code 404, with default header values. + +Tenant not found +*/ +type TenantsGetOneNotFound struct { +} + +// IsSuccess returns true when this tenants get one not found response has a 2xx status code +func (o *TenantsGetOneNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this tenants get one not found response has a 3xx status code +func (o *TenantsGetOneNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this tenants get one not found response has a 4xx status code +func (o *TenantsGetOneNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this tenants get one not found response has a 5xx status code +func (o *TenantsGetOneNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this tenants get one not found response a status code equal to that given +func (o *TenantsGetOneNotFound) IsCode(code int) bool { + return code == 404 +} + +// Code gets the status code for the tenants get one not found response +func (o *TenantsGetOneNotFound) Code() int { + return 404 +} + +func (o *TenantsGetOneNotFound) Error() string { + return fmt.Sprintf("[GET /schema/{className}/tenants/{tenantName}][%d] tenantsGetOneNotFound ", 404) +} + +func (o *TenantsGetOneNotFound) String() string { + return fmt.Sprintf("[GET /schema/{className}/tenants/{tenantName}][%d] tenantsGetOneNotFound ", 404) +} + +func (o *TenantsGetOneNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewTenantsGetOneUnprocessableEntity creates a TenantsGetOneUnprocessableEntity with default headers values +func NewTenantsGetOneUnprocessableEntity() *TenantsGetOneUnprocessableEntity { + return &TenantsGetOneUnprocessableEntity{} +} + +/* +TenantsGetOneUnprocessableEntity describes a 
response with status code 422, with default header values. + +Invalid tenant or class +*/ +type TenantsGetOneUnprocessableEntity struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this tenants get one unprocessable entity response has a 2xx status code +func (o *TenantsGetOneUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this tenants get one unprocessable entity response has a 3xx status code +func (o *TenantsGetOneUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this tenants get one unprocessable entity response has a 4xx status code +func (o *TenantsGetOneUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this tenants get one unprocessable entity response has a 5xx status code +func (o *TenantsGetOneUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this tenants get one unprocessable entity response a status code equal to that given +func (o *TenantsGetOneUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + +// Code gets the status code for the tenants get one unprocessable entity response +func (o *TenantsGetOneUnprocessableEntity) Code() int { + return 422 +} + +func (o *TenantsGetOneUnprocessableEntity) Error() string { + return fmt.Sprintf("[GET /schema/{className}/tenants/{tenantName}][%d] tenantsGetOneUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *TenantsGetOneUnprocessableEntity) String() string { + return fmt.Sprintf("[GET /schema/{className}/tenants/{tenantName}][%d] tenantsGetOneUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *TenantsGetOneUnprocessableEntity) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *TenantsGetOneUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + 
+ // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewTenantsGetOneInternalServerError creates a TenantsGetOneInternalServerError with default headers values +func NewTenantsGetOneInternalServerError() *TenantsGetOneInternalServerError { + return &TenantsGetOneInternalServerError{} +} + +/* +TenantsGetOneInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. +*/ +type TenantsGetOneInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this tenants get one internal server error response has a 2xx status code +func (o *TenantsGetOneInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this tenants get one internal server error response has a 3xx status code +func (o *TenantsGetOneInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this tenants get one internal server error response has a 4xx status code +func (o *TenantsGetOneInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this tenants get one internal server error response has a 5xx status code +func (o *TenantsGetOneInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this tenants get one internal server error response a status code equal to that given +func (o *TenantsGetOneInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the tenants get one internal server error response +func (o *TenantsGetOneInternalServerError) Code() int { + return 500 +} + +func (o *TenantsGetOneInternalServerError) Error() string { + return fmt.Sprintf("[GET 
/schema/{className}/tenants/{tenantName}][%d] tenantsGetOneInternalServerError %+v", 500, o.Payload) +} + +func (o *TenantsGetOneInternalServerError) String() string { + return fmt.Sprintf("[GET /schema/{className}/tenants/{tenantName}][%d] tenantsGetOneInternalServerError %+v", 500, o.Payload) +} + +func (o *TenantsGetOneInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *TenantsGetOneInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/schema/tenants_get_parameters.go b/platform/dbops/binaries/weaviate-src/client/schema/tenants_get_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..223cfe270f97853bd8dab057447543dc0ece9756 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/schema/tenants_get_parameters.go @@ -0,0 +1,198 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewTenantsGetParams creates a new TenantsGetParams object, +// with the default timeout for this client. 
+// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewTenantsGetParams() *TenantsGetParams { + return &TenantsGetParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewTenantsGetParamsWithTimeout creates a new TenantsGetParams object +// with the ability to set a timeout on a request. +func NewTenantsGetParamsWithTimeout(timeout time.Duration) *TenantsGetParams { + return &TenantsGetParams{ + timeout: timeout, + } +} + +// NewTenantsGetParamsWithContext creates a new TenantsGetParams object +// with the ability to set a context for a request. +func NewTenantsGetParamsWithContext(ctx context.Context) *TenantsGetParams { + return &TenantsGetParams{ + Context: ctx, + } +} + +// NewTenantsGetParamsWithHTTPClient creates a new TenantsGetParams object +// with the ability to set a custom HTTPClient for a request. +func NewTenantsGetParamsWithHTTPClient(client *http.Client) *TenantsGetParams { + return &TenantsGetParams{ + HTTPClient: client, + } +} + +/* +TenantsGetParams contains all the parameters to send to the API endpoint + + for the tenants get operation. + + Typically these are written to a http.Request. +*/ +type TenantsGetParams struct { + + // ClassName. + ClassName string + + /* Consistency. + + If consistency is true, the request will be proxied to the leader to ensure strong schema consistency + + Default: true + */ + Consistency *bool + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the tenants get params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *TenantsGetParams) WithDefaults() *TenantsGetParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the tenants get params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *TenantsGetParams) SetDefaults() { + var ( + consistencyDefault = bool(true) + ) + + val := TenantsGetParams{ + Consistency: &consistencyDefault, + } + + val.timeout = o.timeout + val.Context = o.Context + val.HTTPClient = o.HTTPClient + *o = val +} + +// WithTimeout adds the timeout to the tenants get params +func (o *TenantsGetParams) WithTimeout(timeout time.Duration) *TenantsGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the tenants get params +func (o *TenantsGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the tenants get params +func (o *TenantsGetParams) WithContext(ctx context.Context) *TenantsGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the tenants get params +func (o *TenantsGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the tenants get params +func (o *TenantsGetParams) WithHTTPClient(client *http.Client) *TenantsGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the tenants get params +func (o *TenantsGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithClassName adds the className to the tenants get params +func (o *TenantsGetParams) WithClassName(className string) *TenantsGetParams { + o.SetClassName(className) + return o +} + +// SetClassName adds the className to the tenants get params +func (o *TenantsGetParams) SetClassName(className string) { + o.ClassName = className +} + +// WithConsistency adds the consistency to the tenants get params +func (o *TenantsGetParams) WithConsistency(consistency *bool) *TenantsGetParams { + o.SetConsistency(consistency) + return o +} + +// SetConsistency adds the consistency to the tenants get params +func (o *TenantsGetParams) SetConsistency(consistency *bool) { + o.Consistency = consistency +} + +// WriteToRequest writes these 
params to a swagger request +func (o *TenantsGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param className + if err := r.SetPathParam("className", o.ClassName); err != nil { + return err + } + + if o.Consistency != nil { + + // header param consistency + if err := r.SetHeaderParam("consistency", swag.FormatBool(*o.Consistency)); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/schema/tenants_get_responses.go b/platform/dbops/binaries/weaviate-src/client/schema/tenants_get_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..ec77575715c20b2bbf0b258ac23efacf05b31a92 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/schema/tenants_get_responses.go @@ -0,0 +1,396 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// TenantsGetReader is a Reader for the TenantsGet structure. +type TenantsGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *TenantsGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewTenantsGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 401: + result := NewTenantsGetUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewTenantsGetForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewTenantsGetUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewTenantsGetInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewTenantsGetOK creates a TenantsGetOK with default headers values +func NewTenantsGetOK() *TenantsGetOK { + return &TenantsGetOK{} +} + +/* +TenantsGetOK describes a response with status code 200, with default header values. + +tenants from specified class. 
+*/ +type TenantsGetOK struct { + Payload []*models.Tenant +} + +// IsSuccess returns true when this tenants get o k response has a 2xx status code +func (o *TenantsGetOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this tenants get o k response has a 3xx status code +func (o *TenantsGetOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this tenants get o k response has a 4xx status code +func (o *TenantsGetOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this tenants get o k response has a 5xx status code +func (o *TenantsGetOK) IsServerError() bool { + return false +} + +// IsCode returns true when this tenants get o k response a status code equal to that given +func (o *TenantsGetOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the tenants get o k response +func (o *TenantsGetOK) Code() int { + return 200 +} + +func (o *TenantsGetOK) Error() string { + return fmt.Sprintf("[GET /schema/{className}/tenants][%d] tenantsGetOK %+v", 200, o.Payload) +} + +func (o *TenantsGetOK) String() string { + return fmt.Sprintf("[GET /schema/{className}/tenants][%d] tenantsGetOK %+v", 200, o.Payload) +} + +func (o *TenantsGetOK) GetPayload() []*models.Tenant { + return o.Payload +} + +func (o *TenantsGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewTenantsGetUnauthorized creates a TenantsGetUnauthorized with default headers values +func NewTenantsGetUnauthorized() *TenantsGetUnauthorized { + return &TenantsGetUnauthorized{} +} + +/* +TenantsGetUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. 
+*/ +type TenantsGetUnauthorized struct { +} + +// IsSuccess returns true when this tenants get unauthorized response has a 2xx status code +func (o *TenantsGetUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this tenants get unauthorized response has a 3xx status code +func (o *TenantsGetUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this tenants get unauthorized response has a 4xx status code +func (o *TenantsGetUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this tenants get unauthorized response has a 5xx status code +func (o *TenantsGetUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this tenants get unauthorized response a status code equal to that given +func (o *TenantsGetUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the tenants get unauthorized response +func (o *TenantsGetUnauthorized) Code() int { + return 401 +} + +func (o *TenantsGetUnauthorized) Error() string { + return fmt.Sprintf("[GET /schema/{className}/tenants][%d] tenantsGetUnauthorized ", 401) +} + +func (o *TenantsGetUnauthorized) String() string { + return fmt.Sprintf("[GET /schema/{className}/tenants][%d] tenantsGetUnauthorized ", 401) +} + +func (o *TenantsGetUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewTenantsGetForbidden creates a TenantsGetForbidden with default headers values +func NewTenantsGetForbidden() *TenantsGetForbidden { + return &TenantsGetForbidden{} +} + +/* +TenantsGetForbidden describes a response with status code 403, with default header values. 
+ +Forbidden +*/ +type TenantsGetForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this tenants get forbidden response has a 2xx status code +func (o *TenantsGetForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this tenants get forbidden response has a 3xx status code +func (o *TenantsGetForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this tenants get forbidden response has a 4xx status code +func (o *TenantsGetForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this tenants get forbidden response has a 5xx status code +func (o *TenantsGetForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this tenants get forbidden response a status code equal to that given +func (o *TenantsGetForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the tenants get forbidden response +func (o *TenantsGetForbidden) Code() int { + return 403 +} + +func (o *TenantsGetForbidden) Error() string { + return fmt.Sprintf("[GET /schema/{className}/tenants][%d] tenantsGetForbidden %+v", 403, o.Payload) +} + +func (o *TenantsGetForbidden) String() string { + return fmt.Sprintf("[GET /schema/{className}/tenants][%d] tenantsGetForbidden %+v", 403, o.Payload) +} + +func (o *TenantsGetForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *TenantsGetForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewTenantsGetUnprocessableEntity creates a TenantsGetUnprocessableEntity with default headers values +func NewTenantsGetUnprocessableEntity() *TenantsGetUnprocessableEntity { + return 
&TenantsGetUnprocessableEntity{} +} + +/* +TenantsGetUnprocessableEntity describes a response with status code 422, with default header values. + +Invalid Tenant class +*/ +type TenantsGetUnprocessableEntity struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this tenants get unprocessable entity response has a 2xx status code +func (o *TenantsGetUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this tenants get unprocessable entity response has a 3xx status code +func (o *TenantsGetUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this tenants get unprocessable entity response has a 4xx status code +func (o *TenantsGetUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this tenants get unprocessable entity response has a 5xx status code +func (o *TenantsGetUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this tenants get unprocessable entity response a status code equal to that given +func (o *TenantsGetUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + +// Code gets the status code for the tenants get unprocessable entity response +func (o *TenantsGetUnprocessableEntity) Code() int { + return 422 +} + +func (o *TenantsGetUnprocessableEntity) Error() string { + return fmt.Sprintf("[GET /schema/{className}/tenants][%d] tenantsGetUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *TenantsGetUnprocessableEntity) String() string { + return fmt.Sprintf("[GET /schema/{className}/tenants][%d] tenantsGetUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *TenantsGetUnprocessableEntity) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *TenantsGetUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // 
response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewTenantsGetInternalServerError creates a TenantsGetInternalServerError with default headers values +func NewTenantsGetInternalServerError() *TenantsGetInternalServerError { + return &TenantsGetInternalServerError{} +} + +/* +TenantsGetInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. +*/ +type TenantsGetInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this tenants get internal server error response has a 2xx status code +func (o *TenantsGetInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this tenants get internal server error response has a 3xx status code +func (o *TenantsGetInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this tenants get internal server error response has a 4xx status code +func (o *TenantsGetInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this tenants get internal server error response has a 5xx status code +func (o *TenantsGetInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this tenants get internal server error response a status code equal to that given +func (o *TenantsGetInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the tenants get internal server error response +func (o *TenantsGetInternalServerError) Code() int { + return 500 +} + +func (o *TenantsGetInternalServerError) Error() string { + return fmt.Sprintf("[GET /schema/{className}/tenants][%d] tenantsGetInternalServerError %+v", 500, o.Payload) +} + +func (o 
*TenantsGetInternalServerError) String() string { + return fmt.Sprintf("[GET /schema/{className}/tenants][%d] tenantsGetInternalServerError %+v", 500, o.Payload) +} + +func (o *TenantsGetInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *TenantsGetInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/schema/tenants_update_parameters.go b/platform/dbops/binaries/weaviate-src/client/schema/tenants_update_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..14dd179127cf7f972d4ed182d986c1fe5e5c5eb2 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/schema/tenants_update_parameters.go @@ -0,0 +1,180 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// NewTenantsUpdateParams creates a new TenantsUpdateParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. 
+// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewTenantsUpdateParams() *TenantsUpdateParams { + return &TenantsUpdateParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewTenantsUpdateParamsWithTimeout creates a new TenantsUpdateParams object +// with the ability to set a timeout on a request. +func NewTenantsUpdateParamsWithTimeout(timeout time.Duration) *TenantsUpdateParams { + return &TenantsUpdateParams{ + timeout: timeout, + } +} + +// NewTenantsUpdateParamsWithContext creates a new TenantsUpdateParams object +// with the ability to set a context for a request. +func NewTenantsUpdateParamsWithContext(ctx context.Context) *TenantsUpdateParams { + return &TenantsUpdateParams{ + Context: ctx, + } +} + +// NewTenantsUpdateParamsWithHTTPClient creates a new TenantsUpdateParams object +// with the ability to set a custom HTTPClient for a request. +func NewTenantsUpdateParamsWithHTTPClient(client *http.Client) *TenantsUpdateParams { + return &TenantsUpdateParams{ + HTTPClient: client, + } +} + +/* +TenantsUpdateParams contains all the parameters to send to the API endpoint + + for the tenants update operation. + + Typically these are written to a http.Request. +*/ +type TenantsUpdateParams struct { + + // Body. + Body []*models.Tenant + + // ClassName. + ClassName string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the tenants update params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *TenantsUpdateParams) WithDefaults() *TenantsUpdateParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the tenants update params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *TenantsUpdateParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the tenants update params +func (o *TenantsUpdateParams) WithTimeout(timeout time.Duration) *TenantsUpdateParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the tenants update params +func (o *TenantsUpdateParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the tenants update params +func (o *TenantsUpdateParams) WithContext(ctx context.Context) *TenantsUpdateParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the tenants update params +func (o *TenantsUpdateParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the tenants update params +func (o *TenantsUpdateParams) WithHTTPClient(client *http.Client) *TenantsUpdateParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the tenants update params +func (o *TenantsUpdateParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the tenants update params +func (o *TenantsUpdateParams) WithBody(body []*models.Tenant) *TenantsUpdateParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the tenants update params +func (o *TenantsUpdateParams) SetBody(body []*models.Tenant) { + o.Body = body +} + +// WithClassName adds the className to the tenants update params +func (o *TenantsUpdateParams) WithClassName(className string) *TenantsUpdateParams { + o.SetClassName(className) + return o +} + +// SetClassName adds the className to the tenants update params +func (o *TenantsUpdateParams) SetClassName(className string) { + o.ClassName = className +} + +// WriteToRequest writes these params to a swagger request +func (o *TenantsUpdateParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := 
r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + // path param className + if err := r.SetPathParam("className", o.ClassName); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/schema/tenants_update_responses.go b/platform/dbops/binaries/weaviate-src/client/schema/tenants_update_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..fa829f32598c0ededdfd84a1823d4c7d53d518a5 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/schema/tenants_update_responses.go @@ -0,0 +1,396 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// TenantsUpdateReader is a Reader for the TenantsUpdate structure. +type TenantsUpdateReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *TenantsUpdateReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewTenantsUpdateOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 401: + result := NewTenantsUpdateUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewTenantsUpdateForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewTenantsUpdateUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewTenantsUpdateInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewTenantsUpdateOK creates a TenantsUpdateOK with default headers values +func NewTenantsUpdateOK() *TenantsUpdateOK { + return &TenantsUpdateOK{} +} + +/* +TenantsUpdateOK describes a response with status code 200, with default header values. 
+ +Updated tenants of the specified class +*/ +type TenantsUpdateOK struct { + Payload []*models.Tenant +} + +// IsSuccess returns true when this tenants update o k response has a 2xx status code +func (o *TenantsUpdateOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this tenants update o k response has a 3xx status code +func (o *TenantsUpdateOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this tenants update o k response has a 4xx status code +func (o *TenantsUpdateOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this tenants update o k response has a 5xx status code +func (o *TenantsUpdateOK) IsServerError() bool { + return false +} + +// IsCode returns true when this tenants update o k response a status code equal to that given +func (o *TenantsUpdateOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the tenants update o k response +func (o *TenantsUpdateOK) Code() int { + return 200 +} + +func (o *TenantsUpdateOK) Error() string { + return fmt.Sprintf("[PUT /schema/{className}/tenants][%d] tenantsUpdateOK %+v", 200, o.Payload) +} + +func (o *TenantsUpdateOK) String() string { + return fmt.Sprintf("[PUT /schema/{className}/tenants][%d] tenantsUpdateOK %+v", 200, o.Payload) +} + +func (o *TenantsUpdateOK) GetPayload() []*models.Tenant { + return o.Payload +} + +func (o *TenantsUpdateOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewTenantsUpdateUnauthorized creates a TenantsUpdateUnauthorized with default headers values +func NewTenantsUpdateUnauthorized() *TenantsUpdateUnauthorized { + return &TenantsUpdateUnauthorized{} +} + +/* +TenantsUpdateUnauthorized describes a response with status code 401, with default header 
values. + +Unauthorized or invalid credentials. +*/ +type TenantsUpdateUnauthorized struct { +} + +// IsSuccess returns true when this tenants update unauthorized response has a 2xx status code +func (o *TenantsUpdateUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this tenants update unauthorized response has a 3xx status code +func (o *TenantsUpdateUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this tenants update unauthorized response has a 4xx status code +func (o *TenantsUpdateUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this tenants update unauthorized response has a 5xx status code +func (o *TenantsUpdateUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this tenants update unauthorized response a status code equal to that given +func (o *TenantsUpdateUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the tenants update unauthorized response +func (o *TenantsUpdateUnauthorized) Code() int { + return 401 +} + +func (o *TenantsUpdateUnauthorized) Error() string { + return fmt.Sprintf("[PUT /schema/{className}/tenants][%d] tenantsUpdateUnauthorized ", 401) +} + +func (o *TenantsUpdateUnauthorized) String() string { + return fmt.Sprintf("[PUT /schema/{className}/tenants][%d] tenantsUpdateUnauthorized ", 401) +} + +func (o *TenantsUpdateUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewTenantsUpdateForbidden creates a TenantsUpdateForbidden with default headers values +func NewTenantsUpdateForbidden() *TenantsUpdateForbidden { + return &TenantsUpdateForbidden{} +} + +/* +TenantsUpdateForbidden describes a response with status code 403, with default header values. 
+ +Forbidden +*/ +type TenantsUpdateForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this tenants update forbidden response has a 2xx status code +func (o *TenantsUpdateForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this tenants update forbidden response has a 3xx status code +func (o *TenantsUpdateForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this tenants update forbidden response has a 4xx status code +func (o *TenantsUpdateForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this tenants update forbidden response has a 5xx status code +func (o *TenantsUpdateForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this tenants update forbidden response a status code equal to that given +func (o *TenantsUpdateForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the tenants update forbidden response +func (o *TenantsUpdateForbidden) Code() int { + return 403 +} + +func (o *TenantsUpdateForbidden) Error() string { + return fmt.Sprintf("[PUT /schema/{className}/tenants][%d] tenantsUpdateForbidden %+v", 403, o.Payload) +} + +func (o *TenantsUpdateForbidden) String() string { + return fmt.Sprintf("[PUT /schema/{className}/tenants][%d] tenantsUpdateForbidden %+v", 403, o.Payload) +} + +func (o *TenantsUpdateForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *TenantsUpdateForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewTenantsUpdateUnprocessableEntity creates a TenantsUpdateUnprocessableEntity with default headers values +func NewTenantsUpdateUnprocessableEntity() 
*TenantsUpdateUnprocessableEntity { + return &TenantsUpdateUnprocessableEntity{} +} + +/* +TenantsUpdateUnprocessableEntity describes a response with status code 422, with default header values. + +Invalid Tenant class +*/ +type TenantsUpdateUnprocessableEntity struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this tenants update unprocessable entity response has a 2xx status code +func (o *TenantsUpdateUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this tenants update unprocessable entity response has a 3xx status code +func (o *TenantsUpdateUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this tenants update unprocessable entity response has a 4xx status code +func (o *TenantsUpdateUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this tenants update unprocessable entity response has a 5xx status code +func (o *TenantsUpdateUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this tenants update unprocessable entity response a status code equal to that given +func (o *TenantsUpdateUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + +// Code gets the status code for the tenants update unprocessable entity response +func (o *TenantsUpdateUnprocessableEntity) Code() int { + return 422 +} + +func (o *TenantsUpdateUnprocessableEntity) Error() string { + return fmt.Sprintf("[PUT /schema/{className}/tenants][%d] tenantsUpdateUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *TenantsUpdateUnprocessableEntity) String() string { + return fmt.Sprintf("[PUT /schema/{className}/tenants][%d] tenantsUpdateUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *TenantsUpdateUnprocessableEntity) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *TenantsUpdateUnprocessableEntity) readResponse(response runtime.ClientResponse, 
consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewTenantsUpdateInternalServerError creates a TenantsUpdateInternalServerError with default headers values +func NewTenantsUpdateInternalServerError() *TenantsUpdateInternalServerError { + return &TenantsUpdateInternalServerError{} +} + +/* +TenantsUpdateInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. +*/ +type TenantsUpdateInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this tenants update internal server error response has a 2xx status code +func (o *TenantsUpdateInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this tenants update internal server error response has a 3xx status code +func (o *TenantsUpdateInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this tenants update internal server error response has a 4xx status code +func (o *TenantsUpdateInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this tenants update internal server error response has a 5xx status code +func (o *TenantsUpdateInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this tenants update internal server error response a status code equal to that given +func (o *TenantsUpdateInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the tenants update internal server error response +func (o *TenantsUpdateInternalServerError) Code() int { + return 500 +} + +func (o 
*TenantsUpdateInternalServerError) Error() string { + return fmt.Sprintf("[PUT /schema/{className}/tenants][%d] tenantsUpdateInternalServerError %+v", 500, o.Payload) +} + +func (o *TenantsUpdateInternalServerError) String() string { + return fmt.Sprintf("[PUT /schema/{className}/tenants][%d] tenantsUpdateInternalServerError %+v", 500, o.Payload) +} + +func (o *TenantsUpdateInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *TenantsUpdateInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/users/activate_user_parameters.go b/platform/dbops/binaries/weaviate-src/client/users/activate_user_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..6f9a5b68a03afb3134c68bdabcbc9136f07f0147 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/users/activate_user_parameters.go @@ -0,0 +1,162 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package users + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewActivateUserParams creates a new ActivateUserParams object, +// with the default timeout for this client. 
+// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewActivateUserParams() *ActivateUserParams { + return &ActivateUserParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewActivateUserParamsWithTimeout creates a new ActivateUserParams object +// with the ability to set a timeout on a request. +func NewActivateUserParamsWithTimeout(timeout time.Duration) *ActivateUserParams { + return &ActivateUserParams{ + timeout: timeout, + } +} + +// NewActivateUserParamsWithContext creates a new ActivateUserParams object +// with the ability to set a context for a request. +func NewActivateUserParamsWithContext(ctx context.Context) *ActivateUserParams { + return &ActivateUserParams{ + Context: ctx, + } +} + +// NewActivateUserParamsWithHTTPClient creates a new ActivateUserParams object +// with the ability to set a custom HTTPClient for a request. +func NewActivateUserParamsWithHTTPClient(client *http.Client) *ActivateUserParams { + return &ActivateUserParams{ + HTTPClient: client, + } +} + +/* +ActivateUserParams contains all the parameters to send to the API endpoint + + for the activate user operation. + + Typically these are written to a http.Request. +*/ +type ActivateUserParams struct { + + /* UserID. + + user id + */ + UserID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the activate user params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *ActivateUserParams) WithDefaults() *ActivateUserParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the activate user params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *ActivateUserParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the activate user params +func (o *ActivateUserParams) WithTimeout(timeout time.Duration) *ActivateUserParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the activate user params +func (o *ActivateUserParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the activate user params +func (o *ActivateUserParams) WithContext(ctx context.Context) *ActivateUserParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the activate user params +func (o *ActivateUserParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the activate user params +func (o *ActivateUserParams) WithHTTPClient(client *http.Client) *ActivateUserParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the activate user params +func (o *ActivateUserParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithUserID adds the userID to the activate user params +func (o *ActivateUserParams) WithUserID(userID string) *ActivateUserParams { + o.SetUserID(userID) + return o +} + +// SetUserID adds the userId to the activate user params +func (o *ActivateUserParams) SetUserID(userID string) { + o.UserID = userID +} + +// WriteToRequest writes these params to a swagger request +func (o *ActivateUserParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param user_id + if err := r.SetPathParam("user_id", o.UserID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/users/activate_user_responses.go b/platform/dbops/binaries/weaviate-src/client/users/activate_user_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..cf26e840f7e9838a3b670002876d642120222a21 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/users/activate_user_responses.go @@ -0,0 +1,584 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package users + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// ActivateUserReader is a Reader for the ActivateUser structure. +type ActivateUserReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *ActivateUserReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewActivateUserOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewActivateUserBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 401: + result := NewActivateUserUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewActivateUserForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 404: + result := NewActivateUserNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 409: + result := NewActivateUserConflict() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewActivateUserUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewActivateUserInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewActivateUserOK creates a ActivateUserOK with default headers values +func NewActivateUserOK() *ActivateUserOK { + return &ActivateUserOK{} +} + +/* +ActivateUserOK describes a response with status code 200, with default header values. 
+ +User successfully activated +*/ +type ActivateUserOK struct { +} + +// IsSuccess returns true when this activate user o k response has a 2xx status code +func (o *ActivateUserOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this activate user o k response has a 3xx status code +func (o *ActivateUserOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this activate user o k response has a 4xx status code +func (o *ActivateUserOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this activate user o k response has a 5xx status code +func (o *ActivateUserOK) IsServerError() bool { + return false +} + +// IsCode returns true when this activate user o k response a status code equal to that given +func (o *ActivateUserOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the activate user o k response +func (o *ActivateUserOK) Code() int { + return 200 +} + +func (o *ActivateUserOK) Error() string { + return fmt.Sprintf("[POST /users/db/{user_id}/activate][%d] activateUserOK ", 200) +} + +func (o *ActivateUserOK) String() string { + return fmt.Sprintf("[POST /users/db/{user_id}/activate][%d] activateUserOK ", 200) +} + +func (o *ActivateUserOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewActivateUserBadRequest creates a ActivateUserBadRequest with default headers values +func NewActivateUserBadRequest() *ActivateUserBadRequest { + return &ActivateUserBadRequest{} +} + +/* +ActivateUserBadRequest describes a response with status code 400, with default header values. + +Malformed request. 
+*/ +type ActivateUserBadRequest struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this activate user bad request response has a 2xx status code +func (o *ActivateUserBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this activate user bad request response has a 3xx status code +func (o *ActivateUserBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this activate user bad request response has a 4xx status code +func (o *ActivateUserBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this activate user bad request response has a 5xx status code +func (o *ActivateUserBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this activate user bad request response a status code equal to that given +func (o *ActivateUserBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the activate user bad request response +func (o *ActivateUserBadRequest) Code() int { + return 400 +} + +func (o *ActivateUserBadRequest) Error() string { + return fmt.Sprintf("[POST /users/db/{user_id}/activate][%d] activateUserBadRequest %+v", 400, o.Payload) +} + +func (o *ActivateUserBadRequest) String() string { + return fmt.Sprintf("[POST /users/db/{user_id}/activate][%d] activateUserBadRequest %+v", 400, o.Payload) +} + +func (o *ActivateUserBadRequest) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ActivateUserBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewActivateUserUnauthorized creates a ActivateUserUnauthorized with default headers values +func NewActivateUserUnauthorized() *ActivateUserUnauthorized 
{ + return &ActivateUserUnauthorized{} +} + +/* +ActivateUserUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. +*/ +type ActivateUserUnauthorized struct { +} + +// IsSuccess returns true when this activate user unauthorized response has a 2xx status code +func (o *ActivateUserUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this activate user unauthorized response has a 3xx status code +func (o *ActivateUserUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this activate user unauthorized response has a 4xx status code +func (o *ActivateUserUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this activate user unauthorized response has a 5xx status code +func (o *ActivateUserUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this activate user unauthorized response a status code equal to that given +func (o *ActivateUserUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the activate user unauthorized response +func (o *ActivateUserUnauthorized) Code() int { + return 401 +} + +func (o *ActivateUserUnauthorized) Error() string { + return fmt.Sprintf("[POST /users/db/{user_id}/activate][%d] activateUserUnauthorized ", 401) +} + +func (o *ActivateUserUnauthorized) String() string { + return fmt.Sprintf("[POST /users/db/{user_id}/activate][%d] activateUserUnauthorized ", 401) +} + +func (o *ActivateUserUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewActivateUserForbidden creates a ActivateUserForbidden with default headers values +func NewActivateUserForbidden() *ActivateUserForbidden { + return &ActivateUserForbidden{} +} + +/* +ActivateUserForbidden describes a response with status code 403, with default 
header values. + +Forbidden +*/ +type ActivateUserForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this activate user forbidden response has a 2xx status code +func (o *ActivateUserForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this activate user forbidden response has a 3xx status code +func (o *ActivateUserForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this activate user forbidden response has a 4xx status code +func (o *ActivateUserForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this activate user forbidden response has a 5xx status code +func (o *ActivateUserForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this activate user forbidden response a status code equal to that given +func (o *ActivateUserForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the activate user forbidden response +func (o *ActivateUserForbidden) Code() int { + return 403 +} + +func (o *ActivateUserForbidden) Error() string { + return fmt.Sprintf("[POST /users/db/{user_id}/activate][%d] activateUserForbidden %+v", 403, o.Payload) +} + +func (o *ActivateUserForbidden) String() string { + return fmt.Sprintf("[POST /users/db/{user_id}/activate][%d] activateUserForbidden %+v", 403, o.Payload) +} + +func (o *ActivateUserForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ActivateUserForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewActivateUserNotFound creates a ActivateUserNotFound with default headers values +func NewActivateUserNotFound() *ActivateUserNotFound { + return 
&ActivateUserNotFound{} +} + +/* +ActivateUserNotFound describes a response with status code 404, with default header values. + +user not found +*/ +type ActivateUserNotFound struct { +} + +// IsSuccess returns true when this activate user not found response has a 2xx status code +func (o *ActivateUserNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this activate user not found response has a 3xx status code +func (o *ActivateUserNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this activate user not found response has a 4xx status code +func (o *ActivateUserNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this activate user not found response has a 5xx status code +func (o *ActivateUserNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this activate user not found response a status code equal to that given +func (o *ActivateUserNotFound) IsCode(code int) bool { + return code == 404 +} + +// Code gets the status code for the activate user not found response +func (o *ActivateUserNotFound) Code() int { + return 404 +} + +func (o *ActivateUserNotFound) Error() string { + return fmt.Sprintf("[POST /users/db/{user_id}/activate][%d] activateUserNotFound ", 404) +} + +func (o *ActivateUserNotFound) String() string { + return fmt.Sprintf("[POST /users/db/{user_id}/activate][%d] activateUserNotFound ", 404) +} + +func (o *ActivateUserNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewActivateUserConflict creates a ActivateUserConflict with default headers values +func NewActivateUserConflict() *ActivateUserConflict { + return &ActivateUserConflict{} +} + +/* +ActivateUserConflict describes a response with status code 409, with default header values. 
+ +user already activated +*/ +type ActivateUserConflict struct { +} + +// IsSuccess returns true when this activate user conflict response has a 2xx status code +func (o *ActivateUserConflict) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this activate user conflict response has a 3xx status code +func (o *ActivateUserConflict) IsRedirect() bool { + return false +} + +// IsClientError returns true when this activate user conflict response has a 4xx status code +func (o *ActivateUserConflict) IsClientError() bool { + return true +} + +// IsServerError returns true when this activate user conflict response has a 5xx status code +func (o *ActivateUserConflict) IsServerError() bool { + return false +} + +// IsCode returns true when this activate user conflict response a status code equal to that given +func (o *ActivateUserConflict) IsCode(code int) bool { + return code == 409 +} + +// Code gets the status code for the activate user conflict response +func (o *ActivateUserConflict) Code() int { + return 409 +} + +func (o *ActivateUserConflict) Error() string { + return fmt.Sprintf("[POST /users/db/{user_id}/activate][%d] activateUserConflict ", 409) +} + +func (o *ActivateUserConflict) String() string { + return fmt.Sprintf("[POST /users/db/{user_id}/activate][%d] activateUserConflict ", 409) +} + +func (o *ActivateUserConflict) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewActivateUserUnprocessableEntity creates a ActivateUserUnprocessableEntity with default headers values +func NewActivateUserUnprocessableEntity() *ActivateUserUnprocessableEntity { + return &ActivateUserUnprocessableEntity{} +} + +/* +ActivateUserUnprocessableEntity describes a response with status code 422, with default header values. + +Request body is well-formed (i.e., syntactically correct), but semantically erroneous. 
+*/ +type ActivateUserUnprocessableEntity struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this activate user unprocessable entity response has a 2xx status code +func (o *ActivateUserUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this activate user unprocessable entity response has a 3xx status code +func (o *ActivateUserUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this activate user unprocessable entity response has a 4xx status code +func (o *ActivateUserUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this activate user unprocessable entity response has a 5xx status code +func (o *ActivateUserUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this activate user unprocessable entity response a status code equal to that given +func (o *ActivateUserUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + +// Code gets the status code for the activate user unprocessable entity response +func (o *ActivateUserUnprocessableEntity) Code() int { + return 422 +} + +func (o *ActivateUserUnprocessableEntity) Error() string { + return fmt.Sprintf("[POST /users/db/{user_id}/activate][%d] activateUserUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *ActivateUserUnprocessableEntity) String() string { + return fmt.Sprintf("[POST /users/db/{user_id}/activate][%d] activateUserUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *ActivateUserUnprocessableEntity) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ActivateUserUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + 
return nil +} + +// NewActivateUserInternalServerError creates a ActivateUserInternalServerError with default headers values +func NewActivateUserInternalServerError() *ActivateUserInternalServerError { + return &ActivateUserInternalServerError{} +} + +/* +ActivateUserInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. +*/ +type ActivateUserInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this activate user internal server error response has a 2xx status code +func (o *ActivateUserInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this activate user internal server error response has a 3xx status code +func (o *ActivateUserInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this activate user internal server error response has a 4xx status code +func (o *ActivateUserInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this activate user internal server error response has a 5xx status code +func (o *ActivateUserInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this activate user internal server error response a status code equal to that given +func (o *ActivateUserInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the activate user internal server error response +func (o *ActivateUserInternalServerError) Code() int { + return 500 +} + +func (o *ActivateUserInternalServerError) Error() string { + return fmt.Sprintf("[POST /users/db/{user_id}/activate][%d] activateUserInternalServerError %+v", 500, o.Payload) +} + +func (o *ActivateUserInternalServerError) String() string { + return fmt.Sprintf("[POST 
/users/db/{user_id}/activate][%d] activateUserInternalServerError %+v", 500, o.Payload) +} + +func (o *ActivateUserInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ActivateUserInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/users/create_user_parameters.go b/platform/dbops/binaries/weaviate-src/client/users/create_user_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..60137aaef290aba0af2f7ffd78e893a9239833a5 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/users/create_user_parameters.go @@ -0,0 +1,179 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package users + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewCreateUserParams creates a new CreateUserParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. 
+func NewCreateUserParams() *CreateUserParams { + return &CreateUserParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewCreateUserParamsWithTimeout creates a new CreateUserParams object +// with the ability to set a timeout on a request. +func NewCreateUserParamsWithTimeout(timeout time.Duration) *CreateUserParams { + return &CreateUserParams{ + timeout: timeout, + } +} + +// NewCreateUserParamsWithContext creates a new CreateUserParams object +// with the ability to set a context for a request. +func NewCreateUserParamsWithContext(ctx context.Context) *CreateUserParams { + return &CreateUserParams{ + Context: ctx, + } +} + +// NewCreateUserParamsWithHTTPClient creates a new CreateUserParams object +// with the ability to set a custom HTTPClient for a request. +func NewCreateUserParamsWithHTTPClient(client *http.Client) *CreateUserParams { + return &CreateUserParams{ + HTTPClient: client, + } +} + +/* +CreateUserParams contains all the parameters to send to the API endpoint + + for the create user operation. + + Typically these are written to a http.Request. +*/ +type CreateUserParams struct { + + // Body. + Body CreateUserBody + + /* UserID. + + user id + */ + UserID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the create user params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *CreateUserParams) WithDefaults() *CreateUserParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the create user params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *CreateUserParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the create user params +func (o *CreateUserParams) WithTimeout(timeout time.Duration) *CreateUserParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the create user params +func (o *CreateUserParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the create user params +func (o *CreateUserParams) WithContext(ctx context.Context) *CreateUserParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the create user params +func (o *CreateUserParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the create user params +func (o *CreateUserParams) WithHTTPClient(client *http.Client) *CreateUserParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the create user params +func (o *CreateUserParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the create user params +func (o *CreateUserParams) WithBody(body CreateUserBody) *CreateUserParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the create user params +func (o *CreateUserParams) SetBody(body CreateUserBody) { + o.Body = body +} + +// WithUserID adds the userID to the create user params +func (o *CreateUserParams) WithUserID(userID string) *CreateUserParams { + o.SetUserID(userID) + return o +} + +// SetUserID adds the userId to the create user params +func (o *CreateUserParams) SetUserID(userID string) { + o.UserID = userID +} + +// WriteToRequest writes these params to a swagger request +func (o *CreateUserParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if err := r.SetBodyParam(o.Body); err != nil { + 
return err + } + + // path param user_id + if err := r.SetPathParam("user_id", o.UserID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/users/create_user_responses.go b/platform/dbops/binaries/weaviate-src/client/users/create_user_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..36c02e7e3cb50b877efca689aa747064cacb4615 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/users/create_user_responses.go @@ -0,0 +1,687 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package users + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "fmt" + "io" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" + + "github.com/weaviate/weaviate/entities/models" +) + +// CreateUserReader is a Reader for the CreateUser structure. +type CreateUserReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *CreateUserReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 201: + result := NewCreateUserCreated() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewCreateUserBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 401: + result := NewCreateUserUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewCreateUserForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 404: + result := NewCreateUserNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 409: + result := NewCreateUserConflict() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewCreateUserUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewCreateUserInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewCreateUserCreated creates a CreateUserCreated with default headers values +func NewCreateUserCreated() *CreateUserCreated { + return &CreateUserCreated{} +} + +/* +CreateUserCreated describes a response with status code 201, with default header values. 
+ +User created successfully +*/ +type CreateUserCreated struct { + Payload *models.UserAPIKey +} + +// IsSuccess returns true when this create user created response has a 2xx status code +func (o *CreateUserCreated) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this create user created response has a 3xx status code +func (o *CreateUserCreated) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create user created response has a 4xx status code +func (o *CreateUserCreated) IsClientError() bool { + return false +} + +// IsServerError returns true when this create user created response has a 5xx status code +func (o *CreateUserCreated) IsServerError() bool { + return false +} + +// IsCode returns true when this create user created response a status code equal to that given +func (o *CreateUserCreated) IsCode(code int) bool { + return code == 201 +} + +// Code gets the status code for the create user created response +func (o *CreateUserCreated) Code() int { + return 201 +} + +func (o *CreateUserCreated) Error() string { + return fmt.Sprintf("[POST /users/db/{user_id}][%d] createUserCreated %+v", 201, o.Payload) +} + +func (o *CreateUserCreated) String() string { + return fmt.Sprintf("[POST /users/db/{user_id}][%d] createUserCreated %+v", 201, o.Payload) +} + +func (o *CreateUserCreated) GetPayload() *models.UserAPIKey { + return o.Payload +} + +func (o *CreateUserCreated) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.UserAPIKey) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCreateUserBadRequest creates a CreateUserBadRequest with default headers values +func NewCreateUserBadRequest() *CreateUserBadRequest { + return &CreateUserBadRequest{} +} + +/* +CreateUserBadRequest describes a response with status code 400, with 
default header values. + +Malformed request. +*/ +type CreateUserBadRequest struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this create user bad request response has a 2xx status code +func (o *CreateUserBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create user bad request response has a 3xx status code +func (o *CreateUserBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create user bad request response has a 4xx status code +func (o *CreateUserBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this create user bad request response has a 5xx status code +func (o *CreateUserBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this create user bad request response a status code equal to that given +func (o *CreateUserBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the create user bad request response +func (o *CreateUserBadRequest) Code() int { + return 400 +} + +func (o *CreateUserBadRequest) Error() string { + return fmt.Sprintf("[POST /users/db/{user_id}][%d] createUserBadRequest %+v", 400, o.Payload) +} + +func (o *CreateUserBadRequest) String() string { + return fmt.Sprintf("[POST /users/db/{user_id}][%d] createUserBadRequest %+v", 400, o.Payload) +} + +func (o *CreateUserBadRequest) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *CreateUserBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCreateUserUnauthorized creates a CreateUserUnauthorized with default headers values +func NewCreateUserUnauthorized() *CreateUserUnauthorized { + return 
&CreateUserUnauthorized{} +} + +/* +CreateUserUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. +*/ +type CreateUserUnauthorized struct { +} + +// IsSuccess returns true when this create user unauthorized response has a 2xx status code +func (o *CreateUserUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create user unauthorized response has a 3xx status code +func (o *CreateUserUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create user unauthorized response has a 4xx status code +func (o *CreateUserUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this create user unauthorized response has a 5xx status code +func (o *CreateUserUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this create user unauthorized response a status code equal to that given +func (o *CreateUserUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the create user unauthorized response +func (o *CreateUserUnauthorized) Code() int { + return 401 +} + +func (o *CreateUserUnauthorized) Error() string { + return fmt.Sprintf("[POST /users/db/{user_id}][%d] createUserUnauthorized ", 401) +} + +func (o *CreateUserUnauthorized) String() string { + return fmt.Sprintf("[POST /users/db/{user_id}][%d] createUserUnauthorized ", 401) +} + +func (o *CreateUserUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewCreateUserForbidden creates a CreateUserForbidden with default headers values +func NewCreateUserForbidden() *CreateUserForbidden { + return &CreateUserForbidden{} +} + +/* +CreateUserForbidden describes a response with status code 403, with default header values. 
+ +Forbidden +*/ +type CreateUserForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this create user forbidden response has a 2xx status code +func (o *CreateUserForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create user forbidden response has a 3xx status code +func (o *CreateUserForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create user forbidden response has a 4xx status code +func (o *CreateUserForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this create user forbidden response has a 5xx status code +func (o *CreateUserForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this create user forbidden response a status code equal to that given +func (o *CreateUserForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the create user forbidden response +func (o *CreateUserForbidden) Code() int { + return 403 +} + +func (o *CreateUserForbidden) Error() string { + return fmt.Sprintf("[POST /users/db/{user_id}][%d] createUserForbidden %+v", 403, o.Payload) +} + +func (o *CreateUserForbidden) String() string { + return fmt.Sprintf("[POST /users/db/{user_id}][%d] createUserForbidden %+v", 403, o.Payload) +} + +func (o *CreateUserForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *CreateUserForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCreateUserNotFound creates a CreateUserNotFound with default headers values +func NewCreateUserNotFound() *CreateUserNotFound { + return &CreateUserNotFound{} +} + +/* +CreateUserNotFound describes a response with 
status code 404, with default header values. + +user not found +*/ +type CreateUserNotFound struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this create user not found response has a 2xx status code +func (o *CreateUserNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create user not found response has a 3xx status code +func (o *CreateUserNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create user not found response has a 4xx status code +func (o *CreateUserNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this create user not found response has a 5xx status code +func (o *CreateUserNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this create user not found response a status code equal to that given +func (o *CreateUserNotFound) IsCode(code int) bool { + return code == 404 +} + +// Code gets the status code for the create user not found response +func (o *CreateUserNotFound) Code() int { + return 404 +} + +func (o *CreateUserNotFound) Error() string { + return fmt.Sprintf("[POST /users/db/{user_id}][%d] createUserNotFound %+v", 404, o.Payload) +} + +func (o *CreateUserNotFound) String() string { + return fmt.Sprintf("[POST /users/db/{user_id}][%d] createUserNotFound %+v", 404, o.Payload) +} + +func (o *CreateUserNotFound) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *CreateUserNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCreateUserConflict creates a CreateUserConflict with default headers values +func NewCreateUserConflict() *CreateUserConflict { + return &CreateUserConflict{} +} + +/* 
+CreateUserConflict describes a response with status code 409, with default header values. + +User already exists +*/ +type CreateUserConflict struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this create user conflict response has a 2xx status code +func (o *CreateUserConflict) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create user conflict response has a 3xx status code +func (o *CreateUserConflict) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create user conflict response has a 4xx status code +func (o *CreateUserConflict) IsClientError() bool { + return true +} + +// IsServerError returns true when this create user conflict response has a 5xx status code +func (o *CreateUserConflict) IsServerError() bool { + return false +} + +// IsCode returns true when this create user conflict response a status code equal to that given +func (o *CreateUserConflict) IsCode(code int) bool { + return code == 409 +} + +// Code gets the status code for the create user conflict response +func (o *CreateUserConflict) Code() int { + return 409 +} + +func (o *CreateUserConflict) Error() string { + return fmt.Sprintf("[POST /users/db/{user_id}][%d] createUserConflict %+v", 409, o.Payload) +} + +func (o *CreateUserConflict) String() string { + return fmt.Sprintf("[POST /users/db/{user_id}][%d] createUserConflict %+v", 409, o.Payload) +} + +func (o *CreateUserConflict) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *CreateUserConflict) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCreateUserUnprocessableEntity creates a CreateUserUnprocessableEntity with default headers values +func 
NewCreateUserUnprocessableEntity() *CreateUserUnprocessableEntity { + return &CreateUserUnprocessableEntity{} +} + +/* +CreateUserUnprocessableEntity describes a response with status code 422, with default header values. + +Request body is well-formed (i.e., syntactically correct), but semantically erroneous. +*/ +type CreateUserUnprocessableEntity struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this create user unprocessable entity response has a 2xx status code +func (o *CreateUserUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create user unprocessable entity response has a 3xx status code +func (o *CreateUserUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create user unprocessable entity response has a 4xx status code +func (o *CreateUserUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this create user unprocessable entity response has a 5xx status code +func (o *CreateUserUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this create user unprocessable entity response a status code equal to that given +func (o *CreateUserUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + +// Code gets the status code for the create user unprocessable entity response +func (o *CreateUserUnprocessableEntity) Code() int { + return 422 +} + +func (o *CreateUserUnprocessableEntity) Error() string { + return fmt.Sprintf("[POST /users/db/{user_id}][%d] createUserUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *CreateUserUnprocessableEntity) String() string { + return fmt.Sprintf("[POST /users/db/{user_id}][%d] createUserUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *CreateUserUnprocessableEntity) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *CreateUserUnprocessableEntity) readResponse(response 
runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCreateUserInternalServerError creates a CreateUserInternalServerError with default headers values +func NewCreateUserInternalServerError() *CreateUserInternalServerError { + return &CreateUserInternalServerError{} +} + +/* +CreateUserInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. +*/ +type CreateUserInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this create user internal server error response has a 2xx status code +func (o *CreateUserInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this create user internal server error response has a 3xx status code +func (o *CreateUserInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this create user internal server error response has a 4xx status code +func (o *CreateUserInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this create user internal server error response has a 5xx status code +func (o *CreateUserInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this create user internal server error response a status code equal to that given +func (o *CreateUserInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the create user internal server error response +func (o *CreateUserInternalServerError) Code() int { + return 500 +} + +func (o *CreateUserInternalServerError) Error() string { + return 
fmt.Sprintf("[POST /users/db/{user_id}][%d] createUserInternalServerError %+v", 500, o.Payload) +} + +func (o *CreateUserInternalServerError) String() string { + return fmt.Sprintf("[POST /users/db/{user_id}][%d] createUserInternalServerError %+v", 500, o.Payload) +} + +func (o *CreateUserInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *CreateUserInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +/* +CreateUserBody create user body +swagger:model CreateUserBody +*/ +type CreateUserBody struct { + + // EXPERIMENTAL, DONT USE. THIS WILL BE REMOVED AGAIN. - set the given time as creation time + // Format: date-time + CreateTime strfmt.DateTime `json:"createTime,omitempty"` + + // EXPERIMENTAL, DONT USE. THIS WILL BE REMOVED AGAIN. - import api key from static user + Import *bool `json:"import,omitempty"` +} + +// Validate validates this create user body +func (o *CreateUserBody) Validate(formats strfmt.Registry) error { + var res []error + + if err := o.validateCreateTime(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (o *CreateUserBody) validateCreateTime(formats strfmt.Registry) error { + if swag.IsZero(o.CreateTime) { // not required + return nil + } + + if err := validate.FormatOf("body"+"."+"createTime", "body", "date-time", o.CreateTime.String(), formats); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this create user body based on context it is used +func (o *CreateUserBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (o *CreateUserBody) MarshalBinary() ([]byte, error) { + if o == nil { + return nil, nil + } + return swag.WriteJSON(o) +} + +// UnmarshalBinary interface implementation +func (o *CreateUserBody) UnmarshalBinary(b []byte) error { + var res CreateUserBody + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *o = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/users/deactivate_user_parameters.go b/platform/dbops/binaries/weaviate-src/client/users/deactivate_user_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..20c3463e1b9a09395632693eae9c18eb6302b3e7 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/users/deactivate_user_parameters.go @@ -0,0 +1,179 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package users + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewDeactivateUserParams creates a new DeactivateUserParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewDeactivateUserParams() *DeactivateUserParams { + return &DeactivateUserParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewDeactivateUserParamsWithTimeout creates a new DeactivateUserParams object +// with the ability to set a timeout on a request. +func NewDeactivateUserParamsWithTimeout(timeout time.Duration) *DeactivateUserParams { + return &DeactivateUserParams{ + timeout: timeout, + } +} + +// NewDeactivateUserParamsWithContext creates a new DeactivateUserParams object +// with the ability to set a context for a request. +func NewDeactivateUserParamsWithContext(ctx context.Context) *DeactivateUserParams { + return &DeactivateUserParams{ + Context: ctx, + } +} + +// NewDeactivateUserParamsWithHTTPClient creates a new DeactivateUserParams object +// with the ability to set a custom HTTPClient for a request. +func NewDeactivateUserParamsWithHTTPClient(client *http.Client) *DeactivateUserParams { + return &DeactivateUserParams{ + HTTPClient: client, + } +} + +/* +DeactivateUserParams contains all the parameters to send to the API endpoint + + for the deactivate user operation. + + Typically these are written to a http.Request. +*/ +type DeactivateUserParams struct { + + // Body. + Body DeactivateUserBody + + /* UserID. 
+ + user id + */ + UserID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the deactivate user params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *DeactivateUserParams) WithDefaults() *DeactivateUserParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the deactivate user params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *DeactivateUserParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the deactivate user params +func (o *DeactivateUserParams) WithTimeout(timeout time.Duration) *DeactivateUserParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the deactivate user params +func (o *DeactivateUserParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the deactivate user params +func (o *DeactivateUserParams) WithContext(ctx context.Context) *DeactivateUserParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the deactivate user params +func (o *DeactivateUserParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the deactivate user params +func (o *DeactivateUserParams) WithHTTPClient(client *http.Client) *DeactivateUserParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the deactivate user params +func (o *DeactivateUserParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the deactivate user params +func (o *DeactivateUserParams) WithBody(body DeactivateUserBody) *DeactivateUserParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the deactivate user params +func (o *DeactivateUserParams) SetBody(body 
DeactivateUserBody) { + o.Body = body +} + +// WithUserID adds the userID to the deactivate user params +func (o *DeactivateUserParams) WithUserID(userID string) *DeactivateUserParams { + o.SetUserID(userID) + return o +} + +// SetUserID adds the userId to the deactivate user params +func (o *DeactivateUserParams) SetUserID(userID string) { + o.UserID = userID +} + +// WriteToRequest writes these params to a swagger request +func (o *DeactivateUserParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + + // path param user_id + if err := r.SetPathParam("user_id", o.UserID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/users/deactivate_user_responses.go b/platform/dbops/binaries/weaviate-src/client/users/deactivate_user_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..7bc492b569efdbb43bf45a8b47590c3086d6827e --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/users/deactivate_user_responses.go @@ -0,0 +1,624 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package users + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + + "github.com/weaviate/weaviate/entities/models" +) + +// DeactivateUserReader is a Reader for the DeactivateUser structure. +type DeactivateUserReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *DeactivateUserReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewDeactivateUserOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewDeactivateUserBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 401: + result := NewDeactivateUserUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewDeactivateUserForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 404: + result := NewDeactivateUserNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 409: + result := NewDeactivateUserConflict() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewDeactivateUserUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewDeactivateUserInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + 
return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewDeactivateUserOK creates a DeactivateUserOK with default headers values +func NewDeactivateUserOK() *DeactivateUserOK { + return &DeactivateUserOK{} +} + +/* +DeactivateUserOK describes a response with status code 200, with default header values. + +users successfully deactivated +*/ +type DeactivateUserOK struct { +} + +// IsSuccess returns true when this deactivate user o k response has a 2xx status code +func (o *DeactivateUserOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this deactivate user o k response has a 3xx status code +func (o *DeactivateUserOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this deactivate user o k response has a 4xx status code +func (o *DeactivateUserOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this deactivate user o k response has a 5xx status code +func (o *DeactivateUserOK) IsServerError() bool { + return false +} + +// IsCode returns true when this deactivate user o k response a status code equal to that given +func (o *DeactivateUserOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the deactivate user o k response +func (o *DeactivateUserOK) Code() int { + return 200 +} + +func (o *DeactivateUserOK) Error() string { + return fmt.Sprintf("[POST /users/db/{user_id}/deactivate][%d] deactivateUserOK ", 200) +} + +func (o *DeactivateUserOK) String() string { + return fmt.Sprintf("[POST /users/db/{user_id}/deactivate][%d] deactivateUserOK ", 200) +} + +func (o *DeactivateUserOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewDeactivateUserBadRequest creates a DeactivateUserBadRequest 
with default headers values +func NewDeactivateUserBadRequest() *DeactivateUserBadRequest { + return &DeactivateUserBadRequest{} +} + +/* +DeactivateUserBadRequest describes a response with status code 400, with default header values. + +Malformed request. +*/ +type DeactivateUserBadRequest struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this deactivate user bad request response has a 2xx status code +func (o *DeactivateUserBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this deactivate user bad request response has a 3xx status code +func (o *DeactivateUserBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this deactivate user bad request response has a 4xx status code +func (o *DeactivateUserBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this deactivate user bad request response has a 5xx status code +func (o *DeactivateUserBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this deactivate user bad request response a status code equal to that given +func (o *DeactivateUserBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the deactivate user bad request response +func (o *DeactivateUserBadRequest) Code() int { + return 400 +} + +func (o *DeactivateUserBadRequest) Error() string { + return fmt.Sprintf("[POST /users/db/{user_id}/deactivate][%d] deactivateUserBadRequest %+v", 400, o.Payload) +} + +func (o *DeactivateUserBadRequest) String() string { + return fmt.Sprintf("[POST /users/db/{user_id}/deactivate][%d] deactivateUserBadRequest %+v", 400, o.Payload) +} + +func (o *DeactivateUserBadRequest) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *DeactivateUserBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // 
response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewDeactivateUserUnauthorized creates a DeactivateUserUnauthorized with default headers values +func NewDeactivateUserUnauthorized() *DeactivateUserUnauthorized { + return &DeactivateUserUnauthorized{} +} + +/* +DeactivateUserUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. +*/ +type DeactivateUserUnauthorized struct { +} + +// IsSuccess returns true when this deactivate user unauthorized response has a 2xx status code +func (o *DeactivateUserUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this deactivate user unauthorized response has a 3xx status code +func (o *DeactivateUserUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this deactivate user unauthorized response has a 4xx status code +func (o *DeactivateUserUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this deactivate user unauthorized response has a 5xx status code +func (o *DeactivateUserUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this deactivate user unauthorized response a status code equal to that given +func (o *DeactivateUserUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the deactivate user unauthorized response +func (o *DeactivateUserUnauthorized) Code() int { + return 401 +} + +func (o *DeactivateUserUnauthorized) Error() string { + return fmt.Sprintf("[POST /users/db/{user_id}/deactivate][%d] deactivateUserUnauthorized ", 401) +} + +func (o *DeactivateUserUnauthorized) String() string { + return fmt.Sprintf("[POST /users/db/{user_id}/deactivate][%d] deactivateUserUnauthorized ", 401) +} + +func (o *DeactivateUserUnauthorized) readResponse(response 
runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewDeactivateUserForbidden creates a DeactivateUserForbidden with default headers values +func NewDeactivateUserForbidden() *DeactivateUserForbidden { + return &DeactivateUserForbidden{} +} + +/* +DeactivateUserForbidden describes a response with status code 403, with default header values. + +Forbidden +*/ +type DeactivateUserForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this deactivate user forbidden response has a 2xx status code +func (o *DeactivateUserForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this deactivate user forbidden response has a 3xx status code +func (o *DeactivateUserForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this deactivate user forbidden response has a 4xx status code +func (o *DeactivateUserForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this deactivate user forbidden response has a 5xx status code +func (o *DeactivateUserForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this deactivate user forbidden response a status code equal to that given +func (o *DeactivateUserForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the deactivate user forbidden response +func (o *DeactivateUserForbidden) Code() int { + return 403 +} + +func (o *DeactivateUserForbidden) Error() string { + return fmt.Sprintf("[POST /users/db/{user_id}/deactivate][%d] deactivateUserForbidden %+v", 403, o.Payload) +} + +func (o *DeactivateUserForbidden) String() string { + return fmt.Sprintf("[POST /users/db/{user_id}/deactivate][%d] deactivateUserForbidden %+v", 403, o.Payload) +} + +func (o *DeactivateUserForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *DeactivateUserForbidden) readResponse(response 
runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewDeactivateUserNotFound creates a DeactivateUserNotFound with default headers values +func NewDeactivateUserNotFound() *DeactivateUserNotFound { + return &DeactivateUserNotFound{} +} + +/* +DeactivateUserNotFound describes a response with status code 404, with default header values. + +user not found +*/ +type DeactivateUserNotFound struct { +} + +// IsSuccess returns true when this deactivate user not found response has a 2xx status code +func (o *DeactivateUserNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this deactivate user not found response has a 3xx status code +func (o *DeactivateUserNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this deactivate user not found response has a 4xx status code +func (o *DeactivateUserNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this deactivate user not found response has a 5xx status code +func (o *DeactivateUserNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this deactivate user not found response a status code equal to that given +func (o *DeactivateUserNotFound) IsCode(code int) bool { + return code == 404 +} + +// Code gets the status code for the deactivate user not found response +func (o *DeactivateUserNotFound) Code() int { + return 404 +} + +func (o *DeactivateUserNotFound) Error() string { + return fmt.Sprintf("[POST /users/db/{user_id}/deactivate][%d] deactivateUserNotFound ", 404) +} + +func (o *DeactivateUserNotFound) String() string { + return fmt.Sprintf("[POST /users/db/{user_id}/deactivate][%d] deactivateUserNotFound ", 404) +} + +func (o *DeactivateUserNotFound) readResponse(response 
runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewDeactivateUserConflict creates a DeactivateUserConflict with default headers values +func NewDeactivateUserConflict() *DeactivateUserConflict { + return &DeactivateUserConflict{} +} + +/* +DeactivateUserConflict describes a response with status code 409, with default header values. + +user already deactivated +*/ +type DeactivateUserConflict struct { +} + +// IsSuccess returns true when this deactivate user conflict response has a 2xx status code +func (o *DeactivateUserConflict) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this deactivate user conflict response has a 3xx status code +func (o *DeactivateUserConflict) IsRedirect() bool { + return false +} + +// IsClientError returns true when this deactivate user conflict response has a 4xx status code +func (o *DeactivateUserConflict) IsClientError() bool { + return true +} + +// IsServerError returns true when this deactivate user conflict response has a 5xx status code +func (o *DeactivateUserConflict) IsServerError() bool { + return false +} + +// IsCode returns true when this deactivate user conflict response a status code equal to that given +func (o *DeactivateUserConflict) IsCode(code int) bool { + return code == 409 +} + +// Code gets the status code for the deactivate user conflict response +func (o *DeactivateUserConflict) Code() int { + return 409 +} + +func (o *DeactivateUserConflict) Error() string { + return fmt.Sprintf("[POST /users/db/{user_id}/deactivate][%d] deactivateUserConflict ", 409) +} + +func (o *DeactivateUserConflict) String() string { + return fmt.Sprintf("[POST /users/db/{user_id}/deactivate][%d] deactivateUserConflict ", 409) +} + +func (o *DeactivateUserConflict) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewDeactivateUserUnprocessableEntity creates a 
DeactivateUserUnprocessableEntity with default headers values +func NewDeactivateUserUnprocessableEntity() *DeactivateUserUnprocessableEntity { + return &DeactivateUserUnprocessableEntity{} +} + +/* +DeactivateUserUnprocessableEntity describes a response with status code 422, with default header values. + +Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file? +*/ +type DeactivateUserUnprocessableEntity struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this deactivate user unprocessable entity response has a 2xx status code +func (o *DeactivateUserUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this deactivate user unprocessable entity response has a 3xx status code +func (o *DeactivateUserUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this deactivate user unprocessable entity response has a 4xx status code +func (o *DeactivateUserUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this deactivate user unprocessable entity response has a 5xx status code +func (o *DeactivateUserUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this deactivate user unprocessable entity response a status code equal to that given +func (o *DeactivateUserUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + +// Code gets the status code for the deactivate user unprocessable entity response +func (o *DeactivateUserUnprocessableEntity) Code() int { + return 422 +} + +func (o *DeactivateUserUnprocessableEntity) Error() string { + return fmt.Sprintf("[POST /users/db/{user_id}/deactivate][%d] deactivateUserUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *DeactivateUserUnprocessableEntity) String() string { + return fmt.Sprintf("[POST 
/users/db/{user_id}/deactivate][%d] deactivateUserUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *DeactivateUserUnprocessableEntity) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *DeactivateUserUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewDeactivateUserInternalServerError creates a DeactivateUserInternalServerError with default headers values +func NewDeactivateUserInternalServerError() *DeactivateUserInternalServerError { + return &DeactivateUserInternalServerError{} +} + +/* +DeactivateUserInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+*/ +type DeactivateUserInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this deactivate user internal server error response has a 2xx status code +func (o *DeactivateUserInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this deactivate user internal server error response has a 3xx status code +func (o *DeactivateUserInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this deactivate user internal server error response has a 4xx status code +func (o *DeactivateUserInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this deactivate user internal server error response has a 5xx status code +func (o *DeactivateUserInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this deactivate user internal server error response a status code equal to that given +func (o *DeactivateUserInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the deactivate user internal server error response +func (o *DeactivateUserInternalServerError) Code() int { + return 500 +} + +func (o *DeactivateUserInternalServerError) Error() string { + return fmt.Sprintf("[POST /users/db/{user_id}/deactivate][%d] deactivateUserInternalServerError %+v", 500, o.Payload) +} + +func (o *DeactivateUserInternalServerError) String() string { + return fmt.Sprintf("[POST /users/db/{user_id}/deactivate][%d] deactivateUserInternalServerError %+v", 500, o.Payload) +} + +func (o *DeactivateUserInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *DeactivateUserInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err 
!= nil && err != io.EOF { + return err + } + + return nil +} + +/* +DeactivateUserBody deactivate user body +swagger:model DeactivateUserBody +*/ +type DeactivateUserBody struct { + + // if the key should be revoked when deactivating the user + RevokeKey *bool `json:"revoke_key,omitempty"` +} + +// Validate validates this deactivate user body +func (o *DeactivateUserBody) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this deactivate user body based on context it is used +func (o *DeactivateUserBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (o *DeactivateUserBody) MarshalBinary() ([]byte, error) { + if o == nil { + return nil, nil + } + return swag.WriteJSON(o) +} + +// UnmarshalBinary interface implementation +func (o *DeactivateUserBody) UnmarshalBinary(b []byte) error { + var res DeactivateUserBody + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *o = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/users/delete_user_parameters.go b/platform/dbops/binaries/weaviate-src/client/users/delete_user_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..e917b2eb174f2c431ef9591bb4ac6692ebdd903f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/users/delete_user_parameters.go @@ -0,0 +1,162 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package users + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewDeleteUserParams creates a new DeleteUserParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewDeleteUserParams() *DeleteUserParams { + return &DeleteUserParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewDeleteUserParamsWithTimeout creates a new DeleteUserParams object +// with the ability to set a timeout on a request. +func NewDeleteUserParamsWithTimeout(timeout time.Duration) *DeleteUserParams { + return &DeleteUserParams{ + timeout: timeout, + } +} + +// NewDeleteUserParamsWithContext creates a new DeleteUserParams object +// with the ability to set a context for a request. +func NewDeleteUserParamsWithContext(ctx context.Context) *DeleteUserParams { + return &DeleteUserParams{ + Context: ctx, + } +} + +// NewDeleteUserParamsWithHTTPClient creates a new DeleteUserParams object +// with the ability to set a custom HTTPClient for a request. +func NewDeleteUserParamsWithHTTPClient(client *http.Client) *DeleteUserParams { + return &DeleteUserParams{ + HTTPClient: client, + } +} + +/* +DeleteUserParams contains all the parameters to send to the API endpoint + + for the delete user operation. + + Typically these are written to a http.Request. +*/ +type DeleteUserParams struct { + + /* UserID. + + user name + */ + UserID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the delete user params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *DeleteUserParams) WithDefaults() *DeleteUserParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the delete user params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *DeleteUserParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the delete user params +func (o *DeleteUserParams) WithTimeout(timeout time.Duration) *DeleteUserParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the delete user params +func (o *DeleteUserParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the delete user params +func (o *DeleteUserParams) WithContext(ctx context.Context) *DeleteUserParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the delete user params +func (o *DeleteUserParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the delete user params +func (o *DeleteUserParams) WithHTTPClient(client *http.Client) *DeleteUserParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the delete user params +func (o *DeleteUserParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithUserID adds the userID to the delete user params +func (o *DeleteUserParams) WithUserID(userID string) *DeleteUserParams { + o.SetUserID(userID) + return o +} + +// SetUserID adds the userId to the delete user params +func (o *DeleteUserParams) SetUserID(userID string) { + o.UserID = userID +} + +// WriteToRequest writes these params to a swagger request +func (o *DeleteUserParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param user_id + if err := r.SetPathParam("user_id", o.UserID); err != nil { + return err 
+ } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/users/delete_user_responses.go b/platform/dbops/binaries/weaviate-src/client/users/delete_user_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..3418abfb69ac4b3139f5ff32cc88ace91c16a6e1 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/users/delete_user_responses.go @@ -0,0 +1,522 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package users + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// DeleteUserReader is a Reader for the DeleteUser structure. +type DeleteUserReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *DeleteUserReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 204: + result := NewDeleteUserNoContent() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewDeleteUserBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 401: + result := NewDeleteUserUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewDeleteUserForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 404: + result := NewDeleteUserNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewDeleteUserUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewDeleteUserInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewDeleteUserNoContent creates a DeleteUserNoContent with default headers values +func NewDeleteUserNoContent() *DeleteUserNoContent { + return &DeleteUserNoContent{} +} + +/* +DeleteUserNoContent describes a response with status code 204, with default header values. + +Successfully deleted. 
+*/ +type DeleteUserNoContent struct { +} + +// IsSuccess returns true when this delete user no content response has a 2xx status code +func (o *DeleteUserNoContent) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this delete user no content response has a 3xx status code +func (o *DeleteUserNoContent) IsRedirect() bool { + return false +} + +// IsClientError returns true when this delete user no content response has a 4xx status code +func (o *DeleteUserNoContent) IsClientError() bool { + return false +} + +// IsServerError returns true when this delete user no content response has a 5xx status code +func (o *DeleteUserNoContent) IsServerError() bool { + return false +} + +// IsCode returns true when this delete user no content response a status code equal to that given +func (o *DeleteUserNoContent) IsCode(code int) bool { + return code == 204 +} + +// Code gets the status code for the delete user no content response +func (o *DeleteUserNoContent) Code() int { + return 204 +} + +func (o *DeleteUserNoContent) Error() string { + return fmt.Sprintf("[DELETE /users/db/{user_id}][%d] deleteUserNoContent ", 204) +} + +func (o *DeleteUserNoContent) String() string { + return fmt.Sprintf("[DELETE /users/db/{user_id}][%d] deleteUserNoContent ", 204) +} + +func (o *DeleteUserNoContent) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewDeleteUserBadRequest creates a DeleteUserBadRequest with default headers values +func NewDeleteUserBadRequest() *DeleteUserBadRequest { + return &DeleteUserBadRequest{} +} + +/* +DeleteUserBadRequest describes a response with status code 400, with default header values. + +Malformed request. 
+*/ +type DeleteUserBadRequest struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this delete user bad request response has a 2xx status code +func (o *DeleteUserBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this delete user bad request response has a 3xx status code +func (o *DeleteUserBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this delete user bad request response has a 4xx status code +func (o *DeleteUserBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this delete user bad request response has a 5xx status code +func (o *DeleteUserBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this delete user bad request response a status code equal to that given +func (o *DeleteUserBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the delete user bad request response +func (o *DeleteUserBadRequest) Code() int { + return 400 +} + +func (o *DeleteUserBadRequest) Error() string { + return fmt.Sprintf("[DELETE /users/db/{user_id}][%d] deleteUserBadRequest %+v", 400, o.Payload) +} + +func (o *DeleteUserBadRequest) String() string { + return fmt.Sprintf("[DELETE /users/db/{user_id}][%d] deleteUserBadRequest %+v", 400, o.Payload) +} + +func (o *DeleteUserBadRequest) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *DeleteUserBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewDeleteUserUnauthorized creates a DeleteUserUnauthorized with default headers values +func NewDeleteUserUnauthorized() *DeleteUserUnauthorized { + return &DeleteUserUnauthorized{} +} + +/* 
+DeleteUserUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. +*/ +type DeleteUserUnauthorized struct { +} + +// IsSuccess returns true when this delete user unauthorized response has a 2xx status code +func (o *DeleteUserUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this delete user unauthorized response has a 3xx status code +func (o *DeleteUserUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this delete user unauthorized response has a 4xx status code +func (o *DeleteUserUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this delete user unauthorized response has a 5xx status code +func (o *DeleteUserUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this delete user unauthorized response a status code equal to that given +func (o *DeleteUserUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the delete user unauthorized response +func (o *DeleteUserUnauthorized) Code() int { + return 401 +} + +func (o *DeleteUserUnauthorized) Error() string { + return fmt.Sprintf("[DELETE /users/db/{user_id}][%d] deleteUserUnauthorized ", 401) +} + +func (o *DeleteUserUnauthorized) String() string { + return fmt.Sprintf("[DELETE /users/db/{user_id}][%d] deleteUserUnauthorized ", 401) +} + +func (o *DeleteUserUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewDeleteUserForbidden creates a DeleteUserForbidden with default headers values +func NewDeleteUserForbidden() *DeleteUserForbidden { + return &DeleteUserForbidden{} +} + +/* +DeleteUserForbidden describes a response with status code 403, with default header values. 
+ +Forbidden +*/ +type DeleteUserForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this delete user forbidden response has a 2xx status code +func (o *DeleteUserForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this delete user forbidden response has a 3xx status code +func (o *DeleteUserForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this delete user forbidden response has a 4xx status code +func (o *DeleteUserForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this delete user forbidden response has a 5xx status code +func (o *DeleteUserForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this delete user forbidden response a status code equal to that given +func (o *DeleteUserForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the delete user forbidden response +func (o *DeleteUserForbidden) Code() int { + return 403 +} + +func (o *DeleteUserForbidden) Error() string { + return fmt.Sprintf("[DELETE /users/db/{user_id}][%d] deleteUserForbidden %+v", 403, o.Payload) +} + +func (o *DeleteUserForbidden) String() string { + return fmt.Sprintf("[DELETE /users/db/{user_id}][%d] deleteUserForbidden %+v", 403, o.Payload) +} + +func (o *DeleteUserForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *DeleteUserForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewDeleteUserNotFound creates a DeleteUserNotFound with default headers values +func NewDeleteUserNotFound() *DeleteUserNotFound { + return &DeleteUserNotFound{} +} + +/* +DeleteUserNotFound describes a response with 
status code 404, with default header values. + +user not found +*/ +type DeleteUserNotFound struct { +} + +// IsSuccess returns true when this delete user not found response has a 2xx status code +func (o *DeleteUserNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this delete user not found response has a 3xx status code +func (o *DeleteUserNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this delete user not found response has a 4xx status code +func (o *DeleteUserNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this delete user not found response has a 5xx status code +func (o *DeleteUserNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this delete user not found response a status code equal to that given +func (o *DeleteUserNotFound) IsCode(code int) bool { + return code == 404 +} + +// Code gets the status code for the delete user not found response +func (o *DeleteUserNotFound) Code() int { + return 404 +} + +func (o *DeleteUserNotFound) Error() string { + return fmt.Sprintf("[DELETE /users/db/{user_id}][%d] deleteUserNotFound ", 404) +} + +func (o *DeleteUserNotFound) String() string { + return fmt.Sprintf("[DELETE /users/db/{user_id}][%d] deleteUserNotFound ", 404) +} + +func (o *DeleteUserNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewDeleteUserUnprocessableEntity creates a DeleteUserUnprocessableEntity with default headers values +func NewDeleteUserUnprocessableEntity() *DeleteUserUnprocessableEntity { + return &DeleteUserUnprocessableEntity{} +} + +/* +DeleteUserUnprocessableEntity describes a response with status code 422, with default header values. + +Request body is well-formed (i.e., syntactically correct), but semantically erroneous. 
+*/ +type DeleteUserUnprocessableEntity struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this delete user unprocessable entity response has a 2xx status code +func (o *DeleteUserUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this delete user unprocessable entity response has a 3xx status code +func (o *DeleteUserUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this delete user unprocessable entity response has a 4xx status code +func (o *DeleteUserUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this delete user unprocessable entity response has a 5xx status code +func (o *DeleteUserUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this delete user unprocessable entity response a status code equal to that given +func (o *DeleteUserUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + +// Code gets the status code for the delete user unprocessable entity response +func (o *DeleteUserUnprocessableEntity) Code() int { + return 422 +} + +func (o *DeleteUserUnprocessableEntity) Error() string { + return fmt.Sprintf("[DELETE /users/db/{user_id}][%d] deleteUserUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *DeleteUserUnprocessableEntity) String() string { + return fmt.Sprintf("[DELETE /users/db/{user_id}][%d] deleteUserUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *DeleteUserUnprocessableEntity) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *DeleteUserUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewDeleteUserInternalServerError 
creates a DeleteUserInternalServerError with default headers values +func NewDeleteUserInternalServerError() *DeleteUserInternalServerError { + return &DeleteUserInternalServerError{} +} + +/* +DeleteUserInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. +*/ +type DeleteUserInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this delete user internal server error response has a 2xx status code +func (o *DeleteUserInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this delete user internal server error response has a 3xx status code +func (o *DeleteUserInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this delete user internal server error response has a 4xx status code +func (o *DeleteUserInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this delete user internal server error response has a 5xx status code +func (o *DeleteUserInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this delete user internal server error response a status code equal to that given +func (o *DeleteUserInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the delete user internal server error response +func (o *DeleteUserInternalServerError) Code() int { + return 500 +} + +func (o *DeleteUserInternalServerError) Error() string { + return fmt.Sprintf("[DELETE /users/db/{user_id}][%d] deleteUserInternalServerError %+v", 500, o.Payload) +} + +func (o *DeleteUserInternalServerError) String() string { + return fmt.Sprintf("[DELETE /users/db/{user_id}][%d] deleteUserInternalServerError %+v", 500, o.Payload) +} + +func (o 
*DeleteUserInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *DeleteUserInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/users/get_own_info_parameters.go b/platform/dbops/binaries/weaviate-src/client/users/get_own_info_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..16edee9de80d6296000be85b744b421e647041e7 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/users/get_own_info_parameters.go @@ -0,0 +1,139 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package users + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewGetOwnInfoParams creates a new GetOwnInfoParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. 
+func NewGetOwnInfoParams() *GetOwnInfoParams { + return &GetOwnInfoParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewGetOwnInfoParamsWithTimeout creates a new GetOwnInfoParams object +// with the ability to set a timeout on a request. +func NewGetOwnInfoParamsWithTimeout(timeout time.Duration) *GetOwnInfoParams { + return &GetOwnInfoParams{ + timeout: timeout, + } +} + +// NewGetOwnInfoParamsWithContext creates a new GetOwnInfoParams object +// with the ability to set a context for a request. +func NewGetOwnInfoParamsWithContext(ctx context.Context) *GetOwnInfoParams { + return &GetOwnInfoParams{ + Context: ctx, + } +} + +// NewGetOwnInfoParamsWithHTTPClient creates a new GetOwnInfoParams object +// with the ability to set a custom HTTPClient for a request. +func NewGetOwnInfoParamsWithHTTPClient(client *http.Client) *GetOwnInfoParams { + return &GetOwnInfoParams{ + HTTPClient: client, + } +} + +/* +GetOwnInfoParams contains all the parameters to send to the API endpoint + + for the get own info operation. + + Typically these are written to a http.Request. +*/ +type GetOwnInfoParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the get own info params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *GetOwnInfoParams) WithDefaults() *GetOwnInfoParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the get own info params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *GetOwnInfoParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the get own info params +func (o *GetOwnInfoParams) WithTimeout(timeout time.Duration) *GetOwnInfoParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the get own info params +func (o *GetOwnInfoParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the get own info params +func (o *GetOwnInfoParams) WithContext(ctx context.Context) *GetOwnInfoParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the get own info params +func (o *GetOwnInfoParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the get own info params +func (o *GetOwnInfoParams) WithHTTPClient(client *http.Client) *GetOwnInfoParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the get own info params +func (o *GetOwnInfoParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *GetOwnInfoParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/users/get_own_info_responses.go b/platform/dbops/binaries/weaviate-src/client/users/get_own_info_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..9d707ab74bcd726f8a5fe7a679e57c293e38dea9 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/users/get_own_info_responses.go @@ -0,0 +1,324 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package users + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// GetOwnInfoReader is a Reader for the GetOwnInfo structure. +type GetOwnInfoReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *GetOwnInfoReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewGetOwnInfoOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 401: + result := NewGetOwnInfoUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewGetOwnInfoInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 501: + result := NewGetOwnInfoNotImplemented() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewGetOwnInfoOK creates a GetOwnInfoOK with default headers values +func NewGetOwnInfoOK() *GetOwnInfoOK { + return &GetOwnInfoOK{} +} + +/* +GetOwnInfoOK describes a response with status code 200, with default header values. 
+ +Info about the user +*/ +type GetOwnInfoOK struct { + Payload *models.UserOwnInfo +} + +// IsSuccess returns true when this get own info o k response has a 2xx status code +func (o *GetOwnInfoOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this get own info o k response has a 3xx status code +func (o *GetOwnInfoOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get own info o k response has a 4xx status code +func (o *GetOwnInfoOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this get own info o k response has a 5xx status code +func (o *GetOwnInfoOK) IsServerError() bool { + return false +} + +// IsCode returns true when this get own info o k response a status code equal to that given +func (o *GetOwnInfoOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the get own info o k response +func (o *GetOwnInfoOK) Code() int { + return 200 +} + +func (o *GetOwnInfoOK) Error() string { + return fmt.Sprintf("[GET /users/own-info][%d] getOwnInfoOK %+v", 200, o.Payload) +} + +func (o *GetOwnInfoOK) String() string { + return fmt.Sprintf("[GET /users/own-info][%d] getOwnInfoOK %+v", 200, o.Payload) +} + +func (o *GetOwnInfoOK) GetPayload() *models.UserOwnInfo { + return o.Payload +} + +func (o *GetOwnInfoOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.UserOwnInfo) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetOwnInfoUnauthorized creates a GetOwnInfoUnauthorized with default headers values +func NewGetOwnInfoUnauthorized() *GetOwnInfoUnauthorized { + return &GetOwnInfoUnauthorized{} +} + +/* +GetOwnInfoUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. 
+*/ +type GetOwnInfoUnauthorized struct { +} + +// IsSuccess returns true when this get own info unauthorized response has a 2xx status code +func (o *GetOwnInfoUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get own info unauthorized response has a 3xx status code +func (o *GetOwnInfoUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get own info unauthorized response has a 4xx status code +func (o *GetOwnInfoUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this get own info unauthorized response has a 5xx status code +func (o *GetOwnInfoUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this get own info unauthorized response a status code equal to that given +func (o *GetOwnInfoUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the get own info unauthorized response +func (o *GetOwnInfoUnauthorized) Code() int { + return 401 +} + +func (o *GetOwnInfoUnauthorized) Error() string { + return fmt.Sprintf("[GET /users/own-info][%d] getOwnInfoUnauthorized ", 401) +} + +func (o *GetOwnInfoUnauthorized) String() string { + return fmt.Sprintf("[GET /users/own-info][%d] getOwnInfoUnauthorized ", 401) +} + +func (o *GetOwnInfoUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewGetOwnInfoInternalServerError creates a GetOwnInfoInternalServerError with default headers values +func NewGetOwnInfoInternalServerError() *GetOwnInfoInternalServerError { + return &GetOwnInfoInternalServerError{} +} + +/* +GetOwnInfoInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+*/ +type GetOwnInfoInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this get own info internal server error response has a 2xx status code +func (o *GetOwnInfoInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get own info internal server error response has a 3xx status code +func (o *GetOwnInfoInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get own info internal server error response has a 4xx status code +func (o *GetOwnInfoInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this get own info internal server error response has a 5xx status code +func (o *GetOwnInfoInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this get own info internal server error response a status code equal to that given +func (o *GetOwnInfoInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the get own info internal server error response +func (o *GetOwnInfoInternalServerError) Code() int { + return 500 +} + +func (o *GetOwnInfoInternalServerError) Error() string { + return fmt.Sprintf("[GET /users/own-info][%d] getOwnInfoInternalServerError %+v", 500, o.Payload) +} + +func (o *GetOwnInfoInternalServerError) String() string { + return fmt.Sprintf("[GET /users/own-info][%d] getOwnInfoInternalServerError %+v", 500, o.Payload) +} + +func (o *GetOwnInfoInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *GetOwnInfoInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetOwnInfoNotImplemented creates 
a GetOwnInfoNotImplemented with default headers values
+func NewGetOwnInfoNotImplemented() *GetOwnInfoNotImplemented {
+	return &GetOwnInfoNotImplemented{}
+}
+
+/*
+GetOwnInfoNotImplemented describes a response with status code 501, with default header values.
+
+Replica movement operations are disabled.
+
+NOTE(review): the description above looks copy-pasted from the replica-movement
+endpoints in the swagger spec — it has nothing to do with GET /users/own-info.
+This file is go-swagger generated (DO NOT EDIT); correct the 501 response
+description in the OpenAPI spec and regenerate rather than patching here.
+*/
+type GetOwnInfoNotImplemented struct {
+	Payload *models.ErrorResponse
+}
+
+// IsSuccess returns true when this get own info not implemented response has a 2xx status code
+func (o *GetOwnInfoNotImplemented) IsSuccess() bool {
+	return false
+}
+
+// IsRedirect returns true when this get own info not implemented response has a 3xx status code
+func (o *GetOwnInfoNotImplemented) IsRedirect() bool {
+	return false
+}
+
+// IsClientError returns true when this get own info not implemented response has a 4xx status code
+func (o *GetOwnInfoNotImplemented) IsClientError() bool {
+	return false
+}
+
+// IsServerError returns true when this get own info not implemented response has a 5xx status code
+func (o *GetOwnInfoNotImplemented) IsServerError() bool {
+	return true
+}
+
+// IsCode returns true when this get own info not implemented response a status code equal to that given
+func (o *GetOwnInfoNotImplemented) IsCode(code int) bool {
+	return code == 501
+}
+
+// Code gets the status code for the get own info not implemented response
+func (o *GetOwnInfoNotImplemented) Code() int {
+	return 501
+}
+
+func (o *GetOwnInfoNotImplemented) Error() string {
+	return fmt.Sprintf("[GET /users/own-info][%d] getOwnInfoNotImplemented %+v", 501, o.Payload)
+}
+
+func (o *GetOwnInfoNotImplemented) String() string {
+	return fmt.Sprintf("[GET /users/own-info][%d] getOwnInfoNotImplemented %+v", 501, o.Payload)
+}
+
+func (o *GetOwnInfoNotImplemented) GetPayload() *models.ErrorResponse {
+	return o.Payload
+}
+
+func (o *GetOwnInfoNotImplemented) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	o.Payload = 
new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/users/get_user_info_parameters.go b/platform/dbops/binaries/weaviate-src/client/users/get_user_info_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..b549e5b1d1cb1e57dfb011ecb0292200eb3c3459 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/users/get_user_info_parameters.go @@ -0,0 +1,208 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package users + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewGetUserInfoParams creates a new GetUserInfoParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewGetUserInfoParams() *GetUserInfoParams { + return &GetUserInfoParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewGetUserInfoParamsWithTimeout creates a new GetUserInfoParams object +// with the ability to set a timeout on a request. 
+func NewGetUserInfoParamsWithTimeout(timeout time.Duration) *GetUserInfoParams { + return &GetUserInfoParams{ + timeout: timeout, + } +} + +// NewGetUserInfoParamsWithContext creates a new GetUserInfoParams object +// with the ability to set a context for a request. +func NewGetUserInfoParamsWithContext(ctx context.Context) *GetUserInfoParams { + return &GetUserInfoParams{ + Context: ctx, + } +} + +// NewGetUserInfoParamsWithHTTPClient creates a new GetUserInfoParams object +// with the ability to set a custom HTTPClient for a request. +func NewGetUserInfoParamsWithHTTPClient(client *http.Client) *GetUserInfoParams { + return &GetUserInfoParams{ + HTTPClient: client, + } +} + +/* +GetUserInfoParams contains all the parameters to send to the API endpoint + + for the get user info operation. + + Typically these are written to a http.Request. +*/ +type GetUserInfoParams struct { + + /* IncludeLastUsedTime. + + Whether to include the last used time of the given user + */ + IncludeLastUsedTime *bool + + /* UserID. + + user id + */ + UserID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the get user info params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *GetUserInfoParams) WithDefaults() *GetUserInfoParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the get user info params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *GetUserInfoParams) SetDefaults() { + var ( + includeLastUsedTimeDefault = bool(false) + ) + + val := GetUserInfoParams{ + IncludeLastUsedTime: &includeLastUsedTimeDefault, + } + + val.timeout = o.timeout + val.Context = o.Context + val.HTTPClient = o.HTTPClient + *o = val +} + +// WithTimeout adds the timeout to the get user info params +func (o *GetUserInfoParams) WithTimeout(timeout time.Duration) *GetUserInfoParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the get user info params +func (o *GetUserInfoParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the get user info params +func (o *GetUserInfoParams) WithContext(ctx context.Context) *GetUserInfoParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the get user info params +func (o *GetUserInfoParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the get user info params +func (o *GetUserInfoParams) WithHTTPClient(client *http.Client) *GetUserInfoParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the get user info params +func (o *GetUserInfoParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithIncludeLastUsedTime adds the includeLastUsedTime to the get user info params +func (o *GetUserInfoParams) WithIncludeLastUsedTime(includeLastUsedTime *bool) *GetUserInfoParams { + o.SetIncludeLastUsedTime(includeLastUsedTime) + return o +} + +// SetIncludeLastUsedTime adds the includeLastUsedTime to the get user info params +func (o *GetUserInfoParams) SetIncludeLastUsedTime(includeLastUsedTime *bool) { + o.IncludeLastUsedTime = includeLastUsedTime +} + +// WithUserID adds the userID to the get user info params +func (o *GetUserInfoParams) WithUserID(userID string) *GetUserInfoParams { + o.SetUserID(userID) + return o +} + +// SetUserID adds the userId to the get user info 
params +func (o *GetUserInfoParams) SetUserID(userID string) { + o.UserID = userID +} + +// WriteToRequest writes these params to a swagger request +func (o *GetUserInfoParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.IncludeLastUsedTime != nil { + + // query param includeLastUsedTime + var qrIncludeLastUsedTime bool + + if o.IncludeLastUsedTime != nil { + qrIncludeLastUsedTime = *o.IncludeLastUsedTime + } + qIncludeLastUsedTime := swag.FormatBool(qrIncludeLastUsedTime) + if qIncludeLastUsedTime != "" { + + if err := r.SetQueryParam("includeLastUsedTime", qIncludeLastUsedTime); err != nil { + return err + } + } + } + + // path param user_id + if err := r.SetPathParam("user_id", o.UserID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/users/get_user_info_responses.go b/platform/dbops/binaries/weaviate-src/client/users/get_user_info_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..4fd8c028646ab3c8f2404f4e6234bfc829f07c65 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/users/get_user_info_responses.go @@ -0,0 +1,460 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package users + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// GetUserInfoReader is a Reader for the GetUserInfo structure. +type GetUserInfoReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *GetUserInfoReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewGetUserInfoOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 401: + result := NewGetUserInfoUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewGetUserInfoForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 404: + result := NewGetUserInfoNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewGetUserInfoUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewGetUserInfoInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewGetUserInfoOK creates a GetUserInfoOK with default headers values +func NewGetUserInfoOK() *GetUserInfoOK { + return &GetUserInfoOK{} +} + +/* +GetUserInfoOK describes a response with 
status code 200, with default header values. + +Info about the user +*/ +type GetUserInfoOK struct { + Payload *models.DBUserInfo +} + +// IsSuccess returns true when this get user info o k response has a 2xx status code +func (o *GetUserInfoOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this get user info o k response has a 3xx status code +func (o *GetUserInfoOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get user info o k response has a 4xx status code +func (o *GetUserInfoOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this get user info o k response has a 5xx status code +func (o *GetUserInfoOK) IsServerError() bool { + return false +} + +// IsCode returns true when this get user info o k response a status code equal to that given +func (o *GetUserInfoOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the get user info o k response +func (o *GetUserInfoOK) Code() int { + return 200 +} + +func (o *GetUserInfoOK) Error() string { + return fmt.Sprintf("[GET /users/db/{user_id}][%d] getUserInfoOK %+v", 200, o.Payload) +} + +func (o *GetUserInfoOK) String() string { + return fmt.Sprintf("[GET /users/db/{user_id}][%d] getUserInfoOK %+v", 200, o.Payload) +} + +func (o *GetUserInfoOK) GetPayload() *models.DBUserInfo { + return o.Payload +} + +func (o *GetUserInfoOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.DBUserInfo) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetUserInfoUnauthorized creates a GetUserInfoUnauthorized with default headers values +func NewGetUserInfoUnauthorized() *GetUserInfoUnauthorized { + return &GetUserInfoUnauthorized{} +} + +/* +GetUserInfoUnauthorized describes a response with status code 401, with default 
header values. + +Unauthorized or invalid credentials. +*/ +type GetUserInfoUnauthorized struct { +} + +// IsSuccess returns true when this get user info unauthorized response has a 2xx status code +func (o *GetUserInfoUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get user info unauthorized response has a 3xx status code +func (o *GetUserInfoUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get user info unauthorized response has a 4xx status code +func (o *GetUserInfoUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this get user info unauthorized response has a 5xx status code +func (o *GetUserInfoUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this get user info unauthorized response a status code equal to that given +func (o *GetUserInfoUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the get user info unauthorized response +func (o *GetUserInfoUnauthorized) Code() int { + return 401 +} + +func (o *GetUserInfoUnauthorized) Error() string { + return fmt.Sprintf("[GET /users/db/{user_id}][%d] getUserInfoUnauthorized ", 401) +} + +func (o *GetUserInfoUnauthorized) String() string { + return fmt.Sprintf("[GET /users/db/{user_id}][%d] getUserInfoUnauthorized ", 401) +} + +func (o *GetUserInfoUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewGetUserInfoForbidden creates a GetUserInfoForbidden with default headers values +func NewGetUserInfoForbidden() *GetUserInfoForbidden { + return &GetUserInfoForbidden{} +} + +/* +GetUserInfoForbidden describes a response with status code 403, with default header values. 
+ +Forbidden +*/ +type GetUserInfoForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this get user info forbidden response has a 2xx status code +func (o *GetUserInfoForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get user info forbidden response has a 3xx status code +func (o *GetUserInfoForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get user info forbidden response has a 4xx status code +func (o *GetUserInfoForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this get user info forbidden response has a 5xx status code +func (o *GetUserInfoForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this get user info forbidden response a status code equal to that given +func (o *GetUserInfoForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the get user info forbidden response +func (o *GetUserInfoForbidden) Code() int { + return 403 +} + +func (o *GetUserInfoForbidden) Error() string { + return fmt.Sprintf("[GET /users/db/{user_id}][%d] getUserInfoForbidden %+v", 403, o.Payload) +} + +func (o *GetUserInfoForbidden) String() string { + return fmt.Sprintf("[GET /users/db/{user_id}][%d] getUserInfoForbidden %+v", 403, o.Payload) +} + +func (o *GetUserInfoForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *GetUserInfoForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetUserInfoNotFound creates a GetUserInfoNotFound with default headers values +func NewGetUserInfoNotFound() *GetUserInfoNotFound { + return &GetUserInfoNotFound{} +} + +/* +GetUserInfoNotFound 
describes a response with status code 404, with default header values. + +user not found +*/ +type GetUserInfoNotFound struct { +} + +// IsSuccess returns true when this get user info not found response has a 2xx status code +func (o *GetUserInfoNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get user info not found response has a 3xx status code +func (o *GetUserInfoNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get user info not found response has a 4xx status code +func (o *GetUserInfoNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this get user info not found response has a 5xx status code +func (o *GetUserInfoNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this get user info not found response a status code equal to that given +func (o *GetUserInfoNotFound) IsCode(code int) bool { + return code == 404 +} + +// Code gets the status code for the get user info not found response +func (o *GetUserInfoNotFound) Code() int { + return 404 +} + +func (o *GetUserInfoNotFound) Error() string { + return fmt.Sprintf("[GET /users/db/{user_id}][%d] getUserInfoNotFound ", 404) +} + +func (o *GetUserInfoNotFound) String() string { + return fmt.Sprintf("[GET /users/db/{user_id}][%d] getUserInfoNotFound ", 404) +} + +func (o *GetUserInfoNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewGetUserInfoUnprocessableEntity creates a GetUserInfoUnprocessableEntity with default headers values +func NewGetUserInfoUnprocessableEntity() *GetUserInfoUnprocessableEntity { + return &GetUserInfoUnprocessableEntity{} +} + +/* +GetUserInfoUnprocessableEntity describes a response with status code 422, with default header values. + +Request body is well-formed (i.e., syntactically correct), but semantically erroneous. 
+*/ +type GetUserInfoUnprocessableEntity struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this get user info unprocessable entity response has a 2xx status code +func (o *GetUserInfoUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get user info unprocessable entity response has a 3xx status code +func (o *GetUserInfoUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get user info unprocessable entity response has a 4xx status code +func (o *GetUserInfoUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this get user info unprocessable entity response has a 5xx status code +func (o *GetUserInfoUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this get user info unprocessable entity response a status code equal to that given +func (o *GetUserInfoUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + +// Code gets the status code for the get user info unprocessable entity response +func (o *GetUserInfoUnprocessableEntity) Code() int { + return 422 +} + +func (o *GetUserInfoUnprocessableEntity) Error() string { + return fmt.Sprintf("[GET /users/db/{user_id}][%d] getUserInfoUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *GetUserInfoUnprocessableEntity) String() string { + return fmt.Sprintf("[GET /users/db/{user_id}][%d] getUserInfoUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *GetUserInfoUnprocessableEntity) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *GetUserInfoUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// 
NewGetUserInfoInternalServerError creates a GetUserInfoInternalServerError with default headers values +func NewGetUserInfoInternalServerError() *GetUserInfoInternalServerError { + return &GetUserInfoInternalServerError{} +} + +/* +GetUserInfoInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. +*/ +type GetUserInfoInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this get user info internal server error response has a 2xx status code +func (o *GetUserInfoInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get user info internal server error response has a 3xx status code +func (o *GetUserInfoInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get user info internal server error response has a 4xx status code +func (o *GetUserInfoInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this get user info internal server error response has a 5xx status code +func (o *GetUserInfoInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this get user info internal server error response a status code equal to that given +func (o *GetUserInfoInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the get user info internal server error response +func (o *GetUserInfoInternalServerError) Code() int { + return 500 +} + +func (o *GetUserInfoInternalServerError) Error() string { + return fmt.Sprintf("[GET /users/db/{user_id}][%d] getUserInfoInternalServerError %+v", 500, o.Payload) +} + +func (o *GetUserInfoInternalServerError) String() string { + return fmt.Sprintf("[GET /users/db/{user_id}][%d] getUserInfoInternalServerError %+v", 
500, o.Payload) +} + +func (o *GetUserInfoInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *GetUserInfoInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/users/list_all_users_parameters.go b/platform/dbops/binaries/weaviate-src/client/users/list_all_users_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..8d556a4b9ab04e85a325cf9f3a0ff90e0e909bf7 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/users/list_all_users_parameters.go @@ -0,0 +1,186 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package users + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewListAllUsersParams creates a new ListAllUsersParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. 
+func NewListAllUsersParams() *ListAllUsersParams { + return &ListAllUsersParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewListAllUsersParamsWithTimeout creates a new ListAllUsersParams object +// with the ability to set a timeout on a request. +func NewListAllUsersParamsWithTimeout(timeout time.Duration) *ListAllUsersParams { + return &ListAllUsersParams{ + timeout: timeout, + } +} + +// NewListAllUsersParamsWithContext creates a new ListAllUsersParams object +// with the ability to set a context for a request. +func NewListAllUsersParamsWithContext(ctx context.Context) *ListAllUsersParams { + return &ListAllUsersParams{ + Context: ctx, + } +} + +// NewListAllUsersParamsWithHTTPClient creates a new ListAllUsersParams object +// with the ability to set a custom HTTPClient for a request. +func NewListAllUsersParamsWithHTTPClient(client *http.Client) *ListAllUsersParams { + return &ListAllUsersParams{ + HTTPClient: client, + } +} + +/* +ListAllUsersParams contains all the parameters to send to the API endpoint + + for the list all users operation. + + Typically these are written to a http.Request. +*/ +type ListAllUsersParams struct { + + /* IncludeLastUsedTime. + + Whether to include the last used time of the users + */ + IncludeLastUsedTime *bool + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the list all users params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *ListAllUsersParams) WithDefaults() *ListAllUsersParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the list all users params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *ListAllUsersParams) SetDefaults() { + var ( + includeLastUsedTimeDefault = bool(false) + ) + + val := ListAllUsersParams{ + IncludeLastUsedTime: &includeLastUsedTimeDefault, + } + + val.timeout = o.timeout + val.Context = o.Context + val.HTTPClient = o.HTTPClient + *o = val +} + +// WithTimeout adds the timeout to the list all users params +func (o *ListAllUsersParams) WithTimeout(timeout time.Duration) *ListAllUsersParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the list all users params +func (o *ListAllUsersParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the list all users params +func (o *ListAllUsersParams) WithContext(ctx context.Context) *ListAllUsersParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the list all users params +func (o *ListAllUsersParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the list all users params +func (o *ListAllUsersParams) WithHTTPClient(client *http.Client) *ListAllUsersParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the list all users params +func (o *ListAllUsersParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithIncludeLastUsedTime adds the includeLastUsedTime to the list all users params +func (o *ListAllUsersParams) WithIncludeLastUsedTime(includeLastUsedTime *bool) *ListAllUsersParams { + o.SetIncludeLastUsedTime(includeLastUsedTime) + return o +} + +// SetIncludeLastUsedTime adds the includeLastUsedTime to the list all users params +func (o *ListAllUsersParams) SetIncludeLastUsedTime(includeLastUsedTime *bool) { + o.IncludeLastUsedTime = includeLastUsedTime +} + +// WriteToRequest writes these params to a swagger request +func (o *ListAllUsersParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err 
!= nil { + return err + } + var res []error + + if o.IncludeLastUsedTime != nil { + + // query param includeLastUsedTime + var qrIncludeLastUsedTime bool + + if o.IncludeLastUsedTime != nil { + qrIncludeLastUsedTime = *o.IncludeLastUsedTime + } + qIncludeLastUsedTime := swag.FormatBool(qrIncludeLastUsedTime) + if qIncludeLastUsedTime != "" { + + if err := r.SetQueryParam("includeLastUsedTime", qIncludeLastUsedTime); err != nil { + return err + } + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/users/list_all_users_responses.go b/platform/dbops/binaries/weaviate-src/client/users/list_all_users_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..cfc2bd508ba845a4ec5578b799afb6ac197399b8 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/users/list_all_users_responses.go @@ -0,0 +1,322 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package users + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// ListAllUsersReader is a Reader for the ListAllUsers structure. +type ListAllUsersReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *ListAllUsersReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewListAllUsersOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 401: + result := NewListAllUsersUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewListAllUsersForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewListAllUsersInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewListAllUsersOK creates a ListAllUsersOK with default headers values +func NewListAllUsersOK() *ListAllUsersOK { + return &ListAllUsersOK{} +} + +/* +ListAllUsersOK describes a response with status code 200, with default header values. 
+ +Info about the users +*/ +type ListAllUsersOK struct { + Payload []*models.DBUserInfo +} + +// IsSuccess returns true when this list all users o k response has a 2xx status code +func (o *ListAllUsersOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this list all users o k response has a 3xx status code +func (o *ListAllUsersOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this list all users o k response has a 4xx status code +func (o *ListAllUsersOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this list all users o k response has a 5xx status code +func (o *ListAllUsersOK) IsServerError() bool { + return false +} + +// IsCode returns true when this list all users o k response a status code equal to that given +func (o *ListAllUsersOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the list all users o k response +func (o *ListAllUsersOK) Code() int { + return 200 +} + +func (o *ListAllUsersOK) Error() string { + return fmt.Sprintf("[GET /users/db][%d] listAllUsersOK %+v", 200, o.Payload) +} + +func (o *ListAllUsersOK) String() string { + return fmt.Sprintf("[GET /users/db][%d] listAllUsersOK %+v", 200, o.Payload) +} + +func (o *ListAllUsersOK) GetPayload() []*models.DBUserInfo { + return o.Payload +} + +func (o *ListAllUsersOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewListAllUsersUnauthorized creates a ListAllUsersUnauthorized with default headers values +func NewListAllUsersUnauthorized() *ListAllUsersUnauthorized { + return &ListAllUsersUnauthorized{} +} + +/* +ListAllUsersUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. 
+*/ +type ListAllUsersUnauthorized struct { +} + +// IsSuccess returns true when this list all users unauthorized response has a 2xx status code +func (o *ListAllUsersUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this list all users unauthorized response has a 3xx status code +func (o *ListAllUsersUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this list all users unauthorized response has a 4xx status code +func (o *ListAllUsersUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this list all users unauthorized response has a 5xx status code +func (o *ListAllUsersUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this list all users unauthorized response a status code equal to that given +func (o *ListAllUsersUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the list all users unauthorized response +func (o *ListAllUsersUnauthorized) Code() int { + return 401 +} + +func (o *ListAllUsersUnauthorized) Error() string { + return fmt.Sprintf("[GET /users/db][%d] listAllUsersUnauthorized ", 401) +} + +func (o *ListAllUsersUnauthorized) String() string { + return fmt.Sprintf("[GET /users/db][%d] listAllUsersUnauthorized ", 401) +} + +func (o *ListAllUsersUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewListAllUsersForbidden creates a ListAllUsersForbidden with default headers values +func NewListAllUsersForbidden() *ListAllUsersForbidden { + return &ListAllUsersForbidden{} +} + +/* +ListAllUsersForbidden describes a response with status code 403, with default header values. 
+ +Forbidden +*/ +type ListAllUsersForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this list all users forbidden response has a 2xx status code +func (o *ListAllUsersForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this list all users forbidden response has a 3xx status code +func (o *ListAllUsersForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this list all users forbidden response has a 4xx status code +func (o *ListAllUsersForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this list all users forbidden response has a 5xx status code +func (o *ListAllUsersForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this list all users forbidden response a status code equal to that given +func (o *ListAllUsersForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the list all users forbidden response +func (o *ListAllUsersForbidden) Code() int { + return 403 +} + +func (o *ListAllUsersForbidden) Error() string { + return fmt.Sprintf("[GET /users/db][%d] listAllUsersForbidden %+v", 403, o.Payload) +} + +func (o *ListAllUsersForbidden) String() string { + return fmt.Sprintf("[GET /users/db][%d] listAllUsersForbidden %+v", 403, o.Payload) +} + +func (o *ListAllUsersForbidden) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ListAllUsersForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewListAllUsersInternalServerError creates a ListAllUsersInternalServerError with default headers values +func NewListAllUsersInternalServerError() *ListAllUsersInternalServerError { + return 
&ListAllUsersInternalServerError{} +} + +/* +ListAllUsersInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. +*/ +type ListAllUsersInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this list all users internal server error response has a 2xx status code +func (o *ListAllUsersInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this list all users internal server error response has a 3xx status code +func (o *ListAllUsersInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this list all users internal server error response has a 4xx status code +func (o *ListAllUsersInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this list all users internal server error response has a 5xx status code +func (o *ListAllUsersInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this list all users internal server error response a status code equal to that given +func (o *ListAllUsersInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the list all users internal server error response +func (o *ListAllUsersInternalServerError) Code() int { + return 500 +} + +func (o *ListAllUsersInternalServerError) Error() string { + return fmt.Sprintf("[GET /users/db][%d] listAllUsersInternalServerError %+v", 500, o.Payload) +} + +func (o *ListAllUsersInternalServerError) String() string { + return fmt.Sprintf("[GET /users/db][%d] listAllUsersInternalServerError %+v", 500, o.Payload) +} + +func (o *ListAllUsersInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ListAllUsersInternalServerError) readResponse(response 
runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/users/rotate_user_api_key_parameters.go b/platform/dbops/binaries/weaviate-src/client/users/rotate_user_api_key_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..7576193be19f042cc232d200b95deae9879da60c --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/users/rotate_user_api_key_parameters.go @@ -0,0 +1,162 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package users + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewRotateUserAPIKeyParams creates a new RotateUserAPIKeyParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewRotateUserAPIKeyParams() *RotateUserAPIKeyParams { + return &RotateUserAPIKeyParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewRotateUserAPIKeyParamsWithTimeout creates a new RotateUserAPIKeyParams object +// with the ability to set a timeout on a request. 
+func NewRotateUserAPIKeyParamsWithTimeout(timeout time.Duration) *RotateUserAPIKeyParams { + return &RotateUserAPIKeyParams{ + timeout: timeout, + } +} + +// NewRotateUserAPIKeyParamsWithContext creates a new RotateUserAPIKeyParams object +// with the ability to set a context for a request. +func NewRotateUserAPIKeyParamsWithContext(ctx context.Context) *RotateUserAPIKeyParams { + return &RotateUserAPIKeyParams{ + Context: ctx, + } +} + +// NewRotateUserAPIKeyParamsWithHTTPClient creates a new RotateUserAPIKeyParams object +// with the ability to set a custom HTTPClient for a request. +func NewRotateUserAPIKeyParamsWithHTTPClient(client *http.Client) *RotateUserAPIKeyParams { + return &RotateUserAPIKeyParams{ + HTTPClient: client, + } +} + +/* +RotateUserAPIKeyParams contains all the parameters to send to the API endpoint + + for the rotate user Api key operation. + + Typically these are written to a http.Request. +*/ +type RotateUserAPIKeyParams struct { + + /* UserID. + + user id + */ + UserID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the rotate user Api key params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *RotateUserAPIKeyParams) WithDefaults() *RotateUserAPIKeyParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the rotate user Api key params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *RotateUserAPIKeyParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the rotate user Api key params +func (o *RotateUserAPIKeyParams) WithTimeout(timeout time.Duration) *RotateUserAPIKeyParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the rotate user Api key params +func (o *RotateUserAPIKeyParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the rotate user Api key params +func (o *RotateUserAPIKeyParams) WithContext(ctx context.Context) *RotateUserAPIKeyParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the rotate user Api key params +func (o *RotateUserAPIKeyParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the rotate user Api key params +func (o *RotateUserAPIKeyParams) WithHTTPClient(client *http.Client) *RotateUserAPIKeyParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the rotate user Api key params +func (o *RotateUserAPIKeyParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithUserID adds the userID to the rotate user Api key params +func (o *RotateUserAPIKeyParams) WithUserID(userID string) *RotateUserAPIKeyParams { + o.SetUserID(userID) + return o +} + +// SetUserID adds the userId to the rotate user Api key params +func (o *RotateUserAPIKeyParams) SetUserID(userID string) { + o.UserID = userID +} + +// WriteToRequest writes these params to a swagger request +func (o *RotateUserAPIKeyParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param user_id + if err := r.SetPathParam("user_id", o.UserID); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/users/rotate_user_api_key_responses.go b/platform/dbops/binaries/weaviate-src/client/users/rotate_user_api_key_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..ba571898f78f01fe89377cd3c18e7a95fa028985 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/users/rotate_user_api_key_responses.go @@ -0,0 +1,534 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package users + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// RotateUserAPIKeyReader is a Reader for the RotateUserAPIKey structure. +type RotateUserAPIKeyReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *RotateUserAPIKeyReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewRotateUserAPIKeyOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewRotateUserAPIKeyBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 401: + result := NewRotateUserAPIKeyUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 403: + result := NewRotateUserAPIKeyForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 404: + result := NewRotateUserAPIKeyNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewRotateUserAPIKeyUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewRotateUserAPIKeyInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewRotateUserAPIKeyOK creates a RotateUserAPIKeyOK with default headers values +func NewRotateUserAPIKeyOK() *RotateUserAPIKeyOK { + return &RotateUserAPIKeyOK{} +} + +/* +RotateUserAPIKeyOK describes a response with status code 200, with default header values. 
+ +ApiKey successfully changed +*/ +type RotateUserAPIKeyOK struct { + Payload *models.UserAPIKey +} + +// IsSuccess returns true when this rotate user Api key o k response has a 2xx status code +func (o *RotateUserAPIKeyOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this rotate user Api key o k response has a 3xx status code +func (o *RotateUserAPIKeyOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this rotate user Api key o k response has a 4xx status code +func (o *RotateUserAPIKeyOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this rotate user Api key o k response has a 5xx status code +func (o *RotateUserAPIKeyOK) IsServerError() bool { + return false +} + +// IsCode returns true when this rotate user Api key o k response a status code equal to that given +func (o *RotateUserAPIKeyOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the rotate user Api key o k response +func (o *RotateUserAPIKeyOK) Code() int { + return 200 +} + +func (o *RotateUserAPIKeyOK) Error() string { + return fmt.Sprintf("[POST /users/db/{user_id}/rotate-key][%d] rotateUserApiKeyOK %+v", 200, o.Payload) +} + +func (o *RotateUserAPIKeyOK) String() string { + return fmt.Sprintf("[POST /users/db/{user_id}/rotate-key][%d] rotateUserApiKeyOK %+v", 200, o.Payload) +} + +func (o *RotateUserAPIKeyOK) GetPayload() *models.UserAPIKey { + return o.Payload +} + +func (o *RotateUserAPIKeyOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.UserAPIKey) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewRotateUserAPIKeyBadRequest creates a RotateUserAPIKeyBadRequest with default headers values +func NewRotateUserAPIKeyBadRequest() *RotateUserAPIKeyBadRequest { + return 
&RotateUserAPIKeyBadRequest{} +} + +/* +RotateUserAPIKeyBadRequest describes a response with status code 400, with default header values. + +Malformed request. +*/ +type RotateUserAPIKeyBadRequest struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this rotate user Api key bad request response has a 2xx status code +func (o *RotateUserAPIKeyBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this rotate user Api key bad request response has a 3xx status code +func (o *RotateUserAPIKeyBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this rotate user Api key bad request response has a 4xx status code +func (o *RotateUserAPIKeyBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this rotate user Api key bad request response has a 5xx status code +func (o *RotateUserAPIKeyBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this rotate user Api key bad request response a status code equal to that given +func (o *RotateUserAPIKeyBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the rotate user Api key bad request response +func (o *RotateUserAPIKeyBadRequest) Code() int { + return 400 +} + +func (o *RotateUserAPIKeyBadRequest) Error() string { + return fmt.Sprintf("[POST /users/db/{user_id}/rotate-key][%d] rotateUserApiKeyBadRequest %+v", 400, o.Payload) +} + +func (o *RotateUserAPIKeyBadRequest) String() string { + return fmt.Sprintf("[POST /users/db/{user_id}/rotate-key][%d] rotateUserApiKeyBadRequest %+v", 400, o.Payload) +} + +func (o *RotateUserAPIKeyBadRequest) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *RotateUserAPIKeyBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := 
consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewRotateUserAPIKeyUnauthorized creates a RotateUserAPIKeyUnauthorized with default headers values +func NewRotateUserAPIKeyUnauthorized() *RotateUserAPIKeyUnauthorized { + return &RotateUserAPIKeyUnauthorized{} +} + +/* +RotateUserAPIKeyUnauthorized describes a response with status code 401, with default header values. + +Unauthorized or invalid credentials. +*/ +type RotateUserAPIKeyUnauthorized struct { +} + +// IsSuccess returns true when this rotate user Api key unauthorized response has a 2xx status code +func (o *RotateUserAPIKeyUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this rotate user Api key unauthorized response has a 3xx status code +func (o *RotateUserAPIKeyUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this rotate user Api key unauthorized response has a 4xx status code +func (o *RotateUserAPIKeyUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this rotate user Api key unauthorized response has a 5xx status code +func (o *RotateUserAPIKeyUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this rotate user Api key unauthorized response a status code equal to that given +func (o *RotateUserAPIKeyUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the rotate user Api key unauthorized response +func (o *RotateUserAPIKeyUnauthorized) Code() int { + return 401 +} + +func (o *RotateUserAPIKeyUnauthorized) Error() string { + return fmt.Sprintf("[POST /users/db/{user_id}/rotate-key][%d] rotateUserApiKeyUnauthorized ", 401) +} + +func (o *RotateUserAPIKeyUnauthorized) String() string { + return fmt.Sprintf("[POST /users/db/{user_id}/rotate-key][%d] rotateUserApiKeyUnauthorized ", 401) +} + +func (o *RotateUserAPIKeyUnauthorized) 
readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewRotateUserAPIKeyForbidden creates a RotateUserAPIKeyForbidden with default headers values +func NewRotateUserAPIKeyForbidden() *RotateUserAPIKeyForbidden { + return &RotateUserAPIKeyForbidden{} +} + +/* +RotateUserAPIKeyForbidden describes a response with status code 403, with default header values. + +Forbidden +*/ +type RotateUserAPIKeyForbidden struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this rotate user Api key forbidden response has a 2xx status code +func (o *RotateUserAPIKeyForbidden) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this rotate user Api key forbidden response has a 3xx status code +func (o *RotateUserAPIKeyForbidden) IsRedirect() bool { + return false +} + +// IsClientError returns true when this rotate user Api key forbidden response has a 4xx status code +func (o *RotateUserAPIKeyForbidden) IsClientError() bool { + return true +} + +// IsServerError returns true when this rotate user Api key forbidden response has a 5xx status code +func (o *RotateUserAPIKeyForbidden) IsServerError() bool { + return false +} + +// IsCode returns true when this rotate user Api key forbidden response a status code equal to that given +func (o *RotateUserAPIKeyForbidden) IsCode(code int) bool { + return code == 403 +} + +// Code gets the status code for the rotate user Api key forbidden response +func (o *RotateUserAPIKeyForbidden) Code() int { + return 403 +} + +func (o *RotateUserAPIKeyForbidden) Error() string { + return fmt.Sprintf("[POST /users/db/{user_id}/rotate-key][%d] rotateUserApiKeyForbidden %+v", 403, o.Payload) +} + +func (o *RotateUserAPIKeyForbidden) String() string { + return fmt.Sprintf("[POST /users/db/{user_id}/rotate-key][%d] rotateUserApiKeyForbidden %+v", 403, o.Payload) +} + +func (o *RotateUserAPIKeyForbidden) GetPayload() *models.ErrorResponse { + 
return o.Payload +} + +func (o *RotateUserAPIKeyForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewRotateUserAPIKeyNotFound creates a RotateUserAPIKeyNotFound with default headers values +func NewRotateUserAPIKeyNotFound() *RotateUserAPIKeyNotFound { + return &RotateUserAPIKeyNotFound{} +} + +/* +RotateUserAPIKeyNotFound describes a response with status code 404, with default header values. + +user not found +*/ +type RotateUserAPIKeyNotFound struct { +} + +// IsSuccess returns true when this rotate user Api key not found response has a 2xx status code +func (o *RotateUserAPIKeyNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this rotate user Api key not found response has a 3xx status code +func (o *RotateUserAPIKeyNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this rotate user Api key not found response has a 4xx status code +func (o *RotateUserAPIKeyNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this rotate user Api key not found response has a 5xx status code +func (o *RotateUserAPIKeyNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this rotate user Api key not found response a status code equal to that given +func (o *RotateUserAPIKeyNotFound) IsCode(code int) bool { + return code == 404 +} + +// Code gets the status code for the rotate user Api key not found response +func (o *RotateUserAPIKeyNotFound) Code() int { + return 404 +} + +func (o *RotateUserAPIKeyNotFound) Error() string { + return fmt.Sprintf("[POST /users/db/{user_id}/rotate-key][%d] rotateUserApiKeyNotFound ", 404) +} + +func (o *RotateUserAPIKeyNotFound) String() string { + return 
fmt.Sprintf("[POST /users/db/{user_id}/rotate-key][%d] rotateUserApiKeyNotFound ", 404) +} + +func (o *RotateUserAPIKeyNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewRotateUserAPIKeyUnprocessableEntity creates a RotateUserAPIKeyUnprocessableEntity with default headers values +func NewRotateUserAPIKeyUnprocessableEntity() *RotateUserAPIKeyUnprocessableEntity { + return &RotateUserAPIKeyUnprocessableEntity{} +} + +/* +RotateUserAPIKeyUnprocessableEntity describes a response with status code 422, with default header values. + +Request body is well-formed (i.e., syntactically correct), but semantically erroneous. +*/ +type RotateUserAPIKeyUnprocessableEntity struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this rotate user Api key unprocessable entity response has a 2xx status code +func (o *RotateUserAPIKeyUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this rotate user Api key unprocessable entity response has a 3xx status code +func (o *RotateUserAPIKeyUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this rotate user Api key unprocessable entity response has a 4xx status code +func (o *RotateUserAPIKeyUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this rotate user Api key unprocessable entity response has a 5xx status code +func (o *RotateUserAPIKeyUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this rotate user Api key unprocessable entity response a status code equal to that given +func (o *RotateUserAPIKeyUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + +// Code gets the status code for the rotate user Api key unprocessable entity response +func (o *RotateUserAPIKeyUnprocessableEntity) Code() int { + return 422 +} + +func (o 
*RotateUserAPIKeyUnprocessableEntity) Error() string { + return fmt.Sprintf("[POST /users/db/{user_id}/rotate-key][%d] rotateUserApiKeyUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *RotateUserAPIKeyUnprocessableEntity) String() string { + return fmt.Sprintf("[POST /users/db/{user_id}/rotate-key][%d] rotateUserApiKeyUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *RotateUserAPIKeyUnprocessableEntity) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *RotateUserAPIKeyUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewRotateUserAPIKeyInternalServerError creates a RotateUserAPIKeyInternalServerError with default headers values +func NewRotateUserAPIKeyInternalServerError() *RotateUserAPIKeyInternalServerError { + return &RotateUserAPIKeyInternalServerError{} +} + +/* +RotateUserAPIKeyInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+*/ +type RotateUserAPIKeyInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this rotate user Api key internal server error response has a 2xx status code +func (o *RotateUserAPIKeyInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this rotate user Api key internal server error response has a 3xx status code +func (o *RotateUserAPIKeyInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this rotate user Api key internal server error response has a 4xx status code +func (o *RotateUserAPIKeyInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this rotate user Api key internal server error response has a 5xx status code +func (o *RotateUserAPIKeyInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this rotate user Api key internal server error response a status code equal to that given +func (o *RotateUserAPIKeyInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the rotate user Api key internal server error response +func (o *RotateUserAPIKeyInternalServerError) Code() int { + return 500 +} + +func (o *RotateUserAPIKeyInternalServerError) Error() string { + return fmt.Sprintf("[POST /users/db/{user_id}/rotate-key][%d] rotateUserApiKeyInternalServerError %+v", 500, o.Payload) +} + +func (o *RotateUserAPIKeyInternalServerError) String() string { + return fmt.Sprintf("[POST /users/db/{user_id}/rotate-key][%d] rotateUserApiKeyInternalServerError %+v", 500, o.Payload) +} + +func (o *RotateUserAPIKeyInternalServerError) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *RotateUserAPIKeyInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := 
consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/users/users_client.go b/platform/dbops/binaries/weaviate-src/client/users/users_client.go new file mode 100644 index 0000000000000000000000000000000000000000..b46ac14cc8adab585ce82b064949a26bd0f63420 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/users/users_client.go @@ -0,0 +1,378 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package users + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" +) + +// New creates a new users API client. 
+func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService { + return &Client{transport: transport, formats: formats} +} + +/* +Client for users API +*/ +type Client struct { + transport runtime.ClientTransport + formats strfmt.Registry +} + +// ClientOption is the option for Client methods +type ClientOption func(*runtime.ClientOperation) + +// ClientService is the interface for Client methods +type ClientService interface { + ActivateUser(params *ActivateUserParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ActivateUserOK, error) + + CreateUser(params *CreateUserParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*CreateUserCreated, error) + + DeactivateUser(params *DeactivateUserParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*DeactivateUserOK, error) + + DeleteUser(params *DeleteUserParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*DeleteUserNoContent, error) + + GetOwnInfo(params *GetOwnInfoParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetOwnInfoOK, error) + + GetUserInfo(params *GetUserInfoParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetUserInfoOK, error) + + ListAllUsers(params *ListAllUsersParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListAllUsersOK, error) + + RotateUserAPIKey(params *RotateUserAPIKeyParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*RotateUserAPIKeyOK, error) + + SetTransport(transport runtime.ClientTransport) +} + +/* +ActivateUser activates a deactivated user +*/ +func (a *Client) ActivateUser(params *ActivateUserParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ActivateUserOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewActivateUserParams() + } + op := &runtime.ClientOperation{ + ID: "activateUser", + Method: "POST", + PathPattern: "/users/db/{user_id}/activate", + 
ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &ActivateUserReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*ActivateUserOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for activateUser: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +CreateUser creates new user +*/ +func (a *Client) CreateUser(params *CreateUserParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*CreateUserCreated, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCreateUserParams() + } + op := &runtime.ClientOperation{ + ID: "createUser", + Method: "POST", + PathPattern: "/users/db/{user_id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &CreateUserReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*CreateUserCreated) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for createUser: API contract not 
enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +DeactivateUser deactivates a user +*/ +func (a *Client) DeactivateUser(params *DeactivateUserParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*DeactivateUserOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewDeactivateUserParams() + } + op := &runtime.ClientOperation{ + ID: "deactivateUser", + Method: "POST", + PathPattern: "/users/db/{user_id}/deactivate", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &DeactivateUserReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*DeactivateUserOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for deactivateUser: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +DeleteUser deletes user +*/ +func (a *Client) DeleteUser(params *DeleteUserParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*DeleteUserNoContent, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewDeleteUserParams() + } + op := &runtime.ClientOperation{ + ID: "deleteUser", + Method: "DELETE", + PathPattern: "/users/db/{user_id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &DeleteUserReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*DeleteUserNoContent) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for deleteUser: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +GetOwnInfo gets info relevant to own user e g username roles +*/ +func (a *Client) GetOwnInfo(params *GetOwnInfoParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetOwnInfoOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewGetOwnInfoParams() + } + op := &runtime.ClientOperation{ + ID: "getOwnInfo", + Method: "GET", + PathPattern: "/users/own-info", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &GetOwnInfoReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*GetOwnInfoOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for getOwnInfo: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +GetUserInfo gets info relevant to user e g username roles +*/ +func (a *Client) GetUserInfo(params *GetUserInfoParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetUserInfoOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewGetUserInfoParams() + } + op := &runtime.ClientOperation{ + ID: "getUserInfo", + Method: "GET", + PathPattern: "/users/db/{user_id}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &GetUserInfoReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*GetUserInfoOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for getUserInfo: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +ListAllUsers lists all db users +*/ +func (a *Client) ListAllUsers(params *ListAllUsersParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListAllUsersOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewListAllUsersParams() + } + op := &runtime.ClientOperation{ + ID: "listAllUsers", + Method: "GET", + PathPattern: "/users/db", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &ListAllUsersReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*ListAllUsersOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for listAllUsers: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +RotateUserAPIKey rotates user api key +*/ +func (a *Client) RotateUserAPIKey(params *RotateUserAPIKeyParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*RotateUserAPIKeyOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewRotateUserAPIKeyParams() + } + op := &runtime.ClientOperation{ + ID: "rotateUserApiKey", + Method: "POST", + PathPattern: "/users/db/{user_id}/rotate-key", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &RotateUserAPIKeyReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*RotateUserAPIKeyOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for rotateUserApiKey: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + panic(msg) +} + +// SetTransport changes the transport on the client +func (a *Client) SetTransport(transport runtime.ClientTransport) { + a.transport = transport +} diff --git a/platform/dbops/binaries/weaviate-src/client/well_known/get_well_known_openid_configuration_parameters.go b/platform/dbops/binaries/weaviate-src/client/well_known/get_well_known_openid_configuration_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..e4b1d203ce382c94bc49db11f72a0de3863caca4 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/well_known/get_well_known_openid_configuration_parameters.go @@ -0,0 +1,139 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package well_known + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewGetWellKnownOpenidConfigurationParams creates a new GetWellKnownOpenidConfigurationParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. 
+func NewGetWellKnownOpenidConfigurationParams() *GetWellKnownOpenidConfigurationParams { + return &GetWellKnownOpenidConfigurationParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewGetWellKnownOpenidConfigurationParamsWithTimeout creates a new GetWellKnownOpenidConfigurationParams object +// with the ability to set a timeout on a request. +func NewGetWellKnownOpenidConfigurationParamsWithTimeout(timeout time.Duration) *GetWellKnownOpenidConfigurationParams { + return &GetWellKnownOpenidConfigurationParams{ + timeout: timeout, + } +} + +// NewGetWellKnownOpenidConfigurationParamsWithContext creates a new GetWellKnownOpenidConfigurationParams object +// with the ability to set a context for a request. +func NewGetWellKnownOpenidConfigurationParamsWithContext(ctx context.Context) *GetWellKnownOpenidConfigurationParams { + return &GetWellKnownOpenidConfigurationParams{ + Context: ctx, + } +} + +// NewGetWellKnownOpenidConfigurationParamsWithHTTPClient creates a new GetWellKnownOpenidConfigurationParams object +// with the ability to set a custom HTTPClient for a request. +func NewGetWellKnownOpenidConfigurationParamsWithHTTPClient(client *http.Client) *GetWellKnownOpenidConfigurationParams { + return &GetWellKnownOpenidConfigurationParams{ + HTTPClient: client, + } +} + +/* +GetWellKnownOpenidConfigurationParams contains all the parameters to send to the API endpoint + + for the get well known openid configuration operation. + + Typically these are written to a http.Request. +*/ +type GetWellKnownOpenidConfigurationParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the get well known openid configuration params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *GetWellKnownOpenidConfigurationParams) WithDefaults() *GetWellKnownOpenidConfigurationParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the get well known openid configuration params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *GetWellKnownOpenidConfigurationParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the get well known openid configuration params +func (o *GetWellKnownOpenidConfigurationParams) WithTimeout(timeout time.Duration) *GetWellKnownOpenidConfigurationParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the get well known openid configuration params +func (o *GetWellKnownOpenidConfigurationParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the get well known openid configuration params +func (o *GetWellKnownOpenidConfigurationParams) WithContext(ctx context.Context) *GetWellKnownOpenidConfigurationParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the get well known openid configuration params +func (o *GetWellKnownOpenidConfigurationParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the get well known openid configuration params +func (o *GetWellKnownOpenidConfigurationParams) WithHTTPClient(client *http.Client) *GetWellKnownOpenidConfigurationParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the get well known openid configuration params +func (o *GetWellKnownOpenidConfigurationParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *GetWellKnownOpenidConfigurationParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { 
+ return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/well_known/get_well_known_openid_configuration_responses.go b/platform/dbops/binaries/weaviate-src/client/well_known/get_well_known_openid_configuration_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..6bb5e87e856064450360e781618fa6a02f72610b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/well_known/get_well_known_openid_configuration_responses.go @@ -0,0 +1,296 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package well_known + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + + "github.com/weaviate/weaviate/entities/models" +) + +// GetWellKnownOpenidConfigurationReader is a Reader for the GetWellKnownOpenidConfiguration structure. +type GetWellKnownOpenidConfigurationReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *GetWellKnownOpenidConfigurationReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewGetWellKnownOpenidConfigurationOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 404: + result := NewGetWellKnownOpenidConfigurationNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewGetWellKnownOpenidConfigurationInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewGetWellKnownOpenidConfigurationOK creates a GetWellKnownOpenidConfigurationOK with default headers values +func NewGetWellKnownOpenidConfigurationOK() *GetWellKnownOpenidConfigurationOK { + return &GetWellKnownOpenidConfigurationOK{} +} + +/* +GetWellKnownOpenidConfigurationOK describes a response with status code 200, with default header values. 
+ +Successful response, inspect body +*/ +type GetWellKnownOpenidConfigurationOK struct { + Payload *GetWellKnownOpenidConfigurationOKBody +} + +// IsSuccess returns true when this get well known openid configuration o k response has a 2xx status code +func (o *GetWellKnownOpenidConfigurationOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this get well known openid configuration o k response has a 3xx status code +func (o *GetWellKnownOpenidConfigurationOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get well known openid configuration o k response has a 4xx status code +func (o *GetWellKnownOpenidConfigurationOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this get well known openid configuration o k response has a 5xx status code +func (o *GetWellKnownOpenidConfigurationOK) IsServerError() bool { + return false +} + +// IsCode returns true when this get well known openid configuration o k response a status code equal to that given +func (o *GetWellKnownOpenidConfigurationOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the get well known openid configuration o k response +func (o *GetWellKnownOpenidConfigurationOK) Code() int { + return 200 +} + +func (o *GetWellKnownOpenidConfigurationOK) Error() string { + return fmt.Sprintf("[GET /.well-known/openid-configuration][%d] getWellKnownOpenidConfigurationOK %+v", 200, o.Payload) +} + +func (o *GetWellKnownOpenidConfigurationOK) String() string { + return fmt.Sprintf("[GET /.well-known/openid-configuration][%d] getWellKnownOpenidConfigurationOK %+v", 200, o.Payload) +} + +func (o *GetWellKnownOpenidConfigurationOK) GetPayload() *GetWellKnownOpenidConfigurationOKBody { + return o.Payload +} + +func (o *GetWellKnownOpenidConfigurationOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = 
new(GetWellKnownOpenidConfigurationOKBody) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetWellKnownOpenidConfigurationNotFound creates a GetWellKnownOpenidConfigurationNotFound with default headers values +func NewGetWellKnownOpenidConfigurationNotFound() *GetWellKnownOpenidConfigurationNotFound { + return &GetWellKnownOpenidConfigurationNotFound{} +} + +/* +GetWellKnownOpenidConfigurationNotFound describes a response with status code 404, with default header values. + +Not found, no oidc provider present +*/ +type GetWellKnownOpenidConfigurationNotFound struct { +} + +// IsSuccess returns true when this get well known openid configuration not found response has a 2xx status code +func (o *GetWellKnownOpenidConfigurationNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get well known openid configuration not found response has a 3xx status code +func (o *GetWellKnownOpenidConfigurationNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get well known openid configuration not found response has a 4xx status code +func (o *GetWellKnownOpenidConfigurationNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this get well known openid configuration not found response has a 5xx status code +func (o *GetWellKnownOpenidConfigurationNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this get well known openid configuration not found response a status code equal to that given +func (o *GetWellKnownOpenidConfigurationNotFound) IsCode(code int) bool { + return code == 404 +} + +// Code gets the status code for the get well known openid configuration not found response +func (o *GetWellKnownOpenidConfigurationNotFound) Code() int { + return 404 +} + +func (o *GetWellKnownOpenidConfigurationNotFound) Error() string { + return 
fmt.Sprintf("[GET /.well-known/openid-configuration][%d] getWellKnownOpenidConfigurationNotFound ", 404) +} + +func (o *GetWellKnownOpenidConfigurationNotFound) String() string { + return fmt.Sprintf("[GET /.well-known/openid-configuration][%d] getWellKnownOpenidConfigurationNotFound ", 404) +} + +func (o *GetWellKnownOpenidConfigurationNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewGetWellKnownOpenidConfigurationInternalServerError creates a GetWellKnownOpenidConfigurationInternalServerError with default headers values +func NewGetWellKnownOpenidConfigurationInternalServerError() *GetWellKnownOpenidConfigurationInternalServerError { + return &GetWellKnownOpenidConfigurationInternalServerError{} +} + +/* +GetWellKnownOpenidConfigurationInternalServerError describes a response with status code 500, with default header values. + +An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+*/ +type GetWellKnownOpenidConfigurationInternalServerError struct { + Payload *models.ErrorResponse +} + +// IsSuccess returns true when this get well known openid configuration internal server error response has a 2xx status code +func (o *GetWellKnownOpenidConfigurationInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get well known openid configuration internal server error response has a 3xx status code +func (o *GetWellKnownOpenidConfigurationInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get well known openid configuration internal server error response has a 4xx status code +func (o *GetWellKnownOpenidConfigurationInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this get well known openid configuration internal server error response has a 5xx status code +func (o *GetWellKnownOpenidConfigurationInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this get well known openid configuration internal server error response a status code equal to that given +func (o *GetWellKnownOpenidConfigurationInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the get well known openid configuration internal server error response +func (o *GetWellKnownOpenidConfigurationInternalServerError) Code() int { + return 500 +} + +func (o *GetWellKnownOpenidConfigurationInternalServerError) Error() string { + return fmt.Sprintf("[GET /.well-known/openid-configuration][%d] getWellKnownOpenidConfigurationInternalServerError %+v", 500, o.Payload) +} + +func (o *GetWellKnownOpenidConfigurationInternalServerError) String() string { + return fmt.Sprintf("[GET /.well-known/openid-configuration][%d] getWellKnownOpenidConfigurationInternalServerError %+v", 500, o.Payload) +} + +func (o *GetWellKnownOpenidConfigurationInternalServerError) GetPayload() 
*models.ErrorResponse { + return o.Payload +} + +func (o *GetWellKnownOpenidConfigurationInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +/* +GetWellKnownOpenidConfigurationOKBody get well known openid configuration o k body +swagger:model GetWellKnownOpenidConfigurationOKBody +*/ +type GetWellKnownOpenidConfigurationOKBody struct { + + // OAuth Client ID + ClientID string `json:"clientId,omitempty"` + + // The Location to redirect to + Href string `json:"href,omitempty"` + + // OAuth Scopes + Scopes []string `json:"scopes,omitempty"` +} + +// Validate validates this get well known openid configuration o k body +func (o *GetWellKnownOpenidConfigurationOKBody) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this get well known openid configuration o k body based on context it is used +func (o *GetWellKnownOpenidConfigurationOKBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (o *GetWellKnownOpenidConfigurationOKBody) MarshalBinary() ([]byte, error) { + if o == nil { + return nil, nil + } + return swag.WriteJSON(o) +} + +// UnmarshalBinary interface implementation +func (o *GetWellKnownOpenidConfigurationOKBody) UnmarshalBinary(b []byte) error { + var res GetWellKnownOpenidConfigurationOKBody + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *o = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/client/well_known/well_known_client.go b/platform/dbops/binaries/weaviate-src/client/well_known/well_known_client.go new file mode 100644 index 0000000000000000000000000000000000000000..76d6fed816c3836aea4745923ef4584e998066a3 --- /dev/null 
+++ b/platform/dbops/binaries/weaviate-src/client/well_known/well_known_client.go @@ -0,0 +1,93 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package well_known + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" +) + +// New creates a new well known API client. +func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService { + return &Client{transport: transport, formats: formats} +} + +/* +Client for well known API +*/ +type Client struct { + transport runtime.ClientTransport + formats strfmt.Registry +} + +// ClientOption is the option for Client methods +type ClientOption func(*runtime.ClientOperation) + +// ClientService is the interface for Client methods +type ClientService interface { + GetWellKnownOpenidConfiguration(params *GetWellKnownOpenidConfigurationParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetWellKnownOpenidConfigurationOK, error) + + SetTransport(transport runtime.ClientTransport) +} + +/* +GetWellKnownOpenidConfiguration os ID c discovery information if o ID c auth is enabled + +OIDC Discovery page, redirects to the token issuer if one is configured +*/ +func (a *Client) GetWellKnownOpenidConfiguration(params *GetWellKnownOpenidConfigurationParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetWellKnownOpenidConfigurationOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewGetWellKnownOpenidConfigurationParams() + } + op := &runtime.ClientOperation{ + ID: 
"GetWellKnownOpenidConfiguration", + Method: "GET", + PathPattern: "/.well-known/openid-configuration", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + Schemes: []string{"https"}, + Params: params, + Reader: &GetWellKnownOpenidConfigurationReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*GetWellKnownOpenidConfigurationOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for GetWellKnownOpenidConfiguration: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + +// SetTransport changes the transport on the client +func (a *Client) SetTransport(transport runtime.ClientTransport) { + a.transport = transport +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/bootstrap/bootstrap.go b/platform/dbops/binaries/weaviate-src/cluster/bootstrap/bootstrap.go new file mode 100644 index 0000000000000000000000000000000000000000..a056077df8500ccd1d2e7682fe96d80452214e49 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/bootstrap/bootstrap.go @@ -0,0 +1,166 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package bootstrap + +import ( + "context" + "fmt" + "math/rand" + "time" + + "github.com/getsentry/sentry-go" + "github.com/sirupsen/logrus" + cmd "github.com/weaviate/weaviate/cluster/proto/api" + "github.com/weaviate/weaviate/cluster/resolver" + entSentry "github.com/weaviate/weaviate/entities/sentry" +) + +// PeerJoiner is the interface we expect to be able to talk to the other peers to either Join or Notify them +type PeerJoiner interface { + Join(_ context.Context, leaderAddr string, _ *cmd.JoinPeerRequest) (*cmd.JoinPeerResponse, error) + Notify(_ context.Context, leaderAddr string, _ *cmd.NotifyPeerRequest) (*cmd.NotifyPeerResponse, error) +} + +// Bootstrapper is used to bootstrap this node by attempting to join it to a RAFT cluster. +type Bootstrapper struct { + peerJoiner PeerJoiner + addrResolver resolver.ClusterStateReader + isStoreReady func() bool + + localRaftAddr string + localNodeID string + voter bool + + retryPeriod time.Duration + jitter time.Duration +} + +// NewBootstrapper constructs a new bootsrapper +func NewBootstrapper(peerJoiner PeerJoiner, raftID string, raftAddr string, voter bool, r resolver.ClusterStateReader, isStoreReady func() bool) *Bootstrapper { + return &Bootstrapper{ + peerJoiner: peerJoiner, + addrResolver: r, + retryPeriod: time.Second, + jitter: time.Second, + localNodeID: raftID, + localRaftAddr: raftAddr, + isStoreReady: isStoreReady, + voter: voter, + } +} + +// Do iterates over a list of servers in an attempt to join this node to a cluster. 
+func (b *Bootstrapper) Do(ctx context.Context, serverPortMap map[string]int, lg *logrus.Logger, stop chan struct{}) error { + if entSentry.Enabled() { + transaction := sentry.StartTransaction(ctx, "raft.bootstrap", + sentry.WithOpName("init"), + sentry.WithDescription("Attempt to bootstrap a raft cluster"), + ) + ctx = transaction.Context() + defer transaction.Finish() + } + ticker := time.NewTicker(jitter(b.retryPeriod, b.jitter)) + defer ticker.Stop() + for { + select { + case <-stop: + return nil + case <-ctx.Done(): + return ctx.Err() + case <-ticker.C: + if b.isStoreReady() { + lg.WithField("action", "bootstrap").Info("node reporting ready, exiting bootstrap process") + return nil + } + + remoteNodes := ResolveRemoteNodes(b.addrResolver, serverPortMap) + // We were not able to resolve any nodes to an address + if len(remoteNodes) == 0 { + lg.WithField("action", "bootstrap").WithField("join_list", serverPortMap).Warn("unable to resolve any node address to join") + continue + } + + // Always try to join an existing cluster first + joiner := NewJoiner(b.peerJoiner, b.localNodeID, b.localRaftAddr, b.voter) + if leader, err := joiner.Do(ctx, lg, remoteNodes); err != nil { + lg.WithFields(logrus.Fields{ + "action": "bootstrap", + "servers": remoteNodes, + "voter": b.voter, + }).WithError(err).Warning("failed to join cluster") + } else { + lg.WithFields(logrus.Fields{ + "action": "bootstrap", + "leader": leader, + }).Info("successfully joined cluster") + return nil + } + + // We are a voter, we resolve other peers but we're unable to join them. We're in the situation where we are + // bootstrapping a new cluster and now we want to notify the other nodes. + // Each node on notify will build a list of notified node. Once bootstrap expect is reached the nodes will + // bootstrap together. 
+ if b.voter { + // notify other servers about readiness of this node to be joined + if err := b.notify(ctx, remoteNodes); err != nil { + lg.WithFields(logrus.Fields{ + "action": "bootstrap", + "servers": remoteNodes, + }).WithError(err).Error("failed to notify peers") + continue + } + lg.WithFields(logrus.Fields{ + "action": "bootstrap", + "servers": remoteNodes, + }).Info("notified peers this node is ready to join as voter") + } + } + } +} + +// notify attempts to notify all nodes in remoteNodes that this server is ready to bootstrap +func (b *Bootstrapper) notify(ctx context.Context, remoteNodes map[string]string) (err error) { + if entSentry.Enabled() { + span := sentry.StartSpan(ctx, "raft.bootstrap.notify", + sentry.WithOpName("notify"), + sentry.WithDescription("Attempt to notify existing node(s) to join a cluster"), + ) + ctx = span.Context() + span.SetData("servers", remoteNodes) + defer span.Finish() + } + for _, addr := range remoteNodes { + req := &cmd.NotifyPeerRequest{Id: b.localNodeID, Address: b.localRaftAddr} + _, err = b.peerJoiner.Notify(ctx, addr, req) + if err != nil { + return err + } + } + return +} + +// ResolveRemoteNodes returns a list of remoteNodes addresses resolved using addrResolver. 
The nodes id used are +// taken from serverPortMap keys and ports from the values +func ResolveRemoteNodes(addrResolver resolver.ClusterStateReader, serverPortMap map[string]int) map[string]string { + candidates := make(map[string]string, len(serverPortMap)) + for name, raftPort := range serverPortMap { + if addr := addrResolver.NodeAddress(name); addr != "" { + candidates[name] = fmt.Sprintf("%s:%d", addr, raftPort) + } + } + return candidates +} + +// jitter introduce some jitter to a given duration d + [0, 1) * jit -> [d, d+jit] +func jitter(d time.Duration, jit time.Duration) time.Duration { + return d + time.Duration(float64(jit)*rand.Float64()) +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/bootstrap/bootstrap_test.go b/platform/dbops/binaries/weaviate-src/cluster/bootstrap/bootstrap_test.go new file mode 100644 index 0000000000000000000000000000000000000000..d3aab330b03b0e7466ccc52b81f755d256062c62 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/bootstrap/bootstrap_test.go @@ -0,0 +1,153 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package bootstrap + +import ( + "context" + "errors" + "testing" + "time" + + logrustest "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/mock" + "github.com/weaviate/weaviate/usecases/cluster/mocks" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + cmd "github.com/weaviate/weaviate/cluster/proto/api" +) + +var errAny = errors.New("any error") + +func TestBootstrapper(t *testing.T) { + ctx := context.Background() + anything := mock.Anything + nodesSlice := []string{"S1", "S2"} + nodes := map[string]int{"S1": 1, "S2": 2} + + tests := []struct { + name string + voter bool + nodes map[string]int + doBefore func(*MockNodeClient) + isReady func() bool + success bool + }{ + { + name: "empty server list", + voter: true, + nodes: nil, + doBefore: func(m *MockNodeClient) {}, + isReady: func() bool { return false }, + success: false, + }, + { + name: "leader exist", + voter: true, + nodes: nodes, + doBefore: func(m *MockNodeClient) { + m.On("Join", anything, anything, anything).Return(&cmd.JoinPeerResponse{}, nil) + }, + isReady: func() bool { return false }, + success: true, + }, + { + name: "nodes not available", + voter: true, + nodes: nodes, + doBefore: func(m *MockNodeClient) { + m.On("Join", anything, "S1:1", anything).Return(&cmd.JoinPeerResponse{}, errAny) + m.On("Join", anything, "S2:2", anything).Return(&cmd.JoinPeerResponse{}, errAny) + + m.On("Notify", anything, "S1:1", anything).Return(&cmd.NotifyPeerResponse{}, nil) + m.On("Notify", anything, "S2:2", anything).Return(&cmd.NotifyPeerResponse{}, errAny) + }, + isReady: func() bool { return false }, + success: false, + }, + { + name: "follow the leader", + voter: true, + nodes: nodes, + doBefore: func(m *MockNodeClient) { + // This test performs a join request to the leader, but the leader is not + // available. The bootstrapper should retry the join request until it is + // successful. 
+ errLeaderElected := status.Error(codes.NotFound, "follow the leader") + count := 0 + m.On("Join", anything, anything, anything). + Run(func(args mock.Arguments) { + count++ + switch count { + case 1: + m.ExpectedCalls[len(m.ExpectedCalls)-1].ReturnArguments = mock.Arguments{&cmd.JoinPeerResponse{}, errAny} + case 2: + m.ExpectedCalls[len(m.ExpectedCalls)-1].ReturnArguments = mock.Arguments{&cmd.JoinPeerResponse{Leader: "Leader"}, errLeaderElected} + case 3: + m.ExpectedCalls[len(m.ExpectedCalls)-1].ReturnArguments = mock.Arguments{&cmd.JoinPeerResponse{}, nil} + } + }).Times(3) + }, + isReady: func() bool { return false }, + success: true, + }, + { + name: "exit early on cluster ready", + voter: true, + nodes: nodes, + doBefore: func(m *MockNodeClient) {}, + isReady: func() bool { return true }, + success: true, + }, + } + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + // Ensure the mocks are setup + m := &MockNodeClient{} + test.doBefore(m) + + // Configure the bootstrapper + b := NewBootstrapper(m, "RID", "ADDR", test.voter, mocks.NewMockNodeSelector(nodesSlice...), test.isReady) + b.retryPeriod = time.Millisecond + b.jitter = time.Millisecond + ctx, cancel := context.WithTimeout(ctx, time.Millisecond*100) + logger, _ := logrustest.NewNullLogger() + + // Do the bootstrap + err := b.Do(ctx, test.nodes, logger, make(chan struct{})) + cancel() + + // Check all assertions + if test.success && err != nil { + t.Errorf("%s: %v", test.name, err) + } else if !test.success && err == nil { + t.Errorf("%s: test must fail", test.name) + } + m.AssertExpectations(t) + }) + } +} + +type MockNodeClient struct { + mock.Mock +} + +func (m *MockNodeClient) Join(ctx context.Context, leaderAddr string, req *cmd.JoinPeerRequest) (*cmd.JoinPeerResponse, error) { + args := m.Called(ctx, leaderAddr, req) + return args.Get(0).(*cmd.JoinPeerResponse), args.Error(1) +} + +func (m *MockNodeClient) Notify(ctx context.Context, leaderAddr string, req 
*cmd.NotifyPeerRequest) (*cmd.NotifyPeerResponse, error) { + args := m.Called(ctx, leaderAddr, req) + return args.Get(0).(*cmd.NotifyPeerResponse), args.Error(1) +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/bootstrap/joiner.go b/platform/dbops/binaries/weaviate-src/cluster/bootstrap/joiner.go new file mode 100644 index 0000000000000000000000000000000000000000..163f8219d14f60bd825a67ef11533cb2710af1b9 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/bootstrap/joiner.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package bootstrap + +import ( + "context" + "fmt" + + "github.com/getsentry/sentry-go" + "github.com/sirupsen/logrus" + cmd "github.com/weaviate/weaviate/cluster/proto/api" + entSentry "github.com/weaviate/weaviate/entities/sentry" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +type Joiner struct { + localRaftAddr string + localNodeID string + voter bool + peerJoiner PeerJoiner +} + +// NewJoiner returns a *Joiner configured with localNodeID, localRaftAddr and voter. +func NewJoiner(peerJoiner PeerJoiner, localNodeID string, localRaftAddr string, voter bool) *Joiner { + return &Joiner{ + peerJoiner: peerJoiner, + localNodeID: localNodeID, + localRaftAddr: localRaftAddr, + voter: voter, + } +} + +// Do will attempt to send to any nodes in remoteNodes a JoinPeerRequest for j.localNodeID with the address j.localRaftAddr. +// Will join as voter if j.voter is true, non voter otherwise. +// Returns the leader address if a cluster was joined or an error otherwise. 
+func (j *Joiner) Do(ctx context.Context, lg *logrus.Logger, remoteNodes map[string]string) (string, error) { + if entSentry.Enabled() { + span := sentry.StartSpan(ctx, "raft.bootstrap.join", + sentry.WithOpName("join"), + sentry.WithDescription("Attempt to join an existing cluster"), + ) + ctx = span.Context() + span.SetData("servers", remoteNodes) + defer span.Finish() + } + + var resp *cmd.JoinPeerResponse + var err error + req := &cmd.JoinPeerRequest{Id: j.localNodeID, Address: j.localRaftAddr, Voter: j.voter} + lg.WithField("remoteNodes", remoteNodes).Info("attempting to join") + + // For each server, try to join. + // If we have no error then we have a leader + // If we have an error check for err == NOT_FOUND and leader != "" -> we contacted a non-leader node part of the + // cluster, let's join the leader. + // If no server allows us to join a cluster, return an error + for _, addr := range remoteNodes { + resp, err = j.peerJoiner.Join(ctx, addr, req) + if err == nil { + return addr, nil + } + st := status.Convert(err) + lg.WithField("remoteNode", addr).WithField("status", st.Code()).Info("attempted to join and failed") + // Get the leader from response and if not empty try to join it + if leader := resp.GetLeader(); st.Code() == codes.NotFound && leader != "" { + _, err = j.peerJoiner.Join(ctx, leader, req) + if err == nil { + return leader, nil + } + lg.WithField("leader", leader).WithError(err).Info("attempted to follow to leader and failed") + } + } + return "", fmt.Errorf("could not join a cluster from %v", remoteNodes) +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/distributedtask/manager.go b/platform/dbops/binaries/weaviate-src/cluster/distributedtask/manager.go new file mode 100644 index 0000000000000000000000000000000000000000..5b203e94a9f1713050b91ee22efedcdd99e1fb97 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/distributedtask/manager.go @@ -0,0 +1,271 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / 
/ _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package distributedtask + +import ( + "context" + "encoding/json" + "fmt" + "sync" + "time" + + "github.com/jonboulle/clockwork" + "github.com/weaviate/weaviate/cluster/proto/api" +) + +// Manager is responsible for managing distributed tasks across the cluster. +type Manager struct { + mu sync.Mutex + tasks map[string]map[string]*Task // namespace -> taskID -> Task + + completedTaskTTL time.Duration + + clock clockwork.Clock +} + +type ManagerParameters struct { + Clock clockwork.Clock + + CompletedTaskTTL time.Duration +} + +func NewManager(params ManagerParameters) *Manager { + if params.Clock == nil { + params.Clock = clockwork.NewRealClock() + } + + return &Manager{ + tasks: make(map[string]map[string]*Task), + + completedTaskTTL: params.CompletedTaskTTL, + + clock: params.Clock, + } +} + +func (m *Manager) AddTask(c *api.ApplyRequest, seqNum uint64) error { + var r api.AddDistributedTaskRequest + if err := json.Unmarshal(c.SubCommand, &r); err != nil { + return fmt.Errorf("unmarshal add task request: %w", err) + } + + m.mu.Lock() + defer m.mu.Unlock() + + task := m.findTaskWithLock(r.Namespace, r.Id) + if task != nil { + if task.Status == TaskStatusStarted { + return fmt.Errorf("task %s/%s is already running with version %d", r.Namespace, r.Id, task.Version) + } + + if seqNum <= task.Version { + return fmt.Errorf("task %s/%s is already finished with version %d", r.Namespace, r.Id, task.Version) + } + } + + m.setTaskWithLock(&Task{ + Namespace: r.Namespace, + TaskDescriptor: TaskDescriptor{ID: r.Id, Version: seqNum}, + Payload: r.Payload, + Status: TaskStatusStarted, + StartedAt: time.UnixMilli(r.SubmittedAtUnixMillis), + FinishedNodes: map[string]bool{}, + }) + + return nil +} + +func (m *Manager) RecordNodeCompletion(c 
*api.ApplyRequest, numberOfNodesInTheCluster int) error { + var r api.RecordDistributedTaskNodeCompletionRequest + if err := json.Unmarshal(c.SubCommand, &r); err != nil { + return fmt.Errorf("unmarshal record task node completion request: %w", err) + } + + m.mu.Lock() + defer m.mu.Unlock() + + task, err := m.findVersionedTaskWithLock(r.Namespace, r.Id, r.Version) + if err != nil { + return err + } + + if task.Status != TaskStatusStarted { + return fmt.Errorf("task %s/%s/%d is no longer running", r.Namespace, r.Id, task.Version) + } + + if r.Error != nil { + task.Status = TaskStatusFailed + task.Error = *r.Error + task.FinishedAt = time.UnixMilli(r.FinishedAtUnixMillis) + return nil + } + + task.FinishedNodes[r.NodeId] = true + if len(task.FinishedNodes) == numberOfNodesInTheCluster { + task.Status = TaskStatusFinished + task.FinishedAt = time.UnixMilli(r.FinishedAtUnixMillis) + return nil + } + + return nil +} + +func (m *Manager) CancelTask(a *api.ApplyRequest) error { + var r api.CancelDistributedTaskRequest + if err := json.Unmarshal(a.SubCommand, &r); err != nil { + return fmt.Errorf("unmarshal cancel task request: %w", err) + } + + m.mu.Lock() + defer m.mu.Unlock() + + task, err := m.findVersionedTaskWithLock(r.Namespace, r.Id, r.Version) + if err != nil { + return err + } + + if task.Status != TaskStatusStarted { + return fmt.Errorf("task %s/%s/%d is no longer running", r.Namespace, r.Id, task.Version) + } + + task.Status = TaskStatusCancelled + task.FinishedAt = time.UnixMilli(r.CancelledAtUnixMillis) + return nil +} + +func (m *Manager) CleanUpTask(a *api.ApplyRequest) error { + var r api.CleanUpDistributedTaskRequest + if err := json.Unmarshal(a.SubCommand, &r); err != nil { + return fmt.Errorf("unmarshal clean up task request: %w", err) + } + + m.mu.Lock() + defer m.mu.Unlock() + + task, err := m.findVersionedTaskWithLock(r.Namespace, r.Id, r.Version) + if err != nil { + return err + } + + if task.Status == TaskStatusStarted { + return fmt.Errorf("task 
%s/%s/%d is still running", r.Namespace, r.Id, task.Version) + } + + if m.clock.Since(task.FinishedAt) <= m.completedTaskTTL { + return fmt.Errorf("task %s/%s/%d is too fresh to clean up", r.Namespace, r.Id, task.Version) + } + + delete(m.tasks[task.Namespace], task.ID) + return nil +} + +func (m *Manager) ListDistributedTasks(_ context.Context) (map[string][]*Task, error) { + m.mu.Lock() + defer m.mu.Unlock() + + result := make(map[string][]*Task, len(m.tasks)) + for namespace, tasks := range m.tasks { + if len(tasks) == 0 { + continue + } + + result[namespace] = make([]*Task, 0, len(tasks)) + for _, task := range tasks { + result[namespace] = append(result[namespace], task.Clone()) + } + } + return result, nil +} + +func (m *Manager) ListDistributedTasksPayload(ctx context.Context) ([]byte, error) { + tasks, err := m.ListDistributedTasks(ctx) + if err != nil { + return nil, fmt.Errorf("list distributed tasks: %w", err) + } + + return json.Marshal(&ListDistributedTasksResponse{ + Tasks: tasks, + }) +} + +func (m *Manager) findVersionedTaskWithLock(namespace, taskID string, taskVersion uint64) (*Task, error) { + task := m.findTaskWithLock(namespace, taskID) + if task == nil || task.Version != taskVersion { + return nil, fmt.Errorf("task %s/%s/%d does not exist", namespace, taskID, taskVersion) + } + + return task, nil +} + +func (m *Manager) findTaskWithLock(namespace, taskID string) *Task { + tasksNamespace, ok := m.tasks[namespace] + if !ok { + return nil + } + + task, ok := tasksNamespace[taskID] + if !ok { + return nil + } + + return task +} + +func (m *Manager) setTaskWithLock(task *Task) { + if _, ok := m.tasks[task.Namespace]; !ok { + m.tasks[task.Namespace] = make(map[string]*Task) + } + + m.tasks[task.Namespace][task.ID] = task +} + +type snapshot struct { + Tasks map[string][]*Task `json:"tasks,omitempty"` +} + +func (m *Manager) Snapshot() ([]byte, error) { + tasks, err := m.ListDistributedTasks(context.Background()) + if err != nil { + return nil, 
fmt.Errorf("list tasks: %w", err) + } + + bytes, err := json.Marshal(&snapshot{ + Tasks: tasks, + }) + if err != nil { + return nil, fmt.Errorf("marshal snapshot: %w", err) + } + + return bytes, nil +} + +func (m *Manager) Restore(bytes []byte) error { + var s snapshot + if err := json.Unmarshal(bytes, &s); err != nil { + return fmt.Errorf("unmarshal snapshot: %w", err) + } + + m.mu.Lock() + defer m.mu.Unlock() + + for namespace, tasks := range s.Tasks { + for _, task := range tasks { + if _, ok := m.tasks[namespace]; !ok { + m.tasks[namespace] = make(map[string]*Task) + } + + m.tasks[namespace][task.ID] = task + } + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/distributedtask/manager_test.go b/platform/dbops/binaries/weaviate-src/cluster/distributedtask/manager_test.go new file mode 100644 index 0000000000000000000000000000000000000000..3ee5c24273b95c7329596118c8ffd29233284a84 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/distributedtask/manager_test.go @@ -0,0 +1,527 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package distributedtask + +import ( + "context" + "encoding/json" + "sort" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + cmd "github.com/weaviate/weaviate/cluster/proto/api" +) + +func TestManager_AddTask_Failures(t *testing.T) { + t.Run("add duplicate task", func(t *testing.T) { + var ( + h = newTestHarness(t).init(t) + c = toCmd(t, &cmd.AddDistributedTaskRequest{ + Namespace: "test", + Id: "1", + SubmittedAtUnixMillis: time.Now().UnixMilli(), + }) + version uint64 = 100 + ) + + err := h.manager.AddTask(c, version) + require.NoError(t, err) + + err = h.manager.AddTask(c, version) + require.ErrorContains(t, err, "already running") + }) + + t.Run("add task with the same version as already finished one", func(t *testing.T) { + var ( + h = newTestHarness(t).init(t) + + namespace = "test" + taskID = "1" + addTaskCmd = toCmd(t, &cmd.AddDistributedTaskRequest{ + Namespace: namespace, + Id: taskID, + SubmittedAtUnixMillis: h.clock.Now().UnixMilli(), + }) + version uint64 = 100 + ) + + err := h.manager.AddTask(addTaskCmd, version) + require.NoError(t, err) + + err = h.manager.RecordNodeCompletion(toCmd(t, &cmd.RecordDistributedTaskNodeCompletionRequest{ + Namespace: namespace, + Id: taskID, + Version: version, + NodeId: "local-node", + FinishedAtUnixMillis: h.clock.Now().UnixMilli(), + }), 1) + require.NoError(t, err) + + err = h.manager.AddTask(addTaskCmd, version) + require.ErrorContains(t, err, "already finished with version") + }) + + t.Run("add task with a lower version as already finished one", func(t *testing.T) { + var ( + h = newTestHarness(t).init(t) + + namespace = "test" + taskID = "1" + addTaskCmd = toCmd(t, &cmd.AddDistributedTaskRequest{ + Namespace: namespace, + Id: taskID, + SubmittedAtUnixMillis: h.clock.Now().UnixMilli(), + }) + version uint64 = 100 + ) + + err := h.manager.AddTask(addTaskCmd, version) + require.NoError(t, err) + + err = 
h.manager.RecordNodeCompletion(toCmd(t, &cmd.RecordDistributedTaskNodeCompletionRequest{ + Namespace: namespace, + Id: taskID, + Version: version, + NodeId: "local-node", + FinishedAtUnixMillis: h.clock.Now().UnixMilli(), + }), 1) + require.NoError(t, err) + + err = h.manager.AddTask(addTaskCmd, version-10) + require.ErrorContains(t, err, "already finished with version") + }) +} + +func TestManager_RecordNodeCompletion_Failures(t *testing.T) { + t.Run("task does not exist", func(t *testing.T) { + var ( + h = newTestHarness(t).init(t) + c = toCmd(t, &cmd.RecordDistributedTaskNodeCompletionRequest{ + Namespace: "test", + Id: "1", + Version: 1, + NodeId: "local-node", + FinishedAtUnixMillis: h.clock.Now().UnixMilli(), + }) + ) + + err := h.manager.RecordNodeCompletion(c, 1) + require.ErrorContains(t, err, "does not exist") + }) + + t.Run("task with the given version does not exist", func(t *testing.T) { + var ( + h = newTestHarness(t).init(t) + + namespace = "test" + taskID = "1" + version uint64 = 10 + + addCmd = toCmd(t, &cmd.AddDistributedTaskRequest{ + Namespace: namespace, + Id: taskID, + SubmittedAtUnixMillis: h.clock.Now().UnixMilli(), + }) + + completeCmd = toCmd(t, &cmd.RecordDistributedTaskNodeCompletionRequest{ + Namespace: namespace, + Id: taskID, + Version: 1, + NodeId: "local-node", + FinishedAtUnixMillis: h.clock.Now().UnixMilli(), + }) + ) + + err := h.manager.AddTask(addCmd, version) + require.NoError(t, err) + + err = h.manager.RecordNodeCompletion(completeCmd, 1) + require.ErrorContains(t, err, "does not exist") + }) + + t.Run("task is already completed", func(t *testing.T) { + var ( + h = newTestHarness(t).init(t) + + namespace = "test" + taskID = "1" + version uint64 = 10 + + addCmd = toCmd(t, &cmd.AddDistributedTaskRequest{ + Namespace: namespace, + Id: taskID, + SubmittedAtUnixMillis: h.clock.Now().UnixMilli(), + }) + + completeCmd = toCmd(t, &cmd.RecordDistributedTaskNodeCompletionRequest{ + Namespace: namespace, + Id: taskID, + Version: 
version, + NodeId: "local-node", + FinishedAtUnixMillis: h.clock.Now().UnixMilli(), + }) + ) + + err := h.manager.AddTask(addCmd, version) + require.NoError(t, err) + + err = h.manager.RecordNodeCompletion(completeCmd, 1) + require.NoError(t, err) + + err = h.manager.RecordNodeCompletion(completeCmd, 1) + require.ErrorContains(t, err, "no longer running") + }) +} + +func TestManager_CancelTask_Failures(t *testing.T) { + t.Run("task does not exist", func(t *testing.T) { + var ( + h = newTestHarness(t).init(t) + c = toCmd(t, &cmd.CancelDistributedTaskRequest{ + Namespace: "test", + Id: "1", + Version: 1, + CancelledAtUnixMillis: h.clock.Now().UnixMilli(), + }) + ) + + err := h.manager.CancelTask(c) + require.ErrorContains(t, err, "does not exist") + }) + + t.Run("task with the given version does not exist", func(t *testing.T) { + var ( + h = newTestHarness(t).init(t) + + namespace = "test" + taskID = "1" + version uint64 = 10 + + addCmd = toCmd(t, &cmd.AddDistributedTaskRequest{ + Namespace: namespace, + Id: taskID, + SubmittedAtUnixMillis: h.clock.Now().UnixMilli(), + }) + + cancelCmd = toCmd(t, &cmd.CancelDistributedTaskRequest{ + Namespace: namespace, + Id: taskID, + Version: version - 1, + CancelledAtUnixMillis: h.clock.Now().UnixMilli(), + }) + ) + + err := h.manager.AddTask(addCmd, version) + require.NoError(t, err) + + err = h.manager.CancelTask(cancelCmd) + require.ErrorContains(t, err, "does not exist") + }) + + t.Run("task is already cancelled", func(t *testing.T) { + var ( + h = newTestHarness(t).init(t) + + namespace = "test" + taskID = "1" + version uint64 = 10 + + addCmd = toCmd(t, &cmd.AddDistributedTaskRequest{ + Namespace: namespace, + Id: taskID, + SubmittedAtUnixMillis: h.clock.Now().UnixMilli(), + }) + + cancelCmd = toCmd(t, &cmd.CancelDistributedTaskRequest{ + Namespace: "test", + Id: "1", + Version: version, + CancelledAtUnixMillis: h.clock.Now().UnixMilli(), + }) + ) + + err := h.manager.AddTask(addCmd, version) + require.NoError(t, err) + + 
err = h.manager.CancelTask(cancelCmd) + require.NoError(t, err) + + err = h.manager.CancelTask(cancelCmd) + require.ErrorContains(t, err, "no longer running") + }) +} + +func TestManager_CleanUpTask_Failures(t *testing.T) { + t.Run("task does not exist", func(t *testing.T) { + var ( + h = newTestHarness(t).init(t) + c = toCmd(t, &cmd.CleanUpDistributedTaskRequest{ + Namespace: "test", + Id: "1", + Version: 1, + }) + ) + + err := h.manager.CleanUpTask(c) + require.ErrorContains(t, err, "does not exist") + }) + + t.Run("task with the given version does not exist", func(t *testing.T) { + var ( + h = newTestHarness(t).init(t) + + namespace = "test" + taskID = "1" + version uint64 = 10 + + addCmd = toCmd(t, &cmd.AddDistributedTaskRequest{ + Namespace: namespace, + Id: taskID, + SubmittedAtUnixMillis: h.clock.Now().Add(-3 * h.completedTaskTTL).UnixMilli(), + }) + + cleanUpCmd = toCmd(t, &cmd.CleanUpDistributedTaskRequest{ + Namespace: namespace, + Id: taskID, + Version: version - 1, + }) + ) + + err := h.manager.AddTask(addCmd, version) + require.NoError(t, err) + + err = h.manager.CleanUpTask(cleanUpCmd) + require.ErrorContains(t, err, "does not exist") + }) + + t.Run("task is still running", func(t *testing.T) { + var ( + h = newTestHarness(t).init(t) + + namespace = "test" + taskID = "1" + version uint64 = 10 + + addCmd = toCmd(t, &cmd.AddDistributedTaskRequest{ + Namespace: namespace, + Id: taskID, + SubmittedAtUnixMillis: h.clock.Now().Add(-3 * h.completedTaskTTL).UnixMilli(), + }) + + cleanUpCmd = toCmd(t, &cmd.CleanUpDistributedTaskRequest{ + Namespace: namespace, + Id: taskID, + Version: version, + }) + ) + + err := h.manager.AddTask(addCmd, version) + require.NoError(t, err) + + err = h.manager.CleanUpTask(cleanUpCmd) + require.ErrorContains(t, err, "still running") + }) + + t.Run("completed task TTL did not pass yet", func(t *testing.T) { + var ( + h = newTestHarness(t).init(t) + + namespace = "test" + taskID = "1" + version uint64 = 10 + + addCmd = toCmd(t, 
&cmd.AddDistributedTaskRequest{ + Namespace: namespace, + Id: taskID, + SubmittedAtUnixMillis: h.clock.Now().Add(-3 * h.completedTaskTTL).UnixMilli(), + }) + + cancelCmd = toCmd(t, &cmd.CancelDistributedTaskRequest{ + Namespace: namespace, + Id: taskID, + Version: version, + CancelledAtUnixMillis: h.clock.Now().Add(-h.completedTaskTTL).Add(time.Minute).UnixMilli(), + }) + + cleanUpCmd = toCmd(t, &cmd.CleanUpDistributedTaskRequest{ + Namespace: namespace, + Id: taskID, + Version: version, + }) + ) + + err := h.manager.AddTask(addCmd, version) + require.NoError(t, err) + + err = h.manager.CancelTask(cancelCmd) + require.NoError(t, err) + + err = h.manager.CleanUpTask(cleanUpCmd) + require.ErrorContains(t, err, "too fresh") + }) +} + +func TestManager_ListDistributedTasksPayload(t *testing.T) { + var ( + h = newTestHarness(t).init(t) + now = h.clock.Now().Local().Truncate(time.Millisecond) + ) + + expectedTasks := ingestSampleTasks(t, h.manager, now) + + payload, err := h.manager.ListDistributedTasksPayload(context.Background()) + require.NoError(t, err) + + var resp ListDistributedTasksResponse + require.NoError(t, json.Unmarshal(payload, &resp)) + + assertTasks(t, expectedTasks, resp.Tasks) +} + +func TestManager_SnapshotRestore(t *testing.T) { + var ( + h = newTestHarness(t).init(t) + now = h.clock.Now().Truncate(time.Millisecond) + ) + + expectedTasks := ingestSampleTasks(t, h.manager, now) + + snap, err := h.manager.Snapshot() + require.NoError(t, err) + + h = newTestHarness(t).init(t) + require.NoError(t, h.manager.Restore(snap)) + + tasks, err := h.manager.ListDistributedTasks(context.Background()) + require.NoError(t, err) + + assertTasks(t, expectedTasks, tasks) +} + +func ingestSampleTasks(t *testing.T, m *Manager, now time.Time) map[string][]*Task { + require.NoError(t, m.AddTask(toCmd(t, &cmd.AddDistributedTaskRequest{ + Namespace: "ns1", + Id: "task1", + Payload: []byte("test1"), + SubmittedAtUnixMillis: now.UnixMilli(), + }), 10)) + + require.NoError(t, 
m.CancelTask(toCmd(t, &cmd.CancelDistributedTaskRequest{ + Namespace: "ns1", + Id: "task1", + Version: 10, + CancelledAtUnixMillis: now.Add(time.Minute).UnixMilli(), + }))) + + require.NoError(t, m.AddTask(toCmd(t, &cmd.AddDistributedTaskRequest{ + Namespace: "ns1", + Id: "task2", + Payload: []byte("test2"), + SubmittedAtUnixMillis: now.UnixMilli(), + }), 13)) + + require.NoError(t, m.RecordNodeCompletion(toCmd(t, &cmd.RecordDistributedTaskNodeCompletionRequest{ + Namespace: "ns1", + Id: "task2", + Version: 13, + NodeId: "local-node", + FinishedAtUnixMillis: now.Add(time.Minute).UnixMilli(), + }), 1)) + + require.NoError(t, m.AddTask(toCmd(t, &cmd.AddDistributedTaskRequest{ + Namespace: "ns2", + Id: "task3", + Payload: []byte("test3"), + SubmittedAtUnixMillis: now.UnixMilli(), + }), 15)) + + return map[string][]*Task{ + "ns1": { + { + Namespace: "ns1", + TaskDescriptor: TaskDescriptor{ + ID: "task1", + Version: 10, + }, + Payload: []byte("test1"), + Status: TaskStatusCancelled, + StartedAt: now, + FinishedAt: now.Add(time.Minute), + FinishedNodes: map[string]bool{}, + }, + { + Namespace: "ns1", + TaskDescriptor: TaskDescriptor{ + ID: "task2", + Version: 13, + }, + Payload: []byte("test2"), + Status: TaskStatusFinished, + StartedAt: now, + FinishedAt: now.Add(time.Minute), + FinishedNodes: map[string]bool{ + "local-node": true, + }, + }, + }, + "ns2": { + { + Namespace: "ns2", + TaskDescriptor: TaskDescriptor{ + ID: "task3", + Version: 15, + }, + Payload: []byte("test3"), + Status: TaskStatusStarted, + StartedAt: now, + FinishedNodes: map[string]bool{}, + }, + }, + } +} + +func assertTasks(t *testing.T, expected, actual map[string][]*Task) { + require.Equal(t, len(expected), len(actual)) + for namespace := range expected { + expectedTasks, ok := expected[namespace] + require.True(t, ok) + + actualTasks, ok := actual[namespace] + require.True(t, ok) + + require.Equal(t, len(expectedTasks), len(actualTasks)) + sortTasks := func(tasks []*Task) { + sort.Slice(tasks, 
func(i, j int) bool { + return tasks[i].ID < tasks[j].ID + }) + } + sortTasks(expectedTasks) + sortTasks(actualTasks) + + for i := range expectedTasks { + assertTask(t, expectedTasks[i], actualTasks[i]) + } + } +} + +func assertTask(t *testing.T, expected, actual *Task) { + assert.Equal(t, expected.Namespace, actual.Namespace) + assert.Equal(t, expected.TaskDescriptor, actual.TaskDescriptor) + assert.Equal(t, expected.Payload, actual.Payload) + assert.Equal(t, expected.Status, actual.Status) + assert.Equal(t, expected.StartedAt.UTC(), actual.StartedAt.UTC()) + assert.Equal(t, expected.FinishedAt.UTC(), actual.FinishedAt.UTC()) + assert.Equal(t, expected.Error, actual.Error) + assert.Equal(t, expected.FinishedNodes, actual.FinishedNodes) +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/distributedtask/mock_task_cleaner.go b/platform/dbops/binaries/weaviate-src/cluster/distributedtask/mock_task_cleaner.go new file mode 100644 index 0000000000000000000000000000000000000000..74abcdd73183b7cdbd94e5b28007f3536efe81d5 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/distributedtask/mock_task_cleaner.go @@ -0,0 +1,96 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by mockery v2.53.2. DO NOT EDIT. 
+ +package distributedtask + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" +) + +// MockTaskCleaner is an autogenerated mock type for the TaskCleaner type +type MockTaskCleaner struct { + mock.Mock +} + +type MockTaskCleaner_Expecter struct { + mock *mock.Mock +} + +func (_m *MockTaskCleaner) EXPECT() *MockTaskCleaner_Expecter { + return &MockTaskCleaner_Expecter{mock: &_m.Mock} +} + +// CleanUpDistributedTask provides a mock function with given fields: ctx, namespace, taskID, taskVersion +func (_m *MockTaskCleaner) CleanUpDistributedTask(ctx context.Context, namespace string, taskID string, taskVersion uint64) error { + ret := _m.Called(ctx, namespace, taskID, taskVersion) + + if len(ret) == 0 { + panic("no return value specified for CleanUpDistributedTask") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, uint64) error); ok { + r0 = rf(ctx, namespace, taskID, taskVersion) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockTaskCleaner_CleanUpDistributedTask_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CleanUpDistributedTask' +type MockTaskCleaner_CleanUpDistributedTask_Call struct { + *mock.Call +} + +// CleanUpDistributedTask is a helper method to define mock.On call +// - ctx context.Context +// - namespace string +// - taskID string +// - taskVersion uint64 +func (_e *MockTaskCleaner_Expecter) CleanUpDistributedTask(ctx interface{}, namespace interface{}, taskID interface{}, taskVersion interface{}) *MockTaskCleaner_CleanUpDistributedTask_Call { + return &MockTaskCleaner_CleanUpDistributedTask_Call{Call: _e.mock.On("CleanUpDistributedTask", ctx, namespace, taskID, taskVersion)} +} + +func (_c *MockTaskCleaner_CleanUpDistributedTask_Call) Run(run func(ctx context.Context, namespace string, taskID string, taskVersion uint64)) *MockTaskCleaner_CleanUpDistributedTask_Call { + _c.Call.Run(func(args mock.Arguments) { + 
run(args[0].(context.Context), args[1].(string), args[2].(string), args[3].(uint64)) + }) + return _c +} + +func (_c *MockTaskCleaner_CleanUpDistributedTask_Call) Return(_a0 error) *MockTaskCleaner_CleanUpDistributedTask_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockTaskCleaner_CleanUpDistributedTask_Call) RunAndReturn(run func(context.Context, string, string, uint64) error) *MockTaskCleaner_CleanUpDistributedTask_Call { + _c.Call.Return(run) + return _c +} + +// NewMockTaskCleaner creates a new instance of MockTaskCleaner. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMockTaskCleaner(t interface { + mock.TestingT + Cleanup(func()) +}) *MockTaskCleaner { + mock := &MockTaskCleaner{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/distributedtask/mock_task_completion_recorder.go b/platform/dbops/binaries/weaviate-src/cluster/distributedtask/mock_task_completion_recorder.go new file mode 100644 index 0000000000000000000000000000000000000000..a040029a0e074da84ac68ebf811990de97d91154 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/distributedtask/mock_task_completion_recorder.go @@ -0,0 +1,146 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by mockery v2.53.2. DO NOT EDIT. 
+ +package distributedtask + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" +) + +// MockTaskCompletionRecorder is an autogenerated mock type for the TaskCompletionRecorder type +type MockTaskCompletionRecorder struct { + mock.Mock +} + +type MockTaskCompletionRecorder_Expecter struct { + mock *mock.Mock +} + +func (_m *MockTaskCompletionRecorder) EXPECT() *MockTaskCompletionRecorder_Expecter { + return &MockTaskCompletionRecorder_Expecter{mock: &_m.Mock} +} + +// RecordDistributedTaskNodeCompletion provides a mock function with given fields: ctx, namespace, taskID, version +func (_m *MockTaskCompletionRecorder) RecordDistributedTaskNodeCompletion(ctx context.Context, namespace string, taskID string, version uint64) error { + ret := _m.Called(ctx, namespace, taskID, version) + + if len(ret) == 0 { + panic("no return value specified for RecordDistributedTaskNodeCompletion") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, uint64) error); ok { + r0 = rf(ctx, namespace, taskID, version) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockTaskCompletionRecorder_RecordDistributedTaskNodeCompletion_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RecordDistributedTaskNodeCompletion' +type MockTaskCompletionRecorder_RecordDistributedTaskNodeCompletion_Call struct { + *mock.Call +} + +// RecordDistributedTaskNodeCompletion is a helper method to define mock.On call +// - ctx context.Context +// - namespace string +// - taskID string +// - version uint64 +func (_e *MockTaskCompletionRecorder_Expecter) RecordDistributedTaskNodeCompletion(ctx interface{}, namespace interface{}, taskID interface{}, version interface{}) *MockTaskCompletionRecorder_RecordDistributedTaskNodeCompletion_Call { + return &MockTaskCompletionRecorder_RecordDistributedTaskNodeCompletion_Call{Call: _e.mock.On("RecordDistributedTaskNodeCompletion", ctx, namespace, taskID, version)} +} + 
+func (_c *MockTaskCompletionRecorder_RecordDistributedTaskNodeCompletion_Call) Run(run func(ctx context.Context, namespace string, taskID string, version uint64)) *MockTaskCompletionRecorder_RecordDistributedTaskNodeCompletion_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].(string), args[3].(uint64)) + }) + return _c +} + +func (_c *MockTaskCompletionRecorder_RecordDistributedTaskNodeCompletion_Call) Return(_a0 error) *MockTaskCompletionRecorder_RecordDistributedTaskNodeCompletion_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockTaskCompletionRecorder_RecordDistributedTaskNodeCompletion_Call) RunAndReturn(run func(context.Context, string, string, uint64) error) *MockTaskCompletionRecorder_RecordDistributedTaskNodeCompletion_Call { + _c.Call.Return(run) + return _c +} + +// RecordDistributedTaskNodeFailure provides a mock function with given fields: ctx, namespace, taskID, version, errMsg +func (_m *MockTaskCompletionRecorder) RecordDistributedTaskNodeFailure(ctx context.Context, namespace string, taskID string, version uint64, errMsg string) error { + ret := _m.Called(ctx, namespace, taskID, version, errMsg) + + if len(ret) == 0 { + panic("no return value specified for RecordDistributedTaskNodeFailure") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, uint64, string) error); ok { + r0 = rf(ctx, namespace, taskID, version, errMsg) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockTaskCompletionRecorder_RecordDistributedTaskNodeFailure_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RecordDistributedTaskNodeFailure' +type MockTaskCompletionRecorder_RecordDistributedTaskNodeFailure_Call struct { + *mock.Call +} + +// RecordDistributedTaskNodeFailure is a helper method to define mock.On call +// - ctx context.Context +// - namespace string +// - taskID string +// - version uint64 +// - errMsg string +func 
(_e *MockTaskCompletionRecorder_Expecter) RecordDistributedTaskNodeFailure(ctx interface{}, namespace interface{}, taskID interface{}, version interface{}, errMsg interface{}) *MockTaskCompletionRecorder_RecordDistributedTaskNodeFailure_Call { + return &MockTaskCompletionRecorder_RecordDistributedTaskNodeFailure_Call{Call: _e.mock.On("RecordDistributedTaskNodeFailure", ctx, namespace, taskID, version, errMsg)} +} + +func (_c *MockTaskCompletionRecorder_RecordDistributedTaskNodeFailure_Call) Run(run func(ctx context.Context, namespace string, taskID string, version uint64, errMsg string)) *MockTaskCompletionRecorder_RecordDistributedTaskNodeFailure_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].(string), args[3].(uint64), args[4].(string)) + }) + return _c +} + +func (_c *MockTaskCompletionRecorder_RecordDistributedTaskNodeFailure_Call) Return(_a0 error) *MockTaskCompletionRecorder_RecordDistributedTaskNodeFailure_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockTaskCompletionRecorder_RecordDistributedTaskNodeFailure_Call) RunAndReturn(run func(context.Context, string, string, uint64, string) error) *MockTaskCompletionRecorder_RecordDistributedTaskNodeFailure_Call { + _c.Call.Return(run) + return _c +} + +// NewMockTaskCompletionRecorder creates a new instance of MockTaskCompletionRecorder. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewMockTaskCompletionRecorder(t interface { + mock.TestingT + Cleanup(func()) +}) *MockTaskCompletionRecorder { + mock := &MockTaskCompletionRecorder{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/distributedtask/scheduler.go b/platform/dbops/binaries/weaviate-src/cluster/distributedtask/scheduler.go new file mode 100644 index 0000000000000000000000000000000000000000..4d00d826d2609faf6561f5efc0b9049656d2f1c7 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/distributedtask/scheduler.go @@ -0,0 +1,337 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package distributedtask + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/jonboulle/clockwork" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/sirupsen/logrus" + enterrors "github.com/weaviate/weaviate/entities/errors" + "github.com/weaviate/weaviate/usecases/logrusext" + "github.com/weaviate/weaviate/usecases/monitoring" +) + +// Scheduler is the component which is responsible for polling the active tasks in the cluster (via the Manager) +// and making sure that the tasks are running on the local node. +// +// The general flow of a distributed task is as follows: +// 1. A Provider is registered with the Scheduler at startup to handle all tasks under a specific namespace. +// 2. A task is created and added to the cluster via the Manager.AddTask. +// 3. Scheduler regularly scans all available tasks in the cluster, picks up new ones and instructs the Provider to execute them locally. +// 4. 
A task is responsible for updating its status in the cluster via TaskCompletionRecorder. +// 6. Scheduler polls the cluster for the task status and checks if it is still running. It cancels the local task if it is not marked as STARTED anymore. +// 7. After completed task TTL has passed, the Scheduler issues the Manager.CleanUpDistributedTask request to remove the task from the cluster list. +// 8. After a task is removed from the cluster list, the Scheduler instructs the Provider to clean up the local task state. +type Scheduler struct { + mu sync.Mutex + runningTasks map[string]map[TaskDescriptor]TaskHandle + + providers map[string]Provider // namespace -> Provider + completionRecorder TaskCompletionRecorder + tasksLister TasksLister + taskCleaner TaskCleaner + clock clockwork.Clock + + localNode string + completedTaskTTL time.Duration + tickInterval time.Duration + + logger logrus.FieldLogger + sampledLogger *logrusext.Sampler + + tasksRunning *prometheus.GaugeVec + + stopCh chan struct{} +} + +type SchedulerParams struct { + CompletionRecorder TaskCompletionRecorder + TasksLister TasksLister + TaskCleaner TaskCleaner + Providers map[string]Provider + Clock clockwork.Clock + Logger logrus.FieldLogger + MetricsRegisterer prometheus.Registerer + + LocalNode string + CompletedTaskTTL time.Duration + TickInterval time.Duration +} + +func NewScheduler(params SchedulerParams) *Scheduler { + if params.Clock == nil { + params.Clock = clockwork.NewRealClock() + } + + if params.MetricsRegisterer == nil { + params.MetricsRegisterer = monitoring.NoopRegisterer + } + + return &Scheduler{ + runningTasks: map[string]map[TaskDescriptor]TaskHandle{}, + + providers: params.Providers, + completionRecorder: params.CompletionRecorder, + tasksLister: params.TasksLister, + taskCleaner: params.TaskCleaner, + clock: params.Clock, + + localNode: params.LocalNode, + completedTaskTTL: params.CompletedTaskTTL, + tickInterval: params.TickInterval, + + logger: params.Logger, + sampledLogger: 
logrusext.NewSampler(params.Logger, 5, 5*params.TickInterval), + + tasksRunning: promauto.With(params.MetricsRegisterer).NewGaugeVec(prometheus.GaugeOpts{ + Name: "weaviate_distributed_tasks_running", + Help: "Number of active distributed tasks running per namespace", + }, []string{"namespace"}), + + stopCh: make(chan struct{}), + } +} + +func (s *Scheduler) Start(ctx context.Context) error { + tasksByNamespace, err := s.listTasks(ctx) + if err != nil { + return fmt.Errorf("list distributed tasks: %w", err) + } + + s.mu.Lock() + defer s.mu.Unlock() + for namespace, provider := range s.providers { + provider.SetCompletionRecorder(s.completionRecorder) + + var ( + tasks = tasksByNamespace[namespace] + startedTasks = s.filterStartedTasks(tasks) + localTaskDesc = provider.GetLocalTasks() + ) + for _, taskDesc := range localTaskDesc { + if _, ok := startedTasks[taskDesc]; ok { + continue + } + + if err = provider.CleanupTask(taskDesc); err != nil { + s.loggerWithTask(namespace, taskDesc).WithError(err). + Error("failed to clean up local distributed task state") + continue + } + + s.loggerWithTask(namespace, taskDesc).Info("cleaned up local distributed task state") + } + + for desc, task := range startedTasks { + handle, err := provider.StartTask(task) + if err != nil { + return fmt.Errorf("provider %s start task %v: %w", namespace, desc, err) + } + + s.setRunningTaskHandleWithLock(namespace, desc, handle) + s.loggerWithTask(namespace, desc).Info("started distributed task execution") + } + + s.tasksRunning. + WithLabelValues(namespace). 
+ Set(float64(len(startedTasks))) + } + + enterrors.GoWrapper(s.loop, s.logger) + + return nil +} + +func (s *Scheduler) filterStartedTasks(tasks map[TaskDescriptor]*Task) map[TaskDescriptor]*Task { + return filterTasks(tasks, func(task *Task) bool { + return task.Status == TaskStatusStarted && !task.FinishedNodes[s.localNode] + }) +} + +func filterTasks(tasks map[TaskDescriptor]*Task, predicate func(task *Task) bool) map[TaskDescriptor]*Task { + filtered := make(map[TaskDescriptor]*Task, len(tasks)) + for _, task := range tasks { + if !predicate(task) { + continue + } + + filtered[TaskDescriptor{ + ID: task.ID, + Version: task.Version, + }] = task + } + return filtered +} + +func (s *Scheduler) loop() { + ticker := s.clock.NewTicker(s.tickInterval) + defer ticker.Stop() + + for { + select { + case <-ticker.Chan(): + s.tick() + case <-s.stopCh: + return + } + } +} + +func (s *Scheduler) tick() { + tasksByNamespace, err := s.listTasks(context.Background()) + if err != nil { + s.sampledLogger.WithSampling(func(l logrus.FieldLogger) { + l.WithError(err).Error("failed to list distributed tasks") + }) + return + } + + s.mu.Lock() + defer s.mu.Unlock() + + for namespace, provider := range s.providers { + tasks := tasksByNamespace[namespace] + + // Check that all tasks that are supposed to be running + // and launch if they aren't. + startedTasks := s.filterStartedTasks(tasks) + for _, activeTask := range startedTasks { + if _, alreadyLaunched := s.runningTasks[namespace][activeTask.TaskDescriptor]; alreadyLaunched { + continue + } + + handle, err := provider.StartTask(activeTask) + if err != nil { + s.sampledLogger.WithSampling(func(l logrus.FieldLogger) { + s.loggerWithTask(namespace, activeTask.TaskDescriptor).WithError(err). 
+ Error("failed to start distributed task") + }) + continue + } + + s.setRunningTaskHandleWithLock(namespace, activeTask.TaskDescriptor, handle) + s.loggerWithTask(namespace, activeTask.TaskDescriptor).Info("started distributed task execution") + } + + s.tasksRunning. + WithLabelValues(namespace). + Set(float64(len(startedTasks))) + + // Check that all tasks that are not supposed to be running are not running. + for desc, taskHandle := range s.runningTasks[namespace] { + if _, ok := startedTasks[desc]; ok { + continue + } + + taskHandle.Terminate() + delete(s.runningTasks[namespace], desc) + + s.loggerWithTask(namespace, desc).Info("terminated distributed task execution") + + } + + // Check that all tasks that are already finished and if their TTL has passed, so we can clean them up. + cleanableTasks := filterTasks(tasks, func(task *Task) bool { + return task.Status != TaskStatusStarted && s.completedTaskTTL <= s.clock.Since(task.FinishedAt) + }) + for _, task := range cleanableTasks { + err = s.taskCleaner.CleanUpDistributedTask(context.Background(), namespace, task.ID, task.Version) + if err != nil { + s.sampledLogger.WithSampling(func(l logrus.FieldLogger) { + s.loggerWithTask(namespace, task.TaskDescriptor).WithError(err). + Error("failed to clean up distributed task") + }) + continue + } + + s.loggerWithTask(namespace, task.TaskDescriptor). + Info("successfully submitted request to clean up distributed task") + } + + // Check that tasks that can be cleaned up locally + localTasks := provider.GetLocalTasks() + for _, desc := range localTasks { + if _, ok := tasks[desc]; ok { + // task still present in the list + continue + } + + if err = provider.CleanupTask(desc); err != nil { + s.sampledLogger.WithSampling(func(l logrus.FieldLogger) { + s.loggerWithTask(namespace, desc).WithError(err). 
+ Error("failed to clean up local distributed task state") + }) + } + } + } +} + +func (s *Scheduler) listTasks(ctx context.Context) (map[string]map[TaskDescriptor]*Task, error) { + tasksByNamespace, err := s.tasksLister.ListDistributedTasks(ctx) + if err != nil { + return nil, fmt.Errorf("list distributed tasks: %w", err) + } + + result := make(map[string]map[TaskDescriptor]*Task, len(tasksByNamespace)) + for namespace, tasks := range tasksByNamespace { + result[namespace] = make(map[TaskDescriptor]*Task, len(tasks)) + for _, task := range tasks { + result[namespace][task.TaskDescriptor] = task + } + } + return result, nil +} + +func (s *Scheduler) setRunningTaskHandleWithLock(namespace string, desc TaskDescriptor, handle TaskHandle) { + if _, ok := s.runningTasks[namespace]; !ok { + s.runningTasks[namespace] = map[TaskDescriptor]TaskHandle{} + } + s.runningTasks[namespace][desc] = handle +} + +func (s *Scheduler) Close() { + close(s.stopCh) + + s.mu.Lock() + defer s.mu.Unlock() + + for _, tasks := range s.runningTasks { + for _, task := range tasks { + task.Terminate() + } + } +} + +func (s *Scheduler) totalRunningTaskCount() int { + s.mu.Lock() + defer s.mu.Unlock() + + count := 0 + for _, tasks := range s.runningTasks { + count += len(tasks) + } + return count +} + +func (s *Scheduler) loggerWithTask(namespace string, taskDesc TaskDescriptor) *logrus.Entry { + return s.logger.WithFields(logrus.Fields{ + "namespace": namespace, + "taskID": taskDesc.ID, + "taskVersion": taskDesc.Version, + }) +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/distributedtask/scheduler_test.go b/platform/dbops/binaries/weaviate-src/cluster/distributedtask/scheduler_test.go new file mode 100644 index 0000000000000000000000000000000000000000..e8e64d0291144a5e4e81f91e9e7a5f7e0df79fc1 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/distributedtask/scheduler_test.go @@ -0,0 +1,794 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / 
|/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package distributedtask + +import ( + "context" + "encoding/json" + "fmt" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/fortytw2/leaktest" + "github.com/jonboulle/clockwork" + "github.com/sirupsen/logrus" + logrustest "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + cmd "github.com/weaviate/weaviate/cluster/proto/api" + "github.com/weaviate/weaviate/usecases/monitoring" +) + +func TestHappyPathTaskLifecycleWithSingleNode(t *testing.T) { + defer leaktest.Check(t)() + + var ( + h = newTestHarness(t).init(t) + taskID = "1234" + version uint64 = 10 + taskPayload = []byte("payload") + ) + + h.startScheduler(t) + defer h.scheduler.Close() + + err := h.manager.AddTask(toCmd(t, &cmd.AddDistributedTaskRequest{ + Namespace: h.tasksNamespace, + Id: taskID, + Payload: taskPayload, + SubmittedAtUnixMillis: h.clock.Now().UnixMilli(), + }), version) + require.NoError(t, err) + h.advanceClock(h.schedulerTickInterval) + + startedTask := recvWithTimeout(t, h.provider.startedCh) + require.Equal(t, h.tasksNamespace, startedTask.Namespace) + require.Equal(t, taskID, startedTask.ID) + require.Equal(t, taskPayload, startedTask.Payload) + + h.expectRecordNodeTaskCompletion(t, h.tasksNamespace, taskID, version) + startedTask.Complete() + + require.Equal(t, taskID, recvWithTimeout(t, h.provider.completedCh).ID) + + h.advanceClock(h.schedulerTickInterval) + require.Zero(t, h.scheduler.totalRunningTaskCount()) + + // advance the clock just before expected clean up time to check whether it respects it + h.advanceClock(h.completedTaskTTL - h.clockAdvancedSoFar - time.Minute) + + h.expectCleanUpTask(t, h.tasksNamespace, taskID, version) + h.advanceClock(h.schedulerTickInterval + time.Minute) + + 
require.Empty(t, h.listManagerTasks(t)) +} + +func TestHappyPathTaskLifecycleWithMultipleNode(t *testing.T) { + defer leaktest.Check(t)() + + h := newTestHarness(t) + h.nodesInTheCluster = 2 + h = h.init(t) + + var ( + taskID = "1234" + version uint64 = 10 + taskPayload = []byte("payload") + ) + + h.startScheduler(t) + defer h.scheduler.Close() + + err := h.manager.AddTask(toCmd(t, &cmd.AddDistributedTaskRequest{ + Namespace: h.tasksNamespace, + Id: taskID, + Payload: taskPayload, + SubmittedAtUnixMillis: h.clock.Now().UnixMilli(), + }), version) + require.NoError(t, err) + h.advanceClock(h.schedulerTickInterval) + + // local task launched + localTask := recvWithTimeout(t, h.provider.startedCh) + require.Equal(t, taskID, localTask.ID) + + h.expectRecordNodeTaskCompletion(t, h.tasksNamespace, taskID, version) + localTask.Complete() + require.Equal(t, taskID, recvWithTimeout(t, h.provider.completedCh).ID) + + // local task completed + h.advanceClock(h.schedulerTickInterval) + require.Zero(t, h.scheduler.totalRunningTaskCount()) + + // however, task is not finished in the cluster yet + tasks := h.listManagerTasks(t)[h.tasksNamespace] + require.Len(t, tasks, 1) + require.Equal(t, taskID, tasks[0].ID) + require.Equal(t, TaskStatusStarted, tasks[0].Status) + + // finish the task across the cluster + h.completeTaskFromNode(t, h.tasksNamespace, taskID, version, "remote-node") + + tasks = h.listManagerTasks(t)[h.tasksNamespace] + require.Len(t, tasks, 1) + require.Equal(t, TaskStatusFinished, tasks[0].Status) + require.Equal(t, map[string]bool{ + h.localNodeID: true, + "remote-node": true, + }, tasks[0].FinishedNodes) +} + +func TestTaskCancellation(t *testing.T) { + defer leaktest.Check(t)() + + var ( + h = newTestHarness(t).init(t) + taskID = "1234" + version uint64 = 10 + ) + + h.startScheduler(t) + defer h.scheduler.Close() + + err := h.manager.AddTask(toCmd(t, &cmd.AddDistributedTaskRequest{ + Namespace: h.tasksNamespace, + Id: taskID, + SubmittedAtUnixMillis: 
h.clock.Now().UnixMilli(), + }), version) + require.NoError(t, err) + h.advanceClock(h.schedulerTickInterval) + + require.Equal(t, taskID, recvWithTimeout(t, h.provider.startedCh).ID) + + cancellationTime := h.clock.Now().UnixMilli() + err = h.manager.CancelTask(toCmd(t, &cmd.CancelDistributedTaskRequest{ + Namespace: h.tasksNamespace, + Id: taskID, + Version: version, + CancelledAtUnixMillis: cancellationTime, + })) + require.NoError(t, err) + h.advanceClock(h.schedulerTickInterval) + + require.Equal(t, taskID, recvWithTimeout(t, h.provider.cancelledCh).ID) + + tasks := h.listManagerTasks(t)[h.tasksNamespace] + require.Len(t, tasks, 1) + require.Equal(t, h.tasksNamespace, tasks[0].Namespace) + require.Equal(t, taskID, tasks[0].ID) + require.Equal(t, version, tasks[0].Version) + require.Equal(t, TaskStatusCancelled, tasks[0].Status) + require.Equal(t, cancellationTime, tasks[0].FinishedAt.UnixMilli()) +} + +func TestTaskFailureInAnotherNode(t *testing.T) { + defer leaktest.Check(t)() + + h := newTestHarness(t) + h.nodesInTheCluster = 2 + h = h.init(t) + var ( + taskID = "1234" + version uint64 = 10 + ) + + h.startScheduler(t) + defer h.scheduler.Close() + + err := h.manager.AddTask(toCmd(t, &cmd.AddDistributedTaskRequest{ + Namespace: h.tasksNamespace, + Id: taskID, + SubmittedAtUnixMillis: h.clock.Now().UnixMilli(), + }), version) + require.NoError(t, err) + h.advanceClock(h.schedulerTickInterval) + + require.Equal(t, taskID, recvWithTimeout(t, h.provider.startedCh).ID) + + // send a failure command from another node + failureMessage := "servers are on fire!!!" 
+ failureTime := h.clock.Now().UnixMilli() + h.recordTaskCompletion(t, h.tasksNamespace, taskID, version, "other-node", &failureMessage) + + // locally running task should be cancelled + h.advanceClock(h.schedulerTickInterval) + require.Equal(t, taskID, recvWithTimeout(t, h.provider.cancelledCh).ID) + require.Zero(t, h.scheduler.totalRunningTaskCount()) + + tasks := h.listManagerTasks(t)[h.tasksNamespace] + require.Len(t, tasks, 1) + require.Equal(t, h.tasksNamespace, tasks[0].Namespace) + require.Equal(t, taskID, tasks[0].ID) + require.Equal(t, version, tasks[0].Version) + require.Equal(t, TaskStatusFailed, tasks[0].Status) + require.Equal(t, failureTime, tasks[0].FinishedAt.UnixMilli()) +} + +func TestTaskFailureInLocalNode(t *testing.T) { + defer leaktest.Check(t)() + + h := newTestHarness(t) + h.nodesInTheCluster = 2 + h = h.init(t) + var ( + taskID = "1234" + version uint64 = 10 + ) + + h.startScheduler(t) + defer h.scheduler.Close() + + err := h.manager.AddTask(toCmd(t, &cmd.AddDistributedTaskRequest{ + Namespace: h.tasksNamespace, + Id: taskID, + SubmittedAtUnixMillis: h.clock.Now().UnixMilli(), + }), version) + require.NoError(t, err) + h.advanceClock(h.schedulerTickInterval) + + startedTask := recvWithTimeout(t, h.provider.startedCh) + require.Equal(t, taskID, startedTask.ID) + + failureMessage := "servers are on fire!!!" 
+ failureTime := h.clock.Now().UnixMilli() + h.expectRecordNodeTaskFailure(t, h.tasksNamespace, taskID, version, failureMessage) + startedTask.Fail(failureMessage) + + recvWithTimeout(t, h.provider.failedCh) + + h.advanceClock(h.schedulerTickInterval) + require.Zero(t, h.scheduler.totalRunningTaskCount()) + + tasks := h.listManagerTasks(t)[h.tasksNamespace] + require.Len(t, tasks, 1) + require.Equal(t, h.tasksNamespace, tasks[0].Namespace) + require.Equal(t, taskID, tasks[0].ID) + require.Equal(t, version, tasks[0].Version) + require.Equal(t, TaskStatusFailed, tasks[0].Status) + require.Equal(t, failureTime, tasks[0].FinishedAt.UnixMilli()) +} + +func TestTaskRecovery(t *testing.T) { + defer leaktest.Check(t)() + + var ( + h = newTestHarness(t).init(t) + tasksCount = 5 + ) + + // add some tasks before launching the scheduler + tasksIDs := map[string]bool{} + for i := range tasksCount { + taskID := fmt.Sprintf("%d", i) + err := h.manager.AddTask(toCmd(t, &cmd.AddDistributedTaskRequest{ + Namespace: h.tasksNamespace, + Id: taskID, + SubmittedAtUnixMillis: h.clock.Now().UnixMilli(), + }), 1) + require.NoError(t, err) + tasksIDs[taskID] = true + } + + h.startScheduler(t) + defer h.scheduler.Close() + + // tasksIDs should be launched right away + launchedTasks := map[string]*testTask{} + for range tasksCount { + launchedTask := recvWithTimeout(t, h.provider.startedCh) + require.Contains(t, tasksIDs, launchedTask.ID) + launchedTasks[launchedTask.ID] = launchedTask + } + require.Len(t, launchedTasks, tasksCount) + + // clean up launched goroutines + for _, task := range launchedTasks { + task.Terminate() + } +} + +func TestRemoveCleanedUpTaskLocalStateOnStartup(t *testing.T) { + defer leaktest.Check(t)() + + var ( + localTaskList = []TaskDescriptor{ + {ID: "1", Version: 1}, + {ID: "2", Version: 10}, + {ID: "3", Version: 15}, + } + provider = newTestTaskProvider(t, localTaskList) + ) + + h := newTestHarness(t) + h.registeredProviders = map[string]Provider{ + 
h.tasksNamespace: provider, + } + h = h.init(t) + + // add one of the local tasks to the manager state before launching the scheduler + // to simulate that it was there before the restart + err := h.manager.AddTask(toCmd(t, &cmd.AddDistributedTaskRequest{ + Namespace: h.tasksNamespace, + Id: "3", + SubmittedAtUnixMillis: h.clock.Now().UnixMilli(), + }), 15) + require.NoError(t, err) + + // add one new task + err = h.manager.AddTask(toCmd(t, &cmd.AddDistributedTaskRequest{ + Namespace: h.tasksNamespace, + Id: "4", + SubmittedAtUnixMillis: h.clock.Now().UnixMilli(), + }), 18) + require.NoError(t, err) + + h.startScheduler(t) + defer h.scheduler.Close() + + // make sure only tasks are not running cleaned up + cleanedUpTasks := collectChToSet(t, 2, provider.cleanedUpCh) + require.Len(t, cleanedUpTasks, 2) + require.Contains(t, cleanedUpTasks, localTaskList[0]) + require.Contains(t, cleanedUpTasks, localTaskList[1]) + + expectStartedTasks := map[string]struct{}{"3": {}, "4": {}} + for range len(expectStartedTasks) { + startedTask := <-provider.startedCh + require.Contains(t, expectStartedTasks, startedTask.ID) + startedTask.Terminate() + } +} + +func TestRemoveCleanedUpTaskLocalStateDuringRuntime(t *testing.T) { + defer leaktest.Check(t)() + + h := newTestHarness(t).init(t) + + h.startScheduler(t) + defer h.scheduler.Close() + + err := h.manager.AddTask(toCmd(t, &cmd.AddDistributedTaskRequest{ + Namespace: h.tasksNamespace, + Id: "1", + SubmittedAtUnixMillis: h.clock.Now().UnixMilli(), + }), 1) + require.NoError(t, err) + + h.advanceClock(h.schedulerTickInterval) + + startedTask := recvWithTimeout(t, h.provider.startedCh) + + h.expectRecordNodeTaskCompletion(t, h.tasksNamespace, startedTask.ID, startedTask.Version) + startedTask.Complete() + + recvWithTimeout(t, h.provider.completedCh) + + h.expectCleanUpTask(t, h.tasksNamespace, startedTask.ID, startedTask.Version) + h.advanceClock(h.completedTaskTTL) + + h.advanceClock(h.schedulerTickInterval) + cleanedDesc := 
recvWithTimeout(t, h.provider.cleanedUpCh) + require.Equal(t, startedTask.TaskDescriptor, cleanedDesc) +} + +func TestMultiNamespaceMultiTasks(t *testing.T) { + defer leaktest.Check(t)() + + var ( + tasksNamespace1 = "tasks-namespace-1" + provider1StaleTasks = []TaskDescriptor{ + {ID: "1", Version: 1}, + {ID: "2", Version: 10}, + } + provider1 = newTestTaskProvider(t, provider1StaleTasks) + + tasksNamespace2 = "tasks-namespace-2" + provider2 = newTestTaskProvider(t, nil) + ) + + h := newTestHarness(t) + h.registeredProviders = map[string]Provider{ + tasksNamespace1: provider1, + tasksNamespace2: provider2, + } + h = h.init(t) + + h.startScheduler(t) + defer h.scheduler.Close() + + // cleanup tasks for one of the providers + cleanedUpTasks := collectChToSet(t, 2, provider1.cleanedUpCh) + require.Len(t, cleanedUpTasks, 2) + require.Contains(t, cleanedUpTasks, provider1StaleTasks[0]) + require.Contains(t, cleanedUpTasks, provider1StaleTasks[1]) + + require.Len(t, provider2.cleanedUpCh, 0) + + // add some tasks for both providers + err := h.manager.AddTask(toCmd(t, &cmd.AddDistributedTaskRequest{ + Namespace: tasksNamespace1, + Id: "complete", + SubmittedAtUnixMillis: h.clock.Now().UnixMilli(), + }), 10) + require.NoError(t, err) + + err = h.manager.AddTask(toCmd(t, &cmd.AddDistributedTaskRequest{ + Namespace: tasksNamespace2, + Id: "fail", + SubmittedAtUnixMillis: h.clock.Now().UnixMilli(), + }), 11) + require.NoError(t, err) + + err = h.manager.AddTask(toCmd(t, &cmd.AddDistributedTaskRequest{ + Namespace: tasksNamespace1, + Id: "cancel", + SubmittedAtUnixMillis: h.clock.Now().UnixMilli(), + }), 12) + require.NoError(t, err) + + h.advanceClock(h.schedulerTickInterval) + require.Equal(t, 3, h.scheduler.totalRunningTaskCount()) + + startedTasks := map[string]*testTask{} + for range 2 { + task := recvWithTimeout(t, provider1.startedCh) + startedTasks[task.ID] = task + } + for range 1 { + task := recvWithTimeout(t, provider2.startedCh) + startedTasks[task.ID] = task + } + 
require.Len(t, startedTasks, 3) + + h.expectRecordNodeTaskCompletion(t, tasksNamespace1, "complete", 10) + startedTasks["complete"].Complete() + recvWithTimeout(t, provider1.completedCh) + + h.expectRecordNodeTaskFailure(t, tasksNamespace2, "fail", 11, "failed") + startedTasks["fail"].Fail("failed") + recvWithTimeout(t, provider2.failedCh) + + err = h.manager.CancelTask(toCmd(t, &cmd.CancelDistributedTaskRequest{ + Namespace: tasksNamespace1, + Id: "cancel", + Version: 12, + CancelledAtUnixMillis: h.clock.Now().UnixMilli(), + })) + require.NoError(t, err) + + h.advanceClock(h.schedulerTickInterval) + + require.Zero(t, h.scheduler.totalRunningTaskCount()) +} + +func TestOverrideExistingFinishedTask(t *testing.T) { + defer leaktest.Check(t)() + + h := newTestHarness(t).init(t) + + h.startScheduler(t) + defer h.scheduler.Close() + + err := h.manager.AddTask(toCmd(t, &cmd.AddDistributedTaskRequest{ + Namespace: h.tasksNamespace, + Id: "1", + Payload: []byte("old payload"), + SubmittedAtUnixMillis: h.clock.Now().UnixMilli(), + }), 1) + require.NoError(t, err) + + h.advanceClock(h.schedulerTickInterval) + + startedTaskV1 := recvWithTimeout(t, h.provider.startedCh) + + h.expectRecordNodeTaskCompletion(t, h.tasksNamespace, startedTaskV1.ID, startedTaskV1.Version) + startedTaskV1.Complete() + recvWithTimeout(t, h.provider.completedCh) + + h.advanceClock(h.schedulerTickInterval) + + require.Zero(t, h.scheduler.totalRunningTaskCount()) + + err = h.manager.AddTask(toCmd(t, &cmd.AddDistributedTaskRequest{ + Namespace: h.tasksNamespace, + Id: "1", + Payload: []byte("new payload"), + SubmittedAtUnixMillis: h.clock.Now().UnixMilli(), + }), 2) + require.NoError(t, err) + + h.advanceClock(h.schedulerTickInterval) + + startedTaskV2 := recvWithTimeout(t, h.provider.startedCh) + require.Equal(t, []byte("new payload"), startedTaskV2.Payload) + startedTaskV2.Terminate() + + require.Equal(t, startedTaskV1.TaskDescriptor, recvWithTimeout(t, h.provider.cleanedUpCh)) +} + +func 
recvWithTimeout[T any](t *testing.T, ch <-chan T) T { + select { + case el := <-ch: + return el + case <-time.After(time.Second): + require.Fail(t, "timeout") + } + panic("unreachable") +} + +func toCmd[T any](t *testing.T, subCommand T) *cmd.ApplyRequest { + bytes, err := json.Marshal(subCommand) + require.NoError(t, err) + + return &cmd.ApplyRequest{ + SubCommand: bytes, + } +} + +type testHarness struct { + localNodeID string + tasksNamespace string + nodesInTheCluster int + completedTaskTTL time.Duration + schedulerTickInterval time.Duration + clock *clockwork.FakeClock + logger logrus.FieldLogger + completionRecorder *MockTaskCompletionRecorder + cleaner *MockTaskCleaner + provider *testTaskProvider + registeredProviders map[string]Provider + + manager *Manager + scheduler *Scheduler + + clockAdvancedSoFar time.Duration +} + +func newTestHarness(t *testing.T) *testHarness { + var ( + defaultNamespace = "tasks-namespace" + defaultProvider = newTestTaskProvider(t, nil) + logger, _ = logrustest.NewNullLogger() + ) + + return &testHarness{ + localNodeID: "local-node", + tasksNamespace: defaultNamespace, + nodesInTheCluster: 1, + completedTaskTTL: 24 * time.Hour, + schedulerTickInterval: 30 * time.Second, + clock: clockwork.NewFakeClock(), + logger: logger, + completionRecorder: NewMockTaskCompletionRecorder(t), + cleaner: NewMockTaskCleaner(t), + provider: defaultProvider, + registeredProviders: map[string]Provider{ + defaultNamespace: defaultProvider, + }, + } +} + +func (h *testHarness) init(t *testing.T) *testHarness { + h.manager = NewManager(ManagerParameters{ + Clock: h.clock, + CompletedTaskTTL: h.completedTaskTTL, + }) + + h.scheduler = NewScheduler(SchedulerParams{ + CompletionRecorder: h.completionRecorder, + TasksLister: h.manager, + TaskCleaner: h.cleaner, + Providers: h.registeredProviders, + Clock: h.clock, + Logger: h.logger, + MetricsRegisterer: monitoring.NoopRegisterer, + LocalNode: h.localNodeID, + CompletedTaskTTL: h.completedTaskTTL, + 
TickInterval: h.schedulerTickInterval, + }) + return h +} + +func (h *testHarness) advanceClock(duration time.Duration) { + h.clock.Advance(duration) + h.clockAdvancedSoFar += duration + + // after moving the clock, give some time for the unblocked goroutines to wake up and execute + time.Sleep(50 * time.Millisecond) +} + +func (h *testHarness) expectRecordNodeTaskCompletion(t *testing.T, expectNamespace, expectTaskID string, expectTaskVersion uint64) { + h.completionRecorder.EXPECT().RecordDistributedTaskNodeCompletion(mock.Anything, expectNamespace, expectTaskID, expectTaskVersion). + RunAndReturn(func(_ context.Context, namespace, taskID string, taskVersion uint64) error { + h.completeTaskFromNode(t, namespace, taskID, taskVersion, h.localNodeID) + return nil + }) +} + +func (h *testHarness) expectRecordNodeTaskFailure(t *testing.T, expectNamespace, expectTaskID string, expectTaskVersion uint64, expectErrMsg string) { + h.completionRecorder.EXPECT().RecordDistributedTaskNodeFailure(mock.Anything, expectNamespace, expectTaskID, expectTaskVersion, expectErrMsg). 
+ RunAndReturn(func(_ context.Context, namespace, taskID string, taskVersion uint64, errMsg string) error { + h.recordTaskCompletion(t, namespace, taskID, taskVersion, h.localNodeID, &expectErrMsg) + return nil + }) +} + +func (h *testHarness) completeTaskFromNode(t *testing.T, namespace, taskID string, taskVersion uint64, node string) { + h.recordTaskCompletion(t, namespace, taskID, taskVersion, node, nil) +} + +func (h *testHarness) recordTaskCompletion(t *testing.T, namespace, taskID string, taskVersion uint64, node string, errMsg *string) { + c := toCmd(t, &cmd.RecordDistributedTaskNodeCompletionRequest{ + Namespace: namespace, + Id: taskID, + Version: taskVersion, + NodeId: node, + Error: errMsg, + FinishedAtUnixMillis: h.clock.Now().UnixMilli(), + }) + + require.NoError(t, h.manager.RecordNodeCompletion(c, h.nodesInTheCluster)) +} + +func (h *testHarness) expectCleanUpTask(t *testing.T, expectNamespace, expectTaskID string, expectTaskVersion uint64) { + h.cleaner.EXPECT().CleanUpDistributedTask(mock.Anything, mock.Anything, mock.Anything, mock.Anything). 
+ RunAndReturn(func(_ context.Context, namespace, taskID string, taskVersion uint64) error { + require.Equal(t, expectNamespace, namespace) + require.Equal(t, expectTaskID, taskID) + require.Equal(t, expectTaskVersion, taskVersion) + + err := h.manager.CleanUpTask(toCmd(t, &cmd.CleanUpDistributedTaskRequest{ + Namespace: namespace, + Id: taskID, + Version: taskVersion, + })) + require.NoError(t, err) + return nil + }) +} + +func (h *testHarness) startScheduler(t *testing.T) { + require.NoError(t, h.scheduler.Start(context.Background())) + + // give some time for the newly launched goroutines to start + time.Sleep(50 * time.Millisecond) +} + +func (h *testHarness) listManagerTasks(t *testing.T) map[string][]*Task { + tasks, err := h.manager.ListDistributedTasks(context.Background()) + require.NoError(t, err) + return tasks +} + +type testTask struct { + *Task + + completeCh chan struct{} + failCh chan string + + cancelled atomic.Bool + cancelCh chan struct{} + + provider *testTaskProvider +} + +func newTestTask(task *Task, p *testTaskProvider) *testTask { + t := &testTask{ + Task: task, + provider: p, + + completeCh: make(chan struct{}), + failCh: make(chan string), + cancelCh: make(chan struct{}), + } + + go t.run() + + return t +} + +func (t *testTask) run() { + t.provider.startedCh <- t + + select { + case <-t.completeCh: + err := t.provider.recorder.RecordDistributedTaskNodeCompletion(context.Background(), t.Namespace, t.ID, t.Version) + require.NoError(t.provider.t, err) + t.provider.completedCh <- t + return + case errMsg := <-t.failCh: + err := t.provider.recorder.RecordDistributedTaskNodeFailure(context.Background(), t.Namespace, t.ID, t.Version, errMsg) + require.NoError(t.provider.t, err) + t.provider.failedCh <- t + case <-t.cancelCh: + t.provider.cancelledCh <- t + return + } +} + +func (t *testTask) Complete() { + close(t.completeCh) +} + +func (t *testTask) Terminate() { + if t.cancelled.CompareAndSwap(false, true) { + close(t.cancelCh) + } +} + +func 
(t *testTask) Fail(errMsg string) { + t.failCh <- errMsg +} + +type testTaskProvider struct { + t *testing.T + + mu sync.Mutex + localTaskIds []TaskDescriptor + + startedCh chan *testTask + completedCh chan *testTask + failedCh chan *testTask + cancelledCh chan *testTask + cleanedUpCh chan TaskDescriptor + + recorder TaskCompletionRecorder +} + +func newTestTaskProvider(t *testing.T, initialLocalTaskIds []TaskDescriptor) *testTaskProvider { + return &testTaskProvider{ + t: t, + + localTaskIds: initialLocalTaskIds, + + // give the channels plenty of space to avoid blocking test + startedCh: make(chan *testTask, 100), + completedCh: make(chan *testTask, 100), + failedCh: make(chan *testTask, 100), + cancelledCh: make(chan *testTask, 100), + cleanedUpCh: make(chan TaskDescriptor, 100), + } +} + +func (p *testTaskProvider) SetCompletionRecorder(recorder TaskCompletionRecorder) { + p.recorder = recorder +} + +func (p *testTaskProvider) GetLocalTasks() []TaskDescriptor { + p.mu.Lock() + defer p.mu.Unlock() + + return p.localTaskIds +} + +func (p *testTaskProvider) CleanupTask(desc TaskDescriptor) error { + p.cleanedUpCh <- desc + return nil +} + +func (p *testTaskProvider) StartTask(task *Task) (TaskHandle, error) { + p.mu.Lock() + defer p.mu.Unlock() + + p.localTaskIds = append(p.localTaskIds, task.TaskDescriptor) + return newTestTask(task, p), nil +} + +func collectChToSet[T comparable](t *testing.T, expectCount int, ch chan T) map[T]struct{} { + cleanedUpTasks := map[T]struct{}{} + for range expectCount { + cleanedUpTasks[recvWithTimeout(t, ch)] = struct{}{} + } + return cleanedUpTasks +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/distributedtask/types.go b/platform/dbops/binaries/weaviate-src/cluster/distributedtask/types.go new file mode 100644 index 0000000000000000000000000000000000000000..3e7cc64ecb3529ccd13e35a713025cd8832bb878 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/distributedtask/types.go @@ -0,0 +1,123 @@ +// _ _ +// 
__ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package distributedtask + +import ( + "context" + "maps" + "time" +) + +// TasksLister is an interface for listing distributed tasks in the cluster. +type TasksLister interface { + ListDistributedTasks(ctx context.Context) (map[string][]*Task, error) +} + +// TaskCleaner is an interface for issuing a request to clean up a distributed task. +type TaskCleaner interface { + CleanUpDistributedTask(ctx context.Context, namespace, taskID string, taskVersion uint64) error +} + +// TaskCompletionRecorder is an interface for recording the completion of a distributed task. +type TaskCompletionRecorder interface { + RecordDistributedTaskNodeCompletion(ctx context.Context, namespace, taskID string, version uint64) error + RecordDistributedTaskNodeFailure(ctx context.Context, namespace, taskID string, version uint64, errMsg string) error +} + +// TaskHandle is an interface to control a locally running task. +type TaskHandle interface { + // Terminate is a signal to stop executing the task. If the task is no longer running because it already finished, + // the method call should be a no-op. + // + // Terminated task can be started later again, therefore, no local state can be removed. + Terminate() +} + +// Provider is an interface for the management and execution of a group of tasks denoted by a namespace. +type Provider interface { + // SetCompletionRecorder is invoked on node startup to register TaskCompletionRecorder which + // should be passed to all launch tasks so they could mark their completion. + SetCompletionRecorder(recorder TaskCompletionRecorder) + + // GetLocalTasks returns a list of tasks that provider is aware of from the local node state. 
+ GetLocalTasks() []TaskDescriptor + + // CleanupTask is a signal to clean up the task local state. + CleanupTask(desc TaskDescriptor) error + + // StartTask is a signal to start executing the task in the background. + StartTask(task *Task) (TaskHandle, error) +} + +type TaskStatus string + +const ( + // TaskStatusStarted means that the task is still running on some of the nodes. + TaskStatusStarted TaskStatus = "STARTED" + // TaskStatusFinished means that the task was successfully executed by all nodes. + TaskStatusFinished TaskStatus = "FINISHED" + // TaskStatusCancelled means that the task was cancelled by user. + TaskStatusCancelled TaskStatus = "CANCELLED" + // TaskStatusFailed means that one of the nodes got a non-retryable error and all other nodes + // terminated the execution. + TaskStatusFailed TaskStatus = "FAILED" +) + +func (t TaskStatus) String() string { + return string(t) +} + +// TaskDescriptor is a struct identifying a task execution under a certain task namespace. +type TaskDescriptor struct { + // ID is the identifier of the task in the namespace. + ID string `json:"ID"` + + // Version is the version of the task with task ID. + // It is used to differentiate between multiple runs of the same task. + Version uint64 `json:"version"` +} + +type Task struct { + // Namespace is the namespace of distributed tasks which are managed by different Provider implementations + Namespace string `json:"namespace"` + + TaskDescriptor `json:",inline"` + + // Payload is arbitrary data that is needed to execute a task of Namespace. + Payload []byte `json:"payload"` + + // Status is the current status of the task. + Status TaskStatus `json:"status"` + + // StartedAt is the time that a task was submitted to the cluster. + StartedAt time.Time `json:"startedAt"` + + // FinishedAt is the time that task reached a terminal status. + // Additionally, it is used to schedule task clean up. 
+ FinishedAt time.Time `json:"finishedAt"` + + // Error is an optional field to store the error which moved the task to FAILED status. + Error string `json:"error,omitempty"` + + // FinishedNodes is a map of nodeIDs that successfully finished the task. + FinishedNodes map[string]bool `json:"finishedNodes"` +} + +func (t *Task) Clone() *Task { + clone := *t + clone.FinishedNodes = maps.Clone(t.FinishedNodes) + return &clone +} + +type ListDistributedTasksResponse struct { + Tasks map[string][]*Task `json:"tasks"` +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/dynusers/dynamic_users.go b/platform/dbops/binaries/weaviate-src/cluster/dynusers/dynamic_users.go new file mode 100644 index 0000000000000000000000000000000000000000..6a955dc200ba1654d2520e4279e5a99c5f3b8ad9 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/dynusers/dynamic_users.go @@ -0,0 +1,170 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package dynusers + +import ( + "encoding/json" + "errors" + "fmt" + + "github.com/sirupsen/logrus" + cmd "github.com/weaviate/weaviate/cluster/proto/api" + "github.com/weaviate/weaviate/usecases/auth/authentication/apikey" +) + +var ErrBadRequest = errors.New("bad request") + +type Manager struct { + dynUser *apikey.DBUser + logger logrus.FieldLogger +} + +func NewManager(dynUser *apikey.DBUser, logger logrus.FieldLogger) *Manager { + return &Manager{dynUser: dynUser, logger: logger} +} + +func (m *Manager) CreateUser(c *cmd.ApplyRequest) error { + if m.dynUser == nil { + return nil + } + req := &cmd.CreateUsersRequest{} + if err := json.Unmarshal(c.SubCommand, req); err != nil { + return fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + return m.dynUser.CreateUser(req.UserId, req.SecureHash, req.UserIdentifier, req.ApiKeyFirstLetters, req.CreatedAt) +} + +func (m *Manager) CreateUserWithKeyRequest(c *cmd.ApplyRequest) error { + if m.dynUser == nil { + return nil + } + req := &cmd.CreateUserWithKeyRequest{} + if err := json.Unmarshal(c.SubCommand, req); err != nil { + return fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + return m.dynUser.CreateUserWithKey(req.UserId, req.ApiKeyFirstLetters, req.WeakHash, req.CreatedAt) +} + +func (m *Manager) DeleteUser(c *cmd.ApplyRequest) error { + if m.dynUser == nil { + return nil + } + req := &cmd.DeleteUsersRequest{} + if err := json.Unmarshal(c.SubCommand, req); err != nil { + return fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + return m.dynUser.DeleteUser(req.UserId) +} + +func (m *Manager) ActivateUser(c *cmd.ApplyRequest) error { + if m.dynUser == nil { + return nil + } + req := &cmd.ActivateUsersRequest{} + if err := json.Unmarshal(c.SubCommand, req); err != nil { + return fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + return m.dynUser.ActivateUser(req.UserId) +} + +func (m *Manager) SuspendUser(c *cmd.ApplyRequest) error { + if m.dynUser == nil { + return nil + } + req := 
&cmd.SuspendUserRequest{} + if err := json.Unmarshal(c.SubCommand, req); err != nil { + return fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + return m.dynUser.DeactivateUser(req.UserId, req.RevokeKey) +} + +func (m *Manager) RotateKey(c *cmd.ApplyRequest) error { + if m.dynUser == nil { + return nil + } + req := &cmd.RotateUserApiKeyRequest{} + if err := json.Unmarshal(c.SubCommand, req); err != nil { + return fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + return m.dynUser.RotateKey(req.UserId, req.ApiKeyFirstLetters, req.SecureHash, req.OldIdentifier, req.NewIdentifier) +} + +func (m *Manager) GetUsers(req *cmd.QueryRequest) ([]byte, error) { + if m.dynUser == nil { + payload, _ := json.Marshal(cmd.QueryGetUsersRequest{}) + return payload, nil + } + subCommand := cmd.QueryGetUsersRequest{} + if err := json.Unmarshal(req.SubCommand, &subCommand); err != nil { + return []byte{}, fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + users, err := m.dynUser.GetUsers(subCommand.UserIds...) + if err != nil { + return []byte{}, fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + response := cmd.QueryGetUsersResponse{Users: users} + payload, err := json.Marshal(response) + if err != nil { + return []byte{}, fmt.Errorf("could not marshal query response: %w", err) + } + return payload, nil +} + +func (m *Manager) CheckUserIdentifierExists(req *cmd.QueryRequest) ([]byte, error) { + if m.dynUser == nil { + payload, _ := json.Marshal(cmd.QueryGetUsersRequest{}) + return payload, nil + } + subCommand := cmd.QueryUserIdentifierExistsRequest{} + if err := json.Unmarshal(req.SubCommand, &subCommand); err != nil { + return []byte{}, fmt.Errorf("%w: %w", ErrBadRequest, err) + } + exists, err := m.dynUser.CheckUserIdentifierExists(subCommand.UserIdentifier) + if err != nil { + return []byte{}, fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + response := cmd.QueryUserIdentifierExistsResponse{Exists: exists} + payload, err := json.Marshal(response) + if err != nil { + return []byte{}, 
fmt.Errorf("could not marshal query response: %w", err) + } + return payload, nil +} + +func (m *Manager) Snapshot() ([]byte, error) { + if m.dynUser == nil { + return nil, nil + } + return m.dynUser.Snapshot() +} + +func (m *Manager) Restore(snapshot []byte) error { + if m.dynUser == nil { + return nil + } + err := m.dynUser.Restore(snapshot) + if err != nil { + m.logger.Errorf("restored db users from snapshot failed with: %v", err) + return err + } + m.logger.Info("successfully restored dynamic users from snapshot") + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/fsm/snapshot.go b/platform/dbops/binaries/weaviate-src/cluster/fsm/snapshot.go new file mode 100644 index 0000000000000000000000000000000000000000..bc00257cb304f736d5f84d96bb653e0a45c572b8 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/fsm/snapshot.go @@ -0,0 +1,44 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package fsm + +// Snapshot is the snapshot of the cluster FSMs (schema, rbac, etc) +// it is used to restore the Snapshot to a previous state, +// or to bring out-of-date followers up to a recent log index. 
+type Snapshot struct { + // NodeID is the id of the node that created the snapshot + NodeID string `json:"node_id"` + // SnapshotID is the id of the snapshot comes from the provided Sink + SnapshotID string `json:"snapshot_id"` + // LegacySchema is the old schema that was used to create the snapshot + // it is used to restore the schema if the snapshot is not compatible with the current schema + // note: this is not used anymore, but we keep it for backwards compatibility + LegacySchema map[string]any `json:"classes"` + // Schema is the new schema that will be used to restore the FSM + Schema []byte `json:"schema,omitempty"` + // Aliases is the collection alias mapping + Aliases []byte `json:"aliases,omitempty"` + // RBAC is the rbac that will be used to restore the FSM + RBAC []byte `json:"rbac,omitempty"` + // DistributedTasks are the tasks that will be used to restore the FSM. + DistributedTasks []byte `json:"distributed_tasks,omitempty"` + // ReplicationOps are the currently ongoing operation for replica replication + ReplicationOps []byte `json:"replication_ops,omitempty"` + // DbUsers is the state of dynamic db users that will be used to restore the FSM + DbUsers []byte `json:"dbusers,omitempty"` +} + +// Snapshotter is used to snapshot and restore any (FSM) state +type Snapshotter interface { + Snapshot() ([]byte, error) + Restore([]byte) error +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/log/logger.go b/platform/dbops/binaries/weaviate-src/cluster/log/logger.go new file mode 100644 index 0000000000000000000000000000000000000000..491814eb3e4dcd6b1f9968a178f7abf8f6e00f76 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/log/logger.go @@ -0,0 +1,182 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package log + +import ( + "fmt" + "io" + "log" + "os" + "strings" + + "github.com/hashicorp/go-hclog" + "github.com/sirupsen/logrus" +) + +func NewHCLogrusLogger(name string, logger *logrus.Logger) hclog.Logger { + return &hclogLogrus{ + entry: logrus.NewEntry(logger), + name: fmt.Sprintf("%s ", name), + } +} + +// hclogLogrus is an adapter logger between `logrus.Logger` and `hclog.Logger`. +type hclogLogrus struct { + // entry is global set of fields shared by single logger + entry *logrus.Entry + name string +} + +func (hclogger *hclogLogrus) GetLevel() hclog.Level { + switch hclogger.entry.Logger.Level { + case logrus.TraceLevel: + return hclog.Trace + case logrus.DebugLevel: + return hclog.Debug + case logrus.InfoLevel: + return hclog.Info + case logrus.WarnLevel: + return hclog.Warn + case logrus.ErrorLevel: + return hclog.Error + case logrus.FatalLevel: + case logrus.PanicLevel: + return hclog.Error + } + return hclog.DefaultLevel +} + +func (hclogger *hclogLogrus) Log(level hclog.Level, msg string, args ...interface{}) { + switch level { + case hclog.Trace: + hclogger.Trace(msg, args...) + case hclog.Debug: + hclogger.Debug(msg, args...) + case hclog.Info: + case hclog.NoLevel: + hclogger.Info(msg, args...) + case hclog.Warn: + hclogger.Warn(msg, args...) + case hclog.Error: + hclogger.Error(msg, args...) + case hclog.Off: + } +} + +func (hclogger *hclogLogrus) ImpliedArgs() []interface{} { + var fields []interface{} + for k, v := range hclogger.entry.Data { + fields = append(fields, k) + fields = append(fields, v) + } + return fields +} + +func (hclogger *hclogLogrus) Name() string { + return hclogger.name +} + +func (hclogger *hclogLogrus) Trace(msg string, args ...interface{}) { + hclogger.logToLogrus(logrus.TraceLevel, msg, args...) +} + +func (hclogger *hclogLogrus) Debug(msg string, args ...interface{}) { + hclogger.logToLogrus(logrus.DebugLevel, msg, args...) 
+} + +func (hclogger *hclogLogrus) Info(msg string, args ...interface{}) { + hclogger.logToLogrus(logrus.InfoLevel, msg, args...) +} + +func (hclogger *hclogLogrus) Warn(msg string, args ...interface{}) { + hclogger.logToLogrus(logrus.WarnLevel, msg, args...) +} + +func (hclogger *hclogLogrus) Error(msg string, args ...interface{}) { + hclogger.logToLogrus(logrus.ErrorLevel, msg, args...) +} + +func (hclogger *hclogLogrus) logToLogrus(level logrus.Level, msg string, args ...interface{}) { + // we create new log entry merging per-logger `fields` (hclogger.entry) + entry := hclogger.entry.WithFields(hclogger.loggerWith(args).WithField("action", strings.TrimSpace(hclogger.name)).Data) + entry.Log(level, msg) +} + +func (hclogger *hclogLogrus) IsTrace() bool { + return hclogger.entry.Logger.IsLevelEnabled(logrus.TraceLevel) +} + +func (hclogger *hclogLogrus) IsDebug() bool { + return hclogger.entry.Logger.IsLevelEnabled(logrus.DebugLevel) +} + +func (hclogger *hclogLogrus) IsInfo() bool { + return hclogger.entry.Logger.IsLevelEnabled(logrus.InfoLevel) +} + +func (hclogger *hclogLogrus) IsWarn() bool { + return hclogger.entry.Logger.IsLevelEnabled(logrus.WarnLevel) +} + +func (hclogger *hclogLogrus) IsError() bool { + return hclogger.entry.Logger.IsLevelEnabled(logrus.ErrorLevel) +} + +func (hclogger *hclogLogrus) With(args ...interface{}) hclog.Logger { + return &hclogLogrus{ + name: hclogger.name, + entry: hclogger.loggerWith(args).WithField("action", strings.TrimSpace(hclogger.name)), + } +} + +func (hclogger *hclogLogrus) loggerWith(args []interface{}) *logrus.Entry { + l := hclogger.entry + ml := len(args) + var key string + for i := 0; i < ml-1; i += 2 { + keyVal := args[i] + if keyStr, ok := keyVal.(string); ok { + key = keyStr + } else { + key = fmt.Sprintf("%v", keyVal) + } + val := args[i+1] + if f, ok := val.(hclog.Format); ok { + val = fmt.Sprintf(f[0].(string), f[1:]) + } + l = l.WithField(key, val) + } + return l +} + +func (hclogger *hclogLogrus) 
Named(name string) hclog.Logger {
+	return hclogger.ResetNamed(name + hclogger.name) // NOTE(review): the new sub-name is prepended before the current name; hclog's own loggers append ("parent.child") — confirm the order is intended
+}
+
+func (hclogger *hclogLogrus) ResetNamed(name string) hclog.Logger {
+	return &hclogLogrus{
+		name:  name,
+		entry: hclogger.entry,
+	}
+}
+
+func (hclogger *hclogLogrus) SetLevel(l hclog.Level) {
+	hclogger.entry.Level = logrus.Level(l) // NOTE(review): hclog and logrus level enums run in opposite numeric orders, and GetLevel reads entry.Logger.Level (not entry.Level) — this direct cast/assignment looks like a mis-mapping; confirm
+}
+
+func (hclogger *hclogLogrus) StandardLogger(*hclog.StandardLoggerOptions) *log.Logger {
+	return log.Default()
+}
+
+func (hclogger *hclogLogrus) StandardWriter(*hclog.StandardLoggerOptions) io.Writer {
+	return os.Stdout
+}
diff --git a/platform/dbops/binaries/weaviate-src/cluster/log/logger_test.go b/platform/dbops/binaries/weaviate-src/cluster/log/logger_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..e596a7e61e0ecb77bdcc55f65b189a709ed505db
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/cluster/log/logger_test.go
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+//
+// CONTACT: hello@weaviate.io
+//
+
+package log
+
+import (
+	"bytes"
+	"testing"
+
+	"github.com/sirupsen/logrus"
+	"github.com/stretchr/testify/assert"
+)
+
+func Test_hclogger(t *testing.T) {
+	buf := bytes.Buffer{}
+
+	r := logrus.New()
+	r.SetOutput(&buf)
+
+	v := NewHCLogrusLogger("test", r)
+
+	v.Warn("Election time out")
+	assert.Contains(t, buf.String(), "Election time out")
+	buf.Reset()
+
+	v.Warn("heartbeat timeout reached", "last-leader-addr", "fake", "last-leader-id", "fake")
+	assert.NotContains(t, buf.String(), "Election time out")
+	assert.Contains(t, buf.String(), "heartbeat timeout reached")
+	assert.Contains(t, buf.String(), "last-leader-addr=fake")
+	assert.Contains(t, buf.String(), "last-leader-id=fake")
+	buf.Reset()
+
+	v.Warn("Election time out")
+	assert.Contains(t, buf.String(), "Election time out")
+	assert.NotContains(t, buf.String(), "heartbeat timeout reached")
+	assert.NotContains(t, buf.String(), "last-leader-addr=fake")
+	assert.NotContains(t, buf.String(), "last-leader-id=fake")
+	buf.Reset()
+
+	// check if any fields added to it later should be available in future log lines
+	v = v.With("oh-new", "oh-new-value")
+	v.Warn("Election time out")
+	assert.Contains(t, buf.String(), "Election time out")
+	assert.Contains(t, buf.String(), "oh-new=oh-new-value")
+	assert.NotContains(t, buf.String(), "heartbeat timeout reached")
+	assert.NotContains(t, buf.String(), "last-leader-addr=fake")
+	assert.NotContains(t, buf.String(), "last-leader-id=fake")
+	buf.Reset()
+
+	// ResetNamed API
+	{
+		v.Warn("Election time out")
+		assert.Contains(t, buf.String(), "Election time out")
+		assert.Contains(t, buf.String(), "action=test")
+		buf.Reset()
+
+		v = v.ResetNamed("test2") // test -> test2
+		v.Warn("Election time out")
+		assert.Contains(t, buf.String(), "Election time out")
+		assert.Contains(t, buf.String(), "action=test2") // renamed successfully
+		buf.Reset()
+	}
+
+	// After renaming, no duplicate fields from previous log lines
+	{
+		
v.Warn("heartbeat timeout reached", "last-leader-addr", "fake", "last-leader-id", "fake")
+		assert.NotContains(t, buf.String(), "Election time out")
+		assert.Contains(t, buf.String(), "heartbeat timeout reached")
+		assert.Contains(t, buf.String(), "last-leader-addr=fake")
+		assert.Contains(t, buf.String(), "last-leader-id=fake")
+		buf.Reset()
+
+		v.Warn("Election time out")
+		assert.Contains(t, buf.String(), "Election time out")
+		assert.NotContains(t, buf.String(), "heartbeat timeout reached")
+		assert.NotContains(t, buf.String(), "last-leader-addr=fake")
+		assert.NotContains(t, buf.String(), "last-leader-id=fake")
+		buf.Reset()
+	}
+
+	// After renaming, the logger should respect fields added via subsequent `With()` calls
+	{
+		v = v.With("oh-new", "oh-new-value")
+		v.Warn("Election time out")
+		assert.Contains(t, buf.String(), "Election time out")
+		assert.Contains(t, buf.String(), "oh-new=oh-new-value")
+		assert.NotContains(t, buf.String(), "heartbeat timeout reached")
+		assert.NotContains(t, buf.String(), "last-leader-addr=fake")
+		assert.NotContains(t, buf.String(), "last-leader-id=fake")
+		buf.Reset()
+	}
+}
diff --git a/platform/dbops/binaries/weaviate-src/cluster/mocks/sink.go b/platform/dbops/binaries/weaviate-src/cluster/mocks/sink.go
new file mode 100644
index 0000000000000000000000000000000000000000..8d7eaa0231cb737980763e6cd652c229af122081
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/cluster/mocks/sink.go
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+//
+// CONTACT: hello@weaviate.io
+//
+
+package mocks
+
+import "bytes"
+
+// Snapshot sink implementation for testing
+type SnapshotSink struct {
+	Buffer     *bytes.Buffer // receives everything written when no error is injected
+	WriteError error         // when non-nil, every Write call fails with this error
+}
+
+func (t *SnapshotSink) Write(p []byte) (n int, err error) { // returns the injected WriteError, else appends p to Buffer
+	if t.WriteError != nil {
+		return 0, t.WriteError
+	}
+	return t.Buffer.Write(p)
+}
+
+func (t *SnapshotSink) Close() error { // no-op: always succeeds
+	return nil
+}
+
+func (t *SnapshotSink) ID() string { // fixed identifier for assertions
+	return "test-snapshot-id"
+}
+
+func (t *SnapshotSink) Cancel() error { // no-op: always succeeds
+	return nil
+}
diff --git a/platform/dbops/binaries/weaviate-src/cluster/proto/api/dyn_user_requests.go b/platform/dbops/binaries/weaviate-src/cluster/proto/api/dyn_user_requests.go
new file mode 100644
index 0000000000000000000000000000000000000000..6ff9ec768da3b6dfc91c95318a41aa118b896bf9
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/cluster/proto/api/dyn_user_requests.go
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+//
+// CONTACT: hello@weaviate.io
+//
+
+package api
+
+import (
+	"crypto/sha256"
+	"time"
+
+	"github.com/weaviate/weaviate/usecases/auth/authentication/apikey"
+)
+
+const (
+	// NOTE: if the dynamic-user message format changes, add a new version constant here
+	DynUserLatestCommandPolicyVersion = iota // currently 0; carried in each request's Version field
+)
+
+type CreateUsersRequest struct {
+	UserId             string
+	SecureHash         string
+	UserIdentifier     string
+	ApiKeyFirstLetters string
+	CreatedAt          time.Time
+	Version            int // see DynUserLatestCommandPolicyVersion
+}
+
+type CreateUserWithKeyRequest struct {
+	UserId             string
+	ApiKeyFirstLetters string
+	WeakHash           [sha256.Size]byte
+	CreatedAt          time.Time
+	Version            int // see DynUserLatestCommandPolicyVersion
+}
+
+type RotateUserApiKeyRequest struct {
+	UserId             string
+	ApiKeyFirstLetters string
+	SecureHash         string
+	OldIdentifier      string
+	NewIdentifier      string
+	Version            int // see DynUserLatestCommandPolicyVersion
+}
+
+type DeleteUsersRequest struct {
+	UserId  string
+	Version int // see DynUserLatestCommandPolicyVersion
+}
+
+type ActivateUsersRequest struct {
+	UserId  string
+	Version int // see DynUserLatestCommandPolicyVersion
+}
+
+type SuspendUserRequest struct {
+	UserId    string
+	RevokeKey bool
+	Version   int // see DynUserLatestCommandPolicyVersion
+}
+
+type QueryGetUsersRequest struct {
+	UserIds []string
+}
+
+type QueryGetUsersResponse struct {
+	Users map[string]*apikey.User
+}
+
+type QueryUserIdentifierExistsRequest struct {
+	UserIdentifier string
+}
+
+type QueryUserIdentifierExistsResponse struct {
+	Exists bool
+}
diff --git a/platform/dbops/binaries/weaviate-src/cluster/proto/api/message.pb.go b/platform/dbops/binaries/weaviate-src/cluster/proto/api/message.pb.go
new file mode 100644
index 0000000000000000000000000000000000000000..50f6725f99738d6f28f5275c4684c3f881324807
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/cluster/proto/api/message.pb.go
+// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.31.0 +// protoc (unknown) +// source: api/message.proto + +// NOTE run `buf generate` from `cluster/proto` to regenerate code + +package api + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ApplyRequest_Type int32 + +const ( + ApplyRequest_TYPE_UNSPECIFIED ApplyRequest_Type = 0 + ApplyRequest_TYPE_ADD_CLASS ApplyRequest_Type = 1 + ApplyRequest_TYPE_UPDATE_CLASS ApplyRequest_Type = 2 + ApplyRequest_TYPE_DELETE_CLASS ApplyRequest_Type = 3 + ApplyRequest_TYPE_RESTORE_CLASS ApplyRequest_Type = 4 + ApplyRequest_TYPE_ADD_PROPERTY ApplyRequest_Type = 5 + ApplyRequest_TYPE_UPDATE_SHARD_STATUS ApplyRequest_Type = 10 + ApplyRequest_TYPE_ADD_REPLICA_TO_SHARD ApplyRequest_Type = 11 + ApplyRequest_TYPE_DELETE_REPLICA_FROM_SHARD ApplyRequest_Type = 12 + ApplyRequest_TYPE_ADD_TENANT ApplyRequest_Type = 16 + ApplyRequest_TYPE_UPDATE_TENANT ApplyRequest_Type = 17 + ApplyRequest_TYPE_DELETE_TENANT ApplyRequest_Type = 18 + ApplyRequest_TYPE_TENANT_PROCESS ApplyRequest_Type = 19 + ApplyRequest_TYPE_CREATE_ALIAS ApplyRequest_Type = 40 + ApplyRequest_TYPE_REPLACE_ALIAS ApplyRequest_Type = 41 + ApplyRequest_TYPE_DELETE_ALIAS ApplyRequest_Type = 42 + ApplyRequest_TYPE_UPSERT_ROLES_PERMISSIONS ApplyRequest_Type = 60 + ApplyRequest_TYPE_DELETE_ROLES ApplyRequest_Type = 61 + ApplyRequest_TYPE_REMOVE_PERMISSIONS ApplyRequest_Type = 62 + ApplyRequest_TYPE_ADD_ROLES_FOR_USER ApplyRequest_Type = 63 + ApplyRequest_TYPE_REVOKE_ROLES_FOR_USER ApplyRequest_Type = 64 + ApplyRequest_TYPE_UPSERT_USER ApplyRequest_Type = 80 + ApplyRequest_TYPE_DELETE_USER ApplyRequest_Type = 81 + 
ApplyRequest_TYPE_ROTATE_USER_API_KEY ApplyRequest_Type = 82 + ApplyRequest_TYPE_SUSPEND_USER ApplyRequest_Type = 83 + ApplyRequest_TYPE_ACTIVATE_USER ApplyRequest_Type = 84 + ApplyRequest_TYPE_CREATE_USER_WITH_KEY ApplyRequest_Type = 85 + ApplyRequest_TYPE_STORE_SCHEMA_V1 ApplyRequest_Type = 99 + ApplyRequest_TYPE_REPLICATION_REPLICATE ApplyRequest_Type = 200 + ApplyRequest_TYPE_REPLICATION_REPLICATE_UPDATE_STATE ApplyRequest_Type = 201 + ApplyRequest_TYPE_REPLICATION_REPLICATE_REGISTER_ERROR ApplyRequest_Type = 202 + ApplyRequest_TYPE_REPLICATION_REPLICATE_CANCEL ApplyRequest_Type = 203 + ApplyRequest_TYPE_REPLICATION_REPLICATE_DELETE ApplyRequest_Type = 204 + ApplyRequest_TYPE_REPLICATION_REPLICATE_REMOVE ApplyRequest_Type = 205 + ApplyRequest_TYPE_REPLICATION_REPLICATE_CANCELLATION_COMPLETE ApplyRequest_Type = 206 + ApplyRequest_TYPE_REPLICATION_REPLICATE_DELETE_ALL ApplyRequest_Type = 207 + ApplyRequest_TYPE_REPLICATION_REPLICATE_DELETE_BY_COLLECTION ApplyRequest_Type = 208 + ApplyRequest_TYPE_REPLICATION_REPLICATE_DELETE_BY_TENANTS ApplyRequest_Type = 209 + ApplyRequest_TYPE_REPLICATION_REPLICATE_SYNC_SHARD ApplyRequest_Type = 210 + ApplyRequest_TYPE_REPLICATION_REGISTER_SCHEMA_VERSION ApplyRequest_Type = 211 + ApplyRequest_TYPE_REPLICATION_REPLICATE_ADD_REPLICA_TO_SHARD ApplyRequest_Type = 212 + ApplyRequest_TYPE_REPLICATION_REPLICATE_FORCE_DELETE_ALL ApplyRequest_Type = 220 + ApplyRequest_TYPE_REPLICATION_REPLICATE_FORCE_DELETE_BY_COLLECTION ApplyRequest_Type = 221 + ApplyRequest_TYPE_REPLICATION_REPLICATE_FORCE_DELETE_BY_COLLECTION_AND_SHARD ApplyRequest_Type = 222 + ApplyRequest_TYPE_REPLICATION_REPLICATE_FORCE_DELETE_BY_TARGET_NODE ApplyRequest_Type = 223 + ApplyRequest_TYPE_REPLICATION_REPLICATE_FORCE_DELETE_BY_UUID ApplyRequest_Type = 224 + ApplyRequest_TYPE_DISTRIBUTED_TASK_ADD ApplyRequest_Type = 300 + ApplyRequest_TYPE_DISTRIBUTED_TASK_CANCEL ApplyRequest_Type = 301 + ApplyRequest_TYPE_DISTRIBUTED_TASK_RECORD_NODE_COMPLETED ApplyRequest_Type = 302 + 
ApplyRequest_TYPE_DISTRIBUTED_TASK_CLEAN_UP ApplyRequest_Type = 303 +) + +// Enum value maps for ApplyRequest_Type. +var ( + ApplyRequest_Type_name = map[int32]string{ + 0: "TYPE_UNSPECIFIED", + 1: "TYPE_ADD_CLASS", + 2: "TYPE_UPDATE_CLASS", + 3: "TYPE_DELETE_CLASS", + 4: "TYPE_RESTORE_CLASS", + 5: "TYPE_ADD_PROPERTY", + 10: "TYPE_UPDATE_SHARD_STATUS", + 11: "TYPE_ADD_REPLICA_TO_SHARD", + 12: "TYPE_DELETE_REPLICA_FROM_SHARD", + 16: "TYPE_ADD_TENANT", + 17: "TYPE_UPDATE_TENANT", + 18: "TYPE_DELETE_TENANT", + 19: "TYPE_TENANT_PROCESS", + 40: "TYPE_CREATE_ALIAS", + 41: "TYPE_REPLACE_ALIAS", + 42: "TYPE_DELETE_ALIAS", + 60: "TYPE_UPSERT_ROLES_PERMISSIONS", + 61: "TYPE_DELETE_ROLES", + 62: "TYPE_REMOVE_PERMISSIONS", + 63: "TYPE_ADD_ROLES_FOR_USER", + 64: "TYPE_REVOKE_ROLES_FOR_USER", + 80: "TYPE_UPSERT_USER", + 81: "TYPE_DELETE_USER", + 82: "TYPE_ROTATE_USER_API_KEY", + 83: "TYPE_SUSPEND_USER", + 84: "TYPE_ACTIVATE_USER", + 85: "TYPE_CREATE_USER_WITH_KEY", + 99: "TYPE_STORE_SCHEMA_V1", + 200: "TYPE_REPLICATION_REPLICATE", + 201: "TYPE_REPLICATION_REPLICATE_UPDATE_STATE", + 202: "TYPE_REPLICATION_REPLICATE_REGISTER_ERROR", + 203: "TYPE_REPLICATION_REPLICATE_CANCEL", + 204: "TYPE_REPLICATION_REPLICATE_DELETE", + 205: "TYPE_REPLICATION_REPLICATE_REMOVE", + 206: "TYPE_REPLICATION_REPLICATE_CANCELLATION_COMPLETE", + 207: "TYPE_REPLICATION_REPLICATE_DELETE_ALL", + 208: "TYPE_REPLICATION_REPLICATE_DELETE_BY_COLLECTION", + 209: "TYPE_REPLICATION_REPLICATE_DELETE_BY_TENANTS", + 210: "TYPE_REPLICATION_REPLICATE_SYNC_SHARD", + 211: "TYPE_REPLICATION_REGISTER_SCHEMA_VERSION", + 212: "TYPE_REPLICATION_REPLICATE_ADD_REPLICA_TO_SHARD", + 220: "TYPE_REPLICATION_REPLICATE_FORCE_DELETE_ALL", + 221: "TYPE_REPLICATION_REPLICATE_FORCE_DELETE_BY_COLLECTION", + 222: "TYPE_REPLICATION_REPLICATE_FORCE_DELETE_BY_COLLECTION_AND_SHARD", + 223: "TYPE_REPLICATION_REPLICATE_FORCE_DELETE_BY_TARGET_NODE", + 224: "TYPE_REPLICATION_REPLICATE_FORCE_DELETE_BY_UUID", + 300: "TYPE_DISTRIBUTED_TASK_ADD", + 
301: "TYPE_DISTRIBUTED_TASK_CANCEL", + 302: "TYPE_DISTRIBUTED_TASK_RECORD_NODE_COMPLETED", + 303: "TYPE_DISTRIBUTED_TASK_CLEAN_UP", + } + ApplyRequest_Type_value = map[string]int32{ + "TYPE_UNSPECIFIED": 0, + "TYPE_ADD_CLASS": 1, + "TYPE_UPDATE_CLASS": 2, + "TYPE_DELETE_CLASS": 3, + "TYPE_RESTORE_CLASS": 4, + "TYPE_ADD_PROPERTY": 5, + "TYPE_UPDATE_SHARD_STATUS": 10, + "TYPE_ADD_REPLICA_TO_SHARD": 11, + "TYPE_DELETE_REPLICA_FROM_SHARD": 12, + "TYPE_ADD_TENANT": 16, + "TYPE_UPDATE_TENANT": 17, + "TYPE_DELETE_TENANT": 18, + "TYPE_TENANT_PROCESS": 19, + "TYPE_CREATE_ALIAS": 40, + "TYPE_REPLACE_ALIAS": 41, + "TYPE_DELETE_ALIAS": 42, + "TYPE_UPSERT_ROLES_PERMISSIONS": 60, + "TYPE_DELETE_ROLES": 61, + "TYPE_REMOVE_PERMISSIONS": 62, + "TYPE_ADD_ROLES_FOR_USER": 63, + "TYPE_REVOKE_ROLES_FOR_USER": 64, + "TYPE_UPSERT_USER": 80, + "TYPE_DELETE_USER": 81, + "TYPE_ROTATE_USER_API_KEY": 82, + "TYPE_SUSPEND_USER": 83, + "TYPE_ACTIVATE_USER": 84, + "TYPE_CREATE_USER_WITH_KEY": 85, + "TYPE_STORE_SCHEMA_V1": 99, + "TYPE_REPLICATION_REPLICATE": 200, + "TYPE_REPLICATION_REPLICATE_UPDATE_STATE": 201, + "TYPE_REPLICATION_REPLICATE_REGISTER_ERROR": 202, + "TYPE_REPLICATION_REPLICATE_CANCEL": 203, + "TYPE_REPLICATION_REPLICATE_DELETE": 204, + "TYPE_REPLICATION_REPLICATE_REMOVE": 205, + "TYPE_REPLICATION_REPLICATE_CANCELLATION_COMPLETE": 206, + "TYPE_REPLICATION_REPLICATE_DELETE_ALL": 207, + "TYPE_REPLICATION_REPLICATE_DELETE_BY_COLLECTION": 208, + "TYPE_REPLICATION_REPLICATE_DELETE_BY_TENANTS": 209, + "TYPE_REPLICATION_REPLICATE_SYNC_SHARD": 210, + "TYPE_REPLICATION_REGISTER_SCHEMA_VERSION": 211, + "TYPE_REPLICATION_REPLICATE_ADD_REPLICA_TO_SHARD": 212, + "TYPE_REPLICATION_REPLICATE_FORCE_DELETE_ALL": 220, + "TYPE_REPLICATION_REPLICATE_FORCE_DELETE_BY_COLLECTION": 221, + "TYPE_REPLICATION_REPLICATE_FORCE_DELETE_BY_COLLECTION_AND_SHARD": 222, + "TYPE_REPLICATION_REPLICATE_FORCE_DELETE_BY_TARGET_NODE": 223, + "TYPE_REPLICATION_REPLICATE_FORCE_DELETE_BY_UUID": 224, + 
"TYPE_DISTRIBUTED_TASK_ADD": 300, + "TYPE_DISTRIBUTED_TASK_CANCEL": 301, + "TYPE_DISTRIBUTED_TASK_RECORD_NODE_COMPLETED": 302, + "TYPE_DISTRIBUTED_TASK_CLEAN_UP": 303, + } +) + +func (x ApplyRequest_Type) Enum() *ApplyRequest_Type { + p := new(ApplyRequest_Type) + *p = x + return p +} + +func (x ApplyRequest_Type) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ApplyRequest_Type) Descriptor() protoreflect.EnumDescriptor { + return file_api_message_proto_enumTypes[0].Descriptor() +} + +func (ApplyRequest_Type) Type() protoreflect.EnumType { + return &file_api_message_proto_enumTypes[0] +} + +func (x ApplyRequest_Type) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ApplyRequest_Type.Descriptor instead. +func (ApplyRequest_Type) EnumDescriptor() ([]byte, []int) { + return file_api_message_proto_rawDescGZIP(), []int{6, 0} +} + +type QueryRequest_Type int32 + +const ( + QueryRequest_TYPE_UNSPECIFIED QueryRequest_Type = 0 + QueryRequest_TYPE_GET_CLASSES QueryRequest_Type = 1 + QueryRequest_TYPE_GET_SCHEMA QueryRequest_Type = 2 + QueryRequest_TYPE_GET_TENANTS QueryRequest_Type = 3 + QueryRequest_TYPE_GET_SHARD_OWNER QueryRequest_Type = 4 + QueryRequest_TYPE_GET_TENANTS_SHARDS QueryRequest_Type = 5 + QueryRequest_TYPE_GET_SHARDING_STATE QueryRequest_Type = 6 + QueryRequest_TYPE_GET_CLASS_VERSIONS QueryRequest_Type = 7 + QueryRequest_TYPE_GET_COLLECTIONS_COUNT QueryRequest_Type = 8 + QueryRequest_TYPE_HAS_PERMISSION QueryRequest_Type = 30 + QueryRequest_TYPE_GET_ROLES QueryRequest_Type = 31 + QueryRequest_TYPE_GET_ROLES_FOR_USER QueryRequest_Type = 32 + QueryRequest_TYPE_GET_USERS_FOR_ROLE QueryRequest_Type = 33 + QueryRequest_TYPE_GET_USERS_OR_GROUPS_WITH_ROLES QueryRequest_Type = 34 + QueryRequest_TYPE_GET_USERS QueryRequest_Type = 61 + QueryRequest_TYPE_USER_IDENTIFIER_EXISTS QueryRequest_Type = 62 + QueryRequest_TYPE_RESOLVE_ALIAS QueryRequest_Type = 100 + 
QueryRequest_TYPE_GET_ALIASES QueryRequest_Type = 101 + QueryRequest_TYPE_GET_REPLICATION_DETAILS QueryRequest_Type = 200 + QueryRequest_TYPE_GET_REPLICATION_DETAILS_BY_COLLECTION QueryRequest_Type = 201 + QueryRequest_TYPE_GET_REPLICATION_DETAILS_BY_COLLECTION_AND_SHARD QueryRequest_Type = 202 + QueryRequest_TYPE_GET_REPLICATION_DETAILS_BY_TARGET_NODE QueryRequest_Type = 203 + QueryRequest_TYPE_GET_SHARDING_STATE_BY_COLLECTION QueryRequest_Type = 204 + QueryRequest_TYPE_GET_SHARDING_STATE_BY_COLLECTION_AND_SHARD QueryRequest_Type = 205 + QueryRequest_TYPE_GET_ALL_REPLICATION_DETAILS QueryRequest_Type = 206 + QueryRequest_TYPE_GET_REPLICATION_OPERATION_STATE QueryRequest_Type = 207 + QueryRequest_TYPE_DISTRIBUTED_TASK_LIST QueryRequest_Type = 300 +) + +// Enum value maps for QueryRequest_Type. +var ( + QueryRequest_Type_name = map[int32]string{ + 0: "TYPE_UNSPECIFIED", + 1: "TYPE_GET_CLASSES", + 2: "TYPE_GET_SCHEMA", + 3: "TYPE_GET_TENANTS", + 4: "TYPE_GET_SHARD_OWNER", + 5: "TYPE_GET_TENANTS_SHARDS", + 6: "TYPE_GET_SHARDING_STATE", + 7: "TYPE_GET_CLASS_VERSIONS", + 8: "TYPE_GET_COLLECTIONS_COUNT", + 30: "TYPE_HAS_PERMISSION", + 31: "TYPE_GET_ROLES", + 32: "TYPE_GET_ROLES_FOR_USER", + 33: "TYPE_GET_USERS_FOR_ROLE", + 34: "TYPE_GET_USERS_OR_GROUPS_WITH_ROLES", + 61: "TYPE_GET_USERS", + 62: "TYPE_USER_IDENTIFIER_EXISTS", + 100: "TYPE_RESOLVE_ALIAS", + 101: "TYPE_GET_ALIASES", + 200: "TYPE_GET_REPLICATION_DETAILS", + 201: "TYPE_GET_REPLICATION_DETAILS_BY_COLLECTION", + 202: "TYPE_GET_REPLICATION_DETAILS_BY_COLLECTION_AND_SHARD", + 203: "TYPE_GET_REPLICATION_DETAILS_BY_TARGET_NODE", + 204: "TYPE_GET_SHARDING_STATE_BY_COLLECTION", + 205: "TYPE_GET_SHARDING_STATE_BY_COLLECTION_AND_SHARD", + 206: "TYPE_GET_ALL_REPLICATION_DETAILS", + 207: "TYPE_GET_REPLICATION_OPERATION_STATE", + 300: "TYPE_DISTRIBUTED_TASK_LIST", + } + QueryRequest_Type_value = map[string]int32{ + "TYPE_UNSPECIFIED": 0, + "TYPE_GET_CLASSES": 1, + "TYPE_GET_SCHEMA": 2, + "TYPE_GET_TENANTS": 3, + 
"TYPE_GET_SHARD_OWNER": 4, + "TYPE_GET_TENANTS_SHARDS": 5, + "TYPE_GET_SHARDING_STATE": 6, + "TYPE_GET_CLASS_VERSIONS": 7, + "TYPE_GET_COLLECTIONS_COUNT": 8, + "TYPE_HAS_PERMISSION": 30, + "TYPE_GET_ROLES": 31, + "TYPE_GET_ROLES_FOR_USER": 32, + "TYPE_GET_USERS_FOR_ROLE": 33, + "TYPE_GET_USERS_OR_GROUPS_WITH_ROLES": 34, + "TYPE_GET_USERS": 61, + "TYPE_USER_IDENTIFIER_EXISTS": 62, + "TYPE_RESOLVE_ALIAS": 100, + "TYPE_GET_ALIASES": 101, + "TYPE_GET_REPLICATION_DETAILS": 200, + "TYPE_GET_REPLICATION_DETAILS_BY_COLLECTION": 201, + "TYPE_GET_REPLICATION_DETAILS_BY_COLLECTION_AND_SHARD": 202, + "TYPE_GET_REPLICATION_DETAILS_BY_TARGET_NODE": 203, + "TYPE_GET_SHARDING_STATE_BY_COLLECTION": 204, + "TYPE_GET_SHARDING_STATE_BY_COLLECTION_AND_SHARD": 205, + "TYPE_GET_ALL_REPLICATION_DETAILS": 206, + "TYPE_GET_REPLICATION_OPERATION_STATE": 207, + "TYPE_DISTRIBUTED_TASK_LIST": 300, + } +) + +func (x QueryRequest_Type) Enum() *QueryRequest_Type { + p := new(QueryRequest_Type) + *p = x + return p +} + +func (x QueryRequest_Type) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (QueryRequest_Type) Descriptor() protoreflect.EnumDescriptor { + return file_api_message_proto_enumTypes[1].Descriptor() +} + +func (QueryRequest_Type) Type() protoreflect.EnumType { + return &file_api_message_proto_enumTypes[1] +} + +func (x QueryRequest_Type) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use QueryRequest_Type.Descriptor instead. +func (QueryRequest_Type) EnumDescriptor() ([]byte, []int) { + return file_api_message_proto_rawDescGZIP(), []int{8, 0} +} + +type TenantsProcess_Op int32 + +const ( + TenantsProcess_OP_UNSPECIFIED TenantsProcess_Op = 0 + TenantsProcess_OP_START TenantsProcess_Op = 1 + TenantsProcess_OP_DONE TenantsProcess_Op = 2 + TenantsProcess_OP_ABORT TenantsProcess_Op = 3 +) + +// Enum value maps for TenantsProcess_Op. 
+var ( + TenantsProcess_Op_name = map[int32]string{ + 0: "OP_UNSPECIFIED", + 1: "OP_START", + 2: "OP_DONE", + 3: "OP_ABORT", + } + TenantsProcess_Op_value = map[string]int32{ + "OP_UNSPECIFIED": 0, + "OP_START": 1, + "OP_DONE": 2, + "OP_ABORT": 3, + } +) + +func (x TenantsProcess_Op) Enum() *TenantsProcess_Op { + p := new(TenantsProcess_Op) + *p = x + return p +} + +func (x TenantsProcess_Op) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (TenantsProcess_Op) Descriptor() protoreflect.EnumDescriptor { + return file_api_message_proto_enumTypes[2].Descriptor() +} + +func (TenantsProcess_Op) Type() protoreflect.EnumType { + return &file_api_message_proto_enumTypes[2] +} + +func (x TenantsProcess_Op) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use TenantsProcess_Op.Descriptor instead. +func (TenantsProcess_Op) EnumDescriptor() ([]byte, []int) { + return file_api_message_proto_rawDescGZIP(), []int{12, 0} +} + +type TenantProcessRequest_Action int32 + +const ( + TenantProcessRequest_ACTION_UNSPECIFIED TenantProcessRequest_Action = 0 + TenantProcessRequest_ACTION_FREEZING TenantProcessRequest_Action = 1 + TenantProcessRequest_ACTION_UNFREEZING TenantProcessRequest_Action = 2 +) + +// Enum value maps for TenantProcessRequest_Action. 
+var ( + TenantProcessRequest_Action_name = map[int32]string{ + 0: "ACTION_UNSPECIFIED", + 1: "ACTION_FREEZING", + 2: "ACTION_UNFREEZING", + } + TenantProcessRequest_Action_value = map[string]int32{ + "ACTION_UNSPECIFIED": 0, + "ACTION_FREEZING": 1, + "ACTION_UNFREEZING": 2, + } +) + +func (x TenantProcessRequest_Action) Enum() *TenantProcessRequest_Action { + p := new(TenantProcessRequest_Action) + *p = x + return p +} + +func (x TenantProcessRequest_Action) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (TenantProcessRequest_Action) Descriptor() protoreflect.EnumDescriptor { + return file_api_message_proto_enumTypes[3].Descriptor() +} + +func (TenantProcessRequest_Action) Type() protoreflect.EnumType { + return &file_api_message_proto_enumTypes[3] +} + +func (x TenantProcessRequest_Action) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use TenantProcessRequest_Action.Descriptor instead. 
+func (TenantProcessRequest_Action) EnumDescriptor() ([]byte, []int) { + return file_api_message_proto_rawDescGZIP(), []int{13, 0} +} + +type JoinPeerRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` + Voter bool `protobuf:"varint,3,opt,name=voter,proto3" json:"voter,omitempty"` +} + +func (x *JoinPeerRequest) Reset() { + *x = JoinPeerRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_api_message_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *JoinPeerRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*JoinPeerRequest) ProtoMessage() {} + +func (x *JoinPeerRequest) ProtoReflect() protoreflect.Message { + mi := &file_api_message_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use JoinPeerRequest.ProtoReflect.Descriptor instead. 
+func (*JoinPeerRequest) Descriptor() ([]byte, []int) { + return file_api_message_proto_rawDescGZIP(), []int{0} +} + +func (x *JoinPeerRequest) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *JoinPeerRequest) GetAddress() string { + if x != nil { + return x.Address + } + return "" +} + +func (x *JoinPeerRequest) GetVoter() bool { + if x != nil { + return x.Voter + } + return false +} + +type JoinPeerResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Leader string `protobuf:"bytes,1,opt,name=leader,proto3" json:"leader,omitempty"` +} + +func (x *JoinPeerResponse) Reset() { + *x = JoinPeerResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_api_message_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *JoinPeerResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*JoinPeerResponse) ProtoMessage() {} + +func (x *JoinPeerResponse) ProtoReflect() protoreflect.Message { + mi := &file_api_message_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use JoinPeerResponse.ProtoReflect.Descriptor instead. 
+func (*JoinPeerResponse) Descriptor() ([]byte, []int) { + return file_api_message_proto_rawDescGZIP(), []int{1} +} + +func (x *JoinPeerResponse) GetLeader() string { + if x != nil { + return x.Leader + } + return "" +} + +type RemovePeerRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` +} + +func (x *RemovePeerRequest) Reset() { + *x = RemovePeerRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_api_message_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RemovePeerRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RemovePeerRequest) ProtoMessage() {} + +func (x *RemovePeerRequest) ProtoReflect() protoreflect.Message { + mi := &file_api_message_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RemovePeerRequest.ProtoReflect.Descriptor instead. 
+func (*RemovePeerRequest) Descriptor() ([]byte, []int) { + return file_api_message_proto_rawDescGZIP(), []int{2} +} + +func (x *RemovePeerRequest) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +type RemovePeerResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Leader string `protobuf:"bytes,1,opt,name=leader,proto3" json:"leader,omitempty"` +} + +func (x *RemovePeerResponse) Reset() { + *x = RemovePeerResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_api_message_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RemovePeerResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RemovePeerResponse) ProtoMessage() {} + +func (x *RemovePeerResponse) ProtoReflect() protoreflect.Message { + mi := &file_api_message_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RemovePeerResponse.ProtoReflect.Descriptor instead. 
+func (*RemovePeerResponse) Descriptor() ([]byte, []int) { + return file_api_message_proto_rawDescGZIP(), []int{3} +} + +func (x *RemovePeerResponse) GetLeader() string { + if x != nil { + return x.Leader + } + return "" +} + +type NotifyPeerRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` +} + +func (x *NotifyPeerRequest) Reset() { + *x = NotifyPeerRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_api_message_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NotifyPeerRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NotifyPeerRequest) ProtoMessage() {} + +func (x *NotifyPeerRequest) ProtoReflect() protoreflect.Message { + mi := &file_api_message_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NotifyPeerRequest.ProtoReflect.Descriptor instead. 
+func (*NotifyPeerRequest) Descriptor() ([]byte, []int) { + return file_api_message_proto_rawDescGZIP(), []int{4} +} + +func (x *NotifyPeerRequest) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *NotifyPeerRequest) GetAddress() string { + if x != nil { + return x.Address + } + return "" +} + +type NotifyPeerResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *NotifyPeerResponse) Reset() { + *x = NotifyPeerResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_api_message_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NotifyPeerResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NotifyPeerResponse) ProtoMessage() {} + +func (x *NotifyPeerResponse) ProtoReflect() protoreflect.Message { + mi := &file_api_message_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NotifyPeerResponse.ProtoReflect.Descriptor instead. 
+func (*NotifyPeerResponse) Descriptor() ([]byte, []int) { + return file_api_message_proto_rawDescGZIP(), []int{5} +} + +type ApplyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type ApplyRequest_Type `protobuf:"varint,1,opt,name=type,proto3,enum=weaviate.internal.cluster.ApplyRequest_Type" json:"type,omitempty"` + Class string `protobuf:"bytes,2,opt,name=class,proto3" json:"class,omitempty"` + Version uint64 `protobuf:"varint,3,opt,name=version,proto3" json:"version,omitempty"` + SubCommand []byte `protobuf:"bytes,4,opt,name=sub_command,json=subCommand,proto3" json:"sub_command,omitempty"` +} + +func (x *ApplyRequest) Reset() { + *x = ApplyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_api_message_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ApplyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ApplyRequest) ProtoMessage() {} + +func (x *ApplyRequest) ProtoReflect() protoreflect.Message { + mi := &file_api_message_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ApplyRequest.ProtoReflect.Descriptor instead. 
+func (*ApplyRequest) Descriptor() ([]byte, []int) { + return file_api_message_proto_rawDescGZIP(), []int{6} +} + +func (x *ApplyRequest) GetType() ApplyRequest_Type { + if x != nil { + return x.Type + } + return ApplyRequest_TYPE_UNSPECIFIED +} + +func (x *ApplyRequest) GetClass() string { + if x != nil { + return x.Class + } + return "" +} + +func (x *ApplyRequest) GetVersion() uint64 { + if x != nil { + return x.Version + } + return 0 +} + +func (x *ApplyRequest) GetSubCommand() []byte { + if x != nil { + return x.SubCommand + } + return nil +} + +type ApplyResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Version uint64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + Leader string `protobuf:"bytes,2,opt,name=leader,proto3" json:"leader,omitempty"` +} + +func (x *ApplyResponse) Reset() { + *x = ApplyResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_api_message_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ApplyResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ApplyResponse) ProtoMessage() {} + +func (x *ApplyResponse) ProtoReflect() protoreflect.Message { + mi := &file_api_message_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ApplyResponse.ProtoReflect.Descriptor instead. 
+func (*ApplyResponse) Descriptor() ([]byte, []int) { + return file_api_message_proto_rawDescGZIP(), []int{7} +} + +func (x *ApplyResponse) GetVersion() uint64 { + if x != nil { + return x.Version + } + return 0 +} + +func (x *ApplyResponse) GetLeader() string { + if x != nil { + return x.Leader + } + return "" +} + +type QueryRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type QueryRequest_Type `protobuf:"varint,1,opt,name=type,proto3,enum=weaviate.internal.cluster.QueryRequest_Type" json:"type,omitempty"` + SubCommand []byte `protobuf:"bytes,2,opt,name=sub_command,json=subCommand,proto3" json:"sub_command,omitempty"` +} + +func (x *QueryRequest) Reset() { + *x = QueryRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_api_message_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QueryRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QueryRequest) ProtoMessage() {} + +func (x *QueryRequest) ProtoReflect() protoreflect.Message { + mi := &file_api_message_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QueryRequest.ProtoReflect.Descriptor instead. 
+func (*QueryRequest) Descriptor() ([]byte, []int) { + return file_api_message_proto_rawDescGZIP(), []int{8} +} + +func (x *QueryRequest) GetType() QueryRequest_Type { + if x != nil { + return x.Type + } + return QueryRequest_TYPE_UNSPECIFIED +} + +func (x *QueryRequest) GetSubCommand() []byte { + if x != nil { + return x.SubCommand + } + return nil +} + +type QueryResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Payload []byte `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty"` +} + +func (x *QueryResponse) Reset() { + *x = QueryResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_api_message_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QueryResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QueryResponse) ProtoMessage() {} + +func (x *QueryResponse) ProtoReflect() protoreflect.Message { + mi := &file_api_message_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QueryResponse.ProtoReflect.Descriptor instead. 
+func (*QueryResponse) Descriptor() ([]byte, []int) { + return file_api_message_proto_rawDescGZIP(), []int{9} +} + +func (x *QueryResponse) GetPayload() []byte { + if x != nil { + return x.Payload + } + return nil +} + +type AddTenantsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ClusterNodes []string `protobuf:"bytes,1,rep,name=cluster_nodes,json=clusterNodes,proto3" json:"cluster_nodes,omitempty"` + Tenants []*Tenant `protobuf:"bytes,2,rep,name=tenants,proto3" json:"tenants,omitempty"` +} + +func (x *AddTenantsRequest) Reset() { + *x = AddTenantsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_api_message_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AddTenantsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AddTenantsRequest) ProtoMessage() {} + +func (x *AddTenantsRequest) ProtoReflect() protoreflect.Message { + mi := &file_api_message_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AddTenantsRequest.ProtoReflect.Descriptor instead. 
+func (*AddTenantsRequest) Descriptor() ([]byte, []int) { + return file_api_message_proto_rawDescGZIP(), []int{10} +} + +func (x *AddTenantsRequest) GetClusterNodes() []string { + if x != nil { + return x.ClusterNodes + } + return nil +} + +func (x *AddTenantsRequest) GetTenants() []*Tenant { + if x != nil { + return x.Tenants + } + return nil +} + +type UpdateTenantsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Tenants []*Tenant `protobuf:"bytes,1,rep,name=tenants,proto3" json:"tenants,omitempty"` + ClusterNodes []string `protobuf:"bytes,2,rep,name=cluster_nodes,json=clusterNodes,proto3" json:"cluster_nodes,omitempty"` + ImplicitUpdateRequest bool `protobuf:"varint,3,opt,name=implicit_update_request,json=implicitUpdateRequest,proto3" json:"implicit_update_request,omitempty"` +} + +func (x *UpdateTenantsRequest) Reset() { + *x = UpdateTenantsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_api_message_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateTenantsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateTenantsRequest) ProtoMessage() {} + +func (x *UpdateTenantsRequest) ProtoReflect() protoreflect.Message { + mi := &file_api_message_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateTenantsRequest.ProtoReflect.Descriptor instead. 
+func (*UpdateTenantsRequest) Descriptor() ([]byte, []int) { + return file_api_message_proto_rawDescGZIP(), []int{11} +} + +func (x *UpdateTenantsRequest) GetTenants() []*Tenant { + if x != nil { + return x.Tenants + } + return nil +} + +func (x *UpdateTenantsRequest) GetClusterNodes() []string { + if x != nil { + return x.ClusterNodes + } + return nil +} + +func (x *UpdateTenantsRequest) GetImplicitUpdateRequest() bool { + if x != nil { + return x.ImplicitUpdateRequest + } + return false +} + +type TenantsProcess struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Op TenantsProcess_Op `protobuf:"varint,1,opt,name=op,proto3,enum=weaviate.internal.cluster.TenantsProcess_Op" json:"op,omitempty"` + Tenant *Tenant `protobuf:"bytes,2,opt,name=tenant,proto3" json:"tenant,omitempty"` +} + +func (x *TenantsProcess) Reset() { + *x = TenantsProcess{} + if protoimpl.UnsafeEnabled { + mi := &file_api_message_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TenantsProcess) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TenantsProcess) ProtoMessage() {} + +func (x *TenantsProcess) ProtoReflect() protoreflect.Message { + mi := &file_api_message_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TenantsProcess.ProtoReflect.Descriptor instead. 
+func (*TenantsProcess) Descriptor() ([]byte, []int) { + return file_api_message_proto_rawDescGZIP(), []int{12} +} + +func (x *TenantsProcess) GetOp() TenantsProcess_Op { + if x != nil { + return x.Op + } + return TenantsProcess_OP_UNSPECIFIED +} + +func (x *TenantsProcess) GetTenant() *Tenant { + if x != nil { + return x.Tenant + } + return nil +} + +type TenantProcessRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Node string `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` + Action TenantProcessRequest_Action `protobuf:"varint,2,opt,name=action,proto3,enum=weaviate.internal.cluster.TenantProcessRequest_Action" json:"action,omitempty"` + TenantsProcesses []*TenantsProcess `protobuf:"bytes,3,rep,name=tenants_processes,json=tenantsProcesses,proto3" json:"tenants_processes,omitempty"` +} + +func (x *TenantProcessRequest) Reset() { + *x = TenantProcessRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_api_message_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TenantProcessRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TenantProcessRequest) ProtoMessage() {} + +func (x *TenantProcessRequest) ProtoReflect() protoreflect.Message { + mi := &file_api_message_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TenantProcessRequest.ProtoReflect.Descriptor instead. 
+func (*TenantProcessRequest) Descriptor() ([]byte, []int) { + return file_api_message_proto_rawDescGZIP(), []int{13} +} + +func (x *TenantProcessRequest) GetNode() string { + if x != nil { + return x.Node + } + return "" +} + +func (x *TenantProcessRequest) GetAction() TenantProcessRequest_Action { + if x != nil { + return x.Action + } + return TenantProcessRequest_ACTION_UNSPECIFIED +} + +func (x *TenantProcessRequest) GetTenantsProcesses() []*TenantsProcess { + if x != nil { + return x.TenantsProcesses + } + return nil +} + +type DeleteTenantsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Tenants []string `protobuf:"bytes,1,rep,name=tenants,proto3" json:"tenants,omitempty"` +} + +func (x *DeleteTenantsRequest) Reset() { + *x = DeleteTenantsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_api_message_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteTenantsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteTenantsRequest) ProtoMessage() {} + +func (x *DeleteTenantsRequest) ProtoReflect() protoreflect.Message { + mi := &file_api_message_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteTenantsRequest.ProtoReflect.Descriptor instead. 
+func (*DeleteTenantsRequest) Descriptor() ([]byte, []int) { + return file_api_message_proto_rawDescGZIP(), []int{14} +} + +func (x *DeleteTenantsRequest) GetTenants() []string { + if x != nil { + return x.Tenants + } + return nil +} + +type Tenant struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Status string `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"` +} + +func (x *Tenant) Reset() { + *x = Tenant{} + if protoimpl.UnsafeEnabled { + mi := &file_api_message_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Tenant) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Tenant) ProtoMessage() {} + +func (x *Tenant) ProtoReflect() protoreflect.Message { + mi := &file_api_message_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Tenant.ProtoReflect.Descriptor instead. 
+func (*Tenant) Descriptor() ([]byte, []int) { + return file_api_message_proto_rawDescGZIP(), []int{15} +} + +func (x *Tenant) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Tenant) GetStatus() string { + if x != nil { + return x.Status + } + return "" +} + +type AddDistributedTaskRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` + Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` + Payload []byte `protobuf:"bytes,4,opt,name=payload,proto3" json:"payload,omitempty"` + SubmittedAtUnixMillis int64 `protobuf:"varint,5,opt,name=submitted_at_unix_millis,json=submittedAtUnixMillis,proto3" json:"submitted_at_unix_millis,omitempty"` +} + +func (x *AddDistributedTaskRequest) Reset() { + *x = AddDistributedTaskRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_api_message_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AddDistributedTaskRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AddDistributedTaskRequest) ProtoMessage() {} + +func (x *AddDistributedTaskRequest) ProtoReflect() protoreflect.Message { + mi := &file_api_message_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AddDistributedTaskRequest.ProtoReflect.Descriptor instead. 
+func (*AddDistributedTaskRequest) Descriptor() ([]byte, []int) { + return file_api_message_proto_rawDescGZIP(), []int{16} +} + +func (x *AddDistributedTaskRequest) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *AddDistributedTaskRequest) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *AddDistributedTaskRequest) GetPayload() []byte { + if x != nil { + return x.Payload + } + return nil +} + +func (x *AddDistributedTaskRequest) GetSubmittedAtUnixMillis() int64 { + if x != nil { + return x.SubmittedAtUnixMillis + } + return 0 +} + +type RecordDistributedTaskNodeCompletionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` + Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` + Version uint64 `protobuf:"varint,3,opt,name=version,proto3" json:"version,omitempty"` + NodeId string `protobuf:"bytes,4,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` + Error *string `protobuf:"bytes,5,opt,name=error,proto3,oneof" json:"error,omitempty"` + FinishedAtUnixMillis int64 `protobuf:"varint,6,opt,name=finished_at_unix_millis,json=finishedAtUnixMillis,proto3" json:"finished_at_unix_millis,omitempty"` +} + +func (x *RecordDistributedTaskNodeCompletionRequest) Reset() { + *x = RecordDistributedTaskNodeCompletionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_api_message_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RecordDistributedTaskNodeCompletionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RecordDistributedTaskNodeCompletionRequest) ProtoMessage() {} + +func (x *RecordDistributedTaskNodeCompletionRequest) ProtoReflect() protoreflect.Message { + mi := &file_api_message_proto_msgTypes[17] + if 
protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RecordDistributedTaskNodeCompletionRequest.ProtoReflect.Descriptor instead. +func (*RecordDistributedTaskNodeCompletionRequest) Descriptor() ([]byte, []int) { + return file_api_message_proto_rawDescGZIP(), []int{17} +} + +func (x *RecordDistributedTaskNodeCompletionRequest) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *RecordDistributedTaskNodeCompletionRequest) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *RecordDistributedTaskNodeCompletionRequest) GetVersion() uint64 { + if x != nil { + return x.Version + } + return 0 +} + +func (x *RecordDistributedTaskNodeCompletionRequest) GetNodeId() string { + if x != nil { + return x.NodeId + } + return "" +} + +func (x *RecordDistributedTaskNodeCompletionRequest) GetError() string { + if x != nil && x.Error != nil { + return *x.Error + } + return "" +} + +func (x *RecordDistributedTaskNodeCompletionRequest) GetFinishedAtUnixMillis() int64 { + if x != nil { + return x.FinishedAtUnixMillis + } + return 0 +} + +type CancelDistributedTaskRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` + Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` + Version uint64 `protobuf:"varint,3,opt,name=version,proto3" json:"version,omitempty"` + CancelledAtUnixMillis int64 `protobuf:"varint,6,opt,name=cancelled_at_unix_millis,json=cancelledAtUnixMillis,proto3" json:"cancelled_at_unix_millis,omitempty"` +} + +func (x *CancelDistributedTaskRequest) Reset() { + *x = CancelDistributedTaskRequest{} + if protoimpl.UnsafeEnabled { + mi := 
&file_api_message_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CancelDistributedTaskRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CancelDistributedTaskRequest) ProtoMessage() {} + +func (x *CancelDistributedTaskRequest) ProtoReflect() protoreflect.Message { + mi := &file_api_message_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CancelDistributedTaskRequest.ProtoReflect.Descriptor instead. +func (*CancelDistributedTaskRequest) Descriptor() ([]byte, []int) { + return file_api_message_proto_rawDescGZIP(), []int{18} +} + +func (x *CancelDistributedTaskRequest) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *CancelDistributedTaskRequest) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *CancelDistributedTaskRequest) GetVersion() uint64 { + if x != nil { + return x.Version + } + return 0 +} + +func (x *CancelDistributedTaskRequest) GetCancelledAtUnixMillis() int64 { + if x != nil { + return x.CancelledAtUnixMillis + } + return 0 +} + +type CleanUpDistributedTaskRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` + Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` + Version uint64 `protobuf:"varint,3,opt,name=version,proto3" json:"version,omitempty"` +} + +func (x *CleanUpDistributedTaskRequest) Reset() { + *x = CleanUpDistributedTaskRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_api_message_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func 
(x *CleanUpDistributedTaskRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CleanUpDistributedTaskRequest) ProtoMessage() {} + +func (x *CleanUpDistributedTaskRequest) ProtoReflect() protoreflect.Message { + mi := &file_api_message_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CleanUpDistributedTaskRequest.ProtoReflect.Descriptor instead. +func (*CleanUpDistributedTaskRequest) Descriptor() ([]byte, []int) { + return file_api_message_proto_rawDescGZIP(), []int{19} +} + +func (x *CleanUpDistributedTaskRequest) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *CleanUpDistributedTaskRequest) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *CleanUpDistributedTaskRequest) GetVersion() uint64 { + if x != nil { + return x.Version + } + return 0 +} + +type SyncShardRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Collection string `protobuf:"bytes,1,opt,name=collection,proto3" json:"collection,omitempty"` + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + NodeId string `protobuf:"bytes,3,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` +} + +func (x *SyncShardRequest) Reset() { + *x = SyncShardRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_api_message_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SyncShardRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SyncShardRequest) ProtoMessage() {} + +func (x *SyncShardRequest) ProtoReflect() protoreflect.Message { + mi := &file_api_message_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + 
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SyncShardRequest.ProtoReflect.Descriptor instead. +func (*SyncShardRequest) Descriptor() ([]byte, []int) { + return file_api_message_proto_rawDescGZIP(), []int{20} +} + +func (x *SyncShardRequest) GetCollection() string { + if x != nil { + return x.Collection + } + return "" +} + +func (x *SyncShardRequest) GetShard() string { + if x != nil { + return x.Shard + } + return "" +} + +func (x *SyncShardRequest) GetNodeId() string { + if x != nil { + return x.NodeId + } + return "" +} + +type CreateAliasRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Collection string `protobuf:"bytes,1,opt,name=collection,proto3" json:"collection,omitempty"` + Alias string `protobuf:"bytes,2,opt,name=alias,proto3" json:"alias,omitempty"` +} + +func (x *CreateAliasRequest) Reset() { + *x = CreateAliasRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_api_message_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateAliasRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateAliasRequest) ProtoMessage() {} + +func (x *CreateAliasRequest) ProtoReflect() protoreflect.Message { + mi := &file_api_message_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateAliasRequest.ProtoReflect.Descriptor instead. 
+func (*CreateAliasRequest) Descriptor() ([]byte, []int) { + return file_api_message_proto_rawDescGZIP(), []int{21} +} + +func (x *CreateAliasRequest) GetCollection() string { + if x != nil { + return x.Collection + } + return "" +} + +func (x *CreateAliasRequest) GetAlias() string { + if x != nil { + return x.Alias + } + return "" +} + +type ReplaceAliasRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Collection string `protobuf:"bytes,1,opt,name=collection,proto3" json:"collection,omitempty"` + Alias string `protobuf:"bytes,2,opt,name=alias,proto3" json:"alias,omitempty"` +} + +func (x *ReplaceAliasRequest) Reset() { + *x = ReplaceAliasRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_api_message_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReplaceAliasRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReplaceAliasRequest) ProtoMessage() {} + +func (x *ReplaceAliasRequest) ProtoReflect() protoreflect.Message { + mi := &file_api_message_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReplaceAliasRequest.ProtoReflect.Descriptor instead. 
+func (*ReplaceAliasRequest) Descriptor() ([]byte, []int) { + return file_api_message_proto_rawDescGZIP(), []int{22} +} + +func (x *ReplaceAliasRequest) GetCollection() string { + if x != nil { + return x.Collection + } + return "" +} + +func (x *ReplaceAliasRequest) GetAlias() string { + if x != nil { + return x.Alias + } + return "" +} + +type DeleteAliasRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Alias string `protobuf:"bytes,1,opt,name=alias,proto3" json:"alias,omitempty"` +} + +func (x *DeleteAliasRequest) Reset() { + *x = DeleteAliasRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_api_message_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteAliasRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteAliasRequest) ProtoMessage() {} + +func (x *DeleteAliasRequest) ProtoReflect() protoreflect.Message { + mi := &file_api_message_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteAliasRequest.ProtoReflect.Descriptor instead. 
+func (*DeleteAliasRequest) Descriptor() ([]byte, []int) { + return file_api_message_proto_rawDescGZIP(), []int{23} +} + +func (x *DeleteAliasRequest) GetAlias() string { + if x != nil { + return x.Alias + } + return "" +} + +var File_api_message_proto protoreflect.FileDescriptor + +var file_api_message_proto_rawDesc = []byte{ + 0x0a, 0x11, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x19, 0x77, 0x65, 0x61, 0x76, 0x69, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x22, 0x51, + 0x0a, 0x0f, 0x4a, 0x6f, 0x69, 0x6e, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, + 0x64, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x6f, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x76, 0x6f, 0x74, 0x65, + 0x72, 0x22, 0x2a, 0x0a, 0x10, 0x4a, 0x6f, 0x69, 0x6e, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x22, 0x23, 0x0a, + 0x11, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, + 0x69, 0x64, 0x22, 0x2c, 0x0a, 0x12, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x50, 0x65, 0x65, 0x72, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x22, 0x3d, 0x0a, 0x11, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, + 
0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, + 0x14, 0x0a, 0x12, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x91, 0x0f, 0x0a, 0x0c, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x40, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x77, 0x65, 0x61, 0x76, 0x69, 0x61, 0x74, 0x65, 0x2e, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x54, 0x79, + 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6c, 0x61, 0x73, + 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x18, + 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x75, 0x62, 0x5f, + 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x73, + 0x75, 0x62, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x22, 0xed, 0x0d, 0x0a, 0x04, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, + 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x41, 0x44, 0x44, 0x5f, 0x43, 0x4c, 0x41, 0x53, 0x53, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x5f, 0x43, 0x4c, 0x41, 0x53, + 0x53, 0x10, 0x02, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x45, 0x4c, 0x45, + 0x54, 0x45, 0x5f, 0x43, 0x4c, 0x41, 
0x53, 0x53, 0x10, 0x03, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x52, 0x45, 0x53, 0x54, 0x4f, 0x52, 0x45, 0x5f, 0x43, 0x4c, 0x41, 0x53, 0x53, + 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x41, 0x44, 0x44, 0x5f, 0x50, + 0x52, 0x4f, 0x50, 0x45, 0x52, 0x54, 0x59, 0x10, 0x05, 0x12, 0x1c, 0x0a, 0x18, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x5f, 0x53, 0x48, 0x41, 0x52, 0x44, 0x5f, 0x53, + 0x54, 0x41, 0x54, 0x55, 0x53, 0x10, 0x0a, 0x12, 0x1d, 0x0a, 0x19, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x41, 0x44, 0x44, 0x5f, 0x52, 0x45, 0x50, 0x4c, 0x49, 0x43, 0x41, 0x5f, 0x54, 0x4f, 0x5f, 0x53, + 0x48, 0x41, 0x52, 0x44, 0x10, 0x0b, 0x12, 0x22, 0x0a, 0x1e, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, + 0x45, 0x4c, 0x45, 0x54, 0x45, 0x5f, 0x52, 0x45, 0x50, 0x4c, 0x49, 0x43, 0x41, 0x5f, 0x46, 0x52, + 0x4f, 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x52, 0x44, 0x10, 0x0c, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x41, 0x44, 0x44, 0x5f, 0x54, 0x45, 0x4e, 0x41, 0x4e, 0x54, 0x10, 0x10, 0x12, + 0x16, 0x0a, 0x12, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x5f, 0x54, + 0x45, 0x4e, 0x41, 0x4e, 0x54, 0x10, 0x11, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x5f, 0x54, 0x45, 0x4e, 0x41, 0x4e, 0x54, 0x10, 0x12, 0x12, + 0x17, 0x0a, 0x13, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x54, 0x45, 0x4e, 0x41, 0x4e, 0x54, 0x5f, 0x50, + 0x52, 0x4f, 0x43, 0x45, 0x53, 0x53, 0x10, 0x13, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x5f, 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, 0x28, 0x12, + 0x16, 0x0a, 0x12, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x52, 0x45, 0x50, 0x4c, 0x41, 0x43, 0x45, 0x5f, + 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, 0x29, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x5f, 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, 0x2a, 0x12, 0x21, + 0x0a, 0x1d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x50, 0x53, 0x45, 0x52, 
0x54, 0x5f, 0x52, 0x4f, + 0x4c, 0x45, 0x53, 0x5f, 0x50, 0x45, 0x52, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4f, 0x4e, 0x53, 0x10, + 0x3c, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, + 0x5f, 0x52, 0x4f, 0x4c, 0x45, 0x53, 0x10, 0x3d, 0x12, 0x1b, 0x0a, 0x17, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x52, 0x45, 0x4d, 0x4f, 0x56, 0x45, 0x5f, 0x50, 0x45, 0x52, 0x4d, 0x49, 0x53, 0x53, 0x49, + 0x4f, 0x4e, 0x53, 0x10, 0x3e, 0x12, 0x1b, 0x0a, 0x17, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x41, 0x44, + 0x44, 0x5f, 0x52, 0x4f, 0x4c, 0x45, 0x53, 0x5f, 0x46, 0x4f, 0x52, 0x5f, 0x55, 0x53, 0x45, 0x52, + 0x10, 0x3f, 0x12, 0x1e, 0x0a, 0x1a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x52, 0x45, 0x56, 0x4f, 0x4b, + 0x45, 0x5f, 0x52, 0x4f, 0x4c, 0x45, 0x53, 0x5f, 0x46, 0x4f, 0x52, 0x5f, 0x55, 0x53, 0x45, 0x52, + 0x10, 0x40, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x50, 0x53, 0x45, 0x52, + 0x54, 0x5f, 0x55, 0x53, 0x45, 0x52, 0x10, 0x50, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x5f, 0x55, 0x53, 0x45, 0x52, 0x10, 0x51, 0x12, 0x1c, + 0x0a, 0x18, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x52, 0x4f, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x53, + 0x45, 0x52, 0x5f, 0x41, 0x50, 0x49, 0x5f, 0x4b, 0x45, 0x59, 0x10, 0x52, 0x12, 0x15, 0x0a, 0x11, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x55, 0x53, 0x50, 0x45, 0x4e, 0x44, 0x5f, 0x55, 0x53, 0x45, + 0x52, 0x10, 0x53, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x41, 0x43, 0x54, 0x49, + 0x56, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x53, 0x45, 0x52, 0x10, 0x54, 0x12, 0x1d, 0x0a, 0x19, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x53, 0x45, 0x52, 0x5f, + 0x57, 0x49, 0x54, 0x48, 0x5f, 0x4b, 0x45, 0x59, 0x10, 0x55, 0x12, 0x18, 0x0a, 0x14, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x53, 0x54, 0x4f, 0x52, 0x45, 0x5f, 0x53, 0x43, 0x48, 0x45, 0x4d, 0x41, 0x5f, + 0x56, 0x31, 0x10, 0x63, 0x12, 0x1f, 0x0a, 0x1a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x52, 0x45, 0x50, + 0x4c, 0x49, 
0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x45, 0x50, 0x4c, 0x49, 0x43, 0x41, + 0x54, 0x45, 0x10, 0xc8, 0x01, 0x12, 0x2c, 0x0a, 0x27, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x52, 0x45, + 0x50, 0x4c, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x45, 0x50, 0x4c, 0x49, 0x43, + 0x41, 0x54, 0x45, 0x5f, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, + 0x10, 0xc9, 0x01, 0x12, 0x2e, 0x0a, 0x29, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x52, 0x45, 0x50, 0x4c, + 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x45, 0x50, 0x4c, 0x49, 0x43, 0x41, 0x54, + 0x45, 0x5f, 0x52, 0x45, 0x47, 0x49, 0x53, 0x54, 0x45, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, + 0x10, 0xca, 0x01, 0x12, 0x26, 0x0a, 0x21, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x52, 0x45, 0x50, 0x4c, + 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x45, 0x50, 0x4c, 0x49, 0x43, 0x41, 0x54, + 0x45, 0x5f, 0x43, 0x41, 0x4e, 0x43, 0x45, 0x4c, 0x10, 0xcb, 0x01, 0x12, 0x26, 0x0a, 0x21, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x52, 0x45, 0x50, 0x4c, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, + 0x52, 0x45, 0x50, 0x4c, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, + 0x10, 0xcc, 0x01, 0x12, 0x26, 0x0a, 0x21, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x52, 0x45, 0x50, 0x4c, + 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x45, 0x50, 0x4c, 0x49, 0x43, 0x41, 0x54, + 0x45, 0x5f, 0x52, 0x45, 0x4d, 0x4f, 0x56, 0x45, 0x10, 0xcd, 0x01, 0x12, 0x35, 0x0a, 0x30, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x52, 0x45, 0x50, 0x4c, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, + 0x52, 0x45, 0x50, 0x4c, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x43, 0x41, 0x4e, 0x43, 0x45, 0x4c, + 0x4c, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, 0x10, + 0xce, 0x01, 0x12, 0x2a, 0x0a, 0x25, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x52, 0x45, 0x50, 0x4c, 0x49, + 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x45, 0x50, 0x4c, 0x49, 0x43, 0x41, 0x54, 0x45, + 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x5f, 
0x41, 0x4c, 0x4c, 0x10, 0xcf, 0x01, 0x12, 0x34, + 0x0a, 0x2f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x52, 0x45, 0x50, 0x4c, 0x49, 0x43, 0x41, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x52, 0x45, 0x50, 0x4c, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x44, 0x45, 0x4c, + 0x45, 0x54, 0x45, 0x5f, 0x42, 0x59, 0x5f, 0x43, 0x4f, 0x4c, 0x4c, 0x45, 0x43, 0x54, 0x49, 0x4f, + 0x4e, 0x10, 0xd0, 0x01, 0x12, 0x31, 0x0a, 0x2c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x52, 0x45, 0x50, + 0x4c, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x45, 0x50, 0x4c, 0x49, 0x43, 0x41, + 0x54, 0x45, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x5f, 0x42, 0x59, 0x5f, 0x54, 0x45, 0x4e, + 0x41, 0x4e, 0x54, 0x53, 0x10, 0xd1, 0x01, 0x12, 0x2a, 0x0a, 0x25, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x52, 0x45, 0x50, 0x4c, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x45, 0x50, 0x4c, + 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x53, 0x59, 0x4e, 0x43, 0x5f, 0x53, 0x48, 0x41, 0x52, 0x44, + 0x10, 0xd2, 0x01, 0x12, 0x2d, 0x0a, 0x28, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x52, 0x45, 0x50, 0x4c, + 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x45, 0x47, 0x49, 0x53, 0x54, 0x45, 0x52, + 0x5f, 0x53, 0x43, 0x48, 0x45, 0x4d, 0x41, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x10, + 0xd3, 0x01, 0x12, 0x34, 0x0a, 0x2f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x52, 0x45, 0x50, 0x4c, 0x49, + 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x45, 0x50, 0x4c, 0x49, 0x43, 0x41, 0x54, 0x45, + 0x5f, 0x41, 0x44, 0x44, 0x5f, 0x52, 0x45, 0x50, 0x4c, 0x49, 0x43, 0x41, 0x5f, 0x54, 0x4f, 0x5f, + 0x53, 0x48, 0x41, 0x52, 0x44, 0x10, 0xd4, 0x01, 0x12, 0x30, 0x0a, 0x2b, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x52, 0x45, 0x50, 0x4c, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x45, 0x50, + 0x4c, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x46, 0x4f, 0x52, 0x43, 0x45, 0x5f, 0x44, 0x45, 0x4c, + 0x45, 0x54, 0x45, 0x5f, 0x41, 0x4c, 0x4c, 0x10, 0xdc, 0x01, 0x12, 0x3a, 0x0a, 0x35, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x52, 0x45, 0x50, 0x4c, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 
0x5f, 0x52, + 0x45, 0x50, 0x4c, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x46, 0x4f, 0x52, 0x43, 0x45, 0x5f, 0x44, + 0x45, 0x4c, 0x45, 0x54, 0x45, 0x5f, 0x42, 0x59, 0x5f, 0x43, 0x4f, 0x4c, 0x4c, 0x45, 0x43, 0x54, + 0x49, 0x4f, 0x4e, 0x10, 0xdd, 0x01, 0x12, 0x44, 0x0a, 0x3f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x52, + 0x45, 0x50, 0x4c, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x45, 0x50, 0x4c, 0x49, + 0x43, 0x41, 0x54, 0x45, 0x5f, 0x46, 0x4f, 0x52, 0x43, 0x45, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, + 0x45, 0x5f, 0x42, 0x59, 0x5f, 0x43, 0x4f, 0x4c, 0x4c, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, + 0x41, 0x4e, 0x44, 0x5f, 0x53, 0x48, 0x41, 0x52, 0x44, 0x10, 0xde, 0x01, 0x12, 0x3b, 0x0a, 0x36, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x52, 0x45, 0x50, 0x4c, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, + 0x5f, 0x52, 0x45, 0x50, 0x4c, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x46, 0x4f, 0x52, 0x43, 0x45, + 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x5f, 0x42, 0x59, 0x5f, 0x54, 0x41, 0x52, 0x47, 0x45, + 0x54, 0x5f, 0x4e, 0x4f, 0x44, 0x45, 0x10, 0xdf, 0x01, 0x12, 0x34, 0x0a, 0x2f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x52, 0x45, 0x50, 0x4c, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x45, + 0x50, 0x4c, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x46, 0x4f, 0x52, 0x43, 0x45, 0x5f, 0x44, 0x45, + 0x4c, 0x45, 0x54, 0x45, 0x5f, 0x42, 0x59, 0x5f, 0x55, 0x55, 0x49, 0x44, 0x10, 0xe0, 0x01, 0x12, + 0x1e, 0x0a, 0x19, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x49, 0x53, 0x54, 0x52, 0x49, 0x42, 0x55, + 0x54, 0x45, 0x44, 0x5f, 0x54, 0x41, 0x53, 0x4b, 0x5f, 0x41, 0x44, 0x44, 0x10, 0xac, 0x02, 0x12, + 0x21, 0x0a, 0x1c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x49, 0x53, 0x54, 0x52, 0x49, 0x42, 0x55, + 0x54, 0x45, 0x44, 0x5f, 0x54, 0x41, 0x53, 0x4b, 0x5f, 0x43, 0x41, 0x4e, 0x43, 0x45, 0x4c, 0x10, + 0xad, 0x02, 0x12, 0x30, 0x0a, 0x2b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x49, 0x53, 0x54, 0x52, + 0x49, 0x42, 0x55, 0x54, 0x45, 0x44, 0x5f, 0x54, 0x41, 0x53, 0x4b, 0x5f, 0x52, 0x45, 0x43, 0x4f, + 0x52, 0x44, 0x5f, 0x4e, 
0x4f, 0x44, 0x45, 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, + 0x44, 0x10, 0xae, 0x02, 0x12, 0x23, 0x0a, 0x1e, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x49, 0x53, + 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x44, 0x5f, 0x54, 0x41, 0x53, 0x4b, 0x5f, 0x43, 0x4c, + 0x45, 0x41, 0x4e, 0x5f, 0x55, 0x50, 0x10, 0xaf, 0x02, 0x22, 0x41, 0x0a, 0x0d, 0x41, 0x70, 0x70, + 0x6c, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x22, 0xeb, 0x07, 0x0a, + 0x0c, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x40, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x77, 0x65, + 0x61, 0x76, 0x69, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, + 0x1f, 0x0a, 0x0b, 0x73, 0x75, 0x62, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x73, 0x75, 0x62, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, + 0x22, 0xf7, 0x06, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, + 0x14, 0x0a, 0x10, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x45, 0x54, 0x5f, 0x43, 0x4c, 0x41, 0x53, + 0x53, 0x45, 0x53, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x45, + 0x54, 0x5f, 0x53, 0x43, 0x48, 0x45, 0x4d, 0x41, 0x10, 0x02, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x45, 0x4e, 
0x41, 0x4e, 0x54, 0x53, 0x10, 0x03, + 0x12, 0x18, 0x0a, 0x14, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x45, 0x54, 0x5f, 0x53, 0x48, 0x41, + 0x52, 0x44, 0x5f, 0x4f, 0x57, 0x4e, 0x45, 0x52, 0x10, 0x04, 0x12, 0x1b, 0x0a, 0x17, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x45, 0x4e, 0x41, 0x4e, 0x54, 0x53, 0x5f, 0x53, + 0x48, 0x41, 0x52, 0x44, 0x53, 0x10, 0x05, 0x12, 0x1b, 0x0a, 0x17, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x47, 0x45, 0x54, 0x5f, 0x53, 0x48, 0x41, 0x52, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x53, 0x54, 0x41, + 0x54, 0x45, 0x10, 0x06, 0x12, 0x1b, 0x0a, 0x17, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x45, 0x54, + 0x5f, 0x43, 0x4c, 0x41, 0x53, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x53, 0x10, + 0x07, 0x12, 0x1e, 0x0a, 0x1a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x45, 0x54, 0x5f, 0x43, 0x4f, + 0x4c, 0x4c, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x53, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x10, + 0x08, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x48, 0x41, 0x53, 0x5f, 0x50, 0x45, + 0x52, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4f, 0x4e, 0x10, 0x1e, 0x12, 0x12, 0x0a, 0x0e, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x47, 0x45, 0x54, 0x5f, 0x52, 0x4f, 0x4c, 0x45, 0x53, 0x10, 0x1f, 0x12, 0x1b, + 0x0a, 0x17, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x45, 0x54, 0x5f, 0x52, 0x4f, 0x4c, 0x45, 0x53, + 0x5f, 0x46, 0x4f, 0x52, 0x5f, 0x55, 0x53, 0x45, 0x52, 0x10, 0x20, 0x12, 0x1b, 0x0a, 0x17, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x47, 0x45, 0x54, 0x5f, 0x55, 0x53, 0x45, 0x52, 0x53, 0x5f, 0x46, 0x4f, + 0x52, 0x5f, 0x52, 0x4f, 0x4c, 0x45, 0x10, 0x21, 0x12, 0x27, 0x0a, 0x23, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x47, 0x45, 0x54, 0x5f, 0x55, 0x53, 0x45, 0x52, 0x53, 0x5f, 0x4f, 0x52, 0x5f, 0x47, 0x52, + 0x4f, 0x55, 0x50, 0x53, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x52, 0x4f, 0x4c, 0x45, 0x53, 0x10, + 0x22, 0x12, 0x12, 0x0a, 0x0e, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x45, 0x54, 0x5f, 0x55, 0x53, + 0x45, 0x52, 0x53, 0x10, 0x3d, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x53, + 
0x45, 0x52, 0x5f, 0x49, 0x44, 0x45, 0x4e, 0x54, 0x49, 0x46, 0x49, 0x45, 0x52, 0x5f, 0x45, 0x58, + 0x49, 0x53, 0x54, 0x53, 0x10, 0x3e, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x52, + 0x45, 0x53, 0x4f, 0x4c, 0x56, 0x45, 0x5f, 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, 0x64, 0x12, 0x14, + 0x0a, 0x10, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x45, 0x54, 0x5f, 0x41, 0x4c, 0x49, 0x41, 0x53, + 0x45, 0x53, 0x10, 0x65, 0x12, 0x21, 0x0a, 0x1c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x45, 0x54, + 0x5f, 0x52, 0x45, 0x50, 0x4c, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x44, 0x45, 0x54, + 0x41, 0x49, 0x4c, 0x53, 0x10, 0xc8, 0x01, 0x12, 0x2f, 0x0a, 0x2a, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x47, 0x45, 0x54, 0x5f, 0x52, 0x45, 0x50, 0x4c, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, + 0x44, 0x45, 0x54, 0x41, 0x49, 0x4c, 0x53, 0x5f, 0x42, 0x59, 0x5f, 0x43, 0x4f, 0x4c, 0x4c, 0x45, + 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0xc9, 0x01, 0x12, 0x39, 0x0a, 0x34, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x47, 0x45, 0x54, 0x5f, 0x52, 0x45, 0x50, 0x4c, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, + 0x5f, 0x44, 0x45, 0x54, 0x41, 0x49, 0x4c, 0x53, 0x5f, 0x42, 0x59, 0x5f, 0x43, 0x4f, 0x4c, 0x4c, + 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x53, 0x48, 0x41, 0x52, 0x44, + 0x10, 0xca, 0x01, 0x12, 0x30, 0x0a, 0x2b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x45, 0x54, 0x5f, + 0x52, 0x45, 0x50, 0x4c, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x44, 0x45, 0x54, 0x41, + 0x49, 0x4c, 0x53, 0x5f, 0x42, 0x59, 0x5f, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x4e, 0x4f, + 0x44, 0x45, 0x10, 0xcb, 0x01, 0x12, 0x2a, 0x0a, 0x25, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x45, + 0x54, 0x5f, 0x53, 0x48, 0x41, 0x52, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, + 0x5f, 0x42, 0x59, 0x5f, 0x43, 0x4f, 0x4c, 0x4c, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0xcc, + 0x01, 0x12, 0x34, 0x0a, 0x2f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x45, 0x54, 0x5f, 0x53, 0x48, + 0x41, 0x52, 0x44, 0x49, 0x4e, 0x47, 
0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x42, 0x59, 0x5f, + 0x43, 0x4f, 0x4c, 0x4c, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x53, + 0x48, 0x41, 0x52, 0x44, 0x10, 0xcd, 0x01, 0x12, 0x25, 0x0a, 0x20, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x47, 0x45, 0x54, 0x5f, 0x41, 0x4c, 0x4c, 0x5f, 0x52, 0x45, 0x50, 0x4c, 0x49, 0x43, 0x41, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x44, 0x45, 0x54, 0x41, 0x49, 0x4c, 0x53, 0x10, 0xce, 0x01, 0x12, 0x29, + 0x0a, 0x24, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x45, 0x54, 0x5f, 0x52, 0x45, 0x50, 0x4c, 0x49, + 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4f, 0x50, 0x45, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, + 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x10, 0xcf, 0x01, 0x12, 0x1f, 0x0a, 0x1a, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x44, 0x49, 0x53, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x44, 0x5f, 0x54, 0x41, + 0x53, 0x4b, 0x5f, 0x4c, 0x49, 0x53, 0x54, 0x10, 0xac, 0x02, 0x22, 0x29, 0x0a, 0x0d, 0x51, 0x75, + 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, + 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x61, + 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x75, 0x0a, 0x11, 0x41, 0x64, 0x64, 0x54, 0x65, 0x6e, 0x61, + 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x12, + 0x3b, 0x0a, 0x07, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x21, 0x2e, 0x77, 0x65, 0x61, 0x76, 0x69, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x54, 0x65, 0x6e, + 0x61, 0x6e, 0x74, 0x52, 0x07, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x73, 0x22, 0xb0, 0x01, 0x0a, + 0x14, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x65, 0x6e, 0x61, 0x6e, 
0x74, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3b, 0x0a, 0x07, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x77, 0x65, 0x61, 0x76, 0x69, 0x61, 0x74, + 0x65, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x2e, 0x54, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x52, 0x07, 0x74, 0x65, 0x6e, 0x61, 0x6e, + 0x74, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x6f, + 0x64, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x36, 0x0a, 0x17, 0x69, 0x6d, 0x70, 0x6c, 0x69, + 0x63, 0x69, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x69, 0x6d, 0x70, 0x6c, 0x69, 0x63, + 0x69, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, + 0xcc, 0x01, 0x0a, 0x0e, 0x54, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x73, 0x50, 0x72, 0x6f, 0x63, 0x65, + 0x73, 0x73, 0x12, 0x3c, 0x0a, 0x02, 0x6f, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, + 0x2e, 0x77, 0x65, 0x61, 0x76, 0x69, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x6c, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x54, 0x65, 0x6e, 0x61, 0x6e, + 0x74, 0x73, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x2e, 0x4f, 0x70, 0x52, 0x02, 0x6f, 0x70, + 0x12, 0x39, 0x0a, 0x06, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x21, 0x2e, 0x77, 0x65, 0x61, 0x76, 0x69, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x54, 0x65, 0x6e, + 0x61, 0x6e, 0x74, 0x52, 0x06, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x22, 0x41, 0x0a, 0x02, 0x4f, + 0x70, 0x12, 0x12, 0x0a, 0x0e, 0x4f, 0x50, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, + 0x49, 0x45, 
0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x4f, 0x50, 0x5f, 0x53, 0x54, 0x41, 0x52, + 0x54, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x4f, 0x50, 0x5f, 0x44, 0x4f, 0x4e, 0x45, 0x10, 0x02, + 0x12, 0x0c, 0x0a, 0x08, 0x4f, 0x50, 0x5f, 0x41, 0x42, 0x4f, 0x52, 0x54, 0x10, 0x03, 0x22, 0xa0, + 0x02, 0x0a, 0x14, 0x54, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x12, 0x4e, 0x0a, 0x06, 0x61, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x77, 0x65, + 0x61, 0x76, 0x69, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x54, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x50, 0x72, + 0x6f, 0x63, 0x65, 0x73, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x41, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x56, 0x0a, 0x11, 0x74, + 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x73, 0x5f, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x73, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x77, 0x65, 0x61, 0x76, 0x69, 0x61, 0x74, + 0x65, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x2e, 0x54, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x73, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, + 0x73, 0x52, 0x10, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x73, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, + 0x73, 0x65, 0x73, 0x22, 0x4c, 0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, + 0x12, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, + 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, + 0x46, 0x52, 0x45, 0x45, 0x5a, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x41, 0x43, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x46, 
0x52, 0x45, 0x45, 0x5a, 0x49, 0x4e, 0x47, 0x10, + 0x02, 0x22, 0x30, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x65, 0x6e, 0x61, 0x6e, + 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x65, 0x6e, + 0x61, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x74, 0x65, 0x6e, 0x61, + 0x6e, 0x74, 0x73, 0x22, 0x34, 0x0a, 0x06, 0x54, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x9c, 0x01, 0x0a, 0x19, 0x41, 0x64, + 0x64, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x64, 0x54, 0x61, 0x73, 0x6b, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, + 0x37, 0x0a, 0x18, 0x73, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x5f, + 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6d, 0x69, 0x6c, 0x6c, 0x69, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x15, 0x73, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x41, 0x74, 0x55, 0x6e, + 0x69, 0x78, 0x4d, 0x69, 0x6c, 0x6c, 0x69, 0x73, 0x22, 0xe9, 0x01, 0x0a, 0x2a, 0x52, 0x65, 0x63, + 0x6f, 0x72, 0x64, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x64, 0x54, 0x61, + 0x73, 0x6b, 0x4e, 0x6f, 0x64, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 
0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, + 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, + 0x88, 0x01, 0x01, 0x12, 0x35, 0x0a, 0x17, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64, 0x5f, + 0x61, 0x74, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6d, 0x69, 0x6c, 0x6c, 0x69, 0x73, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x14, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64, 0x41, 0x74, + 0x55, 0x6e, 0x69, 0x78, 0x4d, 0x69, 0x6c, 0x6c, 0x69, 0x73, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x22, 0x9f, 0x01, 0x0a, 0x1c, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x44, + 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x64, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x02, 0x69, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, + 0x18, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x6c, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x5f, 0x75, 0x6e, + 0x69, 0x78, 0x5f, 0x6d, 0x69, 0x6c, 0x6c, 0x69, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x15, 0x63, 0x61, 0x6e, 
0x63, 0x65, 0x6c, 0x6c, 0x65, 0x64, 0x41, 0x74, 0x55, 0x6e, 0x69, 0x78, + 0x4d, 0x69, 0x6c, 0x6c, 0x69, 0x73, 0x22, 0x67, 0x0a, 0x1d, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x55, + 0x70, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x64, 0x54, 0x61, 0x73, 0x6b, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, + 0x61, 0x0a, 0x10, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, + 0x65, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, + 0x49, 0x64, 0x22, 0x4a, 0x0a, 0x12, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x6c, 0x69, 0x61, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, + 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x4b, + 0x0a, 0x13, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 
0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x2a, 0x0a, 0x12, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x14, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x32, 0x8d, 0x04, 0x0a, 0x0e, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x6b, 0x0a, 0x0a, 0x52, 0x65, + 0x6d, 0x6f, 0x76, 0x65, 0x50, 0x65, 0x65, 0x72, 0x12, 0x2c, 0x2e, 0x77, 0x65, 0x61, 0x76, 0x69, + 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x50, 0x65, 0x65, 0x72, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x77, 0x65, 0x61, 0x76, 0x69, 0x61, 0x74, + 0x65, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x65, 0x0a, 0x08, 0x4a, 0x6f, 0x69, 0x6e, 0x50, + 0x65, 0x65, 0x72, 0x12, 0x2a, 0x2e, 0x77, 0x65, 0x61, 0x76, 0x69, 0x61, 0x74, 0x65, 0x2e, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, + 0x4a, 0x6f, 0x69, 0x6e, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x2b, 0x2e, 0x77, 0x65, 0x61, 0x76, 0x69, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4a, 0x6f, 0x69, 0x6e, + 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6b, + 
0x0a, 0x0a, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x50, 0x65, 0x65, 0x72, 0x12, 0x2c, 0x2e, 0x77, + 0x65, 0x61, 0x76, 0x69, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, + 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x50, + 0x65, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x77, 0x65, 0x61, + 0x76, 0x69, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x50, 0x65, 0x65, + 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5c, 0x0a, 0x05, 0x41, + 0x70, 0x70, 0x6c, 0x79, 0x12, 0x27, 0x2e, 0x77, 0x65, 0x61, 0x76, 0x69, 0x61, 0x74, 0x65, 0x2e, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, + 0x77, 0x65, 0x61, 0x76, 0x69, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5c, 0x0a, 0x05, 0x51, 0x75, 0x65, + 0x72, 0x79, 0x12, 0x27, 0x2e, 0x77, 0x65, 0x61, 0x76, 0x69, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x51, + 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x77, 0x65, + 0x61, 0x76, 0x69, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0xe1, 0x01, 0x0a, 0x1d, 0x63, 0x6f, 0x6d, 0x2e, + 0x77, 0x65, 0x61, 0x76, 0x69, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x2e, 0x63, 0x6c, 0x75, 0x73, 
0x74, 0x65, 0x72, 0x42, 0x0c, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x77, 0x65, 0x61, 0x76, 0x69, 0x61, 0x74, 0x65, 0x2f, 0x77, + 0x65, 0x61, 0x76, 0x69, 0x61, 0x74, 0x65, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2f, 0x61, 0x70, 0x69, 0xa2, 0x02, 0x03, 0x57, 0x49, 0x43, 0xaa, 0x02, 0x19, + 0x57, 0x65, 0x61, 0x76, 0x69, 0x61, 0x74, 0x65, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0xca, 0x02, 0x19, 0x57, 0x65, 0x61, 0x76, + 0x69, 0x61, 0x74, 0x65, 0x5c, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5c, 0x43, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0xe2, 0x02, 0x25, 0x57, 0x65, 0x61, 0x76, 0x69, 0x61, 0x74, 0x65, + 0x5c, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x1b, + 0x57, 0x65, 0x61, 0x76, 0x69, 0x61, 0x74, 0x65, 0x3a, 0x3a, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x6c, 0x3a, 0x3a, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var ( + file_api_message_proto_rawDescOnce sync.Once + file_api_message_proto_rawDescData = file_api_message_proto_rawDesc +) + +func file_api_message_proto_rawDescGZIP() []byte { + file_api_message_proto_rawDescOnce.Do(func() { + file_api_message_proto_rawDescData = protoimpl.X.CompressGZIP(file_api_message_proto_rawDescData) + }) + return file_api_message_proto_rawDescData +} + +var ( + file_api_message_proto_enumTypes = make([]protoimpl.EnumInfo, 4) + file_api_message_proto_msgTypes = make([]protoimpl.MessageInfo, 24) + file_api_message_proto_goTypes = []interface{}{ + (ApplyRequest_Type)(0), // 0: weaviate.internal.cluster.ApplyRequest.Type + (QueryRequest_Type)(0), // 1: weaviate.internal.cluster.QueryRequest.Type + 
(TenantsProcess_Op)(0), // 2: weaviate.internal.cluster.TenantsProcess.Op + (TenantProcessRequest_Action)(0), // 3: weaviate.internal.cluster.TenantProcessRequest.Action + (*JoinPeerRequest)(nil), // 4: weaviate.internal.cluster.JoinPeerRequest + (*JoinPeerResponse)(nil), // 5: weaviate.internal.cluster.JoinPeerResponse + (*RemovePeerRequest)(nil), // 6: weaviate.internal.cluster.RemovePeerRequest + (*RemovePeerResponse)(nil), // 7: weaviate.internal.cluster.RemovePeerResponse + (*NotifyPeerRequest)(nil), // 8: weaviate.internal.cluster.NotifyPeerRequest + (*NotifyPeerResponse)(nil), // 9: weaviate.internal.cluster.NotifyPeerResponse + (*ApplyRequest)(nil), // 10: weaviate.internal.cluster.ApplyRequest + (*ApplyResponse)(nil), // 11: weaviate.internal.cluster.ApplyResponse + (*QueryRequest)(nil), // 12: weaviate.internal.cluster.QueryRequest + (*QueryResponse)(nil), // 13: weaviate.internal.cluster.QueryResponse + (*AddTenantsRequest)(nil), // 14: weaviate.internal.cluster.AddTenantsRequest + (*UpdateTenantsRequest)(nil), // 15: weaviate.internal.cluster.UpdateTenantsRequest + (*TenantsProcess)(nil), // 16: weaviate.internal.cluster.TenantsProcess + (*TenantProcessRequest)(nil), // 17: weaviate.internal.cluster.TenantProcessRequest + (*DeleteTenantsRequest)(nil), // 18: weaviate.internal.cluster.DeleteTenantsRequest + (*Tenant)(nil), // 19: weaviate.internal.cluster.Tenant + (*AddDistributedTaskRequest)(nil), // 20: weaviate.internal.cluster.AddDistributedTaskRequest + (*RecordDistributedTaskNodeCompletionRequest)(nil), // 21: weaviate.internal.cluster.RecordDistributedTaskNodeCompletionRequest + (*CancelDistributedTaskRequest)(nil), // 22: weaviate.internal.cluster.CancelDistributedTaskRequest + (*CleanUpDistributedTaskRequest)(nil), // 23: weaviate.internal.cluster.CleanUpDistributedTaskRequest + (*SyncShardRequest)(nil), // 24: weaviate.internal.cluster.SyncShardRequest + (*CreateAliasRequest)(nil), // 25: weaviate.internal.cluster.CreateAliasRequest + 
(*ReplaceAliasRequest)(nil), // 26: weaviate.internal.cluster.ReplaceAliasRequest + (*DeleteAliasRequest)(nil), // 27: weaviate.internal.cluster.DeleteAliasRequest + } +) + +var file_api_message_proto_depIdxs = []int32{ + 0, // 0: weaviate.internal.cluster.ApplyRequest.type:type_name -> weaviate.internal.cluster.ApplyRequest.Type + 1, // 1: weaviate.internal.cluster.QueryRequest.type:type_name -> weaviate.internal.cluster.QueryRequest.Type + 19, // 2: weaviate.internal.cluster.AddTenantsRequest.tenants:type_name -> weaviate.internal.cluster.Tenant + 19, // 3: weaviate.internal.cluster.UpdateTenantsRequest.tenants:type_name -> weaviate.internal.cluster.Tenant + 2, // 4: weaviate.internal.cluster.TenantsProcess.op:type_name -> weaviate.internal.cluster.TenantsProcess.Op + 19, // 5: weaviate.internal.cluster.TenantsProcess.tenant:type_name -> weaviate.internal.cluster.Tenant + 3, // 6: weaviate.internal.cluster.TenantProcessRequest.action:type_name -> weaviate.internal.cluster.TenantProcessRequest.Action + 16, // 7: weaviate.internal.cluster.TenantProcessRequest.tenants_processes:type_name -> weaviate.internal.cluster.TenantsProcess + 6, // 8: weaviate.internal.cluster.ClusterService.RemovePeer:input_type -> weaviate.internal.cluster.RemovePeerRequest + 4, // 9: weaviate.internal.cluster.ClusterService.JoinPeer:input_type -> weaviate.internal.cluster.JoinPeerRequest + 8, // 10: weaviate.internal.cluster.ClusterService.NotifyPeer:input_type -> weaviate.internal.cluster.NotifyPeerRequest + 10, // 11: weaviate.internal.cluster.ClusterService.Apply:input_type -> weaviate.internal.cluster.ApplyRequest + 12, // 12: weaviate.internal.cluster.ClusterService.Query:input_type -> weaviate.internal.cluster.QueryRequest + 7, // 13: weaviate.internal.cluster.ClusterService.RemovePeer:output_type -> weaviate.internal.cluster.RemovePeerResponse + 5, // 14: weaviate.internal.cluster.ClusterService.JoinPeer:output_type -> weaviate.internal.cluster.JoinPeerResponse + 9, // 15: 
weaviate.internal.cluster.ClusterService.NotifyPeer:output_type -> weaviate.internal.cluster.NotifyPeerResponse + 11, // 16: weaviate.internal.cluster.ClusterService.Apply:output_type -> weaviate.internal.cluster.ApplyResponse + 13, // 17: weaviate.internal.cluster.ClusterService.Query:output_type -> weaviate.internal.cluster.QueryResponse + 13, // [13:18] is the sub-list for method output_type + 8, // [8:13] is the sub-list for method input_type + 8, // [8:8] is the sub-list for extension type_name + 8, // [8:8] is the sub-list for extension extendee + 0, // [0:8] is the sub-list for field type_name +} + +func init() { file_api_message_proto_init() } +func file_api_message_proto_init() { + if File_api_message_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_api_message_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*JoinPeerRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_message_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*JoinPeerResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_message_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RemovePeerRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_message_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RemovePeerResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_message_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NotifyPeerRequest); i { + case 0: + return &v.state + case 1: + return 
&v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_message_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NotifyPeerResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_message_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ApplyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_message_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ApplyResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_message_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QueryRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_message_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QueryResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_message_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AddTenantsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_message_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateTenantsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_message_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*TenantsProcess); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_message_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TenantProcessRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_message_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteTenantsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_message_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Tenant); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_message_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AddDistributedTaskRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_message_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RecordDistributedTaskNodeCompletionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_message_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CancelDistributedTaskRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_message_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CleanUpDistributedTaskRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + 
default: + return nil + } + } + file_api_message_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SyncShardRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_message_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateAliasRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_message_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReplaceAliasRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_message_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteAliasRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_api_message_proto_msgTypes[17].OneofWrappers = []interface{}{} + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_api_message_proto_rawDesc, + NumEnums: 4, + NumMessages: 24, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_api_message_proto_goTypes, + DependencyIndexes: file_api_message_proto_depIdxs, + EnumInfos: file_api_message_proto_enumTypes, + MessageInfos: file_api_message_proto_msgTypes, + }.Build() + File_api_message_proto = out.File + file_api_message_proto_rawDesc = nil + file_api_message_proto_goTypes = nil + file_api_message_proto_depIdxs = nil +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/proto/api/message.proto b/platform/dbops/binaries/weaviate-src/cluster/proto/api/message.proto new file mode 100644 index 
0000000000000000000000000000000000000000..dbdfe54103d8211e0b26b335e84005ff034b0ea8 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/proto/api/message.proto @@ -0,0 +1,246 @@ +syntax = "proto3"; + +// NOTE run `buf generate` from `cluster/proto` to regenerate code +package weaviate.internal.cluster; + +service ClusterService { + rpc RemovePeer(RemovePeerRequest) returns (RemovePeerResponse) {} + rpc JoinPeer(JoinPeerRequest) returns (JoinPeerResponse) {} + rpc NotifyPeer(NotifyPeerRequest) returns (NotifyPeerResponse) {} + rpc Apply(ApplyRequest) returns (ApplyResponse) {} + rpc Query(QueryRequest) returns (QueryResponse) {} +} + +message JoinPeerRequest { + string id = 1; + string address = 2; + bool voter = 3; +} + +message JoinPeerResponse { + string leader = 1; +} + +message RemovePeerRequest { + string id = 1; +} +message RemovePeerResponse { + string leader = 1; +} + +message NotifyPeerRequest { + string id = 1; + string address = 2; +} + +message NotifyPeerResponse { +} + +message ApplyRequest { + enum Type { + TYPE_UNSPECIFIED = 0; + TYPE_ADD_CLASS = 1; + TYPE_UPDATE_CLASS = 2; + TYPE_DELETE_CLASS = 3; + TYPE_RESTORE_CLASS = 4; + TYPE_ADD_PROPERTY = 5; + + TYPE_UPDATE_SHARD_STATUS = 10; + TYPE_ADD_REPLICA_TO_SHARD = 11; + TYPE_DELETE_REPLICA_FROM_SHARD = 12; + + TYPE_ADD_TENANT = 16; + TYPE_UPDATE_TENANT = 17; + TYPE_DELETE_TENANT = 18; + TYPE_TENANT_PROCESS = 19; + + TYPE_CREATE_ALIAS = 40; + TYPE_REPLACE_ALIAS = 41; + TYPE_DELETE_ALIAS = 42; + + TYPE_UPSERT_ROLES_PERMISSIONS = 60; + TYPE_DELETE_ROLES = 61; + TYPE_REMOVE_PERMISSIONS = 62; + TYPE_ADD_ROLES_FOR_USER = 63; + TYPE_REVOKE_ROLES_FOR_USER = 64; + + TYPE_UPSERT_USER = 80; + TYPE_DELETE_USER = 81; + TYPE_ROTATE_USER_API_KEY = 82; + TYPE_SUSPEND_USER = 83; + TYPE_ACTIVATE_USER = 84; + TYPE_CREATE_USER_WITH_KEY = 85; + + TYPE_STORE_SCHEMA_V1 = 99; + + TYPE_REPLICATION_REPLICATE = 200; + TYPE_REPLICATION_REPLICATE_UPDATE_STATE = 201; + TYPE_REPLICATION_REPLICATE_REGISTER_ERROR = 202; + 
TYPE_REPLICATION_REPLICATE_CANCEL = 203; + TYPE_REPLICATION_REPLICATE_DELETE = 204; + TYPE_REPLICATION_REPLICATE_REMOVE = 205; + TYPE_REPLICATION_REPLICATE_CANCELLATION_COMPLETE = 206; + TYPE_REPLICATION_REPLICATE_DELETE_ALL = 207; + TYPE_REPLICATION_REPLICATE_DELETE_BY_COLLECTION = 208; + TYPE_REPLICATION_REPLICATE_DELETE_BY_TENANTS = 209; + TYPE_REPLICATION_REPLICATE_SYNC_SHARD = 210; + TYPE_REPLICATION_REGISTER_SCHEMA_VERSION = 211; + TYPE_REPLICATION_REPLICATE_ADD_REPLICA_TO_SHARD = 212; + + TYPE_REPLICATION_REPLICATE_FORCE_DELETE_ALL = 220; + TYPE_REPLICATION_REPLICATE_FORCE_DELETE_BY_COLLECTION = 221; + TYPE_REPLICATION_REPLICATE_FORCE_DELETE_BY_COLLECTION_AND_SHARD = 222; + TYPE_REPLICATION_REPLICATE_FORCE_DELETE_BY_TARGET_NODE = 223; + TYPE_REPLICATION_REPLICATE_FORCE_DELETE_BY_UUID = 224; + + TYPE_DISTRIBUTED_TASK_ADD = 300; + TYPE_DISTRIBUTED_TASK_CANCEL = 301; + TYPE_DISTRIBUTED_TASK_RECORD_NODE_COMPLETED = 302; + TYPE_DISTRIBUTED_TASK_CLEAN_UP = 303; + } + Type type = 1; + string class = 2; + uint64 version = 3; + bytes sub_command = 4; +} + +message ApplyResponse { + uint64 version = 1; + string leader = 2; +} + +message QueryRequest { + enum Type { + TYPE_UNSPECIFIED = 0; + TYPE_GET_CLASSES = 1; + TYPE_GET_SCHEMA = 2; + TYPE_GET_TENANTS = 3; + TYPE_GET_SHARD_OWNER = 4; + TYPE_GET_TENANTS_SHARDS = 5; + TYPE_GET_SHARDING_STATE = 6; + TYPE_GET_CLASS_VERSIONS = 7; + TYPE_GET_COLLECTIONS_COUNT = 8; + + TYPE_HAS_PERMISSION = 30; + TYPE_GET_ROLES = 31; + TYPE_GET_ROLES_FOR_USER = 32; + TYPE_GET_USERS_FOR_ROLE= 33; + TYPE_GET_USERS_OR_GROUPS_WITH_ROLES = 34; + + TYPE_GET_USERS = 61; + TYPE_USER_IDENTIFIER_EXISTS = 62; + + TYPE_RESOLVE_ALIAS = 100; + TYPE_GET_ALIASES = 101; + + TYPE_GET_REPLICATION_DETAILS = 200; + TYPE_GET_REPLICATION_DETAILS_BY_COLLECTION = 201; + TYPE_GET_REPLICATION_DETAILS_BY_COLLECTION_AND_SHARD = 202; + TYPE_GET_REPLICATION_DETAILS_BY_TARGET_NODE = 203; + TYPE_GET_SHARDING_STATE_BY_COLLECTION = 204; + 
TYPE_GET_SHARDING_STATE_BY_COLLECTION_AND_SHARD = 205; + TYPE_GET_ALL_REPLICATION_DETAILS = 206; + TYPE_GET_REPLICATION_OPERATION_STATE = 207; + + TYPE_DISTRIBUTED_TASK_LIST = 300; + } + + Type type = 1; + bytes sub_command = 2; +} + +message QueryResponse { + bytes payload = 1; +} + +message AddTenantsRequest { + repeated string cluster_nodes = 1; + repeated Tenant tenants = 2; +} + +message UpdateTenantsRequest { + repeated Tenant tenants = 1; + repeated string cluster_nodes = 2; + bool implicit_update_request = 3; +} + +message TenantsProcess { + enum Op { + OP_UNSPECIFIED = 0; + OP_START = 1; + OP_DONE = 2; + OP_ABORT = 3; + } + + Op op = 1; + Tenant tenant =2; +} + +message TenantProcessRequest { + enum Action { + ACTION_UNSPECIFIED = 0; + ACTION_FREEZING = 1; + ACTION_UNFREEZING = 2; + } + string node = 1; + Action action = 2; + repeated TenantsProcess tenants_processes = 3; +} + +message DeleteTenantsRequest { + repeated string tenants = 1; +} + +message Tenant { + string name = 1; + string status = 2; +} + +message AddDistributedTaskRequest { + string namespace = 1; + string id = 2; + bytes payload = 4; + int64 submitted_at_unix_millis = 5; +} + +message RecordDistributedTaskNodeCompletionRequest { + string namespace = 1; + string id = 2; + uint64 version = 3; + string node_id = 4; + optional string error = 5; + int64 finished_at_unix_millis = 6; +} + +message CancelDistributedTaskRequest { + string namespace = 1; + string id = 2; + uint64 version = 3; + int64 cancelled_at_unix_millis = 6; +} + +message CleanUpDistributedTaskRequest { + string namespace = 1; + string id = 2; + uint64 version = 3; +} + +message SyncShardRequest { + string collection = 1; + string shard = 2; + string node_id = 3; +} + +message CreateAliasRequest { + string collection = 1; + string alias = 2; +} + +message ReplaceAliasRequest { + string collection = 1; + string alias = 2; +} + +message DeleteAliasRequest { + string alias = 1; +} diff --git 
a/platform/dbops/binaries/weaviate-src/cluster/proto/api/message_grpc.pb.go b/platform/dbops/binaries/weaviate-src/cluster/proto/api/message_grpc.pb.go new file mode 100644 index 0000000000000000000000000000000000000000..fb441d4da0b5b2f09410860af8c2dd858277e252 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/proto/api/message_grpc.pb.go @@ -0,0 +1,247 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc (unknown) +// source: api/message.proto + +package api + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// ClusterServiceClient is the client API for ClusterService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
+type ClusterServiceClient interface { + RemovePeer(ctx context.Context, in *RemovePeerRequest, opts ...grpc.CallOption) (*RemovePeerResponse, error) + JoinPeer(ctx context.Context, in *JoinPeerRequest, opts ...grpc.CallOption) (*JoinPeerResponse, error) + NotifyPeer(ctx context.Context, in *NotifyPeerRequest, opts ...grpc.CallOption) (*NotifyPeerResponse, error) + Apply(ctx context.Context, in *ApplyRequest, opts ...grpc.CallOption) (*ApplyResponse, error) + Query(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (*QueryResponse, error) +} + +type clusterServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewClusterServiceClient(cc grpc.ClientConnInterface) ClusterServiceClient { + return &clusterServiceClient{cc} +} + +func (c *clusterServiceClient) RemovePeer(ctx context.Context, in *RemovePeerRequest, opts ...grpc.CallOption) (*RemovePeerResponse, error) { + out := new(RemovePeerResponse) + err := c.cc.Invoke(ctx, "/weaviate.internal.cluster.ClusterService/RemovePeer", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) JoinPeer(ctx context.Context, in *JoinPeerRequest, opts ...grpc.CallOption) (*JoinPeerResponse, error) { + out := new(JoinPeerResponse) + err := c.cc.Invoke(ctx, "/weaviate.internal.cluster.ClusterService/JoinPeer", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) NotifyPeer(ctx context.Context, in *NotifyPeerRequest, opts ...grpc.CallOption) (*NotifyPeerResponse, error) { + out := new(NotifyPeerResponse) + err := c.cc.Invoke(ctx, "/weaviate.internal.cluster.ClusterService/NotifyPeer", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) Apply(ctx context.Context, in *ApplyRequest, opts ...grpc.CallOption) (*ApplyResponse, error) { + out := new(ApplyResponse) + err := c.cc.Invoke(ctx, "/weaviate.internal.cluster.ClusterService/Apply", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterServiceClient) Query(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (*QueryResponse, error) { + out := new(QueryResponse) + err := c.cc.Invoke(ctx, "/weaviate.internal.cluster.ClusterService/Query", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ClusterServiceServer is the server API for ClusterService service. +// All implementations should embed UnimplementedClusterServiceServer +// for forward compatibility +type ClusterServiceServer interface { + RemovePeer(context.Context, *RemovePeerRequest) (*RemovePeerResponse, error) + JoinPeer(context.Context, *JoinPeerRequest) (*JoinPeerResponse, error) + NotifyPeer(context.Context, *NotifyPeerRequest) (*NotifyPeerResponse, error) + Apply(context.Context, *ApplyRequest) (*ApplyResponse, error) + Query(context.Context, *QueryRequest) (*QueryResponse, error) +} + +// UnimplementedClusterServiceServer should be embedded to have forward compatible implementations. 
+type UnimplementedClusterServiceServer struct { +} + +func (UnimplementedClusterServiceServer) RemovePeer(context.Context, *RemovePeerRequest) (*RemovePeerResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RemovePeer not implemented") +} +func (UnimplementedClusterServiceServer) JoinPeer(context.Context, *JoinPeerRequest) (*JoinPeerResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method JoinPeer not implemented") +} +func (UnimplementedClusterServiceServer) NotifyPeer(context.Context, *NotifyPeerRequest) (*NotifyPeerResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method NotifyPeer not implemented") +} +func (UnimplementedClusterServiceServer) Apply(context.Context, *ApplyRequest) (*ApplyResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Apply not implemented") +} +func (UnimplementedClusterServiceServer) Query(context.Context, *QueryRequest) (*QueryResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Query not implemented") +} + +// UnsafeClusterServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to ClusterServiceServer will +// result in compilation errors. 
+type UnsafeClusterServiceServer interface { + mustEmbedUnimplementedClusterServiceServer() +} + +func RegisterClusterServiceServer(s grpc.ServiceRegistrar, srv ClusterServiceServer) { + s.RegisterService(&ClusterService_ServiceDesc, srv) +} + +func _ClusterService_RemovePeer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RemovePeerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).RemovePeer(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/weaviate.internal.cluster.ClusterService/RemovePeer", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).RemovePeer(ctx, req.(*RemovePeerRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterService_JoinPeer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(JoinPeerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).JoinPeer(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/weaviate.internal.cluster.ClusterService/JoinPeer", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).JoinPeer(ctx, req.(*JoinPeerRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterService_NotifyPeer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NotifyPeerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).NotifyPeer(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/weaviate.internal.cluster.ClusterService/NotifyPeer", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).NotifyPeer(ctx, req.(*NotifyPeerRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterService_Apply_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ApplyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).Apply(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/weaviate.internal.cluster.ClusterService/Apply", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).Apply(ctx, req.(*ApplyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterService_Query_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServiceServer).Query(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/weaviate.internal.cluster.ClusterService/Query", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServiceServer).Query(ctx, req.(*QueryRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// ClusterService_ServiceDesc is the grpc.ServiceDesc for ClusterService service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var ClusterService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "weaviate.internal.cluster.ClusterService", + HandlerType: (*ClusterServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "RemovePeer", + Handler: _ClusterService_RemovePeer_Handler, + }, + { + MethodName: "JoinPeer", + Handler: _ClusterService_JoinPeer_Handler, + }, + { + MethodName: "NotifyPeer", + Handler: _ClusterService_NotifyPeer_Handler, + }, + { + MethodName: "Apply", + Handler: _ClusterService_Apply_Handler, + }, + { + MethodName: "Query", + Handler: _ClusterService_Query_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "api/message.proto", +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/proto/api/rbac_requests.go b/platform/dbops/binaries/weaviate-src/cluster/proto/api/rbac_requests.go new file mode 100644 index 0000000000000000000000000000000000000000..482962159a2091ca9c226d12e73ade78dee57260 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/proto/api/rbac_requests.go @@ -0,0 +1,123 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package api + +import ( + "github.com/weaviate/weaviate/usecases/auth/authentication" + "github.com/weaviate/weaviate/usecases/auth/authorization" +) + +const ( + // NOTE: in case changes happens to the RBAC message, add new version before the RBACLatestCommandPolicyVersion + // RBACCommandPolicyVersionV0 represents the first version of RBAC commands where it wasn't set and equal 0 + // this version was needed because we did migrate paths of SchemaDomain to limit the collection + // old "schema/collections/{collection_name}/shards/*" all shards in collection + // new "schema/collections/{collection_name}/shards/#" limited to collection only + RBACCommandPolicyVersionV0 = iota + + // this version was needed because we did migrate verbs of RolesDomain to control the scope + // of Role permissions and default to MATCH scope instead of ALL + // old verb was (C)|(R)|(U)|(D) + // new verb was MATCH + RBACCommandPolicyVersionV1 + + // this version was needed because we did flatten manage_roles to C+U+D_roles + RBACCommandPolicyVersionV2 + // this version was needed because assign_and_revoke_users was saved with verb UPDATE. However with dynamic user + // management we need a special permission to update users + RBACCommandPolicyVersionV3 + + // RBACLatestCommandPolicyVersion represents the latest version of RBAC commands policies + // It's used to migrate policy changes. if we end up with a cluster having different version + // that won't be a problem because the version here is not about the message change but more about + // the content of the body which will dumbed anyway in RBAC storage. 
+ RBACLatestCommandPolicyVersion +) + +const ( + RBACAssignRevokeCommandPolicyVersionV0 = iota + RBACAssignRevokeLatestCommandPolicyVersion +) + +type CreateRolesRequest struct { + Roles map[string][]authorization.Policy + RoleCreation bool + Version int +} + +type DeleteRolesRequest struct { + Roles []string +} + +type RemovePermissionsRequest struct { + Role string + Permissions []*authorization.Policy + Version int +} + +type AddRolesForUsersRequest struct { + User string + Roles []string + Version int +} + +type RevokeRolesForUserRequest struct { + User string + Roles []string + Version int +} + +type QueryHasPermissionRequest struct { + Role string + Permission *authorization.Policy +} + +type QueryHasPermissionResponse struct { + HasPermission bool +} + +type QueryGetAllUsersOrGroupsWithRolesRequest struct { + IsGroup bool + AuthType authentication.AuthType +} + +type QueryGetAllUsersOrGroupsWithRolesResponse struct { + UsersOrGroups []string +} + +type QueryGetRolesRequest struct { + Roles []string +} + +type QueryGetRolesResponse struct { + Roles map[string][]authorization.Policy +} + +type QueryGetRolesForUserOrGroupRequest struct { + User string + UserType authentication.AuthType + IsGroup bool +} + +type QueryGetRolesForUserOrGroupResponse struct { + Roles map[string][]authorization.Policy +} + +type QueryGetUsersForRoleRequest struct { + Role string + UserType authentication.AuthType + IsGroup bool +} + +type QueryGetUsersForRoleResponse struct { + Users []string +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/proto/api/schema_requests.go b/platform/dbops/binaries/weaviate-src/cluster/proto/api/schema_requests.go new file mode 100644 index 0000000000000000000000000000000000000000..6a5d14abb25849a32bc8a104953f71d3cae13dfb --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/proto/api/schema_requests.go @@ -0,0 +1,135 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| 
|\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package api + +import ( + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/versioned" + "github.com/weaviate/weaviate/usecases/sharding" +) + +type AddClassRequest struct { + Class *models.Class + State *sharding.State +} + +type UpdateClassRequest struct { + Class *models.Class + State *sharding.State +} + +type AddPropertyRequest struct { + Properties []*models.Property +} + +type DeleteClassRequest struct { + Name string +} + +type UpdateShardStatusRequest struct { + Class, Shard, Status string + SchemaVersion uint64 +} + +type AddReplicaToShard struct { + Class, Shard, TargetNode string + SchemaVersion uint64 +} + +type DeleteReplicaFromShard struct { + Class, Shard, TargetNode string + SchemaVersion uint64 +} + +type QueryReadOnlyClassesRequest struct { + Classes []string +} + +type QueryReadOnlyClassResponse struct { + Classes map[string]versioned.Class +} + +type QueryTenantsRequest struct { + Class string + Tenants []string // If empty, all tenants are returned +} + +type TenantWithVersion struct { + ShardVersion uint64 + Tenant *models.Tenant +} + +type QueryTenantsResponse struct { + ShardVersion uint64 + Tenants []*models.Tenant +} + +type QuerySchemaResponse struct { + Schema models.Schema +} + +type QueryCollectionsCountResponse struct { + Count int +} + +type QueryShardOwnerRequest struct { + Class, Shard string +} + +type QueryShardOwnerResponse struct { + ShardVersion uint64 + Owner string +} + +type QueryTenantsShardsRequest struct { + Class string + Tenants []string +} + +type QueryTenantsShardsResponse struct { + TenantsActivityStatus map[string]string // map[tenant]status + SchemaVersion uint64 +} + +type QueryShardingStateRequest struct { + Class string +} + +type QueryShardingStateResponse struct { + State *sharding.State + Version 
uint64 +} + +type QueryClassVersionsRequest struct { + Classes []string +} + +type QueryClassVersionsResponse struct { + // Classes is a map of class name to the class version + Classes map[string]uint64 +} + +type QueryResolveAliasRequest struct { + Alias string +} + +type QueryResolveAliasResponse struct { + Class string +} + +type QueryGetAliasesRequest struct { + Alias, Class string +} + +type QueryGetAliasesResponse struct { + Aliases map[string]string +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/proto/api/shard_requests.go b/platform/dbops/binaries/weaviate-src/cluster/proto/api/shard_requests.go new file mode 100644 index 0000000000000000000000000000000000000000..ea5f5f8c052f116e708fe3f6a7bde6e1f6c53833 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/proto/api/shard_requests.go @@ -0,0 +1,222 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package api + +import ( + "github.com/go-openapi/strfmt" +) + +const ( + ReplicationCommandVersionV0 = iota +) + +type ShardReplicationState string + +func (s ShardReplicationState) String() string { + return string(s) +} + +const ( + REGISTERED ShardReplicationState = "REGISTERED" + HYDRATING ShardReplicationState = "HYDRATING" + FINALIZING ShardReplicationState = "FINALIZING" + READY ShardReplicationState = "READY" + DEHYDRATING ShardReplicationState = "DEHYDRATING" + CANCELLED ShardReplicationState = "CANCELLED" // The operation has been cancelled. It cannot be resumed. 
+) + +type ShardReplicationTransferType string + +func (s ShardReplicationTransferType) String() string { + return string(s) +} + +const ( + COPY ShardReplicationTransferType = "COPY" + MOVE ShardReplicationTransferType = "MOVE" +) + +type ReplicationReplicateShardRequest struct { + // Version is the version with which this command was generated + Version int + + Uuid strfmt.UUID + + SourceNode string + SourceCollection string + SourceShard string + TargetNode string + + TransferType string +} + +type ReplicationReplicateShardReponse struct{} + +type ReplicationUpdateOpStateRequest struct { + Version int + + Id uint64 + State ShardReplicationState +} + +type ReplicationUpdateOpStateResponse struct{} + +type ReplicationRegisterErrorRequest struct { + Version int + + Id uint64 + Error string + TimeUnixMs int64 +} + +type ReplicationRegisterErrorResponse struct{} + +type ReplicationRemoveOpRequest struct { + Version int + + Id uint64 +} + +type ReplicationDeleteOpResponse struct{} + +type ReplicationDetailsRequest struct { + Uuid strfmt.UUID +} + +type ReplicationDetailsRequestByCollection struct { + Collection string +} + +type ReplicationDetailsRequestByCollectionAndShard struct { + Collection string + Shard string +} + +type ReplicationDetailsRequestByTargetNode struct { + Node string +} + +type ReplicationDetailsError struct { + Message string + ErroredTimeUnixMs int64 // Unix timestamp in milliseconds when the error occurred +} + +type ReplicationDetailsState struct { + State string + Errors []ReplicationDetailsError + StartTimeUnixMs int64 // Unix timestamp in milliseconds when the state was first entered +} + +type ReplicationDetailsResponse struct { + Uuid strfmt.UUID + Id uint64 + ShardId string + Collection string + SourceNodeId string + TargetNodeId string + + Uncancelable bool + ScheduledForCancel bool + ScheduledForDelete bool + + Status ReplicationDetailsState + StatusHistory []ReplicationDetailsState + TransferType string + StartTimeUnixMs int64 +} + 
+type ReplicationCancelRequest struct { + Version int + Uuid strfmt.UUID +} + +type ReplicationDeleteRequest struct { + Version int + Uuid strfmt.UUID +} + +type ReplicationCancellationCompleteRequest struct { + Version int + Id uint64 +} + +type ReplicationsDeleteByCollectionRequest struct { + Version int + Collection string +} + +type ReplicationsDeleteByTenantsRequest struct { + Version int + Collection string + Tenants []string +} + +type ShardingState struct { + Collection string + Shards map[string][]string +} + +type ReplicationQueryShardingStateByCollectionRequest struct { + Collection string +} + +type ReplicationQueryShardingStateByCollectionAndShardRequest struct { + Collection string + Shard string +} + +type ReplicationDeleteAllRequest struct { + Version int +} + +type ReplicationPurgeRequest struct { + Version int +} + +type ReplicationOperationStateRequest struct { + Id uint64 +} + +type ReplicationOperationStateResponse struct { + State ShardReplicationState +} + +type ReplicationStoreSchemaVersionRequest struct { + Version int + SchemaVersion uint64 + Id uint64 +} + +type ReplicationAddReplicaToShard struct { + OpId uint64 + Class, Shard, TargetNode string + SchemaVersion uint64 +} + +type ReplicationForceDeleteAllRequest struct{} + +type ReplicationForceDeleteByCollectionRequest struct { + Collection string +} + +type ReplicationForceDeleteByCollectionAndShardRequest struct { + Collection string + Shard string +} + +type ReplicationForceDeleteByTargetNodeRequest struct { + Node string +} + +type ReplicationForceDeleteByUuidRequest struct { + Uuid strfmt.UUID +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/proto/buf.gen.yaml b/platform/dbops/binaries/weaviate-src/cluster/proto/buf.gen.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ca78a266fb9ff7afbcd2705297d7fdb8578047e0 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/proto/buf.gen.yaml @@ -0,0 +1,26 @@ + + # _ _ + #__ _____ __ ___ ___ __ _| 
|_ ___ + #\ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ + # \ V V / __/ (_| |\ V /| | (_| | || __/ + # \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| + # + # Copyright © 2016 - 2023 Weaviate B.V. All rights reserved. + # + # CONTACT: hello@weaviate.io + # +version: v1 +managed: + enabled: true + go_package_prefix: + default: github.com/weaviate/weaviate/cloud/proto +plugins: + - name: go + out: . + opt: + - paths=source_relative + - name: go-grpc + out: . + opt: + - paths=source_relative + - require_unimplemented_servers=false \ No newline at end of file diff --git a/platform/dbops/binaries/weaviate-src/cluster/proto/buf.yaml b/platform/dbops/binaries/weaviate-src/cluster/proto/buf.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a7b9a3aeb75858d37a399fecb1bce8ec4264b46e --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/proto/buf.yaml @@ -0,0 +1,27 @@ + # _ _ + #__ _____ __ ___ ___ __ _| |_ ___ + #\ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ + # \ V V / __/ (_| |\ V /| | (_| | || __/ + # \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| + # + # Copyright © 2016 - 2023 Weaviate B.V. All rights reserved. + # + # CONTACT: hello@weaviate.io + # + +version: v1 + +lint: + use: + - DEFAULT + allow_comment_ignores: true + except: + - PACKAGE_DIRECTORY_MATCH + + - PACKAGE_VERSION_SUFFIX + + service_suffix: "" + rpc_allow_google_protobuf_empty_responses: true +breaking: + use: + - FILE diff --git a/platform/dbops/binaries/weaviate-src/cluster/rbac/manager.go b/platform/dbops/binaries/weaviate-src/cluster/rbac/manager.go new file mode 100644 index 0000000000000000000000000000000000000000..4acd022bffa9d0afa8dd7daa2f9d4d0cc2a2791e --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/rbac/manager.go @@ -0,0 +1,293 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. 
All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package rbac + +import ( + "encoding/json" + "errors" + "fmt" + + "github.com/weaviate/weaviate/usecases/auth/authorization/rbac" + + "github.com/sirupsen/logrus" + + "github.com/weaviate/weaviate/cluster/fsm" + cmd "github.com/weaviate/weaviate/cluster/proto/api" + "github.com/weaviate/weaviate/usecases/auth/authorization" + "github.com/weaviate/weaviate/usecases/config" +) + +var ErrBadRequest = errors.New("bad request") + +type Manager struct { + authZ *rbac.Manager + authNconfig config.Authentication + snapshotter fsm.Snapshotter + logger logrus.FieldLogger +} + +func NewManager(authZ *rbac.Manager, authNconfig config.Authentication, snapshotter fsm.Snapshotter, logger logrus.FieldLogger) *Manager { + return &Manager{authZ: authZ, authNconfig: authNconfig, snapshotter: snapshotter, logger: logger} +} + +func (m *Manager) GetRoles(req *cmd.QueryRequest) ([]byte, error) { + if m.authZ == nil { + return json.Marshal(cmd.QueryGetRolesResponse{}) + } + + subCommand := cmd.QueryGetRolesRequest{} + if err := json.Unmarshal(req.SubCommand, &subCommand); err != nil { + return []byte{}, fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + roles, err := m.authZ.GetRoles(subCommand.Roles...) 
+ if err != nil { + return []byte{}, fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + response := cmd.QueryGetRolesResponse{Roles: roles} + payload, err := json.Marshal(response) + if err != nil { + return []byte{}, fmt.Errorf("could not marshal query response: %w", err) + } + return payload, nil +} + +func (m *Manager) GetUsersOrGroupsWithRoles(req *cmd.QueryRequest) ([]byte, error) { + if m.authZ == nil { + payload, _ := json.Marshal(cmd.QueryGetAllUsersOrGroupsWithRolesResponse{}) + return payload, nil + } + subCommand := cmd.QueryGetAllUsersOrGroupsWithRolesRequest{} + if err := json.Unmarshal(req.SubCommand, &subCommand); err != nil { + return []byte{}, fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + usersOrGroups, err := m.authZ.GetUsersOrGroupsWithRoles(subCommand.IsGroup, subCommand.AuthType) + if err != nil { + return []byte{}, fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + response := cmd.QueryGetAllUsersOrGroupsWithRolesResponse{UsersOrGroups: usersOrGroups} + payload, err := json.Marshal(response) + if err != nil { + return []byte{}, fmt.Errorf("could not marshal query response: %w", err) + } + return payload, nil +} + +func (m *Manager) GetRolesForUserOrGroup(req *cmd.QueryRequest) ([]byte, error) { + if m.authZ == nil { + payload, _ := json.Marshal(cmd.QueryGetRolesForUserOrGroupResponse{}) + return payload, nil + } + subCommand := cmd.QueryGetRolesForUserOrGroupRequest{} + if err := json.Unmarshal(req.SubCommand, &subCommand); err != nil { + return []byte{}, fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + roles, err := m.authZ.GetRolesForUserOrGroup(subCommand.User, subCommand.UserType, subCommand.IsGroup) + if err != nil { + return []byte{}, fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + response := cmd.QueryGetRolesForUserOrGroupResponse{Roles: roles} + payload, err := json.Marshal(response) + if err != nil { + return []byte{}, fmt.Errorf("could not marshal query response: %w", err) + } + return payload, nil +} + +func (m *Manager) 
GetUsersForRole(req *cmd.QueryRequest) ([]byte, error) { + if m.authZ == nil { + return json.Marshal(cmd.QueryGetUsersForRoleResponse{}) + } + + subCommand := cmd.QueryGetUsersForRoleRequest{} + if err := json.Unmarshal(req.SubCommand, &subCommand); err != nil { + return []byte{}, fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + users, err := m.authZ.GetUsersOrGroupForRole(subCommand.Role, subCommand.UserType, subCommand.IsGroup) + if err != nil { + return []byte{}, fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + response := cmd.QueryGetUsersForRoleResponse{Users: users} + payload, err := json.Marshal(response) + if err != nil { + return []byte{}, fmt.Errorf("could not marshal query response: %w", err) + } + return payload, nil +} + +func (m *Manager) HasPermission(req *cmd.QueryRequest) ([]byte, error) { + if m.authZ == nil { + return json.Marshal(cmd.QueryHasPermissionResponse{}) + } + + subCommand := cmd.QueryHasPermissionRequest{} + if err := json.Unmarshal(req.SubCommand, &subCommand); err != nil { + return []byte{}, fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + hasPerm, err := m.authZ.HasPermission(subCommand.Role, subCommand.Permission) + if err != nil { + return []byte{}, fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + response := cmd.QueryHasPermissionResponse{HasPermission: hasPerm} + payload, err := json.Marshal(response) + if err != nil { + return []byte{}, fmt.Errorf("could not marshal query response: %w", err) + } + return payload, nil +} + +func (m *Manager) UpsertRolesPermissions(c *cmd.ApplyRequest) error { + if m.authZ == nil { + return nil + } + + req := &cmd.CreateRolesRequest{} + if err := json.Unmarshal(c.SubCommand, req); err != nil { + return fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + // don't allow to create roles if there is already a role present + if req.RoleCreation { + names := make([]string, 0, len(req.Roles)) + for name := range req.Roles { + names = append(names, name) + } + roles, err := m.authZ.GetRoles(names...) 
+ if err != nil { + return err + } + if len(roles) > 0 { + return fmt.Errorf("%w: roles already exist", ErrBadRequest) + } + } + + if req.Version < cmd.RBACLatestCommandPolicyVersion { + for roleName, policies := range req.Roles { + permissions := []*authorization.Policy{} + for _, p := range policies { + permissions = append(permissions, &p) + } + // remove old permissions + if err := m.authZ.RemovePermissions(roleName, permissions); err != nil { + return err + } + } + } + + reqMigrated, err := migrateUpsertRolesPermissions(req) + if err != nil { + return err + } + + return m.authZ.UpdateRolesPermissions(reqMigrated.Roles) // update is upsert, naming is to satisfy interface +} + +func (m *Manager) DeleteRoles(c *cmd.ApplyRequest) error { + if m.authZ == nil { + return nil + } + req := &cmd.DeleteRolesRequest{} + if err := json.Unmarshal(c.SubCommand, req); err != nil { + return fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + return m.authZ.DeleteRoles(req.Roles...) +} + +func (m *Manager) AddRolesForUser(c *cmd.ApplyRequest) error { + if m.authZ == nil { + return nil + } + + req := &cmd.AddRolesForUsersRequest{} + if err := json.Unmarshal(c.SubCommand, req); err != nil { + return fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + reqs := migrateAssignRoles(req, m.authNconfig) + for _, req := range reqs { + if err := m.authZ.AddRolesForUser(req.User, req.Roles); err != nil { + return err + } + } + return nil +} + +func (m *Manager) RemovePermissions(c *cmd.ApplyRequest) error { + if m.authZ == nil { + return nil + } + + req := &cmd.RemovePermissionsRequest{} + if err := json.Unmarshal(c.SubCommand, req); err != nil { + return fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + if req.Version < cmd.RBACLatestCommandPolicyVersion { + if err := m.authZ.RemovePermissions(req.Role, req.Permissions); err != nil { + return err + } + } + + reqMigrated, err := migrateRemovePermissions(req) + if err != nil { + return err + } + + return m.authZ.RemovePermissions(reqMigrated.Role, 
reqMigrated.Permissions) +} + +func (m *Manager) RevokeRolesForUser(c *cmd.ApplyRequest) error { + if m.authZ == nil { + return nil + } + + req := &cmd.RevokeRolesForUserRequest{} + if err := json.Unmarshal(c.SubCommand, req); err != nil { + return fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + reqs := migrateRevokeRoles(req) + for _, req := range reqs { + if err := m.authZ.RevokeRolesForUser(req.User, req.Roles...); err != nil { + return err + } + } + return nil +} + +func (m *Manager) Snapshot() ([]byte, error) { + if m.snapshotter == nil { + return nil, nil + } + return m.snapshotter.Snapshot() +} + +func (m *Manager) Restore(b []byte) error { + if m.snapshotter == nil { + return nil + } + if err := m.snapshotter.Restore(b); err != nil { + return err + } + m.logger.Info("successfully restored rbac from snapshot") + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/rbac/migration.go b/platform/dbops/binaries/weaviate-src/cluster/rbac/migration.go new file mode 100644 index 0000000000000000000000000000000000000000..76d09203a3d347fd4a238ff757d4a1c470217cbc --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/rbac/migration.go @@ -0,0 +1,292 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package rbac + +import ( + "fmt" + "slices" + "strings" + + "github.com/weaviate/weaviate/usecases/auth/authentication" + + "github.com/weaviate/weaviate/usecases/config" + + cmd "github.com/weaviate/weaviate/cluster/proto/api" + "github.com/weaviate/weaviate/usecases/auth/authorization" + "github.com/weaviate/weaviate/usecases/auth/authorization/conv" +) + +func migrateUpsertRolesPermissions(req *cmd.CreateRolesRequest) (*cmd.CreateRolesRequest, error) { + // loop through updates until current version is reached +UPDATE_LOOP: + for { + switch req.Version { + case cmd.RBACCommandPolicyVersionV0: + for roleName, policies := range req.Roles { + // create new permissions + for idx := range policies { + if req.Roles[roleName][idx].Domain == authorization.SchemaDomain { + parts := strings.Split(req.Roles[roleName][idx].Resource, "/") + if len(parts) < 3 { + // shall never happens + return nil, fmt.Errorf("invalid schema path") + } + req.Roles[roleName][idx].Resource = authorization.CollectionsMetadata(parts[2])[0] + } + } + } + case cmd.RBACCommandPolicyVersionV1: + for roleName, policies := range req.Roles { + // create new permissions + for idx := range policies { + if req.Roles[roleName][idx].Domain == authorization.RolesDomain && + req.Roles[roleName][idx].Verb == conv.CRUD { + // this will override any role was created before 1.28 + // to reset default to + req.Roles[roleName][idx].Verb = authorization.ROLE_SCOPE_MATCH + } + } + } + case cmd.RBACCommandPolicyVersionV2: + req.Roles = migrateUpsertRolesPermissionsV2(req.Roles) + case cmd.RBACCommandPolicyVersionV3: + req.Roles = migrateUpsertRolesPermissionsV3(req.Roles) + case cmd.RBACLatestCommandPolicyVersion: + break UPDATE_LOOP + default: + continue + } + req.Version += 1 + } + + return req, nil +} + +func migrateUpsertRolesPermissionsV2(roles map[string][]authorization.Policy) map[string][]authorization.Policy { + for roleName, policies := range roles { + // create new 
permissions + for idx := range policies { + if roles[roleName][idx].Domain != authorization.RolesDomain { + continue + } + + switch roles[roleName][idx].Verb { + default: + continue + + case conv.CRUD: + // replace manage ALL (verb CRUD) with individual CUD permissions + roles[roleName][idx].Verb = authorization.VerbWithScope(authorization.CREATE, authorization.ROLE_SCOPE_ALL) + // new permissions for U+D needed + for _, verb := range []string{authorization.UPDATE, authorization.DELETE} { + newPolicy := authorization.Policy{ + Resource: roles[roleName][idx].Resource, + Verb: authorization.VerbWithScope(verb, authorization.ROLE_SCOPE_ALL), + Domain: roles[roleName][idx].Domain, + } + roles[roleName] = append(roles[roleName], newPolicy) + } + case authorization.ROLE_SCOPE_MATCH: + // replace manage MATCH (verb MATCH) with individual CUD permissions + roles[roleName][idx].Verb = authorization.VerbWithScope(authorization.CREATE, authorization.ROLE_SCOPE_MATCH) + // new permissions for U+D needed + for _, verb := range []string{authorization.UPDATE, authorization.DELETE} { + newPolicy := authorization.Policy{ + Resource: roles[roleName][idx].Resource, + Verb: authorization.VerbWithScope(verb, authorization.ROLE_SCOPE_MATCH), + Domain: roles[roleName][idx].Domain, + } + roles[roleName] = append(roles[roleName], newPolicy) + } + case authorization.READ: + // add scope to read + roles[roleName][idx].Verb = authorization.VerbWithScope(authorization.READ, authorization.ROLE_SCOPE_MATCH) + + } + } + } + return roles +} + +func migrateUpsertRolesPermissionsV3(roles map[string][]authorization.Policy) map[string][]authorization.Policy { + for roleName, policies := range roles { + for idx := range policies { + if roles[roleName][idx].Domain != authorization.UsersDomain { + continue + } + + if roles[roleName][idx].Verb != authorization.UPDATE { + continue + } + + roles[roleName][idx].Verb = authorization.USER_AND_GROUP_ASSIGN_AND_REVOKE + + } + } + return roles +} + +func 
migrateRemovePermissions(req *cmd.RemovePermissionsRequest) (*cmd.RemovePermissionsRequest, error) { + // loop through updates until current version is reached +UPDATE_LOOP: + for { + switch req.Version { + case cmd.RBACCommandPolicyVersionV0: + for idx := range req.Permissions { + if req.Permissions[idx].Domain != authorization.SchemaDomain { + continue + } + parts := strings.Split(req.Permissions[idx].Resource, "/") + if len(parts) < 3 { + // shall never happens + return nil, fmt.Errorf("invalid schema path") + } + req.Permissions[idx].Resource = authorization.CollectionsMetadata(parts[2])[0] + } + case cmd.RBACCommandPolicyVersionV1: + req.Permissions = migrateRemoveRolesPermissionsV1(req.Permissions) + case cmd.RBACCommandPolicyVersionV2: + req.Permissions = migrateRemoveRolesPermissionsV2(req.Permissions) + case cmd.RBACCommandPolicyVersionV3: + req.Permissions = migrateRemoveRolesPermissionsV3(req.Permissions) + case cmd.RBACLatestCommandPolicyVersion: + break UPDATE_LOOP + default: + continue + } + req.Version += 1 + } + + return req, nil +} + +func migrateRemoveRolesPermissionsV1(permissions []*authorization.Policy) []*authorization.Policy { + initialPerms := len(permissions) + for idx := 0; idx < initialPerms; idx++ { + if permissions[idx].Domain == authorization.RolesDomain && permissions[idx].Verb == conv.CRUD { + permissions[idx].Verb = authorization.ROLE_SCOPE_MATCH + } + } + return permissions +} + +func migrateRemoveRolesPermissionsV2(permissions []*authorization.Policy) []*authorization.Policy { + initialPerms := len(permissions) + for idx := 0; idx < initialPerms; idx++ { + if permissions[idx].Domain != authorization.RolesDomain { + continue + } + + switch permissions[idx].Verb { + default: + continue + case conv.CRUD: + // also remove individual CUD permissions for manage_roles with ALL + permissions[idx].Verb = authorization.VerbWithScope(authorization.CREATE, authorization.ROLE_SCOPE_ALL) + // new permissions for U+D needed + for _, verb := 
range []string{authorization.UPDATE, authorization.DELETE} { + newPolicy := &authorization.Policy{ + Resource: permissions[idx].Resource, + Verb: authorization.VerbWithScope(verb, authorization.ROLE_SCOPE_ALL), + Domain: permissions[idx].Domain, + } + permissions = append(permissions, newPolicy) + } + + case authorization.ROLE_SCOPE_MATCH: + // also remove individual CUD permissions for manage_roles with MATCH + permissions[idx].Verb = authorization.VerbWithScope(authorization.CREATE, authorization.ROLE_SCOPE_MATCH) + // new permissions for U+D needed + for _, verb := range []string{authorization.UPDATE, authorization.DELETE} { + newPolicy := &authorization.Policy{ + Resource: permissions[idx].Resource, + Verb: authorization.VerbWithScope(verb, authorization.ROLE_SCOPE_MATCH), + Domain: permissions[idx].Domain, + } + permissions = append(permissions, newPolicy) + } + case authorization.READ: + permissions[idx].Verb = authorization.VerbWithScope(authorization.READ, authorization.ROLE_SCOPE_MATCH) + } + } + return permissions +} + +func migrateRemoveRolesPermissionsV3(permissions []*authorization.Policy) []*authorization.Policy { + initialPerms := len(permissions) + for idx := 0; idx < initialPerms; idx++ { + if permissions[idx].Domain != authorization.UsersDomain { + continue + } + + if permissions[idx].Verb != authorization.UPDATE { + continue + } + + permissions[idx].Verb = authorization.USER_AND_GROUP_ASSIGN_AND_REVOKE + } + return permissions +} + +func migrateRevokeRoles(req *cmd.RevokeRolesForUserRequest) []*cmd.RevokeRolesForUserRequest { + if req.Version == cmd.RBACAssignRevokeCommandPolicyVersionV0 { + return migrateRevokeRolesV0(req) + } + return []*cmd.RevokeRolesForUserRequest{req} +} + +func migrateRevokeRolesV0(req *cmd.RevokeRolesForUserRequest) []*cmd.RevokeRolesForUserRequest { + user, _ := conv.GetUserAndPrefix(req.User) + + req1 := &cmd.RevokeRolesForUserRequest{ + Version: req.Version + 1, + Roles: req.Roles, + User: 
conv.UserNameWithTypeFromId(user, authentication.AuthTypeDb), + } + req2 := &cmd.RevokeRolesForUserRequest{ + Version: req.Version + 1, + Roles: req.Roles, + User: conv.UserNameWithTypeFromId(user, authentication.AuthTypeOIDC), + } + + return []*cmd.RevokeRolesForUserRequest{req1, req2} +} + +func migrateAssignRoles(req *cmd.AddRolesForUsersRequest, authNconfig config.Authentication) []*cmd.AddRolesForUsersRequest { + if req.Version == cmd.RBACAssignRevokeCommandPolicyVersionV0 { + return migrateAssignRolesV0(req, authNconfig) + } + return []*cmd.AddRolesForUsersRequest{req} +} + +func migrateAssignRolesV0(req *cmd.AddRolesForUsersRequest, authNconfig config.Authentication) []*cmd.AddRolesForUsersRequest { + user, _ := conv.GetUserAndPrefix(req.User) + + var reqs []*cmd.AddRolesForUsersRequest + if authNconfig.APIKey.Enabled && slices.Contains(authNconfig.APIKey.Users, user) { + reqs = append(reqs, &cmd.AddRolesForUsersRequest{ + Version: req.Version + 1, + Roles: req.Roles, + User: conv.UserNameWithTypeFromId(user, authentication.AuthTypeDb), + }) + } + + if authNconfig.OIDC.Enabled { + reqs = append(reqs, &cmd.AddRolesForUsersRequest{ + Version: req.Version + 1, + Roles: req.Roles, + User: conv.UserNameWithTypeFromId(user, authentication.AuthTypeOIDC), + }) + } + + return reqs +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/rbac/migration_test.go b/platform/dbops/binaries/weaviate-src/cluster/rbac/migration_test.go new file mode 100644 index 0000000000000000000000000000000000000000..37f619dc4df5edab21815dc367c58fa303f1bdee --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/rbac/migration_test.go @@ -0,0 +1,435 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package rbac + +import ( + "testing" + + "github.com/weaviate/weaviate/usecases/config" + + cmd "github.com/weaviate/weaviate/cluster/proto/api" + + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/usecases/auth/authorization" + "github.com/weaviate/weaviate/usecases/auth/authorization/conv" +) + +func TestMigrationsUpsert(t *testing.T) { + tests := []struct { + name string + input *cmd.CreateRolesRequest + output *cmd.CreateRolesRequest + }{ + { + name: "Only increase version", + input: &cmd.CreateRolesRequest{Version: 0, Roles: map[string][]authorization.Policy{}}, + output: &cmd.CreateRolesRequest{Version: cmd.RBACLatestCommandPolicyVersion, Roles: map[string][]authorization.Policy{}}, + }, + { + name: "Migrate roles from V0 to latest", + input: &cmd.CreateRolesRequest{Version: 0, Roles: map[string][]authorization.Policy{ + "manage": {{Resource: "roles/something", Domain: authorization.RolesDomain, Verb: conv.CRUD}}, + "assign_users": {{Resource: "roles/something", Domain: authorization.UsersDomain, Verb: authorization.UPDATE}}, + }}, + output: &cmd.CreateRolesRequest{ + Version: cmd.RBACLatestCommandPolicyVersion, Roles: map[string][]authorization.Policy{ + "manage": { + {Resource: "roles/something", Domain: authorization.RolesDomain, Verb: authorization.VerbWithScope(authorization.CREATE, authorization.ROLE_SCOPE_MATCH)}, + {Resource: "roles/something", Domain: authorization.RolesDomain, Verb: authorization.VerbWithScope(authorization.UPDATE, authorization.ROLE_SCOPE_MATCH)}, + {Resource: "roles/something", Domain: authorization.RolesDomain, Verb: authorization.VerbWithScope(authorization.DELETE, authorization.ROLE_SCOPE_MATCH)}, + }, + "assign_users": {{Resource: "roles/something", Domain: authorization.UsersDomain, Verb: authorization.USER_AND_GROUP_ASSIGN_AND_REVOKE}}, + }, + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + output, err := 
migrateUpsertRolesPermissions(test.input) + require.NoError(t, err) + require.Equal(t, test.output, output) + }) + } +} + +func TestMigrationUpsertV2(t *testing.T) { + tests := []struct { + name string + input map[string][]authorization.Policy + output map[string][]authorization.Policy + }{ + { + name: "empty policy list", + }, + { + name: "single policy - read without scope", + input: map[string][]authorization.Policy{ + "read": {{Resource: "roles/something", Domain: authorization.RolesDomain, Verb: authorization.READ}}, + }, + output: map[string][]authorization.Policy{ + "read": {{Resource: "roles/something", Domain: authorization.RolesDomain, Verb: authorization.VerbWithScope(authorization.READ, authorization.ROLE_SCOPE_MATCH)}}, + }, + }, + { + name: "single policy - manage with match", + input: map[string][]authorization.Policy{ + "manage": {{Resource: "roles/something", Domain: authorization.RolesDomain, Verb: authorization.ROLE_SCOPE_MATCH}}, + }, + output: map[string][]authorization.Policy{ + "manage": { + {Resource: "roles/something", Domain: authorization.RolesDomain, Verb: authorization.VerbWithScope(authorization.CREATE, authorization.ROLE_SCOPE_MATCH)}, + {Resource: "roles/something", Domain: authorization.RolesDomain, Verb: authorization.VerbWithScope(authorization.UPDATE, authorization.ROLE_SCOPE_MATCH)}, + {Resource: "roles/something", Domain: authorization.RolesDomain, Verb: authorization.VerbWithScope(authorization.DELETE, authorization.ROLE_SCOPE_MATCH)}, + }, + }, + }, + { + name: "single policy - manage with all", + input: map[string][]authorization.Policy{ + "manage": {{Resource: "roles/something", Domain: authorization.RolesDomain, Verb: conv.CRUD}}, + }, + output: map[string][]authorization.Policy{ + "manage": { + {Resource: "roles/something", Domain: authorization.RolesDomain, Verb: authorization.VerbWithScope(authorization.CREATE, authorization.ROLE_SCOPE_ALL)}, + {Resource: "roles/something", Domain: authorization.RolesDomain, Verb: 
authorization.VerbWithScope(authorization.UPDATE, authorization.ROLE_SCOPE_ALL)}, + {Resource: "roles/something", Domain: authorization.RolesDomain, Verb: authorization.VerbWithScope(authorization.DELETE, authorization.ROLE_SCOPE_ALL)}, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + output := migrateUpsertRolesPermissionsV2(test.input) + require.Equal(t, test.output, output) + }) + } +} + +func TestMigrationUpsertV3(t *testing.T) { + tests := []struct { + name string + input map[string][]authorization.Policy + output map[string][]authorization.Policy + }{ + { + name: "empty policy list", + }, + { + name: "single policy - read users: not affected", + input: map[string][]authorization.Policy{ + "read": {{Resource: "users/something", Domain: authorization.UsersDomain, Verb: authorization.READ}}, + }, + output: map[string][]authorization.Policy{ + "read": {{Resource: "users/something", Domain: authorization.UsersDomain, Verb: authorization.READ}}, + }, + }, + { + name: "single policy - update users => change to assign and revoke", + input: map[string][]authorization.Policy{ + "assign": {{Resource: "users/something", Domain: authorization.UsersDomain, Verb: authorization.UPDATE}}, + }, + output: map[string][]authorization.Policy{ + "assign": {{Resource: "users/something", Domain: authorization.UsersDomain, Verb: authorization.USER_AND_GROUP_ASSIGN_AND_REVOKE}}, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + output := migrateUpsertRolesPermissionsV3(test.input) + require.Equal(t, test.output, output) + }) + } +} + +func TestMigrationsRemove(t *testing.T) { + tests := []struct { + name string + input *cmd.RemovePermissionsRequest + output *cmd.RemovePermissionsRequest + }{ + { + name: "Only increase version", + input: &cmd.RemovePermissionsRequest{Version: 0, Permissions: []*authorization.Policy{}}, + output: &cmd.RemovePermissionsRequest{Version: cmd.RBACLatestCommandPolicyVersion, 
Permissions: []*authorization.Policy{}}, + }, + { + name: "Migrate roles from V0 to latest", + input: &cmd.RemovePermissionsRequest{Version: 0, Permissions: []*authorization.Policy{ + {Resource: "roles/something", Domain: authorization.RolesDomain, Verb: conv.CRUD}, + {Resource: "roles/testUserAssign", Domain: authorization.UsersDomain, Verb: authorization.UPDATE}, + }}, + output: &cmd.RemovePermissionsRequest{ + Version: cmd.RBACLatestCommandPolicyVersion, Permissions: []*authorization.Policy{ + {Resource: "roles/something", Domain: authorization.RolesDomain, Verb: authorization.VerbWithScope(authorization.CREATE, authorization.ROLE_SCOPE_MATCH)}, + {Resource: "roles/something", Domain: authorization.RolesDomain, Verb: authorization.VerbWithScope(authorization.UPDATE, authorization.ROLE_SCOPE_MATCH)}, + {Resource: "roles/something", Domain: authorization.RolesDomain, Verb: authorization.VerbWithScope(authorization.DELETE, authorization.ROLE_SCOPE_MATCH)}, + {Resource: "roles/testUserAssign", Domain: authorization.UsersDomain, Verb: authorization.USER_AND_GROUP_ASSIGN_AND_REVOKE}, + }, + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + output, err := migrateRemovePermissions(test.input) + require.NoError(t, err) + + require.Equal(t, test.output.Version, output.Version) + require.Equal(t, test.output.Role, output.Role) + require.ElementsMatch(t, test.output.Permissions, output.Permissions) + }) + } +} + +func TestMigrationRemoveV2(t *testing.T) { + tests := []struct { + name string + input []*authorization.Policy + output []*authorization.Policy + }{ + { + name: "empty policy list", + }, + { + name: "single policy - read without scope", + input: []*authorization.Policy{ + {Resource: "roles/something", Domain: authorization.RolesDomain, Verb: authorization.READ}, + }, + output: []*authorization.Policy{ + {Resource: "roles/something", Domain: authorization.RolesDomain, Verb: authorization.VerbWithScope(authorization.READ, 
authorization.ROLE_SCOPE_MATCH)}, + }, + }, + { + name: "single policy - manage with match", + input: []*authorization.Policy{ + {Resource: "roles/something", Domain: authorization.RolesDomain, Verb: authorization.ROLE_SCOPE_MATCH}, + }, + output: []*authorization.Policy{ + {Resource: "roles/something", Domain: authorization.RolesDomain, Verb: authorization.VerbWithScope(authorization.CREATE, authorization.ROLE_SCOPE_MATCH)}, + {Resource: "roles/something", Domain: authorization.RolesDomain, Verb: authorization.VerbWithScope(authorization.UPDATE, authorization.ROLE_SCOPE_MATCH)}, + {Resource: "roles/something", Domain: authorization.RolesDomain, Verb: authorization.VerbWithScope(authorization.DELETE, authorization.ROLE_SCOPE_MATCH)}, + }, + }, + { + name: "single policy - manage with all", + input: []*authorization.Policy{ + {Resource: "roles/something", Domain: authorization.RolesDomain, Verb: conv.CRUD}, + }, + output: []*authorization.Policy{ + {Resource: "roles/something", Domain: authorization.RolesDomain, Verb: authorization.VerbWithScope(authorization.CREATE, authorization.ROLE_SCOPE_ALL)}, + {Resource: "roles/something", Domain: authorization.RolesDomain, Verb: authorization.VerbWithScope(authorization.UPDATE, authorization.ROLE_SCOPE_ALL)}, + {Resource: "roles/something", Domain: authorization.RolesDomain, Verb: authorization.VerbWithScope(authorization.DELETE, authorization.ROLE_SCOPE_ALL)}, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + output := migrateRemoveRolesPermissionsV2(test.input) + require.Equal(t, test.output, output) + }) + } +} + +func TestMigrationRemoveV32(t *testing.T) { + tests := []struct { + name string + input []*authorization.Policy + output []*authorization.Policy + }{ + { + name: "empty policy list", + }, + { + name: "single policy - assign user as update", + input: []*authorization.Policy{ + {Resource: "roles/something", Domain: authorization.UsersDomain, Verb: authorization.UPDATE}, + }, 
+ output: []*authorization.Policy{ + {Resource: "roles/something", Domain: authorization.UsersDomain, Verb: authorization.USER_AND_GROUP_ASSIGN_AND_REVOKE}, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + output := migrateRemoveRolesPermissionsV3(test.input) + require.Equal(t, test.output, output) + }) + } +} + +func TestMigrationRemoveV1(t *testing.T) { + tests := []struct { + name string + input []*authorization.Policy + output []*authorization.Policy + }{ + { + name: "empty policy list", + }, + { + name: "single policy - CRUD", + input: []*authorization.Policy{ + {Resource: "roles/something", Domain: authorization.RolesDomain, Verb: conv.CRUD}, + }, + output: []*authorization.Policy{ + {Resource: "roles/something", Domain: authorization.RolesDomain, Verb: authorization.ROLE_SCOPE_MATCH}, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + output := migrateRemoveRolesPermissionsV1(test.input) + require.Equal(t, test.output, output) + }) + } +} + +func TestMigrateRevokeRoles(t *testing.T) { + tests := []struct { + name string + input *cmd.RevokeRolesForUserRequest + expectedOutput []*cmd.RevokeRolesForUserRequest + }{ + { + name: "current request", + input: &cmd.RevokeRolesForUserRequest{Version: cmd.RBACAssignRevokeCommandPolicyVersionV0 + 1}, + expectedOutput: []*cmd.RevokeRolesForUserRequest{{Version: cmd.RBACAssignRevokeCommandPolicyVersionV0 + 1}}, + }, + { + name: "Request to update", + input: &cmd.RevokeRolesForUserRequest{ + Version: cmd.RBACAssignRevokeCommandPolicyVersionV0, + Roles: []string{"something"}, + User: "user:some-user", + }, + expectedOutput: []*cmd.RevokeRolesForUserRequest{ + { + Version: cmd.RBACAssignRevokeCommandPolicyVersionV0 + 1, + Roles: []string{"something"}, + User: "db:some-user", + }, + { + Version: cmd.RBACAssignRevokeCommandPolicyVersionV0 + 1, + Roles: []string{"something"}, + User: "oidc:some-user", + }, + }, + }, + } + + for _, test := range tests 
{ + t.Run(test.name, func(t *testing.T) { + output := migrateRevokeRoles(test.input) + require.Equal(t, test.expectedOutput, output) + }) + } +} + +func TestMigrateAssignRoles(t *testing.T) { + oidc := config.OIDC{ + Enabled: true, + } + + tests := []struct { + name string + input *cmd.AddRolesForUsersRequest + expectedOutput []*cmd.AddRolesForUsersRequest + authNconfig config.Authentication + }{ + { + name: "current request", + input: &cmd.AddRolesForUsersRequest{Version: cmd.RBACAssignRevokeCommandPolicyVersionV0 + 1}, + expectedOutput: []*cmd.AddRolesForUsersRequest{{Version: cmd.RBACAssignRevokeCommandPolicyVersionV0 + 1}}, + authNconfig: config.Authentication{OIDC: oidc}, + }, + { + name: "Request to update with OIDC+apikey enabled", + input: &cmd.AddRolesForUsersRequest{ + Version: cmd.RBACAssignRevokeCommandPolicyVersionV0, + Roles: []string{"something"}, + User: "user:some-user", + }, + expectedOutput: []*cmd.AddRolesForUsersRequest{ + { + Version: cmd.RBACAssignRevokeCommandPolicyVersionV0 + 1, + Roles: []string{"something"}, + User: "db:some-user", + }, + { + Version: cmd.RBACAssignRevokeCommandPolicyVersionV0 + 1, + Roles: []string{"something"}, + User: "oidc:some-user", + }, + }, + authNconfig: config.Authentication{OIDC: oidc, APIKey: config.StaticAPIKey{Enabled: true, Users: []string{"some-user"}}}, + }, + { + name: "only oidc", + input: &cmd.AddRolesForUsersRequest{ + Version: cmd.RBACAssignRevokeCommandPolicyVersionV0, + Roles: []string{"something"}, + User: "user:some-user", + }, + expectedOutput: []*cmd.AddRolesForUsersRequest{ + { + Version: cmd.RBACAssignRevokeCommandPolicyVersionV0 + 1, + Roles: []string{"something"}, + User: "oidc:some-user", + }, + }, + authNconfig: config.Authentication{OIDC: oidc}, + }, + { + name: "Request to update with OIDC+apikey enabled, but missing user", + input: &cmd.AddRolesForUsersRequest{ + Version: cmd.RBACAssignRevokeCommandPolicyVersionV0, + Roles: []string{"something"}, + User: "user:some-user", + }, + 
expectedOutput: []*cmd.AddRolesForUsersRequest{ + { + Version: cmd.RBACAssignRevokeCommandPolicyVersionV0 + 1, + Roles: []string{"something"}, + User: "oidc:some-user", + }, + }, + authNconfig: config.Authentication{OIDC: oidc, APIKey: config.StaticAPIKey{Enabled: true, Users: []string{"wrong-user"}}}, + }, + { + name: "Only apikey enabled", + input: &cmd.AddRolesForUsersRequest{ + Version: cmd.RBACAssignRevokeCommandPolicyVersionV0, + Roles: []string{"something"}, + User: "user:some-user", + }, + expectedOutput: []*cmd.AddRolesForUsersRequest{ + { + Version: cmd.RBACAssignRevokeCommandPolicyVersionV0 + 1, + Roles: []string{"something"}, + User: "db:some-user", + }, + }, + authNconfig: config.Authentication{APIKey: config.StaticAPIKey{Enabled: true, Users: []string{"some-user"}}}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + output := migrateAssignRoles(test.input, test.authNconfig) + require.Equal(t, test.expectedOutput, output) + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/replication/consumer.go b/platform/dbops/binaries/weaviate-src/cluster/replication/consumer.go new file mode 100644 index 0000000000000000000000000000000000000000..e133664eb390fdb58c24615392d9311e12579841 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/replication/consumer.go @@ -0,0 +1,858 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package replication + +import ( + "context" + "fmt" + "slices" + "strings" + "sync" + "time" + + "github.com/weaviate/weaviate/cluster/replication/metrics" + "github.com/weaviate/weaviate/cluster/schema" + "github.com/weaviate/weaviate/usecases/config/runtime" + "github.com/weaviate/weaviate/usecases/sharding" + + "github.com/cenkalti/backoff/v4" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/weaviate/weaviate/cluster/proto/api" + "github.com/weaviate/weaviate/cluster/replication/types" + "github.com/weaviate/weaviate/entities/additional" + enterrors "github.com/weaviate/weaviate/entities/errors" + "github.com/weaviate/weaviate/entities/models" +) + +// asyncStatusInterval is the polling interval to check the status of the +// async replication of src->target +const ( + asyncStatusInterval = 5 * time.Second + // if async status errors more than 30 times, stop retrying + asyncStatusMaxErrors = 30 + // about `asyncStatusInterval` seconds per retry, 120 retries = 10 minutes for async replication + // to complete + asyncStatusMaxRetries = 120 +) + +// OpConsumer is an interface for consuming replication operations. +type OpConsumer interface { + // Consume starts consuming operations from the provided channel. + // The consumer processes operations, and a buffered channel is typically used to apply backpressure. + // The consumer should return an error if it fails to process any operation. + Consume(ctx context.Context, in <-chan ShardReplicationOpAndStatus) error +} + +// DELETED is a constant representing a temporary deleted state of a replication operation that should not be stored in the FSM. +const DELETED = "deleted" + +// errOpCancelled is an error indicating that the operation was cancelled. 
+var errOpCancelled = errors.New("operation cancelled") + +// CopyOpConsumer is an implementation of the OpConsumer interface that processes replication operations +// by executing copy operations from a source shard to a target shard. It uses a ReplicaCopier to actually +// carry out the copy operation. Moreover, it supports configurable backoff, timeout and concurrency limits. +type CopyOpConsumer struct { + // logger is used for structured logging throughout the consumer's lifecycle. + // It provides detailed logs for each replication operation and any errors encountered. + logger *logrus.Entry + + // ongoingOps is a cache of ongoing operations. + // It is used to prevent duplicate operations from being processed. + ongoingOps *OpsCache + + // opsGateway is used to keep track of when task were executed and when we can retry or continue their execution next + // It is used to ensure we backoff when retrying ops and avoid thundering herd problems + opsGateway *OpsGateway + + // leaderClient is responsible for interacting with the FSM to update the state of replication operations. + // It is used to update the status of operations during the replication process (e.g. update to HYDRATING state). + leaderClient types.FSMUpdater + + // replicaCopier is used to handle the actual copying of replica data from the source shard to the target shard. + // It abstracts the mechanics of data replication and file copying. + replicaCopier types.ReplicaCopier + + // schemaReader is used to read the schema + schemaReader schema.SchemaReader + + // backoffPolicy defines the retry mechanism for failed operations. + // It allows the consumer to retry replication operations using a backoff strategy in case of failure. + backoffPolicy backoff.BackOff + + // maxWorkers sets the maximum number of concurrent workers that will be used to process replication operations. 
+ // It controls the level of parallelism in the replication process allowing multiple replication operations to + // run concurrently. + maxWorkers int + + // opTimeout defines the timeout duration for each replication operation. + // It ensures that operations do not hang indefinitely and are retried or terminated after the timeout period. + opTimeout time.Duration + + // tokens controls the maximum number of concurrently running consumers + tokens chan struct{} + + // nodeId uniquely identifies the node on which this consumer instance is running. + nodeId string + + // engineOpCallbacks defines hooks invoked at various stages of a replication operation's lifecycle + // (e.g., pending, start, complete, failure) to support metrics or custom observability logic. + engineOpCallbacks *metrics.ReplicationEngineOpsCallbacks + + // asyncReplicationMinimumWait is the duration for the upper time bound for the hash beat. + asyncReplicationMinimumWait *runtime.DynamicValue[time.Duration] +} + +type overrides struct { + source additional.AsyncReplicationTargetNodeOverride + target additional.AsyncReplicationTargetNodeOverride +} + +func newOverrides(op ShardReplicationOpAndStatus, upperTimeBound int64) overrides { + return overrides{ + source: additional.AsyncReplicationTargetNodeOverride{ + CollectionID: op.Op.SourceShard.CollectionId, + ShardID: op.Op.SourceShard.ShardId, + TargetNode: op.Op.TargetShard.NodeId, + SourceNode: op.Op.SourceShard.NodeId, + UpperTimeBound: upperTimeBound, + NoDeletionResolution: true, + }, + target: additional.AsyncReplicationTargetNodeOverride{ + CollectionID: op.Op.SourceShard.CollectionId, + ShardID: op.Op.SourceShard.ShardId, + TargetNode: op.Op.SourceShard.NodeId, + SourceNode: op.Op.TargetShard.NodeId, + UpperTimeBound: upperTimeBound, + NoDeletionResolution: false, + }, + } +} + +// NewCopyOpConsumer creates a new CopyOpConsumer instance responsible for executing +// replication operations using a configurable worker pool. 
+// +// It uses a ReplicaCopier to perform the actual data copy. +func NewCopyOpConsumer( + logger *logrus.Logger, + leaderClient types.FSMUpdater, + replicaCopier types.ReplicaCopier, + nodeId string, + backoffPolicy backoff.BackOff, + ongoingOps *OpsCache, + opTimeout time.Duration, + maxWorkers int, + asyncReplicationMinimumWait *runtime.DynamicValue[time.Duration], + engineOpCallbacks *metrics.ReplicationEngineOpsCallbacks, + schemaReader schema.SchemaReader, +) *CopyOpConsumer { + c := &CopyOpConsumer{ + logger: logger.WithFields(logrus.Fields{"component": "replication_consumer", "action": replicationEngineLogAction}), + leaderClient: leaderClient, + replicaCopier: replicaCopier, + backoffPolicy: backoffPolicy, + ongoingOps: ongoingOps, + opTimeout: opTimeout, + maxWorkers: maxWorkers, + nodeId: nodeId, + tokens: make(chan struct{}, maxWorkers), + engineOpCallbacks: engineOpCallbacks, + asyncReplicationMinimumWait: asyncReplicationMinimumWait, + schemaReader: schemaReader, + opsGateway: NewOpsGateway(), + } + return c +} + +// Consume processes replication operations from the input channel, ensuring that only a limited number of consumers +// are active concurrently based on the maxWorkers value. 
+func (c *CopyOpConsumer) Consume(workerCtx context.Context, in <-chan ShardReplicationOpAndStatus) error { + c.logger.WithFields(logrus.Fields{"node": c.nodeId, "max_workers": c.maxWorkers, "op_timeout": c.opTimeout}).Info("starting replication operation consumer") + + c.engineOpCallbacks.OnPrepareProcessing(c.nodeId) + + var wg sync.WaitGroup + for { + select { + case <-workerCtx.Done(): + c.logger.WithError(workerCtx.Err()).Info("worker context canceled, shutting down consumer") + // We can start waiting for ops because their context depend on the worker context that just got cancelled + wg.Wait() + return workerCtx.Err() + + case op, ok := <-in: + if !ok { + c.logger.Info("operation channel closed, shutting down consumer and waiting for ops to finish") + c.ongoingOps.CancelAll() + wg.Wait() + return nil + } + logger := getLoggerForOpAndStatus(c.logger, op.Op, op.Status) + + // If the operation has been scheduled for cancellation or deletion + // This is done outside of the worker goroutine, and therefore without acquiring a token, so that + // we can cancel operations that have frozen or become unresponsive. If we were to acquire a token + // we would block the worker pool and not be able to cancel the operation leading to resource starvation. 
+ if op.Status.ShouldCancel && !c.ongoingOps.HasBeenCancelled(op.Op.ID) { + // Update the cache to mark the operation as cancelled + c.ongoingOps.StoreHasBeenCancelled(op.Op.ID) + logger.Debug("cancelled the replication op") + if c.ongoingOps.InFlight(op.Op.ID) { + // Cancel the in-flight operation + // Is a noop, returns false if the op doesn't exist + c.ongoingOps.Cancel(op.Op.ID) + // Continue to ensure we don't accidentally re-spawn the operation in a new worker + continue + } + // Otherwise, the operation is not in-flight and should therefore be processed in a worker where clean-up happens + } + + if ok, next := c.opsGateway.CanSchedule(op.Op.ID); !ok { + logger.WithFields(logrus.Fields{"next": next}).Debug("replication op skipped as not ready to schedule") + continue + } + + c.engineOpCallbacks.OnOpPending(c.nodeId) + select { + // If main context is cancelled here we just continue so that we hit the shutdown logic on the next iteration + case <-workerCtx.Done(): + continue + // The 'tokens' channel limits the number of concurrent workers (`maxWorkers`). + // Each worker acquires a token before processing an operation. If no tokens are available, + // the worker blocks until one is released. After completing the task, the worker releases the token, + // allowing another worker to proceed. This ensures only a limited number of workers is concurrently + // running replication operations and avoids overloading the system. + case c.tokens <- struct{}{}: + // Here we capture the op argument used by the func below as the enterrors.GoWrapper requires calling + // a function without arguments. + operation := op + opLogger := getLoggerForOpAndStatus(c.logger, operation.Op, op.Status) + shouldSkip := false + opAlreadyInFlight := c.ongoingOps.LoadOrStore(op.Op.ID) + if opAlreadyInFlight { + // Check if the operation is already in progress + // Avoid scheduling unnecessary work or incorrectly counting metrics + // for operations that are already in progress or completed. 
+					c.logger.Debug("replication op skipped as already running")
+					shouldSkip = true
+				} else {
+					// Check if the operation has had its state changed between being added to the channel and being processed
+					// This is chatty and will likely cause a lot of unnecessary load on the leader
+					// For now, we need it to ensure eventual consistency between the FSM and the consumer
+					state, err := c.leaderClient.ReplicationGetReplicaOpStatus(workerCtx, op.Op.ID)
+					if err != nil {
+						c.logger.Error("error while checking status of replication op")
+						shouldSkip = true
+					} else if state.String() != op.Status.GetCurrent().State.String() {
+						c.logger.Debug("replication op skipped as state has changed")
+						shouldSkip = true
+					}
+				}
+
+				if op.Status.GetCurrent().State == "" {
+					c.logger.Debug("replication op skipped as state is empty")
+					shouldSkip = true
+				}
+
+				// TODO: Consider more optimal ways of checking that the state of the op has not changed between it being added to the channel
+				// and being processed here. Could use in-memory solution, e.g. using cache, or refactor consumer-producer to be event/notification-based
+				// For now, ensure consistency by checking the FSM through the leader
+
+				// Skipped ops must still release their token so other queued replication operations can proceed.
+				if shouldSkip {
+					opLogger.Debug("replication op skipped as already running")
+					// Need to release the token to let other consumers process queued replication operations.
+ <-c.tokens + c.engineOpCallbacks.OnOpSkipped(c.nodeId) + if !opAlreadyInFlight { + c.ongoingOps.DeleteInFlight(op.Op.ID) + } + continue + } + + // Start a replication operation with a timeout for completion to prevent replication operations + // from running indefinitely + opCtx, opCancel := context.WithTimeout(workerCtx, c.opTimeout) + c.engineOpCallbacks.OnOpStart(c.nodeId) + c.ongoingOps.StoreCancel(op.Op.ID, opCancel) + c.opsGateway.ScheduleNow(op.Op.ID) + wg.Add(1) + enterrors.GoWrapper(func() { + defer func() { + <-c.tokens // Release token when completed + // Delete the operation from the ongoingOps map when the operation processing is complete + c.ongoingOps.DeleteInFlight(op.Op.ID) + wg.Done() + opCancel() + }() + + // If the operation has been cancelled in the time between it being added to the channel and + // being processed, we need to cancel it in the FSM and return + if c.ongoingOps.HasBeenCancelled(op.Op.ID) { + c.logger.Info("replication op cancelled, stopping replication operation") + c.cancelOp(operation, opLogger) + return + } + + opLogger.Debug("worker processing replication operation") + err := c.dispatchReplicationOp(opCtx, operation) + if err == nil { + opLogger.Debug("worker completed replication operation") + c.opsGateway.RegisterFinished(op.Op.ID) + c.engineOpCallbacks.OnOpComplete(c.nodeId) + return + } + + c.opsGateway.RegisterFailure(op.Op.ID) + if errors.Is(err, context.DeadlineExceeded) { + c.engineOpCallbacks.OnOpFailed(c.nodeId) + opLogger.WithError(err).Error("replication operation timed out") + return + } + // TODO: Refactor this error handling + if errors.Is(err, context.Canceled) && c.ongoingOps.HasBeenCancelled(op.Op.ID) { + opLogger.WithError(err).Info("replication operation cancelled") + c.cancelOp(operation, opLogger) + return + } + if errors.Is(err, errOpCancelled) { + opLogger.WithError(err).Info("replication operation cancelled") + c.cancelOp(operation, opLogger) + return + } + c.engineOpCallbacks.OnOpFailed(c.nodeId) 
+ opLogger.WithError(err).Error("replication operation failed") + }, c.logger) + } + } + } +} + +// dispatchReplicationOp dispatches the replication operation to the appropriate state handler +// based on the current state of the operation. +// If the state handler returns success and a valid next state, the operation is transitioned to the next state. +// If the state handler returns an error, the operation is not transitioned and the error is returned. +func (c *CopyOpConsumer) dispatchReplicationOp(ctx context.Context, op ShardReplicationOpAndStatus) error { + switch op.Status.GetCurrentState() { + case api.REGISTERED: + return c.processStateAndTransition(ctx, op, c.processRegisteredOp) + case api.HYDRATING: + return c.processStateAndTransition(ctx, op, c.processHydratingOp) + case api.DEHYDRATING: + return c.processStateAndTransition(ctx, op, c.processDehydratingOp) + case api.FINALIZING: + return c.processStateAndTransition(ctx, op, c.processFinalizingOp) + case api.READY: + return nil + case api.CANCELLED: + return c.processStateAndTransition(ctx, op, c.processCancelledOp) + default: + getLoggerForOpAndStatus(c.logger, op.Op, op.Status).Error("unknown replication operation state") + return fmt.Errorf("unknown replication operation state: %s", op.Status.GetCurrentState()) + } +} + +// stateFuncHandler is a function that processes a replication operation and returns the next state and an error. +type stateFuncHandler func(ctx context.Context, op ShardReplicationOpAndStatus) (api.ShardReplicationState, error) + +func (c *CopyOpConsumer) checkCancelled(logger *logrus.Entry, op ShardReplicationOpAndStatus) error { + if c.ongoingOps.HasBeenCancelled(op.Op.ID) { + logger.WithFields(logrus.Fields{"op": op}).Debug("replication op cancelled, stopping replication operation") + return errOpCancelled + } + return nil +} + +// processStateAndTransition processes a replication operation and transitions it to the next state. 
+// It retries the operation using a backoff policy if it returns an error. +// If the operation is successful, the operation is transitioned to the next state. +// Otherwise, the operation is transitioned to the next state and the process continues. +func (c *CopyOpConsumer) processStateAndTransition(ctx context.Context, op ShardReplicationOpAndStatus, stateFuncHandler stateFuncHandler) error { + logger := getLoggerForOpAndStatus(c.logger, op.Op, op.Status) + nextState, err := backoff.RetryWithData(func() (api.ShardReplicationState, error) { + if ctx.Err() != nil { + logger.WithError(ctx.Err()).Error("error while processing replication operation, shutting down") + return api.ShardReplicationState(""), backoff.Permanent(ctx.Err()) + } + if err := c.checkCancelled(logger, op); err != nil { + return api.ShardReplicationState(""), backoff.Permanent(fmt.Errorf("error while checking if op is cancelled: %w", err)) + } + + nextState, err := stateFuncHandler(ctx, op) + // If we receive an error from the state handler make sure we store it and then stop processing + if err != nil { + // If the op was cancelled for any reason, pass the error up the stack to be handled higher up + if errors.Is(err, context.Canceled) { + logger.Debug("context cancelled, stopping replication operation") + return api.ShardReplicationState(""), backoff.Permanent(fmt.Errorf("context cancelled: %w", err)) + } + if err := c.checkCancelled(logger, op); err != nil { + return api.ShardReplicationState(""), backoff.Permanent(fmt.Errorf("error while checking if op is cancelled: %w", err)) + } + logger.WithError(err).Warn("state transition handler failed") + // Otherwise, register the error with the FSM + if err := c.leaderClient.ReplicationRegisterError(ctx, op.Op.ID, err.Error()); err != nil { + logger.WithError(err).Error("failed to register error for replication operation") + } + return api.ShardReplicationState(""), err + } + + if err := c.checkCancelled(logger, op); err != nil { + return 
api.ShardReplicationState(""), backoff.Permanent(fmt.Errorf("error while checking if op is cancelled: %w", err)) + } + // No error from the state handler, update the state to the next, if this errors we will stop processing + if err := c.leaderClient.ReplicationUpdateReplicaOpStatus(ctx, op.Op.ID, nextState); err != nil { + logger.WithError(err).Errorf("failed to update replica status to '%s'", nextState) + return api.ShardReplicationState(""), fmt.Errorf("failed to update replica status to '%s': %w", nextState, err) + } + return nextState, nil + }, c.backoffPolicy) + if err != nil { + return err + } + + if nextState == DELETED { + // Stop the recursion if we are in the DELETED state and don't update the state in the FSM + return nil + } + + op.Status.ChangeState(nextState) + if nextState == api.READY { + // No need to continue the recursion if we are in the READY state + return nil + } + + if err := c.checkCancelled(logger, op); err != nil { + return err + } + return c.dispatchReplicationOp(ctx, op) +} + +// cancelOp performs clean up for the cancelled operation and notifies the FSM of the cancellation. +// +// It removes the replica shard from the target node and updates the FSM with the cancellation status. +// If the operation is being cancelled, it notifies the FSM to complete the cancellation. +// If the operation is being deleted, it notifies the FSM to remove the operation from the FSM. +// It returns an error if any of the operations fail. +// +// It exists outside of the formal state machine to allow for cancellation of operations that are in progress +// or have been cancelled but not yet processed without introducing new intermediate states to the FSM. 
+func (c *CopyOpConsumer) cancelOp(op ShardReplicationOpAndStatus, logger *logrus.Entry) {
+	defer func() {
+		c.ongoingOps.DeleteHasBeenCancelled(op.Op.ID)
+		c.engineOpCallbacks.OnOpCancelled(c.nodeId)
+	}()
+	ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) // Ensure sync shards times out reasonably in case of a hang
+	defer cancel()
+
+	overrides := newOverrides(op, time.Now().UnixMilli())
+	c.stopAsyncReplication(ctx, op, overrides, logger)
+
+	// Ensure that the states of the shards on the nodes are in-sync with the state of the schema through a RAFT communication
+	// This handles cleaning up for ghost shards that are in the store but not in the schema that may have been created by index.getOptInitShard
+	if err := c.sync(ctx, op); err != nil {
+		logger.WithError(err).
+			WithField("op", op).
+			Error(fmt.Errorf("failure while syncing replica shard when cancelling the op"))
+	}
+
+	// If the operation is only being cancelled then notify the FSM so it can update its state
+	if op.Status.OnlyCancellation() {
+		if err := c.leaderClient.ReplicationCancellationComplete(ctx, op.Op.ID); err != nil {
+			logger.WithError(err).Error("failure while completing cancellation of replica operation")
+		}
+		return
+	}
+
+	// If the operation is being deleted then remove it from the FSM
+	if op.Status.ShouldDelete {
+		if err := c.leaderClient.ReplicationRemoveReplicaOp(ctx, op.Op.ID); err != nil {
+			logger.WithError(err).Error("failure while deleting replica operation")
+		}
+		return
+	}
+}
+
+func (c *CopyOpConsumer) startAsyncReplication(ctx context.Context, op ShardReplicationOpAndStatus, overrides overrides, logger *logrus.Entry) error {
+	// Ensure async replication is started on local (target) node
+	if err := c.replicaCopier.InitAsyncReplicationLocally(ctx, op.Op.SourceShard.CollectionId, op.Op.TargetShard.ShardId); err != nil {
+		logger.WithError(err).Error("failed to initialize async replication on local node")
+		return err
+	}
+	// Start async
replication from source node to target node + if err := c.replicaCopier.AddAsyncReplicationTargetNode(ctx, overrides.target, op.Status.SchemaVersion); err != nil { + logger.WithError(err).Error("failed to add async replication from source node to target node") + return err + } + // Start async replication from target node to source node + if err := c.replicaCopier.AddAsyncReplicationTargetNode(ctx, overrides.source, op.Status.SchemaVersion); err != nil { + logger.WithError(err).Error("failed to add async replication from target node to source node") + return err + } + return nil +} + +func (c *CopyOpConsumer) stopAsyncReplication(ctx context.Context, op ShardReplicationOpAndStatus, overrides overrides, logger *logrus.Entry) { + if err := c.replicaCopier.RemoveAsyncReplicationTargetNode(ctx, overrides.target); err != nil { + logger.WithError(err).Error("failure while removing async replication from source node to target node") + } + if err := c.replicaCopier.RemoveAsyncReplicationTargetNode(ctx, overrides.source); err != nil { + logger.WithError(err).Error("failure while removing async replication from target node to source node") + } + if err := c.replicaCopier.RevertAsyncReplicationLocally(ctx, op.Op.TargetShard.CollectionId, op.Op.SourceShard.ShardId); err != nil { + logger.WithError(err).Error("failure while reverting async replication on local node") + } +} + +func (c *CopyOpConsumer) sync(ctx context.Context, op ShardReplicationOpAndStatus) error { + if _, err := c.leaderClient.SyncShard(ctx, op.Op.TargetShard.CollectionId, op.Op.TargetShard.ShardId, op.Op.TargetShard.NodeId); err != nil { + return err + } + if _, err := c.leaderClient.SyncShard(ctx, op.Op.SourceShard.CollectionId, op.Op.SourceShard.ShardId, op.Op.SourceShard.NodeId); err != nil { + return err + } + return nil +} + +// processRegisteredOp is the state handler for the REGISTERED state. 
// It performs no work of its own and transitions the operation to HYDRATING.
func (c *CopyOpConsumer) processRegisteredOp(ctx context.Context, op ShardReplicationOpAndStatus) (api.ShardReplicationState, error) {
	logger := getLoggerForOpAndStatus(c.logger, op.Op, op.Status)
	logger.Info("processing registered replication operation")

	return api.HYDRATING, nil
}

// processHydratingOp is the state handler for the HYDRATING state.
// It copies the replica shard from the source node to the target node using file copy operations and then transitions the operation to the FINALIZING state.
func (c *CopyOpConsumer) processHydratingOp(ctx context.Context, op ShardReplicationOpAndStatus) (api.ShardReplicationState, error) {
	logger := getLoggerForOpAndStatus(c.logger, op.Op, op.Status)
	logger.Info("processing hydrating replication operation")

	// For multi-tenant collections, activate the tenant (set it HOT) before
	// copying, then pin and wait for the resulting schema version.
	if c.schemaReader.MultiTenancy(op.Op.TargetShard.CollectionId).Enabled {
		schemaVersion, err := c.leaderClient.UpdateTenants(ctx, op.Op.TargetShard.CollectionId, &api.UpdateTenantsRequest{
			Tenants: []*api.Tenant{
				{
					Name:   op.Op.SourceShard.ShardId,
					Status: models.TenantActivityStatusHOT,
				},
			},
		})
		if err != nil {
			logger.WithError(err).Error("failure while updating tenant to active state for hydrating operation")
			return api.ShardReplicationState(""), err
		}

		if err := c.leaderClient.ReplicationStoreSchemaVersion(ctx, op.Op.ID, schemaVersion); err != nil {
			logger.WithError(err).Error("failure while storing schema version for replication operation")
			return api.ShardReplicationState(""), err
		}

		if err := c.leaderClient.WaitForUpdate(ctx, schemaVersion); err != nil {
			logger.WithError(err).Error("failure while waiting for schema version to be applied to local node")
			return api.ShardReplicationState(""), err
		}
	}

	if ctx.Err() != nil {
		logger.WithError(ctx.Err()).Debug("context cancelled, stopping replication operation")
		return api.ShardReplicationState(""), ctx.Err()
	}

	if err := c.replicaCopier.CopyReplicaFiles(ctx, op.Op.SourceShard.NodeId, op.Op.SourceShard.CollectionId, op.Op.TargetShard.ShardId, op.Status.SchemaVersion); err != nil {
		logger.WithError(err).Error("failure while copying replica shard")
		return api.ShardReplicationState(""), err
	}

	if ctx.Err() != nil {
		logger.WithError(ctx.Err()).Debug("context cancelled, stopping replication operation")
		return api.ShardReplicationState(""), ctx.Err()
	}

	return api.FINALIZING, nil
}

// processFinalizingOp is the state handler for the FINALIZING state.
// It updates the sharding state and then transitions the operation to the READY state.
func (c *CopyOpConsumer) processFinalizingOp(ctx context.Context, op ShardReplicationOpAndStatus) (api.ShardReplicationState, error) {
	logger := getLoggerForOpAndStatus(c.logger, op.Op, op.Status)
	logger.Info("processing finalizing replication operation")

	if ctx.Err() != nil {
		logger.WithError(ctx.Err()).Debug("context cancelled, stopping replication operation")
		return api.ShardReplicationState(""), ctx.Err()
	}

	if err := c.leaderClient.WaitForUpdate(ctx, op.Status.SchemaVersion); err != nil {
		logger.WithError(err).Error("failure while waiting for schema version to be applied to local node")
		return api.ShardReplicationState(""), err
	}

	if err := c.replicaCopier.LoadLocalShard(ctx, op.Op.SourceShard.CollectionId, op.Op.SourceShard.ShardId); err != nil {
		logger.WithError(err).Error("failure while loading shard")
		return api.ShardReplicationState(""), err
	}

	if ctx.Err() != nil {
		logger.WithError(ctx.Err()).Debug("context cancelled, stopping replication operation")
		return api.ShardReplicationState(""), ctx.Err()
	}

	// Sanity check: directly query the local schema to see if the replica already exists.
	// If it does we are probably recovering from a previous failure and can skip adding the replica to the sharding state again
	nodes, err := c.schemaReader.ShardReplicas(op.Op.TargetShard.CollectionId, op.Op.TargetShard.ShardId)
	if err != nil {
		logger.WithError(err).Error("failure while getting shard replicas")
		return api.ShardReplicationState(""), err
	}
	replicaExists := slices.Contains(nodes, op.Op.TargetShard.NodeId)

	// this time will be used to make sure async replication has propagated any writes which
	// were received during the hydrating phase
	asyncReplicationUpperTimeBoundUnixMillis := time.Now().Add(time.Second * 5).UnixMilli()
	overrides := newOverrides(op, asyncReplicationUpperTimeBoundUnixMillis)
	if err := c.startAsyncReplication(ctx, op, overrides, logger); err != nil {
		return api.ShardReplicationState(""), err
	}

	if ctx.Err() != nil {
		logger.WithError(ctx.Err()).Debug("error while processing replication operation, shutting down")
		return api.ShardReplicationState(""), ctx.Err()
	}

	if err := c.waitForAsyncReplication(ctx, op, asyncReplicationUpperTimeBoundUnixMillis, logger); err != nil {
		logger.WithError(err).Error("failure while waiting for async replication to complete while finalizing")
		return api.ShardReplicationState(""), err
	}

	if ctx.Err() != nil {
		logger.WithError(ctx.Err()).Debug("error while processing replication operation, shutting down")
		return api.ShardReplicationState(""), ctx.Err()
	}

	if !replicaExists {
		if _, err := c.leaderClient.ReplicationAddReplicaToShard(ctx, op.Op.TargetShard.CollectionId, op.Op.TargetShard.ShardId, op.Op.TargetShard.NodeId, op.Op.ID); err != nil {
			if strings.Contains(err.Error(), sharding.ErrReplicaAlreadyExists.Error()) {
				// The replica already exists, this is not an error and it got updated after our sanity check
				// due to eventual consistency of the sharding state.
				logger.Debug("replica already exists, skipping")
			} else {
				logger.WithError(err).Error("failure while adding replica to shard")
				return api.ShardReplicationState(""), err
			}
		}
	}

	switch op.Op.TransferType {
	case api.COPY:
		c.stopAsyncReplication(ctx, op, overrides, logger)
		// sync the replica shard to ensure that the schema and store are consistent on each node
		// In a COPY this happens now, in a MOVE this happens in the DEHYDRATING state
		if err := c.sync(ctx, op); err != nil {
			logger.WithError(err).Error("failure while syncing replica shard in finalizing state")
			return api.ShardReplicationState(""), err
		}
		return api.READY, nil
	case api.MOVE:
		return api.DEHYDRATING, nil
	default:
		return api.ShardReplicationState(""), fmt.Errorf("unknown transfer type: %s", op.Op.TransferType)
	}
}

// processDehydratingOp is the state handler for the DEHYDRATING state.
// It drains remaining writes from the source to the target via async
// replication, removes the source replica from the sharding state (MOVE
// semantics), re-syncs, and transitions the operation to READY.
func (c *CopyOpConsumer) processDehydratingOp(ctx context.Context, op ShardReplicationOpAndStatus) (api.ShardReplicationState, error) {
	logger := getLoggerForOpAndStatus(c.logger, op.Op, op.Status)
	logger.Info("processing dehydrating replication operation")

	if err := c.leaderClient.WaitForUpdate(ctx, op.Status.SchemaVersion); err != nil {
		logger.WithError(err).Error("failure while waiting for schema version to be applied to local node")
		return api.ShardReplicationState(""), err
	}

	nodes, err := c.schemaReader.ShardReplicas(op.Op.SourceShard.CollectionId, op.Op.SourceShard.ShardId)
	if err != nil {
		logger.WithError(err).Error("failure while getting shard replicas")
		return api.ShardReplicationState(""), err
	}

	// Async replication was started in processFinalizingOp, but here we want to "increase" the upper time bound
	// to make sure any writes received by the source node before the op entered the DEHYDRATING state are
	// propagated to the target node. We assume writes will complete or time out (default 90s) within the
	// asyncReplicationMinimumWait time (default 100s). The source node should not receive any writes after the op
	// enters the DEHYDRATING state.
	asyncReplicationUpperTimeBoundUnixMillis := time.Now().Add(c.asyncReplicationMinimumWait.Get()).UnixMilli()
	overrides := newOverrides(op, asyncReplicationUpperTimeBoundUnixMillis)

	// Only drain and delete if the source replica is still part of the
	// sharding state; otherwise a previous attempt already removed it.
	if slices.Contains(nodes, op.Op.SourceShard.NodeId) {
		if ctx.Err() != nil {
			logger.WithError(ctx.Err()).Debug("context cancelled, stopping replication operation")
			return api.ShardReplicationState(""), ctx.Err()
		}

		if err := c.startAsyncReplication(ctx, op, overrides, logger); err != nil {
			return api.ShardReplicationState(""), err
		}

		if ctx.Err() != nil {
			logger.WithError(ctx.Err()).Debug("error while processing replication operation, shutting down")
			return api.ShardReplicationState(""), ctx.Err()
		}

		if err := c.waitForAsyncReplication(ctx, op, asyncReplicationUpperTimeBoundUnixMillis, logger); err != nil {
			logger.WithError(err).Error("failure while waiting for async replication to complete while dehydrating")
			return api.ShardReplicationState(""), err
		}

		if ctx.Err() != nil {
			logger.WithError(ctx.Err()).Debug("context cancelled, stopping replication operation")
			return api.ShardReplicationState(""), ctx.Err()
		}

		c.stopAsyncReplication(ctx, op, overrides, logger)

		// If the replica got deleted due to eventual consistency between our sanity check and this call, the delete will be a no-op and return no error
		if _, err := c.leaderClient.DeleteReplicaFromShard(ctx, op.Op.SourceShard.CollectionId, op.Op.SourceShard.ShardId, op.Op.SourceShard.NodeId); err != nil {
			logger.WithError(err).Error("failure while deleting replica from shard")
			return api.ShardReplicationState(""), err
		}
	}

	// sync the replica shard to ensure that the schema and store are consistent on each node
	// In a COPY this happens in the FINALIZING state, in a MOVE this happens now
	if err := c.sync(ctx, op); err != nil {
		logger.WithError(err).Error("failure while syncing replica shard in dehydrating state")
		return api.ShardReplicationState(""), err
	}
	return api.READY, nil
}

// processCancelledOp handles a cancelled operation that has been marked for
// deletion: it stops async replication and removes the op from the FSM,
// returning the DELETED state on success.
func (c *CopyOpConsumer) processCancelledOp(ctx context.Context, op ShardReplicationOpAndStatus) (api.ShardReplicationState, error) {
	logger := getLoggerForOpAndStatus(c.logger, op.Op, op.Status)
	logger.Info("processing cancelled replication operation")

	if !op.Status.ShouldDelete {
		return api.ShardReplicationState(""), fmt.Errorf("replication operation with id %v is not in a state to be deleted", op.Op.ID)
	}

	overrides := newOverrides(op, time.Now().UnixMilli())
	c.stopAsyncReplication(ctx, op, overrides, logger)

	if err := c.leaderClient.ReplicationRemoveReplicaOp(ctx, op.Op.ID); err != nil {
		logger.WithError(err).Error("failure while removing replica operation")
		return api.ShardReplicationState(""), err
	}
	return DELETED, nil
}

// handleAsyncReplErr accounts for one failed attempt at fetching async
// replication status: it decrements the remaining error budget and returns the
// updated budget together with either the retryable error or, once the budget
// is exhausted, a backoff.Permanent error that stops further retries.
func (c *CopyOpConsumer) handleAsyncReplErr(
	err error,
	retryNum int,
	asyncStatusMaxErrors int,
	remainingErrorsAllowed int,
	logger *logrus.Entry,
) (int, error) {
	remainingErrorsAllowed--
	if remainingErrorsAllowed < 0 {
		// If we see this error, it means that something probably went wrong with
		// initializing the async replication on the source/target nodes.
		logger.WithFields(logrus.Fields{"num_errors": asyncStatusMaxErrors, "num_retries": retryNum}).WithError(err).Error("errored on all attempts to get async replication status")
		return remainingErrorsAllowed, backoff.Permanent(err)
	}
	// We expect to see this warning a few times while the hashtrees are being initialized
	// on the source/target nodes, but if this errors for longer than ~asyncStatusRetries * asyncStatusInterval
	// then either the hashtree is taking forever to init or something has gone wrong
	logger.WithFields(logrus.Fields{"num_errors_allowed": asyncStatusMaxErrors, "num_errors_left": remainingErrorsAllowed, "num_retries_so_far": retryNum}).WithError(err).Warn("errored when getting async replication status, hashtrees may still be initializing, retrying")
	return remainingErrorsAllowed, err
}

// waitForAsyncReplication waits for async replication to complete by checking the status of the async
// replication every `asyncStatusInterval` seconds.
// It returns an error if the async replication does not complete within `asyncStatusRetries` attempts.
// It returns nil if the async replication has completed.
func (c *CopyOpConsumer) waitForAsyncReplication(
	ctx context.Context,
	op ShardReplicationOpAndStatus,
	asyncReplicationUpperTimeBoundUnixMillis int64,
	logger *logrus.Entry,
) error {
	remainingErrorsAllowed := asyncStatusMaxErrors
	retryNum := -1
	return backoff.Retry(func() error {
		retryNum++
		// Status reported with the source node listed first — presumably the
		// source->target direction; verify against AsyncReplicationStatus.
		asyncReplStatusSrc, err := c.replicaCopier.AsyncReplicationStatus(
			ctx,
			op.Op.SourceShard.NodeId,
			op.Op.TargetShard.NodeId,
			op.Op.SourceShard.CollectionId,
			op.Op.SourceShard.ShardId,
		)
		if err != nil {
			remainingErrorsAllowed, err = c.handleAsyncReplErr(err, retryNum, asyncStatusMaxErrors, remainingErrorsAllowed, logger)
			return err
		}
		asyncReplIsPastUpperTimeBoundSrc := asyncReplStatusSrc.StartDiffTimeUnixMillis >= asyncReplicationUpperTimeBoundUnixMillis

		// Status reported with the target node listed first (reverse direction).
		asyncReplStatusTgt, err := c.replicaCopier.AsyncReplicationStatus(
			ctx,
			op.Op.TargetShard.NodeId,
			op.Op.SourceShard.NodeId,
			op.Op.TargetShard.CollectionId,
			op.Op.TargetShard.ShardId,
		)
		if err != nil {
			remainingErrorsAllowed, err = c.handleAsyncReplErr(err, retryNum, asyncStatusMaxErrors, remainingErrorsAllowed, logger)
			return err
		}
		asyncReplIsPastUpperTimeBoundTgt := asyncReplStatusTgt.StartDiffTimeUnixMillis >= asyncReplicationUpperTimeBoundUnixMillis

		objectsPropagated := asyncReplStatusSrc.ObjectsPropagated + asyncReplStatusTgt.ObjectsPropagated
		asyncReplIsPastUpperTimeBound := asyncReplIsPastUpperTimeBoundSrc && asyncReplIsPastUpperTimeBoundTgt
		// It can take a few minutes for async replication to complete, this log is here to
		// help monitor the progress.
		logger.WithFields(logrus.Fields{
			"objects_propagated":                      objectsPropagated,
			"start_diff_time_unix_millis_src":         asyncReplStatusSrc.StartDiffTimeUnixMillis,
			"start_diff_time_unix_millis_tgt":         asyncReplStatusTgt.StartDiffTimeUnixMillis,
			"upper_time_bound_unix_millis":            asyncReplicationUpperTimeBoundUnixMillis,
			"async_replication_past_upper_time_bound": asyncReplIsPastUpperTimeBound,
			"num_retries_so_far":                      retryNum,
			"remaining_errors_allowed":                remainingErrorsAllowed,
		}).Info("async replication status")
		// Done: nothing left to propagate and both directions have progressed
		// past the upper time bound.
		if objectsPropagated == 0 && asyncReplIsPastUpperTimeBound {
			return nil
		}

		// Wait until we've passed the upper time bound before starting status checks
		// to avoid unnecessary status checks before the upper time bound has passed
		currentTimeMillis := time.Now().UnixMilli()
		if currentTimeMillis < asyncReplicationUpperTimeBoundUnixMillis {
			waitDuration := time.Duration(asyncReplicationUpperTimeBoundUnixMillis-currentTimeMillis) * time.Millisecond
			logger.WithFields(logrus.Fields{
				"wait_duration_ms": waitDuration.Milliseconds(),
				"upper_bound_ms":   asyncReplicationUpperTimeBoundUnixMillis,
			}).Info("waiting to reach upper time bound before starting async replication status checks")

			select {
			case <-ctx.Done():
				return ctx.Err()
			case <-time.After(waitDuration):
				// Time has passed, continue below with the status checks
			}
		}

		// Returning a non-nil error makes backoff.Retry try again until
		// asyncStatusMaxRetries is reached.
		return errors.New("async replication not done")
	}, backoff.WithContext(
		backoff.WithMaxRetries(backoff.NewConstantBackOff(asyncStatusInterval), asyncStatusMaxRetries),
		ctx),
	)
}
diff --git a/platform/dbops/binaries/weaviate-src/cluster/replication/consumer_ops_cache.go b/platform/dbops/binaries/weaviate-src/cluster/replication/consumer_ops_cache.go
new file mode 100644
index 0000000000000000000000000000000000000000..beae59061b11b95ba1247503e8a02fc84669ac72
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/cluster/replication/consumer_ops_cache.go
@@ -0,0 +1,106 @@
// _ _
// __ _____ __
___ ___ __ _| |_ ___
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
// \ V V / __/ (_| |\ V /| | (_| | || __/
// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
//
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
//
// CONTACT: hello@weaviate.io
//

package replication

import (
	"context"
	"sync"
)

// OpsCache tracks the lifecycle bookkeeping of replication operations being
// processed by a consumer: which ops are in flight, which have been formally
// cancelled, and the cancel functions used to abort in-flight workers.
// All three maps are keyed by the operation id.
type OpsCache struct {
	// hasBeenCancelled is a map of opId to an empty struct
	// It is used to communicate between the main consumer goroutine and its
	// workers whether an operation has been formally cancelled
	hasBeenCancelled sync.Map
	// cancels is a map of opId to a cancel function
	// It is used by the main goroutine to cancel the workers if
	// they are still in-flight. If they are not in-flight, the cancel function
	// will be absent but hasBeenCancelled will be still be present
	cancels sync.Map
	// ops is a map of opId to an empty struct
	// It is used to track whether an operation is currently being handled by
	// a worker goroutine
	inFlight sync.Map
}

// NewOpsCache returns an empty, ready-to-use OpsCache.
func NewOpsCache() *OpsCache {
	return &OpsCache{
		hasBeenCancelled: sync.Map{},
		cancels:          sync.Map{},
		inFlight:         sync.Map{},
	}
}

// HasBeenCancelled reports whether opId has been marked as formally cancelled.
func (c *OpsCache) HasBeenCancelled(opId uint64) bool {
	_, ok := c.hasBeenCancelled.Load(opId)
	return ok
}

// StoreHasBeenCancelled marks opId as formally cancelled.
func (c *OpsCache) StoreHasBeenCancelled(opId uint64) {
	c.hasBeenCancelled.Store(opId, struct{}{})
}

// DeleteHasBeenCancelled clears the cancellation marker for opId.
func (c *OpsCache) DeleteHasBeenCancelled(opId uint64) {
	c.hasBeenCancelled.Delete(opId)
}

// LoadOrStore marks opId as in flight and reports whether it already was
// (true means another worker is already handling the op).
func (c *OpsCache) LoadOrStore(opId uint64) bool {
	_, ok := c.inFlight.LoadOrStore(opId, struct{}{})
	return ok
}

// InFlight reports whether opId is currently being handled by a worker.
func (c *OpsCache) InFlight(opId uint64) bool {
	_, ok := c.inFlight.Load(opId)
	return ok
}

// LoadCancel returns the cancel function registered for opId, if any.
func (c *OpsCache) LoadCancel(opId uint64) (context.CancelFunc, bool) {
	cancelAny, ok := c.cancels.Load(opId)
	if !ok {
		return nil, false
	}
	cancel, ok := cancelAny.(context.CancelFunc)
	if !ok {
		return nil, false
	}
	return cancel, true
}

// StoreCancel registers the cancel function for an in-flight opId.
func (c *OpsCache) StoreCancel(opId uint64, cancel context.CancelFunc) {
	c.cancels.Store(opId, cancel)
}

// Cancel invokes the cancel function for opId if one is registered and
// reports whether a cancellation was actually triggered.
func (c *OpsCache) Cancel(opId uint64) bool {
	cancel, ok := c.LoadCancel(opId)
	if !ok {
		return false
	}
	cancel()
	return true
}

// CancelAll invokes every registered cancel function (e.g. on shutdown).
func (c *OpsCache) CancelAll() {
	c.cancels.Range(func(key, value any) bool {
		cancel, ok := value.(context.CancelFunc)
		if ok {
			cancel()
		}

		// Iterate on all
		return true
	})
}

// DeleteInFlight removes opId from both the cancel map and the in-flight set.
func (c *OpsCache) DeleteInFlight(opId uint64) {
	c.cancels.Delete(opId)
	c.inFlight.Delete(opId)
}
diff --git a/platform/dbops/binaries/weaviate-src/cluster/replication/consumer_ops_gateway.go b/platform/dbops/binaries/weaviate-src/cluster/replication/consumer_ops_gateway.go
new file mode 100644
index 0000000000000000000000000000000000000000..0b2edf4257c58228b70724cf669d1a749928a88b
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/cluster/replication/consumer_ops_gateway.go
@@ -0,0 +1,104 @@
// _ _
// __ _____ __ ___ ___ __ _| |_ ___
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
// \ V V / __/ (_| |\ V /| | (_| | || __/
// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
//
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
//
// CONTACT: hello@weaviate.io
//

package replication

import (
	"sync"
	"time"

	"github.com/cenkalti/backoff/v5"
)

var (
	GatewayBackoffMaxInterval   = 15 * time.Second
	GatewayInitialBackoffPeriod = 5 * time.Second
)

// OpsScheduleMetadata tracks per-operation scheduling state: when the op was
// last scheduled, when it may next be scheduled, how many execution attempts
// it has had, and the exponential backoff applied after failures.
type OpsScheduleMetadata struct {
	lastScheduled time.Time
	nextSchedule  time.Time

	executionAttempt uint64
	m                sync.RWMutex
	expBackoff       *backoff.ExponentialBackOff
}

// NewOpsScheduleMetadata builds metadata whose nextSchedule lies in the past
// so a brand-new operation is immediately schedulable.
func NewOpsScheduleMetadata() *OpsScheduleMetadata {
	expBackoff := backoff.NewExponentialBackOff()
	expBackoff.MaxInterval = GatewayBackoffMaxInterval
	expBackoff.InitialInterval = GatewayInitialBackoffPeriod
	return &OpsScheduleMetadata{
		lastScheduled: time.Now(),
		nextSchedule:  time.Now().Add(-time.Second * 10),
		expBackoff:    expBackoff,
	}
}

// OpsGateway gates how often each replication operation may be (re)scheduled,
// applying exponential backoff to operations that keep failing.
type OpsGateway struct {
	// opsToMetadata maps opId (uint64) to *OpsScheduleMetadata.
	opsToMetadata sync.Map
}

// NewOpsGateway returns an empty gateway.
func NewOpsGateway() *OpsGateway {
	return &OpsGateway{
		opsToMetadata: sync.Map{},
	}
}

// CanSchedule reports whether opId may be scheduled now; the returned time is
// the earliest moment at which scheduling is (or was) allowed. Unknown ops get
// fresh metadata and are therefore immediately schedulable.
func (og *OpsGateway) CanSchedule(opId uint64) (bool, time.Time) {
	v, _ := og.opsToMetadata.LoadOrStore(opId, NewOpsScheduleMetadata())
	metadata, ok := v.(*OpsScheduleMetadata)
	if !ok {
		// This should never happen
		return false, time.Now()
	}
	metadata.m.RLock()
	defer metadata.m.RUnlock()

	return metadata.nextSchedule.Before(time.Now()), metadata.nextSchedule
}

// ScheduleNow records that opId has just been scheduled, bumping its attempt
// counter and last-scheduled timestamp.
func (og *OpsGateway) ScheduleNow(opId uint64) {
	v, _ := og.opsToMetadata.LoadOrStore(opId, NewOpsScheduleMetadata())
	metadata, ok := v.(*OpsScheduleMetadata)
	if !ok {
		// This should never happen
		return
	}
	metadata.m.Lock()
	defer metadata.m.Unlock()

	metadata.lastScheduled = time.Now()
	metadata.executionAttempt += 1
	// NOTE(review): metadata is a pointer already held by the map, so this
	// Store looks redundant — presumably harmless; confirm before removing.
	og.opsToMetadata.Store(opId, metadata)
}

// RegisterFinished drops all scheduling state for a completed operation.
func (og *OpsGateway) RegisterFinished(opId uint64) {
	og.opsToMetadata.Delete(opId)
}

// RegisterFailure pushes out the op's next allowed schedule time using its
// exponential backoff.
func (og *OpsGateway) RegisterFailure(opId uint64) {
	v, ok := og.opsToMetadata.Load(opId)
	if !ok {
		// This should never happen
		return
	}
	metadata, ok := v.(*OpsScheduleMetadata)
	if !ok {
		// This should never happen
		return
	}
	metadata.m.Lock()
	defer metadata.m.Unlock()

	// The op just failed, let's backoff
	metadata.nextSchedule = time.Now().Add(metadata.expBackoff.NextBackOff())
}
diff --git a/platform/dbops/binaries/weaviate-src/cluster/replication/consumer_test.go b/platform/dbops/binaries/weaviate-src/cluster/replication/consumer_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..bef4f29b0d4edcf180342f7aac22879a2a3f8664
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/cluster/replication/consumer_test.go
@@ -0,0 +1,1512 @@
// _ _
// __ _____ __ ___ ___ __ _| |_ ___
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
// \ V V / __/ (_| |\ V /| | (_| | || __/
// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
//
// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
//
// CONTACT: hello@weaviate.io
//

package replication_test

import (
	"context"
	"fmt"
	"sync"
	"testing"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/weaviate/weaviate/cluster/replication/types"
	"github.com/weaviate/weaviate/cluster/schema"
	"github.com/weaviate/weaviate/entities/models"
	"github.com/weaviate/weaviate/usecases/config/runtime"
	"github.com/weaviate/weaviate/usecases/fakes"
	"github.com/weaviate/weaviate/usecases/sharding"

	"github.com/cenkalti/backoff/v4"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	logrustest "github.com/sirupsen/logrus/hooks/test"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
	"github.com/weaviate/weaviate/cluster/proto/api"
	"github.com/weaviate/weaviate/cluster/replication"
	"github.com/weaviate/weaviate/cluster/replication/metrics"
)

func TestConsumerWithCallbacks(t *testing.T) {
	t.Run("successful operation should trigger expected callbacks", func(t *testing.T) {
		// GIVEN
		logger, _ := logrustest.NewNullLogger()
		mockFSMUpdater := types.NewMockFSMUpdater(t)
		mockReplicaCopier :=
types.NewMockReplicaCopier(t) + parser := fakes.NewMockParser() + parser.On("ParseClass", mock.Anything).Return(nil) + schemaManager := schema.NewSchemaManager("test-node", nil, parser, prometheus.NewPedanticRegistry(), logrus.New()) + schemaReader := schemaManager.NewSchemaReader() + schemaManager.AddClass( + buildApplyRequest("TestCollection", api.ApplyRequest_TYPE_ADD_CLASS, api.AddClassRequest{ + Class: &models.Class{Class: "TestCollection", MultiTenancyConfig: &models.MultiTenancyConfig{Enabled: false}}, + State: &sharding.State{ + Physical: map[string]sharding.Physical{"shard1": {BelongsToNodes: []string{"node1"}}}, + }, + }), "node1", true, false) + + opId, err := randInt(t, 100, 200) + require.NoError(t, err, "error generating random operation id") + + mockFSMUpdater.EXPECT(). + WaitForUpdate(mock.Anything, mock.Anything). + Return(nil) + mockFSMUpdater.EXPECT(). + ReplicationGetReplicaOpStatus(mock.Anything, uint64(opId)). + Return(api.REGISTERED, nil). + Times(1) + mockFSMUpdater.EXPECT(). + ReplicationUpdateReplicaOpStatus(mock.Anything, uint64(opId), api.HYDRATING). + Return(nil) + mockFSMUpdater.EXPECT(). + ReplicationUpdateReplicaOpStatus(mock.Anything, uint64(opId), api.FINALIZING). + Return(nil) + mockFSMUpdater.EXPECT(). + ReplicationUpdateReplicaOpStatus(mock.Anything, uint64(opId), api.READY). + Return(nil) + mockFSMUpdater.EXPECT(). + ReplicationAddReplicaToShard(mock.Anything, "TestCollection", "shard1", "node2", uint64(opId)). + Return(uint64(0), nil) + mockFSMUpdater.EXPECT(). + SyncShard(mock.Anything, "TestCollection", "shard1", "node1"). + Return(uint64(0), nil). + Times(1) + mockFSMUpdater.EXPECT(). + SyncShard(mock.Anything, "TestCollection", "shard1", "node2"). + Return(uint64(0), nil). + Times(1) + mockReplicaCopier.EXPECT(). + CopyReplicaFiles( + mock.Anything, + "node1", + "TestCollection", + "shard1", + mock.Anything, + ). + Once(). + Return(nil) + mockReplicaCopier.EXPECT(). 
+ LoadLocalShard(mock.Anything, mock.Anything, mock.Anything). + Return(nil) + mockReplicaCopier.EXPECT(). + InitAsyncReplicationLocally(mock.Anything, "TestCollection", "shard1"). + Return(nil) + mockReplicaCopier.EXPECT(). + AsyncReplicationStatus(mock.Anything, "node1", "node2", "TestCollection", "shard1"). + Return(models.AsyncReplicationStatus{ + ObjectsPropagated: 0, + StartDiffTimeUnixMillis: time.Now().Add(200 * time.Second).UnixMilli(), + }, nil) + mockReplicaCopier.EXPECT(). + AsyncReplicationStatus(mock.Anything, "node2", "node1", "TestCollection", "shard1"). + Return(models.AsyncReplicationStatus{ + ObjectsPropagated: 0, + StartDiffTimeUnixMillis: time.Now().Add(200 * time.Second).UnixMilli(), + }, nil) + mockReplicaCopier.EXPECT(). + AddAsyncReplicationTargetNode(mock.Anything, mock.Anything, mock.Anything).Return(nil) + mockReplicaCopier.EXPECT(). + RemoveAsyncReplicationTargetNode(mock.Anything, mock.Anything).Return(nil) + mockReplicaCopier.EXPECT(). + RevertAsyncReplicationLocally(mock.Anything, "TestCollection", "shard1").Return(nil) + + var ( + prepareProcessingCallbacksCounter int + pendingCallbacksCounter int + skippedCallbacksCounter int + startedCallbacksCounter int + completedCallbacksCounter int + failedCallbacksCounter int + completionWg sync.WaitGroup + ) + completionWg.Add(1) + + metricsCallbacks := metrics.NewReplicationEngineOpsCallbacksBuilder(). + WithPrepareProcessing(func(node string) { + require.Equal(t, "node2", node, "invalid node in prepare processing callback") + prepareProcessingCallbacksCounter++ + }). + WithOpPendingCallback(func(node string) { + require.Equal(t, "node2", node, "invalid node in pending op callback") + pendingCallbacksCounter++ + }). + WithOpSkippedCallback(func(node string) { + require.Equal(t, "node2", node, "invalid node in skipped op callback") + skippedCallbacksCounter++ + }). 
+ WithOpStartCallback(func(node string) { + require.Equal(t, "node2", node, "invalid node in start op callback") + startedCallbacksCounter++ + }). + WithOpCompleteCallback(func(node string) { + require.Equal(t, "node2", node, "invalid node in complete op callback") + completedCallbacksCounter++ + completionWg.Done() + }). + WithOpFailedCallback(func(node string) { + require.Equal(t, "node2", node, "invalid node in failed op callback") + failedCallbacksCounter++ + t.Error("Failed callback should not be called for successful operation") + }).Build() + + consumer := replication.NewCopyOpConsumer( + logger, + mockFSMUpdater, + mockReplicaCopier, + "node2", + &backoff.StopBackOff{}, + replication.NewOpsCache(), + time.Second*10, + 1, + runtime.NewDynamicValue(time.Second*100), + metricsCallbacks, + schemaReader, + ) + + ctx, cancel := context.WithCancel(t.Context()) + defer cancel() + + opsChan := make(chan replication.ShardReplicationOpAndStatus, 1) + doneChan := make(chan error, 1) + + // WHEN + go func() { + doneChan <- consumer.Consume(ctx, opsChan) + }() + + opsChan <- replication.NewShardReplicationOpAndStatus(replication.NewShardReplicationOp(uint64(opId), "node1", "node2", "TestCollection", "shard1", api.COPY), replication.NewShardReplicationStatus(api.REGISTERED)) + waitChan := make(chan struct{}) + go func() { + completionWg.Wait() + waitChan <- struct{}{} + }() + + select { + case <-waitChan: + // This is here just to make sure the test does not run indefinitely + case <-time.After(5 * time.Second): + t.Fatal("Test timed out waiting for operation completion") + } + + close(opsChan) + err = <-doneChan + + // THEN + require.NoError(t, err, "expected operation completing successfully") + require.Equal(t, 1, prepareProcessingCallbacksCounter, "expected prepare processing callback to be called once") + require.Equal(t, 1, pendingCallbacksCounter, "Pending callback should be called") + require.Equal(t, 0, skippedCallbacksCounter, "Skipped callback should be 
called") + require.Equal(t, 1, startedCallbacksCounter, "Start callback should be called") + require.Equal(t, 1, completedCallbacksCounter, "Complete callback should be called") + require.Equal(t, 0, failedCallbacksCounter, "Failed callback should be called for failed operation") + mockFSMUpdater.AssertExpectations(t) + mockReplicaCopier.AssertExpectations(t) + }) + + t.Run("failed operation should trigger failed callback", func(t *testing.T) { + // GIVEN + logger, _ := logrustest.NewNullLogger() + mockFSMUpdater := types.NewMockFSMUpdater(t) + mockReplicaCopier := types.NewMockReplicaCopier(t) + parser := fakes.NewMockParser() + parser.On("ParseClass", mock.Anything).Return(nil) + schemaManager := schema.NewSchemaManager("test-node", nil, parser, prometheus.NewPedanticRegistry(), logrus.New()) + schemaReader := schemaManager.NewSchemaReader() + schemaManager.AddClass( + buildApplyRequest("TestCollection", api.ApplyRequest_TYPE_ADD_CLASS, api.AddClassRequest{ + Class: &models.Class{Class: "TestCollection", MultiTenancyConfig: &models.MultiTenancyConfig{Enabled: false}}, + State: &sharding.State{ + Physical: map[string]sharding.Physical{"shard1": {BelongsToNodes: []string{"node1"}}}, + }, + }), "node1", true, false) + + opId, err := randInt(t, 100, 200) + require.NoError(t, err, "error generating random operation id") + + mockFSMUpdater.EXPECT(). + ReplicationGetReplicaOpStatus(mock.Anything, uint64(opId)). + Return(api.REGISTERED, nil). + Times(1) + mockFSMUpdater.EXPECT(). + ReplicationUpdateReplicaOpStatus(mock.Anything, uint64(opId), api.HYDRATING). + Return(nil) + mockReplicaCopier.EXPECT(). + CopyReplicaFiles( + mock.Anything, + "node1", + "TestCollection", + "shard1", + mock.Anything, + ). + Once(). + Return(errors.New("simulated copy failure")) + mockFSMUpdater.EXPECT(). + ReplicationRegisterError(mock.Anything, uint64(opId), mock.Anything). 
+ Return(nil) + + var ( + prepareProcessingCallbacksCounter int + pendingCallbacksCounter int + skippedCallbacksCounter int + startedCallbacksCounter int + completedCallbacksCounter int + failedCallbacksCounter int + completionWg sync.WaitGroup + ) + completionWg.Add(1) + + metricsCallbacks := metrics.NewReplicationEngineOpsCallbacksBuilder(). + WithPrepareProcessing(func(node string) { + require.Equal(t, "node2", node, "invalid node in prepare processing callback") + prepareProcessingCallbacksCounter++ + }). + WithOpPendingCallback(func(node string) { + require.Equal(t, "node2", node, "invalid node in pending op callback") + pendingCallbacksCounter++ + }). + WithOpSkippedCallback(func(node string) { + require.Equal(t, "node2", node, "invalid node in skipped op callback") + skippedCallbacksCounter++ + }). + WithOpStartCallback(func(node string) { + require.Equal(t, "node2", node, "invalid node in start op callback") + startedCallbacksCounter++ + }). + WithOpCompleteCallback(func(node string) { + require.Equal(t, "node2", node, "invalid node in complete op callback") + completedCallbacksCounter++ + }). 
+ WithOpFailedCallback(func(node string) { + require.Equal(t, "node2", node, "invalid node in failed op callback") + failedCallbacksCounter++ + completionWg.Done() + }).Build() + + consumer := replication.NewCopyOpConsumer( + logger, + mockFSMUpdater, + mockReplicaCopier, + "node2", + &backoff.StopBackOff{}, // No retries for test + replication.NewOpsCache(), + time.Second*10, + 1, + runtime.NewDynamicValue(time.Second*100), + metricsCallbacks, + schemaReader, + ) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + opsChan := make(chan replication.ShardReplicationOpAndStatus, 1) + doneChan := make(chan error, 1) + + // WHEN + go func() { + doneChan <- consumer.Consume(ctx, opsChan) + }() + + opsChan <- replication.NewShardReplicationOpAndStatus(replication.NewShardReplicationOp(uint64(opId), "node1", "node2", "TestCollection", "shard1", api.COPY), replication.NewShardReplicationStatus(api.REGISTERED)) + waitChan := make(chan struct{}) + go func() { + completionWg.Wait() + waitChan <- struct{}{} + }() + + select { + case <-waitChan: + // This is here just to make sure the test does not run indefinitely + case <-time.After(5 * time.Second): + t.Fatal("Test timed out waiting for operation completion") + } + + close(opsChan) + err = <-doneChan + + // THEN + require.NoError(t, err, "expected consumer to stop without error") + require.Equal(t, 1, prepareProcessingCallbacksCounter, "Prepare processing callback should be called") + require.Equal(t, 1, pendingCallbacksCounter, "Pending callback should be called") + require.Equal(t, 0, skippedCallbacksCounter, "Skipped callback should be called") + require.Equal(t, 1, startedCallbacksCounter, "Start callback should be called") + require.Equal(t, 0, completedCallbacksCounter, "Complete callback should be called once") + require.Equal(t, 1, failedCallbacksCounter, "Failed callback should be called for failed operation") + mockFSMUpdater.AssertExpectations(t) + 
mockReplicaCopier.AssertExpectations(t) + }) + + t.Run("multiple random concurrent operations should be tracked correctly", func(t *testing.T) { + // GIVEN + logger, _ := logrustest.NewNullLogger() + mockFSMUpdater := types.NewMockFSMUpdater(t) + mockReplicaCopier := types.NewMockReplicaCopier(t) + parser := fakes.NewMockParser() + parser.On("ParseClass", mock.Anything).Return(nil) + schemaManager := schema.NewSchemaManager("test-node", nil, parser, prometheus.NewPedanticRegistry(), logrus.New()) + schemaReader := schemaManager.NewSchemaReader() + + randomNumberOfOps, err := randInt(t, 10, 20) + require.NoError(t, err, "error while generating random number of operations") + + physical := make(map[string]sharding.Physical) + for i := 0; i < randomNumberOfOps; i++ { + physical[fmt.Sprintf("shard-%d", i)] = sharding.Physical{BelongsToNodes: []string{"node1"}} + } + schemaManager.AddClass( + buildApplyRequest("TestCollection", api.ApplyRequest_TYPE_ADD_CLASS, api.AddClassRequest{ + Class: &models.Class{Class: "TestCollection", MultiTenancyConfig: &models.MultiTenancyConfig{Enabled: false}}, + State: &sharding.State{Physical: physical}, + }), "node1", true, false) + + randomStartOpId, err := randInt(t, 1000, 2000) + require.NoError(t, err, "error while generating random op id start") + for i := 0; i < randomNumberOfOps; i++ { + opId := uint64(randomStartOpId + i) + mockFSMUpdater.EXPECT(). + WaitForUpdate(mock.Anything, mock.Anything). + Return(nil) + mockFSMUpdater.EXPECT(). + ReplicationGetReplicaOpStatus(mock.Anything, uint64(opId)). + Return(api.REGISTERED, nil). + Times(1) + mockFSMUpdater.EXPECT(). + ReplicationUpdateReplicaOpStatus(mock.Anything, uint64(opId), api.HYDRATING). + Return(nil) + mockFSMUpdater.EXPECT(). + ReplicationUpdateReplicaOpStatus(mock.Anything, uint64(opId), api.FINALIZING). + Return(nil) + mockFSMUpdater.EXPECT(). + ReplicationUpdateReplicaOpStatus(mock.Anything, uint64(opId), api.READY). + Return(nil) + mockReplicaCopier.EXPECT(). 
+ CopyReplicaFiles(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(nil) + mockReplicaCopier.EXPECT(). + LoadLocalShard(mock.Anything, mock.Anything, mock.Anything). + Return(nil) + mockFSMUpdater.EXPECT(). + ReplicationAddReplicaToShard(mock.Anything, mock.Anything, mock.Anything, mock.Anything, uint64(opId)). + Return(uint64(i), nil) + mockReplicaCopier.EXPECT(). + AsyncReplicationStatus(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(models.AsyncReplicationStatus{ + ObjectsPropagated: 0, + StartDiffTimeUnixMillis: time.Now().Add(200 * time.Second).UnixMilli(), + }, nil) + mockReplicaCopier.EXPECT(). + InitAsyncReplicationLocally(mock.Anything, mock.Anything, mock.Anything). + Return(nil) + mockReplicaCopier.EXPECT(). + AddAsyncReplicationTargetNode(mock.Anything, mock.Anything, mock.Anything).Return(nil) + mockReplicaCopier.EXPECT(). + RemoveAsyncReplicationTargetNode(mock.Anything, mock.Anything).Return(nil) + mockReplicaCopier.EXPECT(). + RevertAsyncReplicationLocally(mock.Anything, mock.Anything, mock.Anything).Return(nil) + mockFSMUpdater.EXPECT(). + SyncShard(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(uint64(i), nil) + } + + var ( + mutex sync.Mutex + prepareProcessingCount int + pendingCount int + skippedCount int + startCount int + completeCount int + completionWg sync.WaitGroup + ) + completionWg.Add(randomNumberOfOps) + + metricsCallbacks := metrics.NewReplicationEngineOpsCallbacksBuilder(). + WithPrepareProcessing(func(node string) { + mutex.Lock() + prepareProcessingCount++ + mutex.Unlock() + }). + WithOpPendingCallback(func(node string) { + mutex.Lock() + pendingCount++ + mutex.Unlock() + }). + WithOpSkippedCallback(func(node string) { + mutex.Lock() + skippedCount++ + mutex.Unlock() + }). + WithOpStartCallback(func(node string) { + mutex.Lock() + startCount++ + mutex.Unlock() + }). 
+ WithOpCompleteCallback(func(node string) { + mutex.Lock() + completeCount++ + mutex.Unlock() + completionWg.Done() + }).Build() + + consumer := replication.NewCopyOpConsumer( + logger, + mockFSMUpdater, + mockReplicaCopier, + "node2", + &backoff.StopBackOff{}, + replication.NewOpsCache(), + time.Second*10, + 1, + runtime.NewDynamicValue(time.Second*100), + metricsCallbacks, + schemaReader, + ) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + opsChan := make(chan replication.ShardReplicationOpAndStatus, randomNumberOfOps) + doneChan := make(chan error, 1) + + // WHEN + go func() { + doneChan <- consumer.Consume(ctx, opsChan) + }() + + for i := 0; i < randomNumberOfOps; i++ { + shard := fmt.Sprintf("shard-%d", i) + opsChan <- replication.NewShardReplicationOpAndStatus(replication.NewShardReplicationOp(uint64(randomStartOpId+i), "node1", "node2", "TestCollection", shard, api.COPY), replication.NewShardReplicationStatus(api.REGISTERED)) + } + + waitChan := make(chan struct{}) + go func() { + completionWg.Wait() + waitChan <- struct{}{} + }() + + select { + case <-waitChan: + // All operations completed + case <-time.After(5 * time.Second): + // This is here just to make sure the test does not run indefinitely + t.Fatal("Test timed out waiting for operations to complete") + } + + close(opsChan) + err = <-doneChan + + // THEN + require.NoError(t, err, "expected consumer to stop without error") + mutex.Lock() + require.Equal(t, 1, prepareProcessingCount, "Prepare processing callback should be called once") + require.Equal(t, randomNumberOfOps, pendingCount, "Pending callback should be called for each operation") + require.Equal(t, randomNumberOfOps, startCount, "Start callback should be called for each operation") + require.Equal(t, randomNumberOfOps, completeCount, "Complete callback should be called for each operation") + mutex.Unlock() + }) + + t.Run("all operations are skipped and should trigger skipped callbacks", func(t *testing.T) 
{ + // GIVEN + logger, _ := logrustest.NewNullLogger() + mockFSMUpdater := types.NewMockFSMUpdater(t) + mockReplicaCopier := types.NewMockReplicaCopier(t) + parser := fakes.NewMockParser() + parser.On("ParseClass", mock.Anything).Return(nil) + schemaManager := schema.NewSchemaManager("test-node", nil, parser, prometheus.NewPedanticRegistry(), logrus.New()) + schemaReader := schemaManager.NewSchemaReader() + schemaManager.AddClass( + buildApplyRequest("TestCollection", api.ApplyRequest_TYPE_ADD_CLASS, api.AddClassRequest{ + Class: &models.Class{Class: "TestCollection", MultiTenancyConfig: &models.MultiTenancyConfig{Enabled: false}}, + State: &sharding.State{ + Physical: map[string]sharding.Physical{"shard1": {BelongsToNodes: []string{"node1"}}}, + }, + }), "node1", true, false) + + totalOps, err := randInt(t, 10, 20) + require.NoError(t, err, "error while generating random number of operations") + randomStartOpId, err := randInt(t, 1000, 2000) + require.NoError(t, err, "error while generating random number of operations") + opsCache := replication.NewOpsCache() + + for i := 0; i < totalOps; i++ { + opId := uint64(randomStartOpId + i) + opsCache.LoadOrStore(opId) + } + + var ( + mutex sync.Mutex + prepareProcessingCount int + pendingCount int + skippedCount int + startCount int + completeCount int + failedCount int + ) + + callbacks := metrics.NewReplicationEngineOpsCallbacksBuilder(). + WithPrepareProcessing(func(node string) { + mutex.Lock() + prepareProcessingCount++ + mutex.Unlock() + }). + WithOpPendingCallback(func(node string) { + mutex.Lock() + pendingCount++ + mutex.Unlock() + }). + WithOpSkippedCallback(func(node string) { + mutex.Lock() + skippedCount++ + mutex.Unlock() + }). + WithOpStartCallback(func(node string) { + mutex.Lock() + startCount++ + mutex.Unlock() + t.Error("Start callback should not be called when all ops are skipped") + }). 
+ WithOpCompleteCallback(func(node string) { + mutex.Lock() + completeCount++ + mutex.Unlock() + t.Error("Complete callback should not be called when all ops are skipped") + }). + WithOpFailedCallback(func(node string) { + mutex.Lock() + failedCount++ + mutex.Unlock() + t.Error("Failed callback should not be called when all ops are skipped") + }).Build() + + consumer := replication.NewCopyOpConsumer( + logger, + mockFSMUpdater, + mockReplicaCopier, + "node2", + &backoff.StopBackOff{}, + opsCache, + time.Second*10, + 1, + runtime.NewDynamicValue(time.Second*100), + callbacks, + schemaReader, + ) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + opsChan := make(chan replication.ShardReplicationOpAndStatus, totalOps) + doneChan := make(chan error, 1) + + // WHEN + go func() { + doneChan <- consumer.Consume(ctx, opsChan) + }() + + require.NoError(t, err, "error while generating random op id start") + + for i := 0; i < totalOps; i++ { + node := fmt.Sprintf("node-%d", i) + opsChan <- replication.NewShardReplicationOpAndStatus(replication.NewShardReplicationOp(uint64(randomStartOpId+i), "node1", node, "TestCollection", "shard1", api.COPY), replication.NewShardReplicationStatus(api.REGISTERED)) + } + + close(opsChan) + err = <-doneChan + + // THEN + require.NoError(t, err, "expected consumer to stop without error") + + mutex.Lock() + require.Equal(t, 1, prepareProcessingCount, "Prepare processing callback should be called once") + require.Equal(t, totalOps, pendingCount, "Pending should be called for each op") + require.Equal(t, totalOps, skippedCount, "Skipped should be called for each op") + require.Equal(t, 0, startCount, "Start should not be called when all ops are skipped") + require.Equal(t, 0, completeCount, "Complete should not be called when all ops are skipped") + require.Equal(t, 0, failedCount, "Failed should not be called when all ops are skipped") + mutex.Unlock() + + mockFSMUpdater.AssertExpectations(t) + 
mockReplicaCopier.AssertExpectations(t) + }) + + t.Run("some operations are randomly skipped and should trigger corresponding callbacks", func(t *testing.T) { + // GIVEN + logger, _ := logrustest.NewNullLogger() + mockFSMUpdater := types.NewMockFSMUpdater(t) + mockReplicaCopier := types.NewMockReplicaCopier(t) + parser := fakes.NewMockParser() + parser.On("ParseClass", mock.Anything).Return(nil) + schemaManager := schema.NewSchemaManager("test-node", nil, parser, prometheus.NewPedanticRegistry(), logrus.New()) + schemaReader := schemaManager.NewSchemaReader() + + totalOps, err := randInt(t, 10, 20) + require.NoError(t, err, "error while generating random number of operations") + + physical := make(map[string]sharding.Physical) + for i := 0; i < totalOps; i++ { + physical[fmt.Sprintf("shard-%d", i)] = sharding.Physical{BelongsToNodes: []string{"node1"}} + } + + schemaManager.AddClass( + buildApplyRequest("TestCollection", api.ApplyRequest_TYPE_ADD_CLASS, api.AddClassRequest{ + Class: &models.Class{Class: "TestCollection", MultiTenancyConfig: &models.MultiTenancyConfig{Enabled: false}}, + State: &sharding.State{Physical: physical}, + }), "node1", true, false) + + var ( + mutex sync.Mutex + prepareProcessingCount int + pendingCount int + skippedCount int + startCount int + completeCount int + failedCount int + completionWg sync.WaitGroup + ) + + opsCache := replication.NewOpsCache() + + randomStartOpId, err := randInt(t, 1000, 2000) + require.NoError(t, err, "error while generating random op id start") + + expectedSkipped := 0 + expectedStarted := 0 + expectedCompleted := 0 + + for i := 0; i < totalOps; i++ { + opID := uint64(randomStartOpId + i) + skip := randomBoolean(t) + if !skip { + expectedStarted++ + expectedCompleted++ + mockFSMUpdater.EXPECT(). + WaitForUpdate(mock.Anything, mock.Anything). + Return(nil) + mockFSMUpdater.EXPECT(). + ReplicationGetReplicaOpStatus(mock.Anything, uint64(opID)). + Return(api.REGISTERED, nil). + Times(1) + mockFSMUpdater.EXPECT(). 
+ ReplicationUpdateReplicaOpStatus(mock.Anything, uint64(opID), api.HYDRATING). + Return(nil) + mockFSMUpdater.EXPECT(). + ReplicationUpdateReplicaOpStatus(mock.Anything, uint64(opID), api.FINALIZING). + Return(nil) + mockFSMUpdater.EXPECT(). + ReplicationUpdateReplicaOpStatus(mock.Anything, uint64(opID), api.READY). + Return(nil) + mockReplicaCopier.EXPECT(). + CopyReplicaFiles(mock.Anything, "node1", "TestCollection", mock.Anything, mock.Anything). + Return(nil) + mockReplicaCopier.EXPECT(). + LoadLocalShard(mock.Anything, mock.Anything, mock.Anything). + Return(nil) + mockFSMUpdater.EXPECT(). + ReplicationAddReplicaToShard(mock.Anything, "TestCollection", mock.Anything, mock.Anything, uint64(opID)). + Return(uint64(i), nil) + mockReplicaCopier.EXPECT(). + AsyncReplicationStatus(mock.Anything, "node1", "node2", "TestCollection", mock.Anything). + Return(models.AsyncReplicationStatus{ + ObjectsPropagated: 0, + StartDiffTimeUnixMillis: time.Now().Add(200 * time.Second).UnixMilli(), + }, nil) + mockReplicaCopier.EXPECT(). + AsyncReplicationStatus(mock.Anything, "node2", "node1", "TestCollection", mock.Anything). + Return(models.AsyncReplicationStatus{ + ObjectsPropagated: 0, + StartDiffTimeUnixMillis: time.Now().Add(200 * time.Second).UnixMilli(), + }, nil) + mockReplicaCopier.EXPECT(). + AddAsyncReplicationTargetNode(mock.Anything, mock.Anything, mock.Anything).Return(nil) + mockReplicaCopier.EXPECT(). + RemoveAsyncReplicationTargetNode(mock.Anything, mock.Anything).Return(nil) + mockReplicaCopier.EXPECT(). + InitAsyncReplicationLocally(mock.Anything, mock.Anything, mock.Anything). + Return(nil) + mockReplicaCopier.EXPECT(). + RevertAsyncReplicationLocally(mock.Anything, mock.Anything, mock.Anything).Return(nil) + mockFSMUpdater.EXPECT(). 
+ SyncShard(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(uint64(i), nil) + completionWg.Add(1) + } else { + require.False(t, opsCache.LoadOrStore(opID), "operation should not be stored twice in cache") + expectedSkipped++ + } + } + + callbacks := metrics.NewReplicationEngineOpsCallbacksBuilder(). + WithPrepareProcessing(func(node string) { + mutex.Lock() + prepareProcessingCount++ + mutex.Unlock() + }). + WithOpPendingCallback(func(node string) { + mutex.Lock() + pendingCount++ + mutex.Unlock() + }). + WithOpSkippedCallback(func(node string) { + mutex.Lock() + skippedCount++ + mutex.Unlock() + }). + WithOpStartCallback(func(node string) { + mutex.Lock() + startCount++ + mutex.Unlock() + }). + WithOpCompleteCallback(func(node string) { + mutex.Lock() + completeCount++ + mutex.Unlock() + completionWg.Done() + }). + WithOpFailedCallback(func(node string) { + mutex.Lock() + failedCount++ + mutex.Unlock() + t.Error("Failed callback should not be called in this test") + }).Build() + + consumer := replication.NewCopyOpConsumer( + logger, + mockFSMUpdater, + mockReplicaCopier, + "node2", + &backoff.StopBackOff{}, + opsCache, + time.Second*10, + 1, + runtime.NewDynamicValue(time.Second*100), + callbacks, + schemaReader, + ) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + opsChan := make(chan replication.ShardReplicationOpAndStatus, totalOps) + doneChan := make(chan error, 1) + + // WHEN + go func() { + doneChan <- consumer.Consume(ctx, opsChan) + }() + + for i := 0; i < totalOps; i++ { + shard := fmt.Sprintf("shard-%d", i) + opsChan <- replication.NewShardReplicationOpAndStatus(replication.NewShardReplicationOp(uint64(randomStartOpId+i), "node1", "node2", "TestCollection", shard, api.COPY), replication.NewShardReplicationStatus(api.REGISTERED)) + } + + waitChan := make(chan struct{}) + go func() { + completionWg.Wait() + waitChan <- struct{}{} + }() + + select { + case <-waitChan: + case <-time.After(5 * time.Second): + 
t.Fatal("Test timed out waiting for operation completion") + } + + close(opsChan) + err = <-doneChan + + // THEN + require.NoError(t, err, "expected consumer to stop without error") + + mutex.Lock() + require.Equal(t, 1, prepareProcessingCount, "Prepare processing should be called once") + require.Equal(t, totalOps, pendingCount, "Pending should be called for each op") + require.Equal(t, expectedSkipped, skippedCount, "Skipped count should match") + require.Equal(t, expectedStarted, startCount, "Started count should match non-skipped ops") + require.Equal(t, expectedCompleted, completeCount, "Completed count should match non-skipped ops") + require.Equal(t, 0, failedCount, "No operations should fail") + mutex.Unlock() + + mockFSMUpdater.AssertExpectations(t) + mockReplicaCopier.AssertExpectations(t) + }) +} + +func TestConsumerOpCancellation(t *testing.T) { + // GIVEN + logger, _ := logrustest.NewNullLogger() + mockFSMUpdater := types.NewMockFSMUpdater(t) + mockReplicaCopier := types.NewMockReplicaCopier(t) + parser := fakes.NewMockParser() + parser.On("ParseClass", mock.Anything).Return(nil) + schemaManager := schema.NewSchemaManager("test-node", nil, parser, prometheus.NewPedanticRegistry(), logrus.New()) + schemaReader := schemaManager.NewSchemaReader() + schemaManager.AddClass( + buildApplyRequest("TestCollection", api.ApplyRequest_TYPE_ADD_CLASS, api.AddClassRequest{ + Class: &models.Class{Class: "TestCollection", MultiTenancyConfig: &models.MultiTenancyConfig{Enabled: false}}, + State: &sharding.State{ + Physical: map[string]sharding.Physical{"shard1": {BelongsToNodes: []string{"node1"}}}, + }, + }), "node1", true, false) + + mockFSMUpdater.EXPECT(). + ReplicationCancellationComplete(mock.Anything, uint64(1)). + Return(nil) + mockFSMUpdater.EXPECT(). + SyncShard(mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(0, nil) + mockReplicaCopier.EXPECT(). + RevertAsyncReplicationLocally(mock.Anything, mock.Anything, mock.Anything). 
+ Return(nil) + mockReplicaCopier.EXPECT(). + RemoveAsyncReplicationTargetNode(mock.Anything, mock.Anything). + Return(nil) + + var completionWg sync.WaitGroup + var once sync.Once + completionWg.Add(1) + metricsCallbacks := metrics.NewReplicationEngineOpsCallbacksBuilder(). + WithPrepareProcessing(func(node string) { + require.Equal(t, "node2", node, "invalid node in prepare processing callback") + }). + WithOpPendingCallback(func(node string) { + require.Equal(t, "node2", node, "invalid node in pending op callback") + }). + WithOpSkippedCallback(func(node string) { + require.Equal(t, "node2", node, "invalid node in skipped op callback") + }). + WithOpStartCallback(func(node string) { + require.Equal(t, "node2", node, "invalid node in start op callback") + }). + WithOpCompleteCallback(func(node string) { + require.Equal(t, "node2", node, "invalid node in complete op callback") + }). + WithOpFailedCallback(func(node string) { + require.Equal(t, "node2", node, "invalid node in failed op callback") + }). + WithOpCancelledCallback(func(node string) { + require.Equal(t, "node2", node, "invalid node in cancelled op callback") + once.Do(func() { + // cancelOp in Consumer is a complete noop so can be called multiple times + // without error. However, completionWg.Done() can only be called once + completionWg.Done() + }) + }). + Build() + + consumer := replication.NewCopyOpConsumer( + logger, + mockFSMUpdater, + mockReplicaCopier, + "node2", + &backoff.StopBackOff{}, + replication.NewOpsCache(), + time.Second*10, + 1, + runtime.NewDynamicValue(time.Second*100), + metricsCallbacks, + schemaReader, + ) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + opsChan := make(chan replication.ShardReplicationOpAndStatus, 2) + doneChan := make(chan error, 1) + + // WHEN + go func() { + doneChan <- consumer.Consume(ctx, opsChan) + }() + + mockReplicaCopier.EXPECT(). 
+ CopyReplicaFiles(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + RunAndReturn(func(ctx context.Context, sourceNode string, collectionName string, shardName string, schemaVersion uint64) error { + // Simulate a long-running operation that checks for cancellation every loop + for { + if ctx.Err() != nil { + return ctx.Err() + } + time.Sleep(1 * time.Second) + } + }).Maybe() + + op := replication.NewShardReplicationOp(1, "node1", "node2", "TestCollection", "shard1", api.COPY) + + status := replication.NewShardReplicationStatus(api.HYDRATING) + mockFSMUpdater.EXPECT(). + ReplicationGetReplicaOpStatus(mock.Anything, uint64(1)). + Return(api.HYDRATING, nil) + + status.TriggerCancellation() + // Simulate the copying step that will loop forever until cancelled in the mock + opsChan <- replication.NewShardReplicationOpAndStatus(op, status) + // Tests the cancellation happening before the copying has started (0s) and once it has started (1s) + time.Sleep(1 * time.Second) + // Cancel the operation via ShouldCancel or ShouldDelete + opsChan <- replication.NewShardReplicationOpAndStatus(op, status) + + waitChan := make(chan struct{}) + go func() { + completionWg.Wait() + waitChan <- struct{}{} + }() + + select { + case <-waitChan: + case <-time.After(10 * time.Second): + t.Fatalf("Test timed out waiting for operation completion") + } + + close(opsChan) + err := <-doneChan + + // THEN + require.NoError(t, err, "expected consumer to stop without error") + + mockFSMUpdater.AssertExpectations(t) + mockReplicaCopier.AssertExpectations(t) +} + +func TestConsumerOpDeletion(t *testing.T) { + // GIVEN + logger, _ := logrustest.NewNullLogger() + mockFSMUpdater := types.NewMockFSMUpdater(t) + mockReplicaCopier := types.NewMockReplicaCopier(t) + parser := fakes.NewMockParser() + parser.On("ParseClass", mock.Anything).Return(nil) + schemaManager := schema.NewSchemaManager("test-node", nil, parser, prometheus.NewPedanticRegistry(), logrus.New()) + schemaReader 
:= schemaManager.NewSchemaReader() + schemaManager.AddClass( + buildApplyRequest("TestCollection", api.ApplyRequest_TYPE_ADD_CLASS, api.AddClassRequest{ + Class: &models.Class{Class: "TestCollection", MultiTenancyConfig: &models.MultiTenancyConfig{Enabled: false}}, + State: &sharding.State{ + Physical: map[string]sharding.Physical{"shard1": {BelongsToNodes: []string{"node1"}}}, + }, + }), "node1", true, false) + + mockFSMUpdater.EXPECT(). + ReplicationRemoveReplicaOp(mock.Anything, uint64(1)). + Return(nil) + mockFSMUpdater.EXPECT(). + SyncShard(mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(0, nil) + mockReplicaCopier.EXPECT(). + RevertAsyncReplicationLocally(mock.Anything, mock.Anything, mock.Anything). + Return(nil) + mockReplicaCopier.EXPECT(). + RemoveAsyncReplicationTargetNode(mock.Anything, mock.Anything). + Return(nil) + + var completionWg sync.WaitGroup + var once sync.Once + completionWg.Add(1) + metricsCallbacks := metrics.NewReplicationEngineOpsCallbacksBuilder(). + WithPrepareProcessing(func(node string) { + require.Equal(t, "node2", node, "invalid node in prepare processing callback") + }). + WithOpPendingCallback(func(node string) { + require.Equal(t, "node2", node, "invalid node in pending op callback") + }). + WithOpSkippedCallback(func(node string) { + require.Equal(t, "node2", node, "invalid node in skipped op callback") + }). + WithOpStartCallback(func(node string) { + require.Equal(t, "node2", node, "invalid node in start op callback") + }). + WithOpCompleteCallback(func(node string) { + require.Equal(t, "node2", node, "invalid node in complete op callback") + }). + WithOpFailedCallback(func(node string) { + require.Equal(t, "node2", node, "invalid node in failed op callback") + }). + WithOpCancelledCallback(func(node string) { + require.Equal(t, "node2", node, "invalid node in cancelled op callback") + once.Do(func() { + // cancelOp in Consumer is a complete noop so can be called multiple times + // without error. 
However, completionWg.Done() can only be called once + completionWg.Done() + }) + }). + Build() + + consumer := replication.NewCopyOpConsumer( + logger, + mockFSMUpdater, + mockReplicaCopier, + "node2", + &backoff.StopBackOff{}, + replication.NewOpsCache(), + time.Second*10, + 1, + runtime.NewDynamicValue(time.Second*100), + metricsCallbacks, + schemaReader, + ) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + opsChan := make(chan replication.ShardReplicationOpAndStatus, 2) + doneChan := make(chan error, 1) + + // WHEN + go func() { + doneChan <- consumer.Consume(ctx, opsChan) + }() + + mockReplicaCopier.EXPECT(). + CopyReplicaFiles(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + RunAndReturn(func(ctx context.Context, sourceNode string, collectionName string, shardName string, schemaVersion uint64) error { + // Simulate a long-running operation that checks for cancellation every loop + for { + if ctx.Err() != nil { + return ctx.Err() + } + time.Sleep(1 * time.Second) + } + }).Maybe() + + op := replication.NewShardReplicationOp(1, "node1", "node2", "TestCollection", "shard1", api.COPY) + + status := replication.NewShardReplicationStatus(api.HYDRATING) + mockFSMUpdater.EXPECT(). + ReplicationGetReplicaOpStatus(mock.Anything, uint64(1)). 
+ Return(api.HYDRATING, nil) + + status.TriggerDeletion() + // Simulate the copying step that will loop forever until cancelled in the mock + opsChan <- replication.NewShardReplicationOpAndStatus(op, status) + // Tests the cancellation happening before the copying has started (0s) and once it has started (1s) + time.Sleep(1 * time.Second) + // Cancel the operation via ShouldCancel or ShouldDelete + opsChan <- replication.NewShardReplicationOpAndStatus(op, status) + + waitChan := make(chan struct{}) + go func() { + completionWg.Wait() + waitChan <- struct{}{} + }() + + select { + case <-waitChan: + case <-time.After(10 * time.Second): + t.Fatalf("Test timed out waiting for operation completion") + } + + close(opsChan) + err := <-doneChan + + // THEN + require.NoError(t, err, "expected consumer to stop without error") + + mockFSMUpdater.AssertExpectations(t) + mockReplicaCopier.AssertExpectations(t) +} + +func TestConsumerOpDuplication(t *testing.T) { + // GIVEN + logger, _ := logrustest.NewNullLogger() + mockFSMUpdater := types.NewMockFSMUpdater(t) + mockReplicaCopier := types.NewMockReplicaCopier(t) + parser := fakes.NewMockParser() + parser.On("ParseClass", mock.Anything).Return(nil) + schemaManager := schema.NewSchemaManager("test-node", nil, parser, prometheus.NewPedanticRegistry(), logrus.New()) + schemaManager.AddClass( + buildApplyRequest("TestCollection", api.ApplyRequest_TYPE_ADD_CLASS, api.AddClassRequest{ + Class: &models.Class{Class: "TestCollection", MultiTenancyConfig: &models.MultiTenancyConfig{Enabled: false}}, + State: &sharding.State{ + Physical: map[string]sharding.Physical{"shard1": {BelongsToNodes: []string{"node1"}}}, + }, + }), "node1", true, false) + schemaReader := schemaManager.NewSchemaReader() + schemaManager.AddClass( + buildApplyRequest("TestCollection", api.ApplyRequest_TYPE_ADD_CLASS, api.AddClassRequest{ + Class: &models.Class{Class: "TestCollection", MultiTenancyConfig: &models.MultiTenancyConfig{Enabled: false}}, + State: 
&sharding.State{ + Physical: map[string]sharding.Physical{"shard1": {BelongsToNodes: []string{"node1"}}}, + }, + }), "node1", true, false) + + var completionWg sync.WaitGroup + + metricsCallbacks := metrics.NewReplicationEngineOpsCallbacksBuilder(). + WithPrepareProcessing(func(node string) { + require.Equal(t, "node2", node, "invalid node in prepare processing callback") + }). + WithOpPendingCallback(func(node string) { + require.Equal(t, "node2", node, "invalid node in pending op callback") + }). + WithOpSkippedCallback(func(node string) { + require.Equal(t, "node2", node, "invalid node in skipped op callback") + completionWg.Done() + }). + WithOpStartCallback(func(node string) { + require.Equal(t, "node2", node, "invalid node in start op callback") + }). + WithOpCompleteCallback(func(node string) { + require.Equal(t, "node2", node, "invalid node in complete op callback") + completionWg.Done() + }). + WithOpFailedCallback(func(node string) { + require.Equal(t, "node2", node, "invalid node in failed op callback") + }). + WithOpCancelledCallback(func(node string) { + require.Equal(t, "node2", node, "invalid node in cancelled op callback") + }). + Build() + + consumer := replication.NewCopyOpConsumer( + logger, + mockFSMUpdater, + mockReplicaCopier, + "node2", + &backoff.StopBackOff{}, + replication.NewOpsCache(), + time.Second*10, + 1, + runtime.NewDynamicValue(time.Second*100), + metricsCallbacks, + schemaReader, + ) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + opsChan := make(chan replication.ShardReplicationOpAndStatus, 1) + doneChan := make(chan error, 1) + + // WHEN + go func() { + doneChan <- consumer.Consume(ctx, opsChan) + }() + + mockFSMUpdater.EXPECT(). + WaitForUpdate(mock.Anything, mock.Anything). + Return(nil) + mockFSMUpdater.EXPECT(). + ReplicationGetReplicaOpStatus(mock.Anything, uint64(1)). + Return(api.FINALIZING, nil) + mockFSMUpdater.EXPECT(). + ReplicationGetReplicaOpStatus(mock.Anything, uint64(1)). 
+ Return(api.READY, nil) + mockFSMUpdater.EXPECT(). + ReplicationUpdateReplicaOpStatus(mock.Anything, uint64(1), api.READY). + Return(nil) + mockFSMUpdater.EXPECT(). + ReplicationAddReplicaToShard(mock.Anything, "TestCollection", "shard1", "node2", uint64(1)). + Return(uint64(1), nil) + mockReplicaCopier.EXPECT(). + LoadLocalShard(mock.Anything, mock.Anything, mock.Anything). + Return(nil) + mockReplicaCopier.EXPECT(). + AsyncReplicationStatus(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(models.AsyncReplicationStatus{ + ObjectsPropagated: 0, + StartDiffTimeUnixMillis: time.Now().Add(200 * time.Second).UnixMilli(), + }, nil) + mockReplicaCopier.EXPECT(). + AddAsyncReplicationTargetNode(mock.Anything, mock.Anything, mock.Anything).Return(nil) + mockReplicaCopier.EXPECT(). + RemoveAsyncReplicationTargetNode(mock.Anything, mock.Anything).Return(nil) + mockReplicaCopier.EXPECT(). + InitAsyncReplicationLocally(mock.Anything, "TestCollection", "shard1"). + Return(nil) + mockReplicaCopier.EXPECT(). + RevertAsyncReplicationLocally(mock.Anything, "TestCollection", "shard1").Return(nil) + mockFSMUpdater.EXPECT(). 
+ SyncShard(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(uint64(1), nil) + + op := replication.NewShardReplicationOp(1, "node1", "node2", "TestCollection", "shard1", api.COPY) + status := replication.NewShardReplicationStatus(api.FINALIZING) + + opsChan <- replication.NewShardReplicationOpAndStatus(op, status) + completionWg.Add(1) + + // Send the same operation again to make sure it isn't reprocessed after a state change + // as mocked in the above expectations + opsChan <- replication.NewShardReplicationOpAndStatus(op, status) + completionWg.Add(1) + + waitChan := make(chan struct{}) + go func() { + completionWg.Wait() + waitChan <- struct{}{} + }() + + select { + case <-waitChan: + case <-time.After(30 * time.Second): + t.Fatalf("Test timed out waiting for operation completion") + } + + close(opsChan) + err := <-doneChan + + // THEN + require.NoError(t, err, "expected consumer to stop without error") + + mockFSMUpdater.AssertExpectations(t) + mockReplicaCopier.AssertExpectations(t) +} + +func TestConsumerOpSkip(t *testing.T) { + // GIVEN + logger, _ := logrustest.NewNullLogger() + mockFSMUpdater := types.NewMockFSMUpdater(t) + mockReplicaCopier := types.NewMockReplicaCopier(t) + parser := fakes.NewMockParser() + parser.On("ParseClass", mock.Anything).Return(nil) + schemaManager := schema.NewSchemaManager("test-node", nil, parser, prometheus.NewPedanticRegistry(), logrus.New()) + schemaManager.AddClass( + buildApplyRequest("TestCollection", api.ApplyRequest_TYPE_ADD_CLASS, api.AddClassRequest{ + Class: &models.Class{Class: "TestCollection", MultiTenancyConfig: &models.MultiTenancyConfig{Enabled: false}}, + State: &sharding.State{ + Physical: map[string]sharding.Physical{"shard1": {BelongsToNodes: []string{"node1"}}}, + }, + }), "node1", true, false) + schemaReader := schemaManager.NewSchemaReader() + schemaManager.AddClass( + buildApplyRequest("TestCollection", api.ApplyRequest_TYPE_ADD_CLASS, api.AddClassRequest{ + Class: 
&models.Class{Class: "TestCollection", MultiTenancyConfig: &models.MultiTenancyConfig{Enabled: false}}, + State: &sharding.State{ + Physical: map[string]sharding.Physical{"shard1": {BelongsToNodes: []string{"node1"}}}, + }, + }), "node1", true, false) + + var completionWg sync.WaitGroup + + metricsCallbacks := metrics.NewReplicationEngineOpsCallbacksBuilder(). + WithPrepareProcessing(func(node string) { + require.Equal(t, "node2", node, "invalid node in prepare processing callback") + }). + WithOpPendingCallback(func(node string) { + require.Equal(t, "node2", node, "invalid node in pending op callback") + }). + WithOpSkippedCallback(func(node string) { + require.Equal(t, "node2", node, "invalid node in skipped op callback") + completionWg.Done() + }). + WithOpStartCallback(func(node string) { + require.Equal(t, "node2", node, "invalid node in start op callback") + }). + WithOpCompleteCallback(func(node string) { + require.Equal(t, "node2", node, "invalid node in complete op callback") + completionWg.Done() + }). + WithOpFailedCallback(func(node string) { + require.Equal(t, "node2", node, "invalid node in failed op callback") + }). + WithOpCancelledCallback(func(node string) { + require.Equal(t, "node2", node, "invalid node in cancelled op callback") + }). + Build() + + consumer := replication.NewCopyOpConsumer( + logger, + mockFSMUpdater, + mockReplicaCopier, + "node2", + &backoff.StopBackOff{}, + replication.NewOpsCache(), + time.Second*10, + 3, + runtime.NewDynamicValue(time.Second*100), + metricsCallbacks, + schemaReader, + ) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + opsChan := make(chan replication.ShardReplicationOpAndStatus, 4) + doneChan := make(chan error, 1) + + // WHEN + go func() { + doneChan <- consumer.Consume(ctx, opsChan) + }() + + mockFSMUpdater.EXPECT(). + WaitForUpdate(mock.Anything, mock.Anything). + Return(nil) + mockFSMUpdater.EXPECT(). + ReplicationGetReplicaOpStatus(mock.Anything, uint64(1)). 
+ Return(api.FINALIZING, nil) + mockFSMUpdater.EXPECT(). + ReplicationGetReplicaOpStatus(mock.Anything, uint64(1)). + Return(api.READY, nil) + mockReplicaCopier.EXPECT(). + LoadLocalShard(mock.Anything, mock.Anything, mock.Anything). + Return(nil) + mockFSMUpdater.EXPECT(). + ReplicationUpdateReplicaOpStatus(mock.Anything, uint64(1), api.READY). + Return(nil) + mockFSMUpdater.EXPECT(). + ReplicationAddReplicaToShard(mock.Anything, "TestCollection", "shard1", "node2", uint64(1)). + Return(uint64(1), nil) + mockReplicaCopier.EXPECT(). + AsyncReplicationStatus(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(models.AsyncReplicationStatus{ + ObjectsPropagated: 0, + StartDiffTimeUnixMillis: time.Now().Add(200 * time.Second).UnixMilli(), + }, nil) + mockReplicaCopier.EXPECT(). + AddAsyncReplicationTargetNode(mock.Anything, mock.Anything, mock.Anything).Return(nil) + mockReplicaCopier.EXPECT(). + RemoveAsyncReplicationTargetNode(mock.Anything, mock.Anything).Return(nil) + mockReplicaCopier.EXPECT(). + InitAsyncReplicationLocally(mock.Anything, "TestCollection", "shard1"). + Return(nil) + mockReplicaCopier.EXPECT(). + RevertAsyncReplicationLocally(mock.Anything, "TestCollection", "shard1").Return(nil) + mockFSMUpdater.EXPECT(). 
+ SyncShard(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(uint64(1), nil) + op := replication.NewShardReplicationOp(1, "node1", "node2", "TestCollection", "shard1", api.COPY) + status := replication.NewShardReplicationStatus(api.FINALIZING) + + opsChan <- replication.NewShardReplicationOpAndStatus(op, status) + completionWg.Add(1) + + // Send the same operation again twice to make sure it is skipped + opsChan <- replication.NewShardReplicationOpAndStatus(op, status) + completionWg.Add(1) + + waitChan := make(chan struct{}) + go func() { + completionWg.Wait() + waitChan <- struct{}{} + }() + + select { + case <-waitChan: + case <-time.After(30 * time.Second): + t.Fatalf("Test timed out waiting for operation completion") + } + + close(opsChan) + err := <-doneChan + + // THEN + require.NoError(t, err, "expected consumer to stop without error") + + mockFSMUpdater.AssertExpectations(t) + mockReplicaCopier.AssertExpectations(t) +} + +func TestConsumerShutdown(t *testing.T) { + // GIVEN + logger, _ := logrustest.NewNullLogger() + mockFSMUpdater := types.NewMockFSMUpdater(t) + mockReplicaCopier := types.NewMockReplicaCopier(t) + parser := fakes.NewMockParser() + parser.On("ParseClass", mock.Anything).Return(nil) + schemaManager := schema.NewSchemaManager("test-node", nil, parser, prometheus.NewPedanticRegistry(), logrus.New()) + schemaReader := schemaManager.NewSchemaReader() + + var completionWg sync.WaitGroup + metricsCallbacks := metrics.NewReplicationEngineOpsCallbacksBuilder(). + WithPrepareProcessing(func(node string) { + require.Equal(t, "node2", node, "invalid node in prepare processing callback") + }). + WithOpPendingCallback(func(node string) { + require.Equal(t, "node2", node, "invalid node in pending op callback") + }). + WithOpSkippedCallback(func(node string) { + require.Equal(t, "node2", node, "invalid node in skipped op callback") + }). 
+ WithOpStartCallback(func(node string) { + require.Equal(t, "node2", node, "invalid node in start op callback") + }). + WithOpCompleteCallback(func(node string) { + require.Equal(t, "node2", node, "invalid node in complete op callback") + }). + WithOpFailedCallback(func(node string) { + require.Equal(t, "node2", node, "invalid node in failed op callback") + completionWg.Done() + }). + WithOpCancelledCallback(func(node string) { + require.Equal(t, "node2", node, "invalid node in cancelled op callback") + }). + Build() + + consumer := replication.NewCopyOpConsumer( + logger, + mockFSMUpdater, + mockReplicaCopier, + "node2", + &backoff.StopBackOff{}, + replication.NewOpsCache(), + time.Second*30, + 5, + runtime.NewDynamicValue(time.Second*100), + metricsCallbacks, + schemaReader, + ) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + opsChan := make(chan replication.ShardReplicationOpAndStatus, 16) + doneChan := make(chan error, 1) + + // WHEN + go func() { + doneChan <- consumer.Consume(ctx, opsChan) + }() + + mockReplicaCopier.EXPECT(). + CopyReplicaFiles(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + RunAndReturn(func(ctx context.Context, sourceNode string, collectionName string, shardName string, schemaVersion uint64) error { + // Simulate a long-running operation that checks for cancellation every loop + for { + if ctx.Err() != nil { + return ctx.Err() + } + time.Sleep(1 * time.Second) + // Simulate a long-running operation + } + }). + Times(5) + + // Add five long running ops to the consumer + for i := 0; i < 5; i++ { + mockFSMUpdater.EXPECT(). + ReplicationGetReplicaOpStatus(mock.Anything, uint64(i)). 
+ Return(api.HYDRATING, nil) + op := replication.NewShardReplicationOp(uint64(i), "node1", "node2", "TestCollection", "test-shard", api.COPY) + status := replication.NewShardReplicationStatus(api.HYDRATING) + opsChan <- replication.NewShardReplicationOpAndStatus(op, status) + completionWg.Add(1) + } + // Wait for a second for the ops to start processing + time.Sleep(1 * time.Second) + // Shutdown the consumer + close(opsChan) + + waitChan := make(chan struct{}) + go func() { + completionWg.Wait() + waitChan <- struct{}{} + }() + + select { + case <-waitChan: + case <-time.After(10 * time.Second): + t.Fatalf("Test timed out waiting for operation completion") + } + + err := <-doneChan + + // THEN + require.NoError(t, err, "expected consumer to stop without error") + + mockFSMUpdater.AssertExpectations(t) + mockReplicaCopier.AssertExpectations(t) +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/replication/copier/copier.go b/platform/dbops/binaries/weaviate-src/cluster/replication/copier/copier.go new file mode 100644 index 0000000000000000000000000000000000000000..7e0b0589104cde8266d98f3bd1ff1634b1798269 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/replication/copier/copier.go @@ -0,0 +1,588 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package copier + +import ( + "bufio" + "context" + "errors" + "fmt" + "io/fs" + "os" + "path" + "path/filepath" + "sort" + "strings" + "sync" + "time" + + "github.com/sirupsen/logrus" + "github.com/weaviate/weaviate/cluster/replication/copier/types" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/diskio" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/usecases/cluster" + "github.com/weaviate/weaviate/usecases/integrity" + + pbv1 "github.com/weaviate/weaviate/grpc/generated/protocol/v1" + + enterrors "github.com/weaviate/weaviate/entities/errors" +) + +// Copier for shard replicas, can copy a shard replica from one node to another. +type Copier struct { + // clientFactory is a factory function to create a gRPC client for the remote node + clientFactory FileReplicationServiceClientFactory + // nodeSelector converts node IDs to hostnames + nodeSelector cluster.NodeSelector + // remoteIndex allows you to "call" methods on other nodes, in this case, we'll be "calling" + // methods on the source node to perform the copy + remoteIndex types.RemoteIndex + // concurrentWorkers is the number of concurrent workers to use for copying files + concurrentWorkers int + // rootDataPath is the local path to the root data directory for the shard, we'll copy files + // to this path + rootDataPath string + // dbWrapper is used to load the index for the collection so that we can create/interact + // with the shard on this node + dbWrapper types.DbWrapper + // nodeName is the name of this node + nodeName string + + logger logrus.FieldLogger +} + +// New creates a new shard replica Copier. 
+func New(clientFactory FileReplicationServiceClientFactory, remoteIndex types.RemoteIndex, nodeSelector cluster.NodeSelector, + concurrentWorkers int, rootPath string, dbWrapper types.DbWrapper, nodeName string, logger logrus.FieldLogger, +) *Copier { + return &Copier{ + clientFactory: clientFactory, + remoteIndex: remoteIndex, + nodeSelector: nodeSelector, + concurrentWorkers: concurrentWorkers, + rootDataPath: rootPath, + dbWrapper: dbWrapper, + nodeName: nodeName, + logger: logger, + } +} + +// CopyReplicaFiles copies a shard replica from the source node to this node. +func (c *Copier) CopyReplicaFiles(ctx context.Context, srcNodeId, collectionName, shardName string, schemaVersion uint64) error { + sourceNodeAddress := c.nodeSelector.NodeAddress(srcNodeId) + + sourceNodeGRPCPort, err := c.nodeSelector.NodeGRPCPort(srcNodeId) + if err != nil { + return fmt.Errorf("failed to get gRPC port for source node: %w", err) + } + + client, err := c.clientFactory(ctx, fmt.Sprintf("%s:%d", sourceNodeAddress, sourceNodeGRPCPort)) + if err != nil { + return fmt.Errorf("failed to create gRPC client connection: %w", err) + } + defer client.Close() + + _, err = client.PauseFileActivity(ctx, &pbv1.PauseFileActivityRequest{ + IndexName: collectionName, + ShardName: shardName, + SchemaVersion: schemaVersion, + }) + if err != nil { + return fmt.Errorf("failed to pause file activity: %w", err) + } + defer client.ResumeFileActivity(ctx, &pbv1.ResumeFileActivityRequest{ + IndexName: collectionName, + ShardName: shardName, + }) + + fileListResp, err := client.ListFiles(ctx, &pbv1.ListFilesRequest{ + IndexName: collectionName, + ShardName: shardName, + }) + if err != nil { + return fmt.Errorf("failed to list files: %w", err) + } + + fileNameChan := make(chan string, 1000) + + enterrors.GoWrapper(func() { + defer close(fileNameChan) + + for _, name := range fileListResp.FileNames { + fileNameChan <- name + } + }, c.logger) + + // TODO remove this once we have a passing test that 
constantly inserts in parallel + // during shard replica movement + // if WEAVIATE_TEST_COPY_REPLICA_SLEEP is set, sleep for that amount of time + // this is only used for testing purposes + if os.Getenv("WEAVIATE_TEST_COPY_REPLICA_SLEEP") != "" { + sleepTime, err := time.ParseDuration(os.Getenv("WEAVIATE_TEST_COPY_REPLICA_SLEEP")) + if err != nil { + return fmt.Errorf("invalid WEAVIATE_TEST_COPY_REPLICA_SLEEP: %w", err) + } + time.Sleep(sleepTime) + } + + err = c.prepareLocalFolder(collectionName, shardName, fileListResp.FileNames) + if err != nil { + return fmt.Errorf("failed to prepare local folder: %w", err) + } + + metadataChan := make(chan *pbv1.FileMetadata, 1000) + var metaWG sync.WaitGroup + + for range c.concurrentWorkers { + metaWG.Add(1) + + enterrors.GoWrapper(func() { + err := c.metadataWorker(ctx, client, collectionName, shardName, fileNameChan, metadataChan, &metaWG) + if err != nil { + c.logger.WithError(err).Error("failed to get files metadata") + return + } + }, c.logger) + } + + var dlWG sync.WaitGroup + + for range c.concurrentWorkers { + dlWG.Add(1) + + enterrors.GoWrapper(func() { + err := c.downloadWorker(ctx, client, metadataChan, &dlWG) + if err != nil { + c.logger.WithError(err).Error("failed to download files") + return + } + }, c.logger) + } + + // wait for all metadata workers to finish + metaWG.Wait() + close(metadataChan) + + // wait for all download workers to finish + dlWG.Wait() + + err = c.validateLocalFolder(collectionName, shardName, fileListResp.FileNames) + if err != nil { + return fmt.Errorf("failed to validate local folder: %w", err) + } + + return nil +} + +func (c *Copier) shardPath(collectionName, shardName string) string { + return path.Join(c.rootDataPath, strings.ToLower(collectionName), shardName) +} + +func (c *Copier) prepareLocalFolder(collectionName, shardName string, fileNames []string) error { + fileNamesMap := make(map[string]struct{}, len(fileNames)) + for _, fileName := range fileNames { + 
fileNamesMap[fileName] = struct{}{} + } + + var dirs []string + + // remove files that are not in the source node + basePath := c.shardPath(collectionName, shardName) + + err := filepath.WalkDir(basePath, func(path string, d fs.DirEntry, err error) error { + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + return nil + } + return fmt.Errorf("preparing local folder: %w", err) + } + + if d.IsDir() { + dirs = append(dirs, path) + return nil + } + + localRelFilePath, err := filepath.Rel(c.rootDataPath, path) + if err != nil { + return fmt.Errorf("failed to get relative path: %w", err) + } + + if _, ok := fileNamesMap[localRelFilePath]; !ok { + err := os.Remove(path) + if err != nil { + return fmt.Errorf("removing local file %q not present in source node: %w", path, err) + } + } + + return nil + }) + if err != nil { + return fmt.Errorf("preparing local folder: %w", err) + } + + // sort dirs by depth, so that we delete the deepest directories first + sortPathsByDepthDescending(dirs) + + for _, dir := range dirs { + isEmpty, err := diskio.IsDirEmpty(dir) + if err != nil { + return fmt.Errorf("checking if local folder is empty: %s: %w", dir, err) + } + if !isEmpty { + continue + } + + err = os.Remove(dir) + if err != nil { + return fmt.Errorf("failed to remove empty local folder: %s: %w", dir, err) + } + } + + return nil +} + +func (c *Copier) metadataWorker(ctx context.Context, client FileReplicationServiceClient, + collectionName, shardName string, fileNameChan <-chan string, metadataChan chan<- *pbv1.FileMetadata, + wg *sync.WaitGroup, +) error { + defer wg.Done() + + stream, err := client.GetFileMetadata(ctx) + if err != nil { + return fmt.Errorf("failed to create GetFileMetadata stream: %w", err) + } + defer func() { + err := stream.CloseSend() + + // drain stream + for err == nil { + _, err = stream.Recv() + } + }() + + for fileName := range fileNameChan { + err := stream.Send(&pbv1.GetFileMetadataRequest{ + IndexName: collectionName, + ShardName: shardName, + 
FileName: fileName, + }) + if err != nil { + return fmt.Errorf("failed to send GetFileMetadata request for %q: %w", fileName, err) + } + + meta, err := stream.Recv() + if err != nil { + return fmt.Errorf("failed to receive file metadata for %q: %w", meta.FileName, err) + } + + metadataChan <- meta + } + + return nil +} + +func (c *Copier) downloadWorker(ctx context.Context, client FileReplicationServiceClient, + metadataChan <-chan *pbv1.FileMetadata, wg *sync.WaitGroup, +) error { + defer wg.Done() + + stream, err := client.GetFile(ctx) + if err != nil { + return fmt.Errorf("failed to create GetFile stream: %w", err) + } + defer func() { + err := stream.CloseSend() + + // drain stream + for err == nil { + _, err = stream.Recv() + } + }() + + for meta := range metadataChan { + localFilePath := filepath.Join(c.rootDataPath, meta.FileName) + + _, checksum, err := integrity.CRC32(localFilePath) + if err != nil { + if !errors.Is(err, os.ErrNotExist) { + return err + } + } else if checksum == meta.Crc32 { + // local file matches remote one, no need to download it + return nil + } + + err = stream.Send(&pbv1.GetFileRequest{ + IndexName: meta.IndexName, + ShardName: meta.ShardName, + FileName: meta.FileName, + }) + if err != nil { + return fmt.Errorf("failed to send GetFile request for %s: %w", meta.FileName, err) + } + + dir := path.Dir(localFilePath) + if err := os.MkdirAll(dir, os.ModePerm); err != nil { + return fmt.Errorf("create parent folder for %s: %w", localFilePath, err) + } + + f, err := os.Create(localFilePath + ".tmp") + if err != nil { + return fmt.Errorf("open file %q for writing: %w", localFilePath, err) + } + defer f.Close() + + wbuf := bufio.NewWriter(f) + + for { + chunk, err := stream.Recv() + if err != nil { + return fmt.Errorf("failed to receive file chunk for %s: %w", meta.FileName, err) + } + + if len(chunk.Data) > 0 { + _, err = wbuf.Write(chunk.Data) + if err != nil { + return fmt.Errorf("writing chunk to file %q: %w", localFilePath+".tmp", err) 
+ } + } + + if chunk.Eof { + break + } + } + + err = wbuf.Flush() + if err != nil { + return fmt.Errorf("flushing buffer to file %q: %w", localFilePath+".tmp", err) + } + + err = f.Sync() + if err != nil { + return fmt.Errorf("fsyncing file %q for writing: %w", localFilePath+".tmp", err) + } + + _, checksum, err = integrity.CRC32(localFilePath + ".tmp") + if err != nil { + return fmt.Errorf("calculating checksum for file %q: %w", localFilePath+".tmp", err) + } + if checksum != meta.Crc32 { + defer os.Remove(localFilePath + ".tmp") + return fmt.Errorf("checksum validation of file %q failed, expected %d, got %d", localFilePath+".tmp", meta.Crc32, checksum) + } + + err = os.Rename(localFilePath+".tmp", localFilePath) + if err != nil { + return fmt.Errorf("renaming temporary file %q to final path %q: %w", localFilePath+".tmp", localFilePath, err) + } + } + + return nil +} + +func (c *Copier) LoadLocalShard(ctx context.Context, collectionName, shardName string) error { + idx := c.dbWrapper.GetIndex(schema.ClassName(collectionName)) + if idx == nil { + return fmt.Errorf("index for collection %s not found", collectionName) + } + + return idx.LoadLocalShard(ctx, shardName, false) +} + +func (c *Copier) validateLocalFolder(collectionName, shardName string, fileNames []string) error { + fileNamesMap := make(map[string]struct{}, len(fileNames)) + for _, fileName := range fileNames { + fileNamesMap[fileName] = struct{}{} + } + + var dirs []string + + basePath := c.shardPath(collectionName, shardName) + + err := filepath.WalkDir(basePath, func(path string, d fs.DirEntry, err error) error { + if err != nil { + return fmt.Errorf("validating local folder: %w", err) + } + + if d.IsDir() { + dirs = append(dirs, path) + return nil + } + + localRelFilePath, err := filepath.Rel(c.rootDataPath, path) + if err != nil { + return fmt.Errorf("failed to get relative path: %w", err) + } + + if _, ok := fileNamesMap[localRelFilePath]; !ok { + return fmt.Errorf("file %q not found in source 
node, but exists locally", localRelFilePath) + } + + return nil + }) + if err != nil { + return fmt.Errorf("validating local folder: %w", err) + } + + // sort dirs by depth, so that we fsync the deepest directories first + sortPathsByDepthDescending(dirs) + + for _, dir := range dirs { + if err := diskio.Fsync(dir); err != nil { + return fmt.Errorf("failed to fsync local folder: %s: %w", dir, err) + } + } + + return nil +} + +// sortPathsByDepthDescending sorts paths by depth in descending order. +// Paths with the same depth may be sorted in any order. +// For example: +// +// /a/b +// /a/b/c +// /a/b/d +// /a +// +// may be sorted to: +// +// /a/b/d +// /a/b/c +// /a/b +// /a +func sortPathsByDepthDescending(paths []string) { + sort.Slice(paths, func(i, j int) bool { + return depth(paths[i]) > depth(paths[j]) + }) +} + +func depth(path string) int { + return strings.Count(filepath.Clean(path), string(filepath.Separator)) +} + +// AddAsyncReplicationTargetNode adds a target node override for a shard. +func (c *Copier) AddAsyncReplicationTargetNode(ctx context.Context, targetNodeOverride additional.AsyncReplicationTargetNodeOverride, schemaVersion uint64) error { + if targetNodeOverride.SourceNode == c.nodeName { + index := c.dbWrapper.GetIndex(schema.ClassName(targetNodeOverride.CollectionID)) + if index == nil { + return nil + } + return index.IncomingAddAsyncReplicationTargetNode(ctx, targetNodeOverride.ShardID, targetNodeOverride) + } + + srcNodeHostname, ok := c.nodeSelector.NodeHostname(targetNodeOverride.SourceNode) + if !ok { + return fmt.Errorf("source node address not found in cluster membership for node %s", targetNodeOverride.SourceNode) + } + + return c.remoteIndex.AddAsyncReplicationTargetNode(ctx, srcNodeHostname, targetNodeOverride.CollectionID, targetNodeOverride.ShardID, targetNodeOverride, schemaVersion) +} + +// RemoveAsyncReplicationTargetNode removes a target node override for a shard. 
+func (c *Copier) RemoveAsyncReplicationTargetNode(ctx context.Context, targetNodeOverride additional.AsyncReplicationTargetNodeOverride) error { + if targetNodeOverride.SourceNode == c.nodeName { + index := c.dbWrapper.GetIndex(schema.ClassName(targetNodeOverride.CollectionID)) + if index == nil { + return nil + } + return index.IncomingRemoveAsyncReplicationTargetNode(ctx, targetNodeOverride.ShardID, targetNodeOverride) + } + + srcNodeHostname, ok := c.nodeSelector.NodeHostname(targetNodeOverride.SourceNode) + if !ok { + return fmt.Errorf("source node address not found in cluster membership for node %s", targetNodeOverride.SourceNode) + } + + return c.remoteIndex.RemoveAsyncReplicationTargetNode(ctx, srcNodeHostname, targetNodeOverride.CollectionID, targetNodeOverride.ShardID, targetNodeOverride) +} + +func (c *Copier) InitAsyncReplicationLocally(ctx context.Context, collectionName, shardName string) error { + index := c.dbWrapper.GetIndex(schema.ClassName(collectionName)) + if index == nil { + return fmt.Errorf("index for collection %s not found", collectionName) + } + + shard, release, err := index.GetShard(ctx, shardName) + if err != nil { + return fmt.Errorf("get shard %s err: %w", shardName, err) + } + if shard == nil { + return fmt.Errorf("get shard %s: not found", shardName) + } + defer release() + + return shard.SetAsyncReplicationEnabled(ctx, true) +} + +func (c *Copier) RevertAsyncReplicationLocally(ctx context.Context, collectionName, shardName string) error { + index := c.dbWrapper.GetIndex(schema.ClassName(collectionName)) + if index == nil { + return fmt.Errorf("index for collection %s not found", collectionName) + } + + shard, release, err := index.GetShard(ctx, shardName) + if err != nil { + return fmt.Errorf("get shard %s err: %w", shardName, err) + } + if shard == nil { + return fmt.Errorf("get shard %s: not found", shardName) + } + defer release() + + return shard.SetAsyncReplicationEnabled(ctx, shard.Index().Config.AsyncReplicationEnabled) +} 
+ +// AsyncReplicationStatus returns the async replication status for a shard. +// The first two return values are the number of objects propagated and the start diff time in unix milliseconds. +func (c *Copier) AsyncReplicationStatus(ctx context.Context, srcNodeId, targetNodeId, collectionName, shardName string) (models.AsyncReplicationStatus, error) { + status, err := c.dbWrapper.GetOneNodeStatus(ctx, srcNodeId, collectionName, shardName, "verbose") + if err != nil { + return models.AsyncReplicationStatus{}, err + } + + if len(status.Shards) == 0 { + return models.AsyncReplicationStatus{}, fmt.Errorf("stats are empty for node %s", srcNodeId) + } + + shardFound := false + for _, shard := range status.Shards { + if shard.Name != shardName || shard.Class != collectionName { + continue + } + + shardFound = true + if len(shard.AsyncReplicationStatus) == 0 { + return models.AsyncReplicationStatus{}, fmt.Errorf("async replication status empty for shard %s in node %s", shardName, srcNodeId) + } + + for _, asyncReplicationStatus := range shard.AsyncReplicationStatus { + if asyncReplicationStatus.TargetNode != targetNodeId { + continue + } + + return models.AsyncReplicationStatus{ + ObjectsPropagated: asyncReplicationStatus.ObjectsPropagated, + StartDiffTimeUnixMillis: asyncReplicationStatus.StartDiffTimeUnixMillis, + TargetNode: asyncReplicationStatus.TargetNode, + }, nil + } + } + + if !shardFound { + return models.AsyncReplicationStatus{}, fmt.Errorf("shard %s not found in node %s", shardName, srcNodeId) + } + + return models.AsyncReplicationStatus{}, fmt.Errorf("async replication status not found for shard %s in node %s", shardName, srcNodeId) +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/replication/copier/copier_test.go b/platform/dbops/binaries/weaviate-src/cluster/replication/copier/copier_test.go new file mode 100644 index 0000000000000000000000000000000000000000..d863fd15b187b7d27d085db162cecef025f4cb57 --- /dev/null +++ 
b/platform/dbops/binaries/weaviate-src/cluster/replication/copier/copier_test.go @@ -0,0 +1,279 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package copier + +import ( + "context" + "io" + "os" + "path" + "path/filepath" + "testing" + + logrusTest "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/cluster/replication/copier/types" + pbv1 "github.com/weaviate/weaviate/grpc/generated/protocol/v1" + "github.com/weaviate/weaviate/usecases/fakes" + "github.com/weaviate/weaviate/usecases/file" + "github.com/weaviate/weaviate/usecases/integrity" +) + +func TestCopierCopyReplicaFiles(t *testing.T) { + remoteTmpDir, err := os.MkdirTemp("", "remote-*") + require.NoError(t, err) + defer os.RemoveAll(remoteTmpDir) + + localTmpDir, err := os.MkdirTemp("", "local-*") + require.NoError(t, err) + defer os.RemoveAll(localTmpDir) + + type filesToCreateBeforeCopy struct { + relativeFilePath string + fileContent []byte + isDir bool + } + + type fileWithMetadata struct { + absoluteFilePath string + relativeFilePath string + fileContent []byte + crc32 uint32 + isDir bool + } + + createTestFiles := func(t *testing.T, basePath string, files []filesToCreateBeforeCopy) []fileWithMetadata { + createdFiles := []fileWithMetadata{} + for _, file := range files { + absolutePath := filepath.Join(basePath, file.relativeFilePath) + + dir := path.Dir(absolutePath) + require.NoError(t, os.MkdirAll(dir, os.ModePerm)) + + var fileCrc32 uint32 + if file.isDir { + require.NoError(t, os.Mkdir(absolutePath, 0o755)) + } else { + require.NoError(t, os.WriteFile(absolutePath, file.fileContent, 0o644)) + _, fileCrc32, err = integrity.CRC32(absolutePath) + 
require.NoError(t, err) + } + createdFiles = append(createdFiles, fileWithMetadata{ + absoluteFilePath: absolutePath, + relativeFilePath: file.relativeFilePath, + fileContent: file.fileContent, + crc32: fileCrc32, + isDir: file.isDir, + }) + } + return createdFiles + } + type testCase struct { + name string + localFilesBefore func() []fileWithMetadata + remoteFilesToSync func() []fileWithMetadata + } + for _, tc := range []testCase{ + { + name: "ensure unexpected local files are deleted", + localFilesBefore: func() []fileWithMetadata { + return createTestFiles(t, localTmpDir, []filesToCreateBeforeCopy{ + {relativeFilePath: "collection/shard/file2", fileContent: []byte("bar")}, + }) + }, + remoteFilesToSync: func() []fileWithMetadata { + return createTestFiles(t, remoteTmpDir, []filesToCreateBeforeCopy{ + {relativeFilePath: "collection/shard/file1", fileContent: []byte("foo")}, + }) + }, + }, + { + name: "an existing local file with the same path as a remote file is overwritten", + localFilesBefore: func() []fileWithMetadata { + return createTestFiles(t, localTmpDir, []filesToCreateBeforeCopy{ + {relativeFilePath: "collection/shard/file1", fileContent: []byte("bar")}, + }) + }, + remoteFilesToSync: func() []fileWithMetadata { + return createTestFiles(t, remoteTmpDir, []filesToCreateBeforeCopy{ + {relativeFilePath: "collection/shard/file1", fileContent: []byte("foo")}, + }) + }, + }, + { + name: "ensure nested empty local directories are deleted", + localFilesBefore: func() []fileWithMetadata { + return createTestFiles(t, localTmpDir, []filesToCreateBeforeCopy{ + {relativeFilePath: "collection/shard/dir1/file2", fileContent: []byte("bar")}, + {relativeFilePath: "collection/shard/dir2/", isDir: true}, + {relativeFilePath: "collection/shard/dir2/dir3/", isDir: true}, + }) + }, + remoteFilesToSync: func() []fileWithMetadata { + return createTestFiles(t, remoteTmpDir, []filesToCreateBeforeCopy{ + {relativeFilePath: "collection/shard/dir1/file1", fileContent: 
[]byte("foo")}, + }) + }, + }, + } { + localFilesBefore := tc.localFilesBefore() + remoteFilesToSync := tc.remoteFilesToSync() + + mockRemoteIndex := types.NewMockRemoteIndex(t) + + mockClient := NewMockFileReplicationServiceClient(t) + + mockClient.EXPECT().PauseFileActivity( + mock.Anything, + mock.MatchedBy(func(req *pbv1.PauseFileActivityRequest) bool { + return req.IndexName == "collection" && + req.ShardName == "shard" && + req.SchemaVersion == uint64(0) + }), + ).Return(&pbv1.PauseFileActivityResponse{}, nil) + + mockClient.EXPECT().ResumeFileActivity( + mock.Anything, + mock.MatchedBy(func(req *pbv1.ResumeFileActivityRequest) bool { + return req.IndexName == "collection" && req.ShardName == "shard" + }), + ).Return(&pbv1.ResumeFileActivityResponse{}, nil) + + mockClient.EXPECT().Close().Return(nil) + + remoteFileRelativePaths := []string{} + + mockBidirectionalFileMetadataStream := NewMockFileMetadataStream(t) + + mockBidirectionalFileMetadataStream.EXPECT(). + CloseSend(). + Return(nil) + + mockBidirectionalFileChunkStream := NewMockFileChunkStream(t) + + mockBidirectionalFileChunkStream.EXPECT(). + CloseSend(). + Return(nil) + + for _, remoteFilePath := range remoteFilesToSync { + fi, err := os.Stat(remoteFilePath.absoluteFilePath) + require.NoError(t, err) + + _, fileCrc32, err := integrity.CRC32(remoteFilePath.absoluteFilePath) + require.NoError(t, err) + + fileMetadata := file.FileMetadata{ + Name: remoteFilePath.relativeFilePath, + Size: fi.Size(), + CRC32: fileCrc32, + } + + mockBidirectionalFileMetadataStream.EXPECT(). + Send(&pbv1.GetFileMetadataRequest{ + IndexName: "collection", + ShardName: "shard", + FileName: remoteFilePath.relativeFilePath, + }).Return(nil) + + mockBidirectionalFileMetadataStream.EXPECT(). + Recv(). + Return(&pbv1.FileMetadata{ + IndexName: "collection", + ShardName: "shard", + FileName: fileMetadata.Name, + Size: fileMetadata.Size, + Crc32: fileMetadata.CRC32, + }, nil).Times(1) + + mockBidirectionalFileChunkStream.EXPECT(). 
+ Recv(). + Return(&pbv1.FileChunk{ + Offset: 0, + Data: remoteFilePath.fileContent, + Eof: true, + }, nil).Times(1) + + mockBidirectionalFileChunkStream.EXPECT(). + Send(&pbv1.GetFileRequest{ + IndexName: "collection", + ShardName: "shard", + FileName: remoteFilePath.relativeFilePath, + }).Return(nil) + + remoteFileRelativePaths = append(remoteFileRelativePaths, remoteFilePath.relativeFilePath) + } + + mockBidirectionalFileMetadataStream.EXPECT(). + Recv(). + Return(nil, io.EOF) + + mockBidirectionalFileChunkStream.EXPECT(). + Recv(). + Return(nil, io.EOF) + + mockClient.EXPECT().GetFileMetadata( + mock.Anything, + ).Return(mockBidirectionalFileMetadataStream, nil) + + mockClient.EXPECT().GetFile( + mock.Anything, + ).Return(mockBidirectionalFileChunkStream, nil) + + mockClient.EXPECT().ListFiles( + mock.Anything, + mock.MatchedBy(func(req *pbv1.ListFilesRequest) bool { + return req.IndexName == "collection" && req.ShardName == "shard" + }), + ).Return(&pbv1.ListFilesResponse{ + FileNames: remoteFileRelativePaths, + }, nil) + + mockClientFactory := func(ctx context.Context, address string) (FileReplicationServiceClient, error) { + return mockClient, nil + } + + fakeNodeSelector := fakes.NewFakeClusterState("node1") + + logger, _ := logrusTest.NewNullLogger() + + copier := New(mockClientFactory, mockRemoteIndex, fakeNodeSelector, 1, localTmpDir, nil, "node1", logger) + err = copier.CopyReplicaFiles(t.Context(), "node1", "collection", "shard", 0) + require.NoError(t, err) + + remoteFilesRelativePathLookup := map[string]struct{}{} + for _, remoteFile := range remoteFilesToSync { + newLocalFilePath := filepath.Join(localTmpDir, remoteFile.relativeFilePath) + _, err := os.Stat(newLocalFilePath) + require.NoError(t, err) + // assert the content of the synced local/remote files match + remoteFileContent, err := os.ReadFile(remoteFile.absoluteFilePath) + require.NoError(t, err) + finalLocalFileContent, err := os.ReadFile(newLocalFilePath) + require.NoError(t, err) + 
require.Equal(t, remoteFileContent, finalLocalFileContent) + remoteFilesRelativePathLookup[remoteFile.relativeFilePath] = struct{}{} + } + + // verify that the unexpected local files from before were deleted + for _, localFile := range localFilesBefore { + // if the file exists on the remote, it should not be deleted + if _, ok := remoteFilesRelativePathLookup[localFile.relativeFilePath]; ok { + continue + } + _, err := os.Stat(localFile.absoluteFilePath) + require.Error(t, err) + require.ErrorIs(t, err, os.ErrNotExist) + } + + } +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/replication/copier/mock_file_chunk_stream.go b/platform/dbops/binaries/weaviate-src/cluster/replication/copier/mock_file_chunk_stream.go new file mode 100644 index 0000000000000000000000000000000000000000..2a36dc4ce07efeac4198f4aefd2398f18ccb7410 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/replication/copier/mock_file_chunk_stream.go @@ -0,0 +1,441 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by mockery v2.53.2. DO NOT EDIT. 
+ +package copier + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" + metadata "google.golang.org/grpc/metadata" + + protocol "github.com/weaviate/weaviate/grpc/generated/protocol/v1" +) + +// MockFileChunkStream is an autogenerated mock type for the FileChunkStream type +type MockFileChunkStream struct { + mock.Mock +} + +type MockFileChunkStream_Expecter struct { + mock *mock.Mock +} + +func (_m *MockFileChunkStream) EXPECT() *MockFileChunkStream_Expecter { + return &MockFileChunkStream_Expecter{mock: &_m.Mock} +} + +// CloseSend provides a mock function with no fields +func (_m *MockFileChunkStream) CloseSend() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for CloseSend") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockFileChunkStream_CloseSend_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CloseSend' +type MockFileChunkStream_CloseSend_Call struct { + *mock.Call +} + +// CloseSend is a helper method to define mock.On call +func (_e *MockFileChunkStream_Expecter) CloseSend() *MockFileChunkStream_CloseSend_Call { + return &MockFileChunkStream_CloseSend_Call{Call: _e.mock.On("CloseSend")} +} + +func (_c *MockFileChunkStream_CloseSend_Call) Run(run func()) *MockFileChunkStream_CloseSend_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockFileChunkStream_CloseSend_Call) Return(_a0 error) *MockFileChunkStream_CloseSend_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockFileChunkStream_CloseSend_Call) RunAndReturn(run func() error) *MockFileChunkStream_CloseSend_Call { + _c.Call.Return(run) + return _c +} + +// Context provides a mock function with no fields +func (_m *MockFileChunkStream) Context() context.Context { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Context") + } + 
+ var r0 context.Context + if rf, ok := ret.Get(0).(func() context.Context); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(context.Context) + } + } + + return r0 +} + +// MockFileChunkStream_Context_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Context' +type MockFileChunkStream_Context_Call struct { + *mock.Call +} + +// Context is a helper method to define mock.On call +func (_e *MockFileChunkStream_Expecter) Context() *MockFileChunkStream_Context_Call { + return &MockFileChunkStream_Context_Call{Call: _e.mock.On("Context")} +} + +func (_c *MockFileChunkStream_Context_Call) Run(run func()) *MockFileChunkStream_Context_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockFileChunkStream_Context_Call) Return(_a0 context.Context) *MockFileChunkStream_Context_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockFileChunkStream_Context_Call) RunAndReturn(run func() context.Context) *MockFileChunkStream_Context_Call { + _c.Call.Return(run) + return _c +} + +// Header provides a mock function with no fields +func (_m *MockFileChunkStream) Header() (metadata.MD, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Header") + } + + var r0 metadata.MD + var r1 error + if rf, ok := ret.Get(0).(func() (metadata.MD, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() metadata.MD); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(metadata.MD) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockFileChunkStream_Header_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Header' +type MockFileChunkStream_Header_Call struct { + *mock.Call +} + +// Header is a helper method to define mock.On call +func (_e *MockFileChunkStream_Expecter) Header() 
*MockFileChunkStream_Header_Call { + return &MockFileChunkStream_Header_Call{Call: _e.mock.On("Header")} +} + +func (_c *MockFileChunkStream_Header_Call) Run(run func()) *MockFileChunkStream_Header_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockFileChunkStream_Header_Call) Return(_a0 metadata.MD, _a1 error) *MockFileChunkStream_Header_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockFileChunkStream_Header_Call) RunAndReturn(run func() (metadata.MD, error)) *MockFileChunkStream_Header_Call { + _c.Call.Return(run) + return _c +} + +// Recv provides a mock function with no fields +func (_m *MockFileChunkStream) Recv() (*protocol.FileChunk, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Recv") + } + + var r0 *protocol.FileChunk + var r1 error + if rf, ok := ret.Get(0).(func() (*protocol.FileChunk, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() *protocol.FileChunk); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*protocol.FileChunk) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockFileChunkStream_Recv_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Recv' +type MockFileChunkStream_Recv_Call struct { + *mock.Call +} + +// Recv is a helper method to define mock.On call +func (_e *MockFileChunkStream_Expecter) Recv() *MockFileChunkStream_Recv_Call { + return &MockFileChunkStream_Recv_Call{Call: _e.mock.On("Recv")} +} + +func (_c *MockFileChunkStream_Recv_Call) Run(run func()) *MockFileChunkStream_Recv_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockFileChunkStream_Recv_Call) Return(_a0 *protocol.FileChunk, _a1 error) *MockFileChunkStream_Recv_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockFileChunkStream_Recv_Call) RunAndReturn(run 
func() (*protocol.FileChunk, error)) *MockFileChunkStream_Recv_Call { + _c.Call.Return(run) + return _c +} + +// RecvMsg provides a mock function with given fields: m +func (_m *MockFileChunkStream) RecvMsg(m interface{}) error { + ret := _m.Called(m) + + if len(ret) == 0 { + panic("no return value specified for RecvMsg") + } + + var r0 error + if rf, ok := ret.Get(0).(func(interface{}) error); ok { + r0 = rf(m) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockFileChunkStream_RecvMsg_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RecvMsg' +type MockFileChunkStream_RecvMsg_Call struct { + *mock.Call +} + +// RecvMsg is a helper method to define mock.On call +// - m interface{} +func (_e *MockFileChunkStream_Expecter) RecvMsg(m interface{}) *MockFileChunkStream_RecvMsg_Call { + return &MockFileChunkStream_RecvMsg_Call{Call: _e.mock.On("RecvMsg", m)} +} + +func (_c *MockFileChunkStream_RecvMsg_Call) Run(run func(m interface{})) *MockFileChunkStream_RecvMsg_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(interface{})) + }) + return _c +} + +func (_c *MockFileChunkStream_RecvMsg_Call) Return(_a0 error) *MockFileChunkStream_RecvMsg_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockFileChunkStream_RecvMsg_Call) RunAndReturn(run func(interface{}) error) *MockFileChunkStream_RecvMsg_Call { + _c.Call.Return(run) + return _c +} + +// Send provides a mock function with given fields: _a0 +func (_m *MockFileChunkStream) Send(_a0 *protocol.GetFileRequest) error { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Send") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*protocol.GetFileRequest) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockFileChunkStream_Send_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Send' +type MockFileChunkStream_Send_Call struct { + *mock.Call +} 
+ +// Send is a helper method to define mock.On call +// - _a0 *protocol.GetFileRequest +func (_e *MockFileChunkStream_Expecter) Send(_a0 interface{}) *MockFileChunkStream_Send_Call { + return &MockFileChunkStream_Send_Call{Call: _e.mock.On("Send", _a0)} +} + +func (_c *MockFileChunkStream_Send_Call) Run(run func(_a0 *protocol.GetFileRequest)) *MockFileChunkStream_Send_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(*protocol.GetFileRequest)) + }) + return _c +} + +func (_c *MockFileChunkStream_Send_Call) Return(_a0 error) *MockFileChunkStream_Send_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockFileChunkStream_Send_Call) RunAndReturn(run func(*protocol.GetFileRequest) error) *MockFileChunkStream_Send_Call { + _c.Call.Return(run) + return _c +} + +// SendMsg provides a mock function with given fields: _a0 +func (_m *MockFileChunkStream) SendMsg(_a0 interface{}) error { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for SendMsg") + } + + var r0 error + if rf, ok := ret.Get(0).(func(interface{}) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockFileChunkStream_SendMsg_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SendMsg' +type MockFileChunkStream_SendMsg_Call struct { + *mock.Call +} + +// SendMsg is a helper method to define mock.On call +// - _a0 interface{} +func (_e *MockFileChunkStream_Expecter) SendMsg(_a0 interface{}) *MockFileChunkStream_SendMsg_Call { + return &MockFileChunkStream_SendMsg_Call{Call: _e.mock.On("SendMsg", _a0)} +} + +func (_c *MockFileChunkStream_SendMsg_Call) Run(run func(_a0 interface{})) *MockFileChunkStream_SendMsg_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(interface{})) + }) + return _c +} + +func (_c *MockFileChunkStream_SendMsg_Call) Return(_a0 error) *MockFileChunkStream_SendMsg_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c 
*MockFileChunkStream_SendMsg_Call) RunAndReturn(run func(interface{}) error) *MockFileChunkStream_SendMsg_Call { + _c.Call.Return(run) + return _c +} + +// Trailer provides a mock function with no fields +func (_m *MockFileChunkStream) Trailer() metadata.MD { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Trailer") + } + + var r0 metadata.MD + if rf, ok := ret.Get(0).(func() metadata.MD); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(metadata.MD) + } + } + + return r0 +} + +// MockFileChunkStream_Trailer_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Trailer' +type MockFileChunkStream_Trailer_Call struct { + *mock.Call +} + +// Trailer is a helper method to define mock.On call +func (_e *MockFileChunkStream_Expecter) Trailer() *MockFileChunkStream_Trailer_Call { + return &MockFileChunkStream_Trailer_Call{Call: _e.mock.On("Trailer")} +} + +func (_c *MockFileChunkStream_Trailer_Call) Run(run func()) *MockFileChunkStream_Trailer_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockFileChunkStream_Trailer_Call) Return(_a0 metadata.MD) *MockFileChunkStream_Trailer_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockFileChunkStream_Trailer_Call) RunAndReturn(run func() metadata.MD) *MockFileChunkStream_Trailer_Call { + _c.Call.Return(run) + return _c +} + +// NewMockFileChunkStream creates a new instance of MockFileChunkStream. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewMockFileChunkStream(t interface { + mock.TestingT + Cleanup(func()) +}) *MockFileChunkStream { + mock := &MockFileChunkStream{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/replication/copier/mock_file_metadata_stream.go b/platform/dbops/binaries/weaviate-src/cluster/replication/copier/mock_file_metadata_stream.go new file mode 100644 index 0000000000000000000000000000000000000000..8200a37ea5875a65eca4202d5568687aa327e0f0 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/replication/copier/mock_file_metadata_stream.go @@ -0,0 +1,441 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by mockery v2.53.2. DO NOT EDIT. 
+ +package copier + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" + metadata "google.golang.org/grpc/metadata" + + protocol "github.com/weaviate/weaviate/grpc/generated/protocol/v1" +) + +// MockFileMetadataStream is an autogenerated mock type for the FileMetadataStream type +type MockFileMetadataStream struct { + mock.Mock +} + +type MockFileMetadataStream_Expecter struct { + mock *mock.Mock +} + +func (_m *MockFileMetadataStream) EXPECT() *MockFileMetadataStream_Expecter { + return &MockFileMetadataStream_Expecter{mock: &_m.Mock} +} + +// CloseSend provides a mock function with no fields +func (_m *MockFileMetadataStream) CloseSend() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for CloseSend") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockFileMetadataStream_CloseSend_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CloseSend' +type MockFileMetadataStream_CloseSend_Call struct { + *mock.Call +} + +// CloseSend is a helper method to define mock.On call +func (_e *MockFileMetadataStream_Expecter) CloseSend() *MockFileMetadataStream_CloseSend_Call { + return &MockFileMetadataStream_CloseSend_Call{Call: _e.mock.On("CloseSend")} +} + +func (_c *MockFileMetadataStream_CloseSend_Call) Run(run func()) *MockFileMetadataStream_CloseSend_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockFileMetadataStream_CloseSend_Call) Return(_a0 error) *MockFileMetadataStream_CloseSend_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockFileMetadataStream_CloseSend_Call) RunAndReturn(run func() error) *MockFileMetadataStream_CloseSend_Call { + _c.Call.Return(run) + return _c +} + +// Context provides a mock function with no fields +func (_m *MockFileMetadataStream) Context() context.Context { + ret := _m.Called() + + if len(ret) == 
0 { + panic("no return value specified for Context") + } + + var r0 context.Context + if rf, ok := ret.Get(0).(func() context.Context); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(context.Context) + } + } + + return r0 +} + +// MockFileMetadataStream_Context_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Context' +type MockFileMetadataStream_Context_Call struct { + *mock.Call +} + +// Context is a helper method to define mock.On call +func (_e *MockFileMetadataStream_Expecter) Context() *MockFileMetadataStream_Context_Call { + return &MockFileMetadataStream_Context_Call{Call: _e.mock.On("Context")} +} + +func (_c *MockFileMetadataStream_Context_Call) Run(run func()) *MockFileMetadataStream_Context_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockFileMetadataStream_Context_Call) Return(_a0 context.Context) *MockFileMetadataStream_Context_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockFileMetadataStream_Context_Call) RunAndReturn(run func() context.Context) *MockFileMetadataStream_Context_Call { + _c.Call.Return(run) + return _c +} + +// Header provides a mock function with no fields +func (_m *MockFileMetadataStream) Header() (metadata.MD, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Header") + } + + var r0 metadata.MD + var r1 error + if rf, ok := ret.Get(0).(func() (metadata.MD, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() metadata.MD); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(metadata.MD) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockFileMetadataStream_Header_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Header' +type MockFileMetadataStream_Header_Call struct { + *mock.Call +} + +// Header is a helper 
method to define mock.On call +func (_e *MockFileMetadataStream_Expecter) Header() *MockFileMetadataStream_Header_Call { + return &MockFileMetadataStream_Header_Call{Call: _e.mock.On("Header")} +} + +func (_c *MockFileMetadataStream_Header_Call) Run(run func()) *MockFileMetadataStream_Header_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockFileMetadataStream_Header_Call) Return(_a0 metadata.MD, _a1 error) *MockFileMetadataStream_Header_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockFileMetadataStream_Header_Call) RunAndReturn(run func() (metadata.MD, error)) *MockFileMetadataStream_Header_Call { + _c.Call.Return(run) + return _c +} + +// Recv provides a mock function with no fields +func (_m *MockFileMetadataStream) Recv() (*protocol.FileMetadata, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Recv") + } + + var r0 *protocol.FileMetadata + var r1 error + if rf, ok := ret.Get(0).(func() (*protocol.FileMetadata, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() *protocol.FileMetadata); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*protocol.FileMetadata) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockFileMetadataStream_Recv_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Recv' +type MockFileMetadataStream_Recv_Call struct { + *mock.Call +} + +// Recv is a helper method to define mock.On call +func (_e *MockFileMetadataStream_Expecter) Recv() *MockFileMetadataStream_Recv_Call { + return &MockFileMetadataStream_Recv_Call{Call: _e.mock.On("Recv")} +} + +func (_c *MockFileMetadataStream_Recv_Call) Run(run func()) *MockFileMetadataStream_Recv_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockFileMetadataStream_Recv_Call) Return(_a0 
*protocol.FileMetadata, _a1 error) *MockFileMetadataStream_Recv_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockFileMetadataStream_Recv_Call) RunAndReturn(run func() (*protocol.FileMetadata, error)) *MockFileMetadataStream_Recv_Call { + _c.Call.Return(run) + return _c +} + +// RecvMsg provides a mock function with given fields: m +func (_m *MockFileMetadataStream) RecvMsg(m interface{}) error { + ret := _m.Called(m) + + if len(ret) == 0 { + panic("no return value specified for RecvMsg") + } + + var r0 error + if rf, ok := ret.Get(0).(func(interface{}) error); ok { + r0 = rf(m) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockFileMetadataStream_RecvMsg_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RecvMsg' +type MockFileMetadataStream_RecvMsg_Call struct { + *mock.Call +} + +// RecvMsg is a helper method to define mock.On call +// - m interface{} +func (_e *MockFileMetadataStream_Expecter) RecvMsg(m interface{}) *MockFileMetadataStream_RecvMsg_Call { + return &MockFileMetadataStream_RecvMsg_Call{Call: _e.mock.On("RecvMsg", m)} +} + +func (_c *MockFileMetadataStream_RecvMsg_Call) Run(run func(m interface{})) *MockFileMetadataStream_RecvMsg_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(interface{})) + }) + return _c +} + +func (_c *MockFileMetadataStream_RecvMsg_Call) Return(_a0 error) *MockFileMetadataStream_RecvMsg_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockFileMetadataStream_RecvMsg_Call) RunAndReturn(run func(interface{}) error) *MockFileMetadataStream_RecvMsg_Call { + _c.Call.Return(run) + return _c +} + +// Send provides a mock function with given fields: _a0 +func (_m *MockFileMetadataStream) Send(_a0 *protocol.GetFileMetadataRequest) error { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Send") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*protocol.GetFileMetadataRequest) error); ok { + r0 = rf(_a0) + } 
else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockFileMetadataStream_Send_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Send' +type MockFileMetadataStream_Send_Call struct { + *mock.Call +} + +// Send is a helper method to define mock.On call +// - _a0 *protocol.GetFileMetadataRequest +func (_e *MockFileMetadataStream_Expecter) Send(_a0 interface{}) *MockFileMetadataStream_Send_Call { + return &MockFileMetadataStream_Send_Call{Call: _e.mock.On("Send", _a0)} +} + +func (_c *MockFileMetadataStream_Send_Call) Run(run func(_a0 *protocol.GetFileMetadataRequest)) *MockFileMetadataStream_Send_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(*protocol.GetFileMetadataRequest)) + }) + return _c +} + +func (_c *MockFileMetadataStream_Send_Call) Return(_a0 error) *MockFileMetadataStream_Send_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockFileMetadataStream_Send_Call) RunAndReturn(run func(*protocol.GetFileMetadataRequest) error) *MockFileMetadataStream_Send_Call { + _c.Call.Return(run) + return _c +} + +// SendMsg provides a mock function with given fields: _a0 +func (_m *MockFileMetadataStream) SendMsg(_a0 interface{}) error { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for SendMsg") + } + + var r0 error + if rf, ok := ret.Get(0).(func(interface{}) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockFileMetadataStream_SendMsg_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SendMsg' +type MockFileMetadataStream_SendMsg_Call struct { + *mock.Call +} + +// SendMsg is a helper method to define mock.On call +// - _a0 interface{} +func (_e *MockFileMetadataStream_Expecter) SendMsg(_a0 interface{}) *MockFileMetadataStream_SendMsg_Call { + return &MockFileMetadataStream_SendMsg_Call{Call: _e.mock.On("SendMsg", _a0)} +} + +func (_c *MockFileMetadataStream_SendMsg_Call) Run(run func(_a0 
interface{})) *MockFileMetadataStream_SendMsg_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(interface{})) + }) + return _c +} + +func (_c *MockFileMetadataStream_SendMsg_Call) Return(_a0 error) *MockFileMetadataStream_SendMsg_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockFileMetadataStream_SendMsg_Call) RunAndReturn(run func(interface{}) error) *MockFileMetadataStream_SendMsg_Call { + _c.Call.Return(run) + return _c +} + +// Trailer provides a mock function with no fields +func (_m *MockFileMetadataStream) Trailer() metadata.MD { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Trailer") + } + + var r0 metadata.MD + if rf, ok := ret.Get(0).(func() metadata.MD); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(metadata.MD) + } + } + + return r0 +} + +// MockFileMetadataStream_Trailer_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Trailer' +type MockFileMetadataStream_Trailer_Call struct { + *mock.Call +} + +// Trailer is a helper method to define mock.On call +func (_e *MockFileMetadataStream_Expecter) Trailer() *MockFileMetadataStream_Trailer_Call { + return &MockFileMetadataStream_Trailer_Call{Call: _e.mock.On("Trailer")} +} + +func (_c *MockFileMetadataStream_Trailer_Call) Run(run func()) *MockFileMetadataStream_Trailer_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockFileMetadataStream_Trailer_Call) Return(_a0 metadata.MD) *MockFileMetadataStream_Trailer_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockFileMetadataStream_Trailer_Call) RunAndReturn(run func() metadata.MD) *MockFileMetadataStream_Trailer_Call { + _c.Call.Return(run) + return _c +} + +// NewMockFileMetadataStream creates a new instance of MockFileMetadataStream. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+// The first argument is typically a *testing.T value. +func NewMockFileMetadataStream(t interface { + mock.TestingT + Cleanup(func()) +}) *MockFileMetadataStream { + mock := &MockFileMetadataStream{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/replication/copier/mock_file_replication_service_client.go b/platform/dbops/binaries/weaviate-src/cluster/replication/copier/mock_file_replication_service_client.go new file mode 100644 index 0000000000000000000000000000000000000000..fcf1fe1bfe3936b9827f097b160386fb6236feb6 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/replication/copier/mock_file_replication_service_client.go @@ -0,0 +1,464 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by mockery v2.53.2. DO NOT EDIT. 
+ +package copier + +import ( + context "context" + + grpc "google.golang.org/grpc" + + mock "github.com/stretchr/testify/mock" + + protocol "github.com/weaviate/weaviate/grpc/generated/protocol/v1" +) + +// MockFileReplicationServiceClient is an autogenerated mock type for the FileReplicationServiceClient type +type MockFileReplicationServiceClient struct { + mock.Mock +} + +type MockFileReplicationServiceClient_Expecter struct { + mock *mock.Mock +} + +func (_m *MockFileReplicationServiceClient) EXPECT() *MockFileReplicationServiceClient_Expecter { + return &MockFileReplicationServiceClient_Expecter{mock: &_m.Mock} +} + +// Close provides a mock function with no fields +func (_m *MockFileReplicationServiceClient) Close() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Close") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockFileReplicationServiceClient_Close_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Close' +type MockFileReplicationServiceClient_Close_Call struct { + *mock.Call +} + +// Close is a helper method to define mock.On call +func (_e *MockFileReplicationServiceClient_Expecter) Close() *MockFileReplicationServiceClient_Close_Call { + return &MockFileReplicationServiceClient_Close_Call{Call: _e.mock.On("Close")} +} + +func (_c *MockFileReplicationServiceClient_Close_Call) Run(run func()) *MockFileReplicationServiceClient_Close_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockFileReplicationServiceClient_Close_Call) Return(_a0 error) *MockFileReplicationServiceClient_Close_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockFileReplicationServiceClient_Close_Call) RunAndReturn(run func() error) *MockFileReplicationServiceClient_Close_Call { + _c.Call.Return(run) + return _c +} + +// GetFile provides a mock function with 
given fields: ctx, opts +func (_m *MockFileReplicationServiceClient) GetFile(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[protocol.GetFileRequest, protocol.FileChunk], error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for GetFile") + } + + var r0 grpc.BidiStreamingClient[protocol.GetFileRequest, protocol.FileChunk] + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, ...grpc.CallOption) (grpc.BidiStreamingClient[protocol.GetFileRequest, protocol.FileChunk], error)); ok { + return rf(ctx, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, ...grpc.CallOption) grpc.BidiStreamingClient[protocol.GetFileRequest, protocol.FileChunk]); ok { + r0 = rf(ctx, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(grpc.BidiStreamingClient[protocol.GetFileRequest, protocol.FileChunk]) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, ...grpc.CallOption) error); ok { + r1 = rf(ctx, opts...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockFileReplicationServiceClient_GetFile_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetFile' +type MockFileReplicationServiceClient_GetFile_Call struct { + *mock.Call +} + +// GetFile is a helper method to define mock.On call +// - ctx context.Context +// - opts ...grpc.CallOption +func (_e *MockFileReplicationServiceClient_Expecter) GetFile(ctx interface{}, opts ...interface{}) *MockFileReplicationServiceClient_GetFile_Call { + return &MockFileReplicationServiceClient_GetFile_Call{Call: _e.mock.On("GetFile", + append([]interface{}{ctx}, opts...)...)} +} + +func (_c *MockFileReplicationServiceClient_GetFile_Call) Run(run func(ctx context.Context, opts ...grpc.CallOption)) *MockFileReplicationServiceClient_GetFile_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]grpc.CallOption, len(args)-1) + for i, a := range args[1:] { + if a != nil { + variadicArgs[i] = a.(grpc.CallOption) + } + } + run(args[0].(context.Context), variadicArgs...) 
+ }) + return _c +} + +func (_c *MockFileReplicationServiceClient_GetFile_Call) Return(_a0 grpc.BidiStreamingClient[protocol.GetFileRequest, protocol.FileChunk], _a1 error) *MockFileReplicationServiceClient_GetFile_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockFileReplicationServiceClient_GetFile_Call) RunAndReturn(run func(context.Context, ...grpc.CallOption) (grpc.BidiStreamingClient[protocol.GetFileRequest, protocol.FileChunk], error)) *MockFileReplicationServiceClient_GetFile_Call { + _c.Call.Return(run) + return _c +} + +// GetFileMetadata provides a mock function with given fields: ctx, opts +func (_m *MockFileReplicationServiceClient) GetFileMetadata(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[protocol.GetFileMetadataRequest, protocol.FileMetadata], error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for GetFileMetadata") + } + + var r0 grpc.BidiStreamingClient[protocol.GetFileMetadataRequest, protocol.FileMetadata] + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, ...grpc.CallOption) (grpc.BidiStreamingClient[protocol.GetFileMetadataRequest, protocol.FileMetadata], error)); ok { + return rf(ctx, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, ...grpc.CallOption) grpc.BidiStreamingClient[protocol.GetFileMetadataRequest, protocol.FileMetadata]); ok { + r0 = rf(ctx, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(grpc.BidiStreamingClient[protocol.GetFileMetadataRequest, protocol.FileMetadata]) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, ...grpc.CallOption) error); ok { + r1 = rf(ctx, opts...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockFileReplicationServiceClient_GetFileMetadata_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetFileMetadata' +type MockFileReplicationServiceClient_GetFileMetadata_Call struct { + *mock.Call +} + +// GetFileMetadata is a helper method to define mock.On call +// - ctx context.Context +// - opts ...grpc.CallOption +func (_e *MockFileReplicationServiceClient_Expecter) GetFileMetadata(ctx interface{}, opts ...interface{}) *MockFileReplicationServiceClient_GetFileMetadata_Call { + return &MockFileReplicationServiceClient_GetFileMetadata_Call{Call: _e.mock.On("GetFileMetadata", + append([]interface{}{ctx}, opts...)...)} +} + +func (_c *MockFileReplicationServiceClient_GetFileMetadata_Call) Run(run func(ctx context.Context, opts ...grpc.CallOption)) *MockFileReplicationServiceClient_GetFileMetadata_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]grpc.CallOption, len(args)-1) + for i, a := range args[1:] { + if a != nil { + variadicArgs[i] = a.(grpc.CallOption) + } + } + run(args[0].(context.Context), variadicArgs...) 
+ }) + return _c +} + +func (_c *MockFileReplicationServiceClient_GetFileMetadata_Call) Return(_a0 grpc.BidiStreamingClient[protocol.GetFileMetadataRequest, protocol.FileMetadata], _a1 error) *MockFileReplicationServiceClient_GetFileMetadata_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockFileReplicationServiceClient_GetFileMetadata_Call) RunAndReturn(run func(context.Context, ...grpc.CallOption) (grpc.BidiStreamingClient[protocol.GetFileMetadataRequest, protocol.FileMetadata], error)) *MockFileReplicationServiceClient_GetFileMetadata_Call { + _c.Call.Return(run) + return _c +} + +// ListFiles provides a mock function with given fields: ctx, in, opts +func (_m *MockFileReplicationServiceClient) ListFiles(ctx context.Context, in *protocol.ListFilesRequest, opts ...grpc.CallOption) (*protocol.ListFilesResponse, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for ListFiles") + } + + var r0 *protocol.ListFilesResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *protocol.ListFilesRequest, ...grpc.CallOption) (*protocol.ListFilesResponse, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *protocol.ListFilesRequest, ...grpc.CallOption) *protocol.ListFilesResponse); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*protocol.ListFilesResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *protocol.ListFilesRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockFileReplicationServiceClient_ListFiles_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListFiles' +type MockFileReplicationServiceClient_ListFiles_Call struct { + *mock.Call +} + +// ListFiles is a helper method to define mock.On call +// - ctx context.Context +// - in *protocol.ListFilesRequest +// - opts ...grpc.CallOption +func (_e *MockFileReplicationServiceClient_Expecter) ListFiles(ctx interface{}, in interface{}, opts ...interface{}) *MockFileReplicationServiceClient_ListFiles_Call { + return &MockFileReplicationServiceClient_ListFiles_Call{Call: _e.mock.On("ListFiles", + append([]interface{}{ctx, in}, opts...)...)} +} + +func (_c *MockFileReplicationServiceClient_ListFiles_Call) Run(run func(ctx context.Context, in *protocol.ListFilesRequest, opts ...grpc.CallOption)) *MockFileReplicationServiceClient_ListFiles_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]grpc.CallOption, len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(grpc.CallOption) + } + } + run(args[0].(context.Context), args[1].(*protocol.ListFilesRequest), variadicArgs...) 
+ }) + return _c +} + +func (_c *MockFileReplicationServiceClient_ListFiles_Call) Return(_a0 *protocol.ListFilesResponse, _a1 error) *MockFileReplicationServiceClient_ListFiles_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockFileReplicationServiceClient_ListFiles_Call) RunAndReturn(run func(context.Context, *protocol.ListFilesRequest, ...grpc.CallOption) (*protocol.ListFilesResponse, error)) *MockFileReplicationServiceClient_ListFiles_Call { + _c.Call.Return(run) + return _c +} + +// PauseFileActivity provides a mock function with given fields: ctx, in, opts +func (_m *MockFileReplicationServiceClient) PauseFileActivity(ctx context.Context, in *protocol.PauseFileActivityRequest, opts ...grpc.CallOption) (*protocol.PauseFileActivityResponse, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for PauseFileActivity") + } + + var r0 *protocol.PauseFileActivityResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *protocol.PauseFileActivityRequest, ...grpc.CallOption) (*protocol.PauseFileActivityResponse, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *protocol.PauseFileActivityRequest, ...grpc.CallOption) *protocol.PauseFileActivityResponse); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*protocol.PauseFileActivityResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *protocol.PauseFileActivityRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockFileReplicationServiceClient_PauseFileActivity_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PauseFileActivity' +type MockFileReplicationServiceClient_PauseFileActivity_Call struct { + *mock.Call +} + +// PauseFileActivity is a helper method to define mock.On call +// - ctx context.Context +// - in *protocol.PauseFileActivityRequest +// - opts ...grpc.CallOption +func (_e *MockFileReplicationServiceClient_Expecter) PauseFileActivity(ctx interface{}, in interface{}, opts ...interface{}) *MockFileReplicationServiceClient_PauseFileActivity_Call { + return &MockFileReplicationServiceClient_PauseFileActivity_Call{Call: _e.mock.On("PauseFileActivity", + append([]interface{}{ctx, in}, opts...)...)} +} + +func (_c *MockFileReplicationServiceClient_PauseFileActivity_Call) Run(run func(ctx context.Context, in *protocol.PauseFileActivityRequest, opts ...grpc.CallOption)) *MockFileReplicationServiceClient_PauseFileActivity_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]grpc.CallOption, len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(grpc.CallOption) + } + } + run(args[0].(context.Context), args[1].(*protocol.PauseFileActivityRequest), variadicArgs...) 
+ }) + return _c +} + +func (_c *MockFileReplicationServiceClient_PauseFileActivity_Call) Return(_a0 *protocol.PauseFileActivityResponse, _a1 error) *MockFileReplicationServiceClient_PauseFileActivity_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockFileReplicationServiceClient_PauseFileActivity_Call) RunAndReturn(run func(context.Context, *protocol.PauseFileActivityRequest, ...grpc.CallOption) (*protocol.PauseFileActivityResponse, error)) *MockFileReplicationServiceClient_PauseFileActivity_Call { + _c.Call.Return(run) + return _c +} + +// ResumeFileActivity provides a mock function with given fields: ctx, in, opts +func (_m *MockFileReplicationServiceClient) ResumeFileActivity(ctx context.Context, in *protocol.ResumeFileActivityRequest, opts ...grpc.CallOption) (*protocol.ResumeFileActivityResponse, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for ResumeFileActivity") + } + + var r0 *protocol.ResumeFileActivityResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *protocol.ResumeFileActivityRequest, ...grpc.CallOption) (*protocol.ResumeFileActivityResponse, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *protocol.ResumeFileActivityRequest, ...grpc.CallOption) *protocol.ResumeFileActivityResponse); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*protocol.ResumeFileActivityResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *protocol.ResumeFileActivityRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockFileReplicationServiceClient_ResumeFileActivity_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ResumeFileActivity' +type MockFileReplicationServiceClient_ResumeFileActivity_Call struct { + *mock.Call +} + +// ResumeFileActivity is a helper method to define mock.On call +// - ctx context.Context +// - in *protocol.ResumeFileActivityRequest +// - opts ...grpc.CallOption +func (_e *MockFileReplicationServiceClient_Expecter) ResumeFileActivity(ctx interface{}, in interface{}, opts ...interface{}) *MockFileReplicationServiceClient_ResumeFileActivity_Call { + return &MockFileReplicationServiceClient_ResumeFileActivity_Call{Call: _e.mock.On("ResumeFileActivity", + append([]interface{}{ctx, in}, opts...)...)} +} + +func (_c *MockFileReplicationServiceClient_ResumeFileActivity_Call) Run(run func(ctx context.Context, in *protocol.ResumeFileActivityRequest, opts ...grpc.CallOption)) *MockFileReplicationServiceClient_ResumeFileActivity_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]grpc.CallOption, len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(grpc.CallOption) + } + } + run(args[0].(context.Context), args[1].(*protocol.ResumeFileActivityRequest), variadicArgs...) 
+ }) + return _c +} + +func (_c *MockFileReplicationServiceClient_ResumeFileActivity_Call) Return(_a0 *protocol.ResumeFileActivityResponse, _a1 error) *MockFileReplicationServiceClient_ResumeFileActivity_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockFileReplicationServiceClient_ResumeFileActivity_Call) RunAndReturn(run func(context.Context, *protocol.ResumeFileActivityRequest, ...grpc.CallOption) (*protocol.ResumeFileActivityResponse, error)) *MockFileReplicationServiceClient_ResumeFileActivity_Call { + _c.Call.Return(run) + return _c +} + +// NewMockFileReplicationServiceClient creates a new instance of MockFileReplicationServiceClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMockFileReplicationServiceClient(t interface { + mock.TestingT + Cleanup(func()) +}) *MockFileReplicationServiceClient { + mock := &MockFileReplicationServiceClient{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/replication/copier/remote_client.go b/platform/dbops/binaries/weaviate-src/cluster/replication/copier/remote_client.go new file mode 100644 index 0000000000000000000000000000000000000000..8aa3ac56001d59abaed46f40d1f1c4b1d35c563f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/replication/copier/remote_client.go @@ -0,0 +1,81 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package copier + +import ( + "context" + "encoding/base64" + + "github.com/weaviate/weaviate/usecases/cluster" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" + + pbv1 "github.com/weaviate/weaviate/grpc/generated/protocol/v1" +) + +type FileReplicationServiceClientFactory = func(ctx context.Context, address string) (FileReplicationServiceClient, error) + +type FileReplicationServiceClient interface { + pbv1.FileReplicationServiceClient + Close() error +} + +type grpcFileReplicationServiceClient struct { + clientConn *grpc.ClientConn + authMetadata metadata.MD + client pbv1.FileReplicationServiceClient +} + +func NewFileReplicationServiceClient(clientConn *grpc.ClientConn, authConfig cluster.AuthConfig) FileReplicationServiceClient { + var authMetadata metadata.MD + + if authConfig.BasicAuth.Enabled() { + auth := base64.StdEncoding.EncodeToString([]byte(authConfig.BasicAuth.Username + ":" + authConfig.BasicAuth.Password)) + authMetadata = metadata.New(map[string]string{ + "authorization": "Basic " + auth, + }) + } + + return &grpcFileReplicationServiceClient{ + clientConn: clientConn, + authMetadata: authMetadata, + client: pbv1.NewFileReplicationServiceClient(clientConn), + } +} + +func (g *grpcFileReplicationServiceClient) Close() error { + return g.clientConn.Close() +} + +func (g *grpcFileReplicationServiceClient) addAuthMetadataToContext(ctx context.Context) context.Context { + return metadata.NewOutgoingContext(ctx, g.authMetadata) +} + +func (g *grpcFileReplicationServiceClient) GetFile(ctx context.Context, opts ...grpc.CallOption) (pbv1.FileReplicationService_GetFileClient, error) { + return g.client.GetFile(g.addAuthMetadataToContext(ctx), opts...) +} + +func (g *grpcFileReplicationServiceClient) GetFileMetadata(ctx context.Context, opts ...grpc.CallOption) (pbv1.FileReplicationService_GetFileMetadataClient, error) { + return g.client.GetFileMetadata(g.addAuthMetadataToContext(ctx), opts...) 
+} + +func (g *grpcFileReplicationServiceClient) ListFiles(ctx context.Context, in *pbv1.ListFilesRequest, opts ...grpc.CallOption) (*pbv1.ListFilesResponse, error) { + return g.client.ListFiles(g.addAuthMetadataToContext(ctx), in, opts...) +} + +func (g *grpcFileReplicationServiceClient) PauseFileActivity(ctx context.Context, in *pbv1.PauseFileActivityRequest, opts ...grpc.CallOption) (*pbv1.PauseFileActivityResponse, error) { + return g.client.PauseFileActivity(g.addAuthMetadataToContext(ctx), in, opts...) +} + +func (g *grpcFileReplicationServiceClient) ResumeFileActivity(ctx context.Context, in *pbv1.ResumeFileActivityRequest, opts ...grpc.CallOption) (*pbv1.ResumeFileActivityResponse, error) { + return g.client.ResumeFileActivity(g.addAuthMetadataToContext(ctx), in, opts...) +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/replication/copier/remote_client_stream.go b/platform/dbops/binaries/weaviate-src/cluster/replication/copier/remote_client_stream.go new file mode 100644 index 0000000000000000000000000000000000000000..f83a13f43b881d89deb9a3f7d8abff072f655511 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/replication/copier/remote_client_stream.go @@ -0,0 +1,43 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package copier + +import ( + "context" + + protocol "github.com/weaviate/weaviate/grpc/generated/protocol/v1" + "google.golang.org/grpc/metadata" +) + +// Create non-generic wrapper interface so it can be generated with mockery +type FileMetadataStream interface { + Context() context.Context + CloseSend() error + Header() (metadata.MD, error) + Trailer() metadata.MD + RecvMsg(m interface{}) error + SendMsg(interface{}) error + Send(*protocol.GetFileMetadataRequest) error + Recv() (*protocol.FileMetadata, error) +} + +// Create non-generic wrapper interface so it can be generated with mockery +type FileChunkStream interface { + Context() context.Context + CloseSend() error + Header() (metadata.MD, error) + Trailer() metadata.MD + RecvMsg(m interface{}) error + SendMsg(interface{}) error + Send(*protocol.GetFileRequest) error + Recv() (*protocol.FileChunk, error) +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/replication/copier/types/mock_remote_index.go b/platform/dbops/binaries/weaviate-src/cluster/replication/copier/types/mock_remote_index.go new file mode 100644 index 0000000000000000000000000000000000000000..165e01ec20f96d80558df68880bf4581017a3416 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/replication/copier/types/mock_remote_index.go @@ -0,0 +1,150 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by mockery v2.53.2. DO NOT EDIT. 
+ +package types + +import ( + context "context" + + additional "github.com/weaviate/weaviate/entities/additional" + + mock "github.com/stretchr/testify/mock" +) + +// MockRemoteIndex is an autogenerated mock type for the RemoteIndex type +type MockRemoteIndex struct { + mock.Mock +} + +type MockRemoteIndex_Expecter struct { + mock *mock.Mock +} + +func (_m *MockRemoteIndex) EXPECT() *MockRemoteIndex_Expecter { + return &MockRemoteIndex_Expecter{mock: &_m.Mock} +} + +// AddAsyncReplicationTargetNode provides a mock function with given fields: ctx, hostName, indexName, shardName, targetNodeOverride, schemaVersion +func (_m *MockRemoteIndex) AddAsyncReplicationTargetNode(ctx context.Context, hostName string, indexName string, shardName string, targetNodeOverride additional.AsyncReplicationTargetNodeOverride, schemaVersion uint64) error { + ret := _m.Called(ctx, hostName, indexName, shardName, targetNodeOverride, schemaVersion) + + if len(ret) == 0 { + panic("no return value specified for AddAsyncReplicationTargetNode") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, string, additional.AsyncReplicationTargetNodeOverride, uint64) error); ok { + r0 = rf(ctx, hostName, indexName, shardName, targetNodeOverride, schemaVersion) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockRemoteIndex_AddAsyncReplicationTargetNode_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AddAsyncReplicationTargetNode' +type MockRemoteIndex_AddAsyncReplicationTargetNode_Call struct { + *mock.Call +} + +// AddAsyncReplicationTargetNode is a helper method to define mock.On call +// - ctx context.Context +// - hostName string +// - indexName string +// - shardName string +// - targetNodeOverride additional.AsyncReplicationTargetNodeOverride +// - schemaVersion uint64 +func (_e *MockRemoteIndex_Expecter) AddAsyncReplicationTargetNode(ctx interface{}, hostName interface{}, indexName interface{}, shardName 
interface{}, targetNodeOverride interface{}, schemaVersion interface{}) *MockRemoteIndex_AddAsyncReplicationTargetNode_Call { + return &MockRemoteIndex_AddAsyncReplicationTargetNode_Call{Call: _e.mock.On("AddAsyncReplicationTargetNode", ctx, hostName, indexName, shardName, targetNodeOverride, schemaVersion)} +} + +func (_c *MockRemoteIndex_AddAsyncReplicationTargetNode_Call) Run(run func(ctx context.Context, hostName string, indexName string, shardName string, targetNodeOverride additional.AsyncReplicationTargetNodeOverride, schemaVersion uint64)) *MockRemoteIndex_AddAsyncReplicationTargetNode_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].(string), args[3].(string), args[4].(additional.AsyncReplicationTargetNodeOverride), args[5].(uint64)) + }) + return _c +} + +func (_c *MockRemoteIndex_AddAsyncReplicationTargetNode_Call) Return(_a0 error) *MockRemoteIndex_AddAsyncReplicationTargetNode_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockRemoteIndex_AddAsyncReplicationTargetNode_Call) RunAndReturn(run func(context.Context, string, string, string, additional.AsyncReplicationTargetNodeOverride, uint64) error) *MockRemoteIndex_AddAsyncReplicationTargetNode_Call { + _c.Call.Return(run) + return _c +} + +// RemoveAsyncReplicationTargetNode provides a mock function with given fields: ctx, hostName, indexName, shardName, targetNodeOverride +func (_m *MockRemoteIndex) RemoveAsyncReplicationTargetNode(ctx context.Context, hostName string, indexName string, shardName string, targetNodeOverride additional.AsyncReplicationTargetNodeOverride) error { + ret := _m.Called(ctx, hostName, indexName, shardName, targetNodeOverride) + + if len(ret) == 0 { + panic("no return value specified for RemoveAsyncReplicationTargetNode") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, string, additional.AsyncReplicationTargetNodeOverride) error); ok { + r0 = rf(ctx, hostName, indexName, 
shardName, targetNodeOverride) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockRemoteIndex_RemoveAsyncReplicationTargetNode_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RemoveAsyncReplicationTargetNode' +type MockRemoteIndex_RemoveAsyncReplicationTargetNode_Call struct { + *mock.Call +} + +// RemoveAsyncReplicationTargetNode is a helper method to define mock.On call +// - ctx context.Context +// - hostName string +// - indexName string +// - shardName string +// - targetNodeOverride additional.AsyncReplicationTargetNodeOverride +func (_e *MockRemoteIndex_Expecter) RemoveAsyncReplicationTargetNode(ctx interface{}, hostName interface{}, indexName interface{}, shardName interface{}, targetNodeOverride interface{}) *MockRemoteIndex_RemoveAsyncReplicationTargetNode_Call { + return &MockRemoteIndex_RemoveAsyncReplicationTargetNode_Call{Call: _e.mock.On("RemoveAsyncReplicationTargetNode", ctx, hostName, indexName, shardName, targetNodeOverride)} +} + +func (_c *MockRemoteIndex_RemoveAsyncReplicationTargetNode_Call) Run(run func(ctx context.Context, hostName string, indexName string, shardName string, targetNodeOverride additional.AsyncReplicationTargetNodeOverride)) *MockRemoteIndex_RemoveAsyncReplicationTargetNode_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].(string), args[3].(string), args[4].(additional.AsyncReplicationTargetNodeOverride)) + }) + return _c +} + +func (_c *MockRemoteIndex_RemoveAsyncReplicationTargetNode_Call) Return(_a0 error) *MockRemoteIndex_RemoveAsyncReplicationTargetNode_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockRemoteIndex_RemoveAsyncReplicationTargetNode_Call) RunAndReturn(run func(context.Context, string, string, string, additional.AsyncReplicationTargetNodeOverride) error) *MockRemoteIndex_RemoveAsyncReplicationTargetNode_Call { + _c.Call.Return(run) + return _c +} + +// NewMockRemoteIndex creates a new 
instance of MockRemoteIndex. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMockRemoteIndex(t interface { + mock.TestingT + Cleanup(func()) +}) *MockRemoteIndex { + mock := &MockRemoteIndex{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/replication/copier/types/types.go b/platform/dbops/binaries/weaviate-src/cluster/replication/copier/types/types.go new file mode 100644 index 0000000000000000000000000000000000000000..b8d7fae2af8c2109853f7eaf14b17e0e55cfdfd1 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/replication/copier/types/types.go @@ -0,0 +1,49 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package types + +import ( + "context" + + "github.com/weaviate/weaviate/adapters/repos/db" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" +) + +// DbWrapper is a type that hides a db.DB, this is used to avoid a circular +// dependency between the copier and the db package. +type DbWrapper interface { + // GetIndex See adapters/repos/db.Index.GetIndex + GetIndex(name schema.ClassName) *db.Index + + // GetOneNodeStatus See adapters/repos/db.DB.GetOneNodeStatus + GetOneNodeStatus(ctx context.Context, nodeName string, className, shardName, output string) (*models.NodeStatus, error) +} + +// ShardLoader is a type that can load a shard from disk files, this is used to avoid a circular +// dependency between the copier and the db package. 
+type ShardLoader interface { + // LoadLocalShard See adapters/repos/db.Index.LoadLocalShard + LoadLocalShard(ctx context.Context, name string) error +} + +// RemoteIndex is a type that can interact with a remote index, this is used to avoid a circular +// dependency between the copier and the db package. +type RemoteIndex interface { + // AddAsyncReplicationTargetNode See adapters/clients.RemoteIndex.AddAsyncReplicationTargetNode + AddAsyncReplicationTargetNode(ctx context.Context, + hostName, indexName, shardName string, targetNodeOverride additional.AsyncReplicationTargetNodeOverride, schemaVersion uint64) error + // RemoveAsyncReplicationTargetNode See adapters/clients.RemoteIndex.RemoveAsyncReplicationTargetNode + RemoveAsyncReplicationTargetNode(ctx context.Context, + hostName, indexName, shardName string, targetNodeOverride additional.AsyncReplicationTargetNodeOverride) error +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/replication/manager.go b/platform/dbops/binaries/weaviate-src/cluster/replication/manager.go new file mode 100644 index 0000000000000000000000000000000000000000..717654d1955f99e78b3ff4a48548b54097418dc6 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/replication/manager.go @@ -0,0 +1,462 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package replication + +import ( + "encoding/json" + "errors" + "fmt" + + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/sharding" + + "github.com/go-openapi/strfmt" + "github.com/prometheus/client_golang/prometheus" + + cmd "github.com/weaviate/weaviate/cluster/proto/api" + "github.com/weaviate/weaviate/cluster/replication/types" + "github.com/weaviate/weaviate/cluster/schema" +) + +var ErrBadRequest = errors.New("bad request") + +type Manager struct { + replicationFSM *ShardReplicationFSM + schemaReader schema.SchemaReader +} + +func NewManager(schemaReader schema.SchemaReader, reg prometheus.Registerer) *Manager { + replicationFSM := NewShardReplicationFSM(reg) + return &Manager{ + replicationFSM: replicationFSM, + schemaReader: schemaReader, + } +} + +func (m *Manager) GetReplicationFSM() *ShardReplicationFSM { + return m.replicationFSM +} + +func (m *Manager) Snapshot() ([]byte, error) { + return m.replicationFSM.Snapshot() +} + +func (m *Manager) Restore(bytes []byte) error { + return m.replicationFSM.Restore(bytes) +} + +func (m *Manager) Replicate(logId uint64, c *cmd.ApplyRequest) error { + req := &cmd.ReplicationReplicateShardRequest{} + if err := json.Unmarshal(c.SubCommand, req); err != nil { + return fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + // Validate that the command is valid and can be applied with the current schema + if err := ValidateReplicationReplicateShard(m.schemaReader, req); err != nil { + return err + } + + // Store the shard replication op in the FSM + return m.replicationFSM.Replicate(logId, req) +} + +func (m *Manager) RegisterError(c *cmd.ApplyRequest) error { + req := &cmd.ReplicationRegisterErrorRequest{} + if err := json.Unmarshal(c.SubCommand, req); err != nil { + return fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + // Store an op's error emitted by the consumer in the FSM + if err := m.replicationFSM.RegisterError(req); err != nil { + if 
errors.Is(err, ErrMaxErrorsReached) { + uuid, err := m.GetReplicationOpUUIDFromId(req.Id) + if err != nil { + return fmt.Errorf("failed to get op uuid from id %d: %w", req.Id, err) + } + return m.replicationFSM.CancelReplication(&cmd.ReplicationCancelRequest{ + Uuid: uuid, + }) + } + return err + } + return nil +} + +func (m *Manager) GetReplicationOpUUIDFromId(id uint64) (strfmt.UUID, error) { + return m.replicationFSM.GetReplicationOpUUIDFromId(id) +} + +func (m *Manager) UpdateReplicateOpState(c *cmd.ApplyRequest) error { + req := &cmd.ReplicationUpdateOpStateRequest{} + if err := json.Unmarshal(c.SubCommand, req); err != nil { + return fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + // Store the updated shard replication op in the FSM + return m.replicationFSM.UpdateReplicationOpStatus(req) +} + +func (m *Manager) StoreSchemaVersion(c *cmd.ApplyRequest) error { + req := &cmd.ReplicationStoreSchemaVersionRequest{} + if err := json.Unmarshal(c.SubCommand, req); err != nil { + return fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + return m.replicationFSM.StoreSchemaVersion(req) +} + +func (m *Manager) GetReplicationDetailsByReplicationId(c *cmd.QueryRequest) ([]byte, error) { + subCommand := cmd.ReplicationDetailsRequest{} + if err := json.Unmarshal(c.SubCommand, &subCommand); err != nil { + return nil, fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + op, ok := m.replicationFSM.GetOpByUuid(subCommand.Uuid) + if !ok { + return nil, fmt.Errorf("%w: %s", types.ErrReplicationOperationNotFound, subCommand.Uuid) + } + + response := makeReplicationDetailsResponse(&op.Op, &op.Status) + payload, err := json.Marshal(response) + if err != nil { + return nil, fmt.Errorf("could not marshal query response for replication operation '%s': %w", op.Op.UUID, err) + } + + return payload, nil +} + +func (m *Manager) GetReplicationOperationState(c *cmd.QueryRequest) ([]byte, error) { + subCommand := cmd.ReplicationOperationStateRequest{} + if err := json.Unmarshal(c.SubCommand, 
&subCommand); err != nil { + return nil, fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + op, ok := m.replicationFSM.GetOpById(subCommand.Id) + if !ok { + return nil, fmt.Errorf("unable to retrieve replication operation '%d' status", subCommand.Id) + } + + response := cmd.ReplicationOperationStateResponse{ + State: op.Status.GetCurrent().State, + } + payload, err := json.Marshal(response) + if err != nil { + return nil, fmt.Errorf("could not marshal query response for replication operation '%d': %w", subCommand.Id, err) + } + + return payload, nil +} + +func (m *Manager) GetReplicationDetailsByCollection(c *cmd.QueryRequest) ([]byte, error) { + subCommand := cmd.ReplicationDetailsRequestByCollection{} + if err := json.Unmarshal(c.SubCommand, &subCommand); err != nil { + return nil, fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + responses := []cmd.ReplicationDetailsResponse{} + ops, ok := m.replicationFSM.GetOpsForCollection(subCommand.Collection) + if !ok { + return nil, fmt.Errorf("%w: %s", types.ErrReplicationOperationNotFound, subCommand.Collection) + } + + for _, op := range ops { + responses = append(responses, makeReplicationDetailsResponse(&op.Op, &op.Status)) + } + + payload, err := json.Marshal(responses) + if err != nil { + return nil, fmt.Errorf("could not marshal query response: %w", err) + } + return payload, nil +} + +func (m *Manager) GetReplicationDetailsByCollectionAndShard(c *cmd.QueryRequest) ([]byte, error) { + subCommand := cmd.ReplicationDetailsRequestByCollectionAndShard{} + if err := json.Unmarshal(c.SubCommand, &subCommand); err != nil { + return nil, fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + responses := []cmd.ReplicationDetailsResponse{} + ops, ok := m.replicationFSM.GetOpsForCollectionAndShard(subCommand.Collection, subCommand.Shard) + if !ok { + return nil, fmt.Errorf("%w: %s", types.ErrReplicationOperationNotFound, subCommand.Collection) + } + + for _, op := range ops { + responses = append(responses, 
makeReplicationDetailsResponse(&op.Op, &op.Status)) + } + + payload, err := json.Marshal(responses) + if err != nil { + return nil, fmt.Errorf("could not marshal query response: %w", err) + } + return payload, nil +} + +func (m *Manager) GetReplicationDetailsByTargetNode(c *cmd.QueryRequest) ([]byte, error) { + subCommand := cmd.ReplicationDetailsRequestByTargetNode{} + if err := json.Unmarshal(c.SubCommand, &subCommand); err != nil { + return nil, fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + responses := []cmd.ReplicationDetailsResponse{} + ops, ok := m.replicationFSM.GetOpsForTargetNode(subCommand.Node) + if !ok { + return nil, fmt.Errorf("%w: %s", types.ErrReplicationOperationNotFound, subCommand.Node) + } + + for _, op := range ops { + responses = append(responses, makeReplicationDetailsResponse(&op.Op, &op.Status)) + } + + payload, err := json.Marshal(responses) + if err != nil { + return nil, fmt.Errorf("could not marshal query response: %w", err) + } + return payload, nil +} + +func (m *Manager) GetAllReplicationDetails(c *cmd.QueryRequest) ([]byte, error) { + statusByOps := m.replicationFSM.GetStatusByOps() + responses := make([]cmd.ReplicationDetailsResponse, 0, len(statusByOps)) + for op, status := range statusByOps { + responses = append(responses, makeReplicationDetailsResponse(&op, &status)) + } + + payload, err := json.Marshal(responses) + if err != nil { + return nil, fmt.Errorf("could not marshal query response: %w", err) + } + return payload, nil +} + +func (m *Manager) QueryShardingStateByCollection(c *cmd.QueryRequest) ([]byte, error) { + subCommand := cmd.ReplicationQueryShardingStateByCollectionRequest{} + if err := json.Unmarshal(c.SubCommand, &subCommand); err != nil { + return nil, fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + shards := make(map[string][]string) + var err error + + err = m.schemaReader.Read(subCommand.Collection, func(_ *models.Class, state *sharding.State) error { + if state == nil { + return fmt.Errorf("%w: %s", 
types.ErrNotFound, subCommand.Collection) + } + for _, physical := range state.Physical { + shards[physical.Name] = append([]string(nil), physical.BelongsToNodes...) + } + return nil + }) + if err != nil { + return nil, wrapClassNotFoundErr(err, subCommand.Collection) + } + + response := cmd.ShardingState{ + Collection: subCommand.Collection, + Shards: shards, + } + payload, err := json.Marshal(response) + if err != nil { + return nil, fmt.Errorf("could not marshal query response: %w", err) + } + return payload, nil +} + +func (m *Manager) QueryShardingStateByCollectionAndShard(c *cmd.QueryRequest) ([]byte, error) { + subCommand := cmd.ReplicationQueryShardingStateByCollectionAndShardRequest{} + if err := json.Unmarshal(c.SubCommand, &subCommand); err != nil { + return nil, fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + var ( + shards map[string][]string + err error + ) + + err = m.schemaReader.Read(subCommand.Collection, func(_ *models.Class, state *sharding.State) error { + if state == nil { + return fmt.Errorf("%w: %s", types.ErrNotFound, subCommand.Collection) + } + + for _, physical := range state.Physical { + if physical.Name == subCommand.Shard { + shards = map[string][]string{ + physical.Name: append([]string(nil), physical.BelongsToNodes...), + } + return nil + } + } + return fmt.Errorf("%w: %s", types.ErrNotFound, subCommand.Shard) + }) + if err != nil { + return nil, wrapClassNotFoundErr(err, subCommand.Collection) + } + + response := cmd.ShardingState{ + Collection: subCommand.Collection, + Shards: shards, + } + payload, err := json.Marshal(response) + if err != nil { + return nil, fmt.Errorf("could not marshal query response: %w", err) + } + return payload, nil +} + +// wrapClassNotFoundErr normalizes errors from SchemaReader.Read so the HTTP layer +// maps them to the correct HTTP status. +// - If the collection is missing, Read returns schema.ErrClassNotFound and does +// not invoke the callback. 
This wraps it as types.ErrNotFound with the collection +// name so it maps to HTTP 404. +// - Errors returned by the callback (e.g., shard not found) are passed through unchanged. +func wrapClassNotFoundErr(err error, collection string) error { + if err == nil { + return nil + } + if errors.Is(err, schema.ErrClassNotFound) { + return fmt.Errorf("%w: %s", types.ErrNotFound, collection) + } + return err +} + +func makeReplicationDetailsResponse(op *ShardReplicationOp, status *ShardReplicationOpStatus) cmd.ReplicationDetailsResponse { + return cmd.ReplicationDetailsResponse{ + Uuid: op.UUID, + Id: op.ID, + ShardId: op.SourceShard.ShardId, + Collection: op.SourceShard.CollectionId, + SourceNodeId: op.SourceShard.NodeId, + TargetNodeId: op.TargetShard.NodeId, + TransferType: op.TransferType.String(), + Uncancelable: status.UnCancellable, + ScheduledForCancel: status.ShouldCancel, + ScheduledForDelete: status.ShouldDelete, + Status: status.GetCurrent().ToAPIFormat(), + StatusHistory: status.GetHistory().ToAPIFormat(), + } +} + +func (m *Manager) CancelReplication(c *cmd.ApplyRequest) error { + req := &cmd.ReplicationCancelRequest{} + if err := json.Unmarshal(c.SubCommand, req); err != nil { + return fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + // Trigger cancellation of the replication operation in the FSM + return m.replicationFSM.CancelReplication(req) +} + +func (m *Manager) DeleteReplication(c *cmd.ApplyRequest) error { + req := &cmd.ReplicationDeleteRequest{} + if err := json.Unmarshal(c.SubCommand, req); err != nil { + return fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + // Trigger deletion of the replication operation in the FSM + return m.replicationFSM.DeleteReplication(req) +} + +func (m *Manager) DeleteAllReplications(c *cmd.ApplyRequest) error { + req := &cmd.ReplicationDeleteAllRequest{} + if err := json.Unmarshal(c.SubCommand, req); err != nil { + return fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + // Trigger deletion of all replication operation 
in the FSM + return m.replicationFSM.DeleteAllReplications(req) +} + +func (m *Manager) RemoveReplicaOp(c *cmd.ApplyRequest) error { + req := &cmd.ReplicationRemoveOpRequest{} + if err := json.Unmarshal(c.SubCommand, req); err != nil { + return fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + // Remove the replication operation itself from the FSM + return m.replicationFSM.RemoveReplicationOp(req) +} + +func (m *Manager) ReplicationCancellationComplete(c *cmd.ApplyRequest) error { + req := &cmd.ReplicationCancellationCompleteRequest{} + if err := json.Unmarshal(c.SubCommand, req); err != nil { + return fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + // Mark the replication operation as cancelled in the FSM + return m.replicationFSM.CancellationComplete(req) +} + +func (m *Manager) DeleteReplicationsByCollection(c *cmd.ApplyRequest) error { + req := &cmd.ReplicationsDeleteByCollectionRequest{} + if err := json.Unmarshal(c.SubCommand, req); err != nil { + return fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + // Trigger deletion of all replication operations for the specified class in the FSM + return m.replicationFSM.DeleteReplicationsByCollection(req.Collection) +} + +func (m *Manager) DeleteReplicationsByTenants(c *cmd.ApplyRequest) error { + req := &cmd.ReplicationsDeleteByTenantsRequest{} + if err := json.Unmarshal(c.SubCommand, req); err != nil { + return fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + // Trigger deletion of all replication operations for the specified class in the FSM + return m.replicationFSM.DeleteReplicationsByTenants(req.Collection, req.Tenants) +} + +func (m *Manager) ForceDeleteAll(c *cmd.ApplyRequest) error { + req := &cmd.ReplicationForceDeleteAllRequest{} + if err := json.Unmarshal(c.SubCommand, req); err != nil { + return fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + return m.replicationFSM.ForceDeleteAll() +} + +func (m *Manager) ForceDeleteByCollection(c *cmd.ApplyRequest) error { + req := 
&cmd.ReplicationForceDeleteByCollectionRequest{} + if err := json.Unmarshal(c.SubCommand, req); err != nil { + return fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + return m.replicationFSM.ForceDeleteByCollection(req.Collection) +} + +func (m *Manager) ForceDeleteByCollectionAndShard(c *cmd.ApplyRequest) error { + req := &cmd.ReplicationForceDeleteByCollectionAndShardRequest{} + if err := json.Unmarshal(c.SubCommand, req); err != nil { + return fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + return m.replicationFSM.ForceDeleteByCollectionAndShard(req.Collection, req.Shard) +} + +func (m *Manager) ForceDeleteByTargetNode(c *cmd.ApplyRequest) error { + req := &cmd.ReplicationForceDeleteByTargetNodeRequest{} + if err := json.Unmarshal(c.SubCommand, req); err != nil { + return fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + return m.replicationFSM.ForceDeleteByTargetNode(req.Node) +} + +func (m *Manager) ForceDeleteByUuid(c *cmd.ApplyRequest) error { + req := &cmd.ReplicationForceDeleteByUuidRequest{} + if err := json.Unmarshal(c.SubCommand, req); err != nil { + return fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + return m.replicationFSM.ForceDeleteByUuid(req.Uuid) +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/replication/manager_test.go b/platform/dbops/binaries/weaviate-src/cluster/replication/manager_test.go new file mode 100644 index 0000000000000000000000000000000000000000..ce51f45cfda8667de63272e74052b92eefb7b03a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/replication/manager_test.go @@ -0,0 +1,1301 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package replication_test + +import ( + "encoding/json" + "errors" + "fmt" + "strings" + "testing" + "time" + + "github.com/go-openapi/strfmt" + "github.com/google/uuid" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/cluster/proto/api" + "github.com/weaviate/weaviate/cluster/replication" + "github.com/weaviate/weaviate/cluster/replication/types" + "github.com/weaviate/weaviate/cluster/schema" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/fakes" + "github.com/weaviate/weaviate/usecases/sharding" +) + +var ErrNotFound = errors.New("not found") + +func TestManager_Replicate(t *testing.T) { + tests := []struct { + name string + schemaSetup func(*testing.T, *schema.SchemaManager) error + request *api.ReplicationReplicateShardRequest + expectedError error + }{ + { + name: "valid replication request", + schemaSetup: func(t *testing.T, s *schema.SchemaManager) error { + return s.AddClass( + buildApplyRequest("TestCollection", api.ApplyRequest_TYPE_ADD_CLASS, api.AddClassRequest{ + Class: &models.Class{Class: "TestCollection", MultiTenancyConfig: &models.MultiTenancyConfig{Enabled: false}}, + State: &sharding.State{ + Physical: map[string]sharding.Physical{"shard1": {BelongsToNodes: []string{"node1"}}}, + }, + }), "node1", true, false) + }, + request: &api.ReplicationReplicateShardRequest{ + Uuid: uuid4(), + SourceCollection: "TestCollection", + SourceShard: "shard1", + SourceNode: "node1", + TargetNode: "node2", + TransferType: api.COPY.String(), + }, + expectedError: nil, + }, + { + name: "class not found", + request: &api.ReplicationReplicateShardRequest{ + Uuid: uuid4(), + SourceCollection: "NonExistentCollection", + SourceShard: "shard1", + 
SourceNode: "node1", + TargetNode: "node2", + TransferType: api.COPY.String(), + }, + expectedError: replication.ErrClassNotFound, + }, + { + name: "source shard not found", + schemaSetup: func(t *testing.T, s *schema.SchemaManager) error { + return s.AddClass( + buildApplyRequest("TestCollection", api.ApplyRequest_TYPE_ADD_CLASS, api.AddClassRequest{ + Class: &models.Class{Class: "TestCollection", MultiTenancyConfig: &models.MultiTenancyConfig{Enabled: false}}, + State: &sharding.State{ + Physical: map[string]sharding.Physical{"shard2": {BelongsToNodes: []string{"node1"}}}, + }, + }), "node1", true, false) + }, + request: &api.ReplicationReplicateShardRequest{ + Uuid: uuid4(), + SourceCollection: "TestCollection", + SourceShard: "NonExistentShard", + SourceNode: "node1", + TargetNode: "node2", + TransferType: api.COPY.String(), + }, + expectedError: replication.ErrShardNotFound, + }, + { + name: "source node not found", + schemaSetup: func(t *testing.T, s *schema.SchemaManager) error { + return s.AddClass( + buildApplyRequest("TestCollection", api.ApplyRequest_TYPE_ADD_CLASS, api.AddClassRequest{ + Class: &models.Class{Class: "TestCollection", MultiTenancyConfig: &models.MultiTenancyConfig{Enabled: false}}, + State: &sharding.State{ + Physical: map[string]sharding.Physical{"shard1": {BelongsToNodes: []string{"node1"}}}, + }, + }), "node1", true, false) + }, + request: &api.ReplicationReplicateShardRequest{ + Uuid: uuid4(), + SourceCollection: "TestCollection", + SourceShard: "shard1", + SourceNode: "node4", + TargetNode: "node2", + TransferType: api.COPY.String(), + }, + expectedError: replication.ErrNodeNotFound, + }, + { + name: "target node already has shard", + schemaSetup: func(t *testing.T, s *schema.SchemaManager) error { + return s.AddClass( + buildApplyRequest("TestCollection", api.ApplyRequest_TYPE_ADD_CLASS, api.AddClassRequest{ + Class: &models.Class{Class: "TestCollection", MultiTenancyConfig: &models.MultiTenancyConfig{Enabled: false}}, + State: 
&sharding.State{ + Physical: map[string]sharding.Physical{"shard1": {BelongsToNodes: []string{"node1", "node2"}}}, + }, + }), "node1", true, false) + }, + request: &api.ReplicationReplicateShardRequest{ + Uuid: uuid4(), + SourceCollection: "TestCollection", + SourceShard: "shard1", + SourceNode: "node1", + TargetNode: "node2", + TransferType: api.COPY.String(), + }, + expectedError: replication.ErrAlreadyExists, + }, + { + name: "source and target are identicals", + schemaSetup: func(t *testing.T, s *schema.SchemaManager) error { + return s.AddClass( + buildApplyRequest("TestCollection", api.ApplyRequest_TYPE_ADD_CLASS, api.AddClassRequest{ + Class: &models.Class{Class: "TestCollection", MultiTenancyConfig: &models.MultiTenancyConfig{Enabled: false}}, + State: &sharding.State{ + Physical: map[string]sharding.Physical{"shard1": {BelongsToNodes: []string{"node1"}}}, + }, + }), "node1", true, false) + }, + request: &api.ReplicationReplicateShardRequest{ + Uuid: uuid4(), + SourceCollection: "TestCollection", + SourceShard: "shard1", + SourceNode: "node1", + TargetNode: "node1", + TransferType: api.COPY.String(), + }, + expectedError: replication.ErrBadRequest, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + // Setup + reg := prometheus.NewPedanticRegistry() + parser := fakes.NewMockParser() + parser.On("ParseClass", mock.Anything).Return(nil) + schemaManager := schema.NewSchemaManager("test-node", nil, parser, prometheus.NewPedanticRegistry(), logrus.New()) + schemaReader := schemaManager.NewSchemaReader() + manager := replication.NewManager(schemaReader, reg) + if tt.schemaSetup != nil { + tt.schemaSetup(t, schemaManager) + } + + // Create ApplyRequest + subCommand, _ := json.Marshal(tt.request) + applyRequest := &api.ApplyRequest{ + SubCommand: subCommand, + } + + // Execute + err := manager.Replicate(0, applyRequest) + + // Assert + if tt.expectedError != nil { + assert.ErrorAs(t, err, &tt.expectedError) + } else { + 
assert.NoError(t, err) + } + }) + } +} + +func TestManager_ReplicateMultipleOps(t *testing.T) { + tests := []struct { + name string + schemaSetup func(*testing.T, *schema.SchemaManager) error + requests []*api.ReplicationReplicateShardRequest + expectedLastError error + }{ + { + name: "source shard is already moving", + schemaSetup: func(t *testing.T, s *schema.SchemaManager) error { + return s.AddClass( + buildApplyRequest("TestCollection", api.ApplyRequest_TYPE_ADD_CLASS, api.AddClassRequest{ + Class: &models.Class{Class: "TestCollection", MultiTenancyConfig: &models.MultiTenancyConfig{Enabled: false}}, + State: &sharding.State{ + Physical: map[string]sharding.Physical{"shard1": {BelongsToNodes: []string{"node1"}}}, + }, + }), "node1", true, false) + }, + requests: []*api.ReplicationReplicateShardRequest{ + { + Uuid: uuid4(), + SourceCollection: "TestCollection", + SourceShard: "shard1", + SourceNode: "node1", + TargetNode: "node2", + TransferType: api.MOVE.String(), + }, + { + Uuid: uuid4(), + SourceCollection: "TestCollection", + SourceShard: "shard1", + SourceNode: "node1", + TargetNode: "node3", + TransferType: api.MOVE.String(), + }, + }, + expectedLastError: replication.ErrShardAlreadyReplicating, + }, + { + name: "source shard can accept multiple copies", + schemaSetup: func(t *testing.T, s *schema.SchemaManager) error { + return s.AddClass( + buildApplyRequest("TestCollection", api.ApplyRequest_TYPE_ADD_CLASS, api.AddClassRequest{ + Class: &models.Class{Class: "TestCollection", MultiTenancyConfig: &models.MultiTenancyConfig{Enabled: false}}, + State: &sharding.State{ + Physical: map[string]sharding.Physical{"shard1": {BelongsToNodes: []string{"node1"}}}, + }, + }), "node1", true, false) + }, + requests: []*api.ReplicationReplicateShardRequest{ + { + Uuid: uuid4(), + SourceCollection: "TestCollection", + SourceShard: "shard1", + SourceNode: "node1", + TargetNode: "node2", + TransferType: api.COPY.String(), + }, + { + Uuid: uuid4(), + SourceCollection: 
"TestCollection", + SourceShard: "shard1", + SourceNode: "node1", + TargetNode: "node3", + TransferType: api.COPY.String(), + }, + }, + expectedLastError: nil, + }, + { + name: "source shard is copying and can't accept a new move", + schemaSetup: func(t *testing.T, s *schema.SchemaManager) error { + return s.AddClass( + buildApplyRequest("TestCollection", api.ApplyRequest_TYPE_ADD_CLASS, api.AddClassRequest{ + Class: &models.Class{Class: "TestCollection", MultiTenancyConfig: &models.MultiTenancyConfig{Enabled: false}}, + State: &sharding.State{ + Physical: map[string]sharding.Physical{"shard1": {BelongsToNodes: []string{"node1"}}}, + }, + }), "node1", true, false) + }, + requests: []*api.ReplicationReplicateShardRequest{ + { + Uuid: uuid4(), + SourceCollection: "TestCollection", + SourceShard: "shard1", + SourceNode: "node1", + TargetNode: "node2", + TransferType: api.COPY.String(), + }, + { + Uuid: uuid4(), + SourceCollection: "TestCollection", + SourceShard: "shard1", + SourceNode: "node1", + TargetNode: "node3", + TransferType: api.MOVE.String(), + }, + }, + expectedLastError: replication.ErrShardAlreadyReplicating, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + // Setup + reg := prometheus.NewPedanticRegistry() + parser := fakes.NewMockParser() + parser.On("ParseClass", mock.Anything).Return(nil) + schemaManager := schema.NewSchemaManager("test-node", nil, parser, prometheus.NewPedanticRegistry(), logrus.New()) + schemaReader := schemaManager.NewSchemaReader() + manager := replication.NewManager(schemaReader, reg) + if tt.schemaSetup != nil { + tt.schemaSetup(t, schemaManager) + } + + for i, request := range tt.requests { + // Create ApplyRequest + subCommand, _ := json.Marshal(request) + applyRequest := &api.ApplyRequest{ + SubCommand: subCommand, + } + + // Execute + err := manager.Replicate(uint64(i), applyRequest) + if i == len(tt.requests)-1 && tt.expectedLastError != nil { + assert.ErrorAs(t, err, 
&tt.expectedLastError) + } else { + assert.NoError(t, err) + } + } + }) + } +} + +func TestManager_UpdateReplicaOpStatusAndRegisterErrors(t *testing.T) { + type stateChangeAndErrors struct { + stateChangeRequest *api.ReplicationUpdateOpStateRequest + stateChangeExpectedError error + + registerErrorRequests []*api.ReplicationRegisterErrorRequest + registerErrorExpectedError []error + } + + tests := []struct { + name string + schemaSetup func(*testing.T, *schema.SchemaManager) error + replicaRequest *api.ReplicationReplicateShardRequest + updateStatusRequests []*stateChangeAndErrors + }{ + { + name: "valid state change and no errors", + schemaSetup: func(t *testing.T, s *schema.SchemaManager) error { + return s.AddClass( + buildApplyRequest("TestCollection", api.ApplyRequest_TYPE_ADD_CLASS, api.AddClassRequest{ + Class: &models.Class{Class: "TestCollection", MultiTenancyConfig: &models.MultiTenancyConfig{Enabled: false}}, + State: &sharding.State{ + Physical: map[string]sharding.Physical{"shard1": {BelongsToNodes: []string{"node1"}}}, + }, + }), "node1", true, false) + }, + replicaRequest: &api.ReplicationReplicateShardRequest{ + Uuid: uuid4(), + SourceCollection: "TestCollection", + SourceShard: "shard1", + SourceNode: "node1", + TargetNode: "node2", + }, + updateStatusRequests: []*stateChangeAndErrors{ + { + stateChangeRequest: &api.ReplicationUpdateOpStateRequest{Id: 0, State: api.ShardReplicationState(api.HYDRATING)}, + registerErrorRequests: []*api.ReplicationRegisterErrorRequest{}, + }, + }, + }, + { + name: "valid state change and errors", + schemaSetup: func(t *testing.T, s *schema.SchemaManager) error { + return s.AddClass( + buildApplyRequest("TestCollection", api.ApplyRequest_TYPE_ADD_CLASS, api.AddClassRequest{ + Class: &models.Class{Class: "TestCollection", MultiTenancyConfig: &models.MultiTenancyConfig{Enabled: false}}, + State: &sharding.State{ + Physical: map[string]sharding.Physical{"shard1": {BelongsToNodes: []string{"node1"}}}, + }, + }), "node1", 
true, false) + }, + replicaRequest: &api.ReplicationReplicateShardRequest{ + Uuid: uuid4(), + SourceCollection: "TestCollection", + SourceShard: "shard1", + SourceNode: "node1", + TargetNode: "node2", + }, + updateStatusRequests: []*stateChangeAndErrors{ + { + stateChangeRequest: &api.ReplicationUpdateOpStateRequest{Id: 0, State: api.ShardReplicationState(api.HYDRATING)}, + registerErrorRequests: []*api.ReplicationRegisterErrorRequest{ + {Id: 0, Error: "test error"}, + {Id: 0, Error: "test error"}, + }, + registerErrorExpectedError: []error{nil, nil}, + }, + }, + }, + { + name: "valid state change andinvalid register error", + schemaSetup: func(t *testing.T, s *schema.SchemaManager) error { + return s.AddClass( + buildApplyRequest("TestCollection", api.ApplyRequest_TYPE_ADD_CLASS, api.AddClassRequest{ + Class: &models.Class{Class: "TestCollection", MultiTenancyConfig: &models.MultiTenancyConfig{Enabled: false}}, + State: &sharding.State{ + Physical: map[string]sharding.Physical{"shard1": {BelongsToNodes: []string{"node1"}}}, + }, + }), "node1", true, false) + }, + replicaRequest: &api.ReplicationReplicateShardRequest{ + Uuid: uuid4(), + SourceCollection: "TestCollection", + SourceShard: "shard1", + SourceNode: "node1", + TargetNode: "node2", + }, + updateStatusRequests: []*stateChangeAndErrors{ + { + stateChangeRequest: &api.ReplicationUpdateOpStateRequest{Id: 0, State: api.ShardReplicationState(api.HYDRATING)}, + registerErrorRequests: []*api.ReplicationRegisterErrorRequest{ + {Id: 1, Error: "test error"}, + }, + registerErrorExpectedError: []error{types.ErrReplicationOperationNotFound, nil}, + }, + }, + }, + { + name: "multiple state changes and errors", + schemaSetup: func(t *testing.T, s *schema.SchemaManager) error { + return s.AddClass( + buildApplyRequest("TestCollection", api.ApplyRequest_TYPE_ADD_CLASS, api.AddClassRequest{ + Class: &models.Class{Class: "TestCollection", MultiTenancyConfig: &models.MultiTenancyConfig{Enabled: false}}, + State: 
&sharding.State{ + Physical: map[string]sharding.Physical{"shard1": {BelongsToNodes: []string{"node1"}}}, + }, + }), "node1", true, false) + }, + replicaRequest: &api.ReplicationReplicateShardRequest{ + Uuid: uuid4(), + SourceCollection: "TestCollection", + SourceShard: "shard1", + SourceNode: "node1", + TargetNode: "node2", + }, + updateStatusRequests: []*stateChangeAndErrors{ + { + stateChangeRequest: &api.ReplicationUpdateOpStateRequest{Id: 0, State: api.ShardReplicationState(api.HYDRATING)}, + registerErrorRequests: []*api.ReplicationRegisterErrorRequest{ + {Id: 0, Error: "test error"}, + {Id: 0, Error: "test error"}, + }, + registerErrorExpectedError: []error{nil, nil}, + }, + { + stateChangeRequest: &api.ReplicationUpdateOpStateRequest{Id: 0, State: api.ShardReplicationState(api.FINALIZING)}, + registerErrorRequests: []*api.ReplicationRegisterErrorRequest{ + {Id: 0, Error: "test error"}, + {Id: 0, Error: "test error"}, + }, + registerErrorExpectedError: []error{nil, nil}, + }, + { + stateChangeRequest: &api.ReplicationUpdateOpStateRequest{Id: 0, State: api.ShardReplicationState(api.REGISTERED)}, + registerErrorRequests: []*api.ReplicationRegisterErrorRequest{}, + }, + }, + }, + { + name: "invalid state change", + schemaSetup: func(t *testing.T, s *schema.SchemaManager) error { + return s.AddClass( + buildApplyRequest("TestCollection", api.ApplyRequest_TYPE_ADD_CLASS, api.AddClassRequest{ + Class: &models.Class{Class: "TestCollection", MultiTenancyConfig: &models.MultiTenancyConfig{Enabled: false}}, + State: &sharding.State{ + Physical: map[string]sharding.Physical{"shard1": {BelongsToNodes: []string{"node1"}}}, + }, + }), "node1", true, false) + }, + replicaRequest: &api.ReplicationReplicateShardRequest{ + Uuid: uuid4(), + SourceCollection: "TestCollection", + SourceShard: "shard1", + SourceNode: "node1", + TargetNode: "node2", + }, + updateStatusRequests: []*stateChangeAndErrors{ + { + stateChangeRequest: &api.ReplicationUpdateOpStateRequest{Id: 1, State: 
api.ShardReplicationState(api.REGISTERED)}, + stateChangeExpectedError: types.ErrReplicationOperationNotFound, + }, + }, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + // Setup + reg := prometheus.NewPedanticRegistry() + parser := fakes.NewMockParser() + parser.On("ParseClass", mock.Anything).Return(nil) + schemaManager := schema.NewSchemaManager("test-node", nil, parser, prometheus.NewPedanticRegistry(), logrus.New()) + schemaReader := schemaManager.NewSchemaReader() + manager := replication.NewManager(schemaReader, reg) + if tt.schemaSetup != nil { + tt.schemaSetup(t, schemaManager) + } + + // Create ApplyRequest + subCommand, _ := json.Marshal(tt.replicaRequest) + applyRequest := &api.ApplyRequest{ + SubCommand: subCommand, + } + + // Execute + err := manager.Replicate(0, applyRequest) + require.NoError(t, err) + + expectedFinalState := replication.NewShardReplicationStatus(api.REGISTERED) + + for _, req := range tt.updateStatusRequests { + subCommand, _ := json.Marshal(req.stateChangeRequest) + applyRequest = &api.ApplyRequest{ + SubCommand: subCommand, + } + err = manager.UpdateReplicateOpState(applyRequest) + if req.stateChangeExpectedError != nil { + assert.ErrorAs(t, err, &req.stateChangeExpectedError) + } else { + expectedFinalState.ChangeState(req.stateChangeRequest.State) + assert.NoError(t, err) + } + + for i, errReq := range req.registerErrorRequests { + expectedErr := req.registerErrorExpectedError[i] + + subCommand, _ := json.Marshal(errReq) + applyRequest = &api.ApplyRequest{ + SubCommand: subCommand, + } + err = manager.RegisterError(applyRequest) + if expectedErr != nil { + assert.ErrorAs(t, err, &expectedErr) + } else { + assert.NoError(t, err) + expectedFinalState.AddError(errReq.Error, time.Now().UnixMilli()) + } + } + } + + subCommand, _ = json.Marshal(&api.ReplicationDetailsRequest{Uuid: tt.replicaRequest.Uuid}) + queryRequest := &api.QueryRequest{ + Type: 
api.QueryRequest_TYPE_GET_REPLICATION_DETAILS, + SubCommand: subCommand, + } + resp, err := manager.GetReplicationDetailsByReplicationId(queryRequest) + assert.NoError(t, err) + + statusResp := api.ReplicationDetailsResponse{} + err = json.Unmarshal(resp, &statusResp) + assert.NoError(t, err) + assert.Equal(t, expectedFinalState.GetCurrent().ToAPIFormat().State, statusResp.Status.State) + for i, err := range expectedFinalState.GetCurrent().ToAPIFormat().Errors { + assert.Equal(t, err.Message, statusResp.Status.Errors[i].Message) + } + for i, status := range expectedFinalState.GetHistory().ToAPIFormat() { + assert.Equal(t, status.State, statusResp.StatusHistory[i].State) + for j, err := range status.Errors { + assert.Equal(t, err.Message, statusResp.StatusHistory[i].Errors[j].Message) + } + } + }) + } +} + +func TestManager_SnapshotRestore(t *testing.T) { + UUID1 := uuid4() + UUID2 := uuid4() + tests := []struct { + name string + schemaSetup func(*testing.T, *schema.SchemaManager) error + uuids []strfmt.UUID + snapshotRequests []*api.ApplyRequest + nonSnapshottedRequests []*api.ApplyRequest + }{ + { + name: "snapshot and restore data with non snapshotted data", + schemaSetup: func(t *testing.T, s *schema.SchemaManager) error { + return s.AddClass( + buildApplyRequest("TestCollection", api.ApplyRequest_TYPE_ADD_CLASS, api.AddClassRequest{ + Class: &models.Class{Class: "TestCollection", MultiTenancyConfig: &models.MultiTenancyConfig{Enabled: false}}, + State: &sharding.State{ + Physical: map[string]sharding.Physical{ + "shard1": {BelongsToNodes: []string{"node1"}}, + "shard2": {BelongsToNodes: []string{"node1"}}, + }, + }, + }), "node1", true, false) + }, + snapshotRequests: []*api.ApplyRequest{ + buildApplyRequest("TestCollection", api.ApplyRequest_TYPE_REPLICATION_REPLICATE, api.ReplicationReplicateShardRequest{ + Uuid: UUID1, + SourceCollection: "TestCollection", + SourceShard: "shard1", + SourceNode: "node1", + TargetNode: "node2", + TransferType: 
api.COPY.String(), + }), + buildApplyRequest("TestCollection", api.ApplyRequest_TYPE_REPLICATION_REPLICATE_REGISTER_ERROR, api.ReplicationRegisterErrorRequest{Id: 0, Error: "test error"}), + }, + nonSnapshottedRequests: []*api.ApplyRequest{ + buildApplyRequest("TestCollection", api.ApplyRequest_TYPE_REPLICATION_REPLICATE, api.ReplicationReplicateShardRequest{ + Uuid: UUID2, + SourceCollection: "TestCollection", + SourceShard: "shard2", + SourceNode: "node1", + TargetNode: "node2", + TransferType: api.COPY.String(), + }), + buildApplyRequest("TestCollection", api.ApplyRequest_TYPE_REPLICATION_REPLICATE_REGISTER_ERROR, api.ReplicationRegisterErrorRequest{Id: 1, Error: "test error"}), + }, + }, + { + name: "snapshot and restore no data", + schemaSetup: func(t *testing.T, s *schema.SchemaManager) error { + return s.AddClass( + buildApplyRequest("TestCollection", api.ApplyRequest_TYPE_ADD_CLASS, api.AddClassRequest{ + Class: &models.Class{Class: "TestCollection", MultiTenancyConfig: &models.MultiTenancyConfig{Enabled: false}}, + State: &sharding.State{ + Physical: map[string]sharding.Physical{ + "shard1": {BelongsToNodes: []string{"node1"}}, + "shard2": {BelongsToNodes: []string{"node1"}}, + }, + }, + }), "node1", true, false) + }, + snapshotRequests: []*api.ApplyRequest{}, + nonSnapshottedRequests: []*api.ApplyRequest{}, + }, + { + name: "snapshot and restore latest data", + schemaSetup: func(t *testing.T, s *schema.SchemaManager) error { + return s.AddClass( + buildApplyRequest("TestCollection", api.ApplyRequest_TYPE_ADD_CLASS, api.AddClassRequest{ + Class: &models.Class{Class: "TestCollection", MultiTenancyConfig: &models.MultiTenancyConfig{Enabled: false}}, + State: &sharding.State{ + Physical: map[string]sharding.Physical{ + "shard1": {BelongsToNodes: []string{"node1"}}, + "shard2": {BelongsToNodes: []string{"node1"}}, + }, + }, + }), "node1", true, false) + }, + snapshotRequests: []*api.ApplyRequest{ + buildApplyRequest("TestCollection", 
api.ApplyRequest_TYPE_REPLICATION_REPLICATE, api.ReplicationReplicateShardRequest{ + Uuid: UUID1, + SourceCollection: "TestCollection", + SourceShard: "shard1", + SourceNode: "node1", + TargetNode: "node2", + TransferType: api.MOVE.String(), + }), + buildApplyRequest("TestCollection", api.ApplyRequest_TYPE_REPLICATION_REPLICATE_REGISTER_ERROR, api.ReplicationRegisterErrorRequest{Id: 0, Error: "test error"}), + buildApplyRequest("TestCollection", api.ApplyRequest_TYPE_REPLICATION_REPLICATE, api.ReplicationReplicateShardRequest{ + Uuid: UUID2, + SourceCollection: "TestCollection", + SourceShard: "shard2", + SourceNode: "node1", + TargetNode: "node2", + TransferType: api.COPY.String(), + }), + buildApplyRequest("TestCollection", api.ApplyRequest_TYPE_REPLICATION_REPLICATE_REGISTER_ERROR, api.ReplicationRegisterErrorRequest{Id: 1, Error: "test error"}), + }, + nonSnapshottedRequests: []*api.ApplyRequest{}, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + // Setup + reg := prometheus.NewPedanticRegistry() + parser := fakes.NewMockParser() + parser.On("ParseClass", mock.Anything).Return(nil) + schemaManager := schema.NewSchemaManager("test-node", nil, parser, prometheus.NewPedanticRegistry(), logrus.New()) + schemaReader := schemaManager.NewSchemaReader() + manager := replication.NewManager(schemaReader, reg) + if tt.schemaSetup != nil { + tt.schemaSetup(t, schemaManager) + } + + var logIndex uint64 + // Write data + for _, req := range tt.snapshotRequests { + switch req.Type { + case api.ApplyRequest_TYPE_REPLICATION_REPLICATE: + // Execute + err := manager.Replicate(logIndex, req) + assert.NoError(t, err) + logIndex++ + case api.ApplyRequest_TYPE_REPLICATION_REPLICATE_REGISTER_ERROR: + var originalReq api.ReplicationRegisterErrorRequest + err := json.Unmarshal(req.SubCommand, &originalReq) + require.NoError(t, err) + err = manager.RegisterError(req) + assert.NoError(t, err) + default: + t.Fatalf("unknown apply request type: 
%v", req.Type) + } + } + + // Do the snapshot/restore routine + bytes, err := manager.Snapshot() + require.NoError(t, err) + require.NotNil(t, bytes) + + // Write data that will not be snapshotted + for _, req := range tt.nonSnapshottedRequests { + switch req.Type { + case api.ApplyRequest_TYPE_REPLICATION_REPLICATE: + // Execute + err := manager.Replicate(logIndex, req) + assert.NoError(t, err) + case api.ApplyRequest_TYPE_REPLICATION_REPLICATE_REGISTER_ERROR: + var originalReq api.ReplicationRegisterErrorRequest + err := json.Unmarshal(req.SubCommand, &originalReq) + require.NoError(t, err) + err = manager.RegisterError(req) + assert.NoError(t, err) + default: + t.Fatalf("unknown apply request type: %v", req.Type) + } + logIndex++ + } + + err = manager.Restore(bytes) + require.NoError(t, err) + + // Ensure snapshotted data is here + logIndex = 0 + for _, req := range tt.snapshotRequests { + switch req.Type { + case api.ApplyRequest_TYPE_REPLICATION_REPLICATE: + var originalReq api.ReplicationReplicateShardRequest + err = json.Unmarshal(req.SubCommand, &originalReq) + require.NoError(t, err) + + // Create QueryRequest + subCommand, _ := json.Marshal(&api.ReplicationDetailsRequest{Uuid: originalReq.Uuid}) + queryRequest := &api.QueryRequest{ + Type: api.QueryRequest_TYPE_GET_REPLICATION_DETAILS, + SubCommand: subCommand, + } + + // Execute + bytes, err := manager.GetReplicationDetailsByReplicationId(queryRequest) + require.NoError(t, err) + require.NotNil(t, bytes) + + var resp api.ReplicationDetailsResponse + err = json.Unmarshal(bytes, &resp) + require.NoError(t, err) + require.Equal(t, resp.Uuid, originalReq.Uuid) + require.Equal(t, resp.Id, logIndex) + require.Equal(t, originalReq.SourceCollection, resp.Collection) + require.Equal(t, originalReq.SourceShard, resp.ShardId) + require.Equal(t, originalReq.SourceNode, resp.SourceNodeId) + require.Equal(t, originalReq.TargetNode, resp.TargetNodeId) + require.Equal(t, originalReq.TransferType, resp.TransferType) + 
logIndex++ + case api.ApplyRequest_TYPE_REPLICATION_REPLICATE_REGISTER_ERROR: + originalReq := api.ReplicationRegisterErrorRequest{} + err = json.Unmarshal(req.SubCommand, &originalReq) + require.NoError(t, err) + + uuid, err := manager.GetReplicationOpUUIDFromId(originalReq.Id) + require.NoError(t, err) + + // Create QueryRequest + subCommand, _ := json.Marshal(&api.ReplicationDetailsRequest{Uuid: uuid}) + queryRequest := &api.QueryRequest{ + Type: api.QueryRequest_TYPE_GET_REPLICATION_DETAILS, + SubCommand: subCommand, + } + // Execute + bytes, err := manager.GetReplicationDetailsByReplicationId(queryRequest) + require.NoError(t, err) + require.NotNil(t, bytes) + + var resp api.ReplicationDetailsResponse + err = json.Unmarshal(bytes, &resp) + require.NoError(t, err) + require.Equal(t, resp.Uuid, uuid) + require.Equal(t, resp.Id, originalReq.Id) + require.Equal(t, api.ShardReplicationState(resp.Status.State), api.REGISTERED) + for _, err := range resp.Status.Errors { + require.Equal(t, err.Message, originalReq.Error) + } + default: + t.Fatalf("unknown apply request type: %v", req.Type) + } + } + + // Ensure non snapshotted data is absent + for _, req := range tt.nonSnapshottedRequests { + switch req.Type { + case api.ApplyRequest_TYPE_REPLICATION_REPLICATE: + originalReq := api.ReplicationReplicateShardRequest{} + err = json.Unmarshal(req.SubCommand, &originalReq) + require.NoError(t, err) + + // Create QueryRequest + subCommand, _ := json.Marshal(&api.ReplicationDetailsRequest{Uuid: originalReq.Uuid}) + queryRequest := &api.QueryRequest{ + Type: api.QueryRequest_TYPE_GET_REPLICATION_DETAILS, + SubCommand: subCommand, + } + + // Execute + _, err := manager.GetReplicationDetailsByReplicationId(queryRequest) + require.Error(t, err) + logIndex++ + case api.ApplyRequest_TYPE_REPLICATION_REPLICATE_REGISTER_ERROR: + originalReq := api.ReplicationRegisterErrorRequest{} + err = json.Unmarshal(req.SubCommand, &originalReq) + require.NoError(t, err) + + _, err = 
// TestManager_MetricsTracking verifies that the replication manager keeps the
// weaviate_replication_operation_fsm_ops_by_state gauge in sync with the FSM:
// registering an op increments REGISTERED, and each state transition moves the
// op's count from the previous state's gauge to the new state's gauge.
func TestManager_MetricsTracking(t *testing.T) {
	const metricName = "weaviate_replication_operation_fsm_ops_by_state"
	t.Run("one replication operation with two state transitions", func(t *testing.T) {
		// Setup: schema with one collection/shard owned by node1.
		reg := prometheus.NewPedanticRegistry()
		parser := fakes.NewMockParser()
		parser.On("ParseClass", mock.Anything).Return(nil)
		schemaManager := schema.NewSchemaManager("test-node", nil, parser, prometheus.NewPedanticRegistry(), logrus.New())
		schemaReader := schemaManager.NewSchemaReader()
		manager := replication.NewManager(schemaReader, reg)
		err := schemaManager.AddClass(buildApplyRequest("TestCollection", api.ApplyRequest_TYPE_ADD_CLASS, api.AddClassRequest{
			Class: &models.Class{Class: "TestCollection", MultiTenancyConfig: &models.MultiTenancyConfig{Enabled: false}},
			State: &sharding.State{
				Physical: map[string]sharding.Physical{"shard1": {BelongsToNodes: []string{"node1"}}},
			},
		}), "node1", true, false)
		require.NoError(t, err, "error while adding class: %v", err)

		// Create replication request
		subCommand, err := json.Marshal(&api.ReplicationReplicateShardRequest{
			Uuid:             uuid4(),
			SourceCollection: "TestCollection",
			SourceShard:      "shard1",
			SourceNode:       "node1",
			TargetNode:       "node2",
		})
		require.NoErrorf(t, err, "error while marshalling a replication request: %v", err)

		err = manager.Replicate(0, &api.ApplyRequest{
			SubCommand: subCommand,
		})
		require.NoErrorf(t, err, "error while starting a replication operation: %v", err)

		// The freshly registered op must be counted under REGISTERED.
		assertGaugeValues(t, reg, metricName, map[api.ShardReplicationState]float64{
			api.REGISTERED: 1,
		})

		// Update replication state to 'HYDRATING'
		subCommand, err = json.Marshal(&api.ReplicationUpdateOpStateRequest{
			Version: 0,
			Id:      0,
			State:   api.HYDRATING,
		})
		require.NoErrorf(t, err, "error while marshalling a replication state change operation: %v", err)

		err = manager.UpdateReplicateOpState(&api.ApplyRequest{
			SubCommand: subCommand,
		})
		require.NoErrorf(t, err, "error while updating replication state: %v", err)

		// The op moved: REGISTERED drops to 0, HYDRATING rises to 1.
		assertGaugeValues(t, reg, metricName, map[api.ShardReplicationState]float64{
			api.REGISTERED: 0,
			api.HYDRATING:  1,
		})

		// Update replication state to 'DEHYDRATING'
		subCommand, err = json.Marshal(&api.ReplicationUpdateOpStateRequest{
			Version: 0,
			Id:      0,
			State:   api.DEHYDRATING,
		})
		require.NoErrorf(t, err, "error while marshalling a replication state change operation: %v", err)

		err = manager.UpdateReplicateOpState(&api.ApplyRequest{
			SubCommand: subCommand,
		})
		require.NoErrorf(t, err, "error while updating replication state: %v", err)

		assertGaugeValues(t, reg, metricName, map[api.ShardReplicationState]float64{
			api.REGISTERED:  0,
			api.HYDRATING:   0,
			api.DEHYDRATING: 1,
		})
	})

	t.Run("two replication operations with different state transitions", func(t *testing.T) {
		// Setup: two shards on node1 so two independent ops can be tracked.
		reg := prometheus.NewPedanticRegistry()
		parser := fakes.NewMockParser()
		parser.On("ParseClass", mock.Anything).Return(nil)
		schemaManager := schema.NewSchemaManager("test-node", nil, parser, prometheus.NewPedanticRegistry(), logrus.New())
		schemaReader := schemaManager.NewSchemaReader()
		manager := replication.NewManager(schemaReader, reg)
		err := schemaManager.AddClass(buildApplyRequest("TestCollection", api.ApplyRequest_TYPE_ADD_CLASS, api.AddClassRequest{
			Class: &models.Class{Class: "TestCollection", MultiTenancyConfig: &models.MultiTenancyConfig{Enabled: false}},
			State: &sharding.State{
				Physical: map[string]sharding.Physical{
					"shard1": {BelongsToNodes: []string{"node1"}},
					"shard2": {BelongsToNodes: []string{"node1"}},
				},
			},
		}), "node1", true, false)
		require.NoError(t, err, "error while adding class: %v", err)

		firstSubCommand, err := json.Marshal(&api.ReplicationReplicateShardRequest{
			Uuid:             uuid4(),
			SourceCollection: "TestCollection",
			SourceShard:      "shard1",
			SourceNode:       "node1",
			TargetNode:       "node2",
			TransferType:     api.COPY.String(),
		})
		require.NoErrorf(t, err, "error while marshalling first replication request: %v", err)

		secondSubCommand, err := json.Marshal(&api.ReplicationReplicateShardRequest{
			Uuid:             uuid4(),
			SourceCollection: "TestCollection",
			SourceShard:      "shard2",
			SourceNode:       "node1",
			TargetNode:       "node3",
			TransferType:     api.COPY.String(),
		})
		require.NoErrorf(t, err, "error while marshalling second replication request: %v", err)

		err = manager.Replicate(0, &api.ApplyRequest{
			SubCommand: firstSubCommand,
		})
		require.NoErrorf(t, err, "error while starting first replication operation: %v", err)

		err = manager.Replicate(1, &api.ApplyRequest{
			SubCommand: secondSubCommand,
		})
		require.NoErrorf(t, err, "error while starting second replication operation: %v", err)

		// Both ops start out REGISTERED.
		assertGaugeValues(t, reg, metricName, map[api.ShardReplicationState]float64{
			api.REGISTERED: 2,
		})

		// Update first operation to 'READY'
		firstStateUpdate, err := json.Marshal(&api.ReplicationUpdateOpStateRequest{
			Version: 0,
			Id:      0,
			State:   api.READY,
		})
		require.NoErrorf(t, err, "error while marshalling first operation state change: %v", err)

		err = manager.UpdateReplicateOpState(&api.ApplyRequest{
			SubCommand: firstStateUpdate,
		})
		require.NoErrorf(t, err, "error while updating first operation state: %v", err)

		// Verify state after first operation state transition
		assertGaugeValues(t, reg, metricName, map[api.ShardReplicationState]float64{
			api.REGISTERED: 1,
			api.READY:      1,
		})

		// Update second operation to 'CANCELLED'
		secondStateUpdate, err := json.Marshal(&api.ReplicationUpdateOpStateRequest{
			Version: 0,
			Id:      1,
			State:   api.CANCELLED,
		})
		require.NoErrorf(t, err, "error while marshalling second operation state change: %v", err)

		err = manager.UpdateReplicateOpState(&api.ApplyRequest{
			SubCommand: secondStateUpdate,
		})
		require.NoErrorf(t, err, "error while updating second operation state: %v", err)

		// Verify state after second operation state transition
		assertGaugeValues(t, reg, metricName, map[api.ShardReplicationState]float64{
			api.REGISTERED: 0,
			api.READY:      1,
			api.CANCELLED:  1,
		})
	})
}
require.NoErrorf(t, err, "error while updating second operation state: %v", err) + + // Verify state after second operation state transition + assertGaugeValues(t, reg, metricName, map[api.ShardReplicationState]float64{ + api.REGISTERED: 0, + api.READY: 1, + api.CANCELLED: 1, + }) + }) +} + +func assertGaugeValues(t *testing.T, reg prometheus.Gatherer, metricName string, expectedMetrics map[api.ShardReplicationState]float64) { + t.Helper() + + var expectedOutput strings.Builder + _, _ = fmt.Fprintf(&expectedOutput, "\n# HELP %s Current number of replication operations in each state of the FSM lifecycle\n", metricName) + _, _ = fmt.Fprintf(&expectedOutput, "# TYPE %s gauge\n", metricName) + + for expectedState, expectedMetricValue := range expectedMetrics { + _, _ = fmt.Fprintf(&expectedOutput, "%s{state=\"%s\"} %v\n", metricName, expectedState, expectedMetricValue) + } + + err := testutil.GatherAndCompare(reg, strings.NewReader(expectedOutput.String()), metricName) + require.NoErrorf(t, err, "error while gathering %s metric: %v", metricName, err) +} + +func buildApplyRequest( + class string, + cmdType api.ApplyRequest_Type, + jsonSubCmd interface{}, +) *api.ApplyRequest { + subData, err := json.Marshal(jsonSubCmd) + if err != nil { + panic("json.Marshal( " + err.Error()) + } + + cmd := api.ApplyRequest{ + Type: cmdType, + Class: class, + SubCommand: subData, + } + + return &cmd +} + +func uuid4() strfmt.UUID { + id, err := uuid.NewRandom() + if err != nil { + panic(fmt.Sprintf("failed to generate Uuid: %v", err)) + } + return strfmt.UUID(id.String()) +} + +func TestReplicationFSM_HasOngoingReplication(t *testing.T) { + type hasOngoingReplicationParams struct { + collection string + shard string + replica string + expected bool + } + + tests := []struct { + name string + status api.ShardReplicationState + hasOngoingReplicationParams []hasOngoingReplicationParams + expectedError error + }{ + { + name: "op is REGISTERED", + status: api.REGISTERED, + 
hasOngoingReplicationParams: []hasOngoingReplicationParams{ + { + collection: "TestCollection", + shard: "shard1", + replica: "node1", + expected: true, + }, + { + collection: "non-existing-collection", + shard: "shard1", + replica: "node1", + expected: false, + }, + { + collection: "TestCollection", + shard: "non-existing-shard", + replica: "node1", + expected: false, + }, + { + collection: "TestCollection", + shard: "shard1", + replica: "non-existing-replica", + expected: false, + }, + }, + expectedError: nil, + }, + { + name: "op is HYDRATING", + status: api.HYDRATING, + hasOngoingReplicationParams: []hasOngoingReplicationParams{ + { + collection: "TestCollection", + shard: "shard1", + replica: "node1", + expected: true, + }, + { + collection: "non-existing-collection", + shard: "shard1", + replica: "node1", + expected: false, + }, + { + collection: "TestCollection", + shard: "non-existing-shard", + replica: "node1", + expected: false, + }, + { + collection: "TestCollection", + shard: "shard1", + replica: "non-existing-replica", + expected: false, + }, + }, + expectedError: nil, + }, + { + name: "op is DEHYDRATING", + status: api.DEHYDRATING, + hasOngoingReplicationParams: []hasOngoingReplicationParams{ + { + collection: "TestCollection", + shard: "shard1", + replica: "node1", + expected: true, + }, + { + collection: "non-existing-collection", + shard: "shard1", + replica: "node1", + expected: false, + }, + { + collection: "TestCollection", + shard: "non-existing-shard", + replica: "node1", + expected: false, + }, + { + collection: "TestCollection", + shard: "shard1", + replica: "non-existing-replica", + expected: false, + }, + }, + expectedError: nil, + }, + { + name: "op is FINALIZING", + status: api.FINALIZING, + hasOngoingReplicationParams: []hasOngoingReplicationParams{ + { + collection: "TestCollection", + shard: "shard1", + replica: "node1", + expected: true, + }, + { + collection: "non-existing-collection", + shard: "shard1", + replica: "node1", + 
expected: false, + }, + { + collection: "TestCollection", + shard: "non-existing-shard", + replica: "node1", + expected: false, + }, + { + collection: "TestCollection", + shard: "shard1", + replica: "non-existing-replica", + expected: false, + }, + }, + expectedError: nil, + }, + { + name: "op is CANCELLED", + status: api.CANCELLED, + hasOngoingReplicationParams: []hasOngoingReplicationParams{ + { + collection: "TestCollection", + shard: "shard1", + replica: "node1", + expected: false, + }, + { + collection: "non-existing-collection", + shard: "shard1", + replica: "node1", + expected: false, + }, + { + collection: "TestCollection", + shard: "non-existing-shard", + replica: "node1", + expected: false, + }, + { + collection: "TestCollection", + shard: "shard1", + replica: "non-existing-replica", + expected: false, + }, + }, + expectedError: nil, + }, + { + name: "op is READY", + status: api.READY, + hasOngoingReplicationParams: []hasOngoingReplicationParams{ + { + collection: "TestCollection", + shard: "shard1", + replica: "node1", + expected: false, + }, + { + collection: "non-existing-collection", + shard: "shard1", + replica: "node1", + expected: false, + }, + { + collection: "TestCollection", + shard: "non-existing-shard", + replica: "node1", + expected: false, + }, + { + collection: "TestCollection", + shard: "shard1", + replica: "non-existing-replica", + expected: false, + }, + }, + expectedError: nil, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + // Setup + reg := prometheus.NewPedanticRegistry() + parser := fakes.NewMockParser() + parser.On("ParseClass", mock.Anything).Return(nil) + schemaManager := schema.NewSchemaManager("test-node", nil, parser, prometheus.NewPedanticRegistry(), logrus.New()) + schemaReader := schemaManager.NewSchemaReader() + manager := replication.NewManager(schemaReader, reg) + schemaManager.AddClass( + buildApplyRequest("TestCollection", api.ApplyRequest_TYPE_ADD_CLASS, api.AddClassRequest{ 
+ Class: &models.Class{Class: "TestCollection", MultiTenancyConfig: &models.MultiTenancyConfig{Enabled: false}}, + State: &sharding.State{ + Physical: map[string]sharding.Physical{"shard1": {BelongsToNodes: []string{"node1"}}}, + }, + }), "node1", true, false) + + // Create ApplyRequest + subCommand, _ := json.Marshal(&api.ReplicationReplicateShardRequest{ + Uuid: uuid4(), + SourceCollection: "TestCollection", + SourceShard: "shard1", + SourceNode: "node1", + TargetNode: "node2", + TransferType: api.COPY.String(), + }) + applyRequest := &api.ApplyRequest{ + SubCommand: subCommand, + } + + // Execute + err := manager.Replicate(0, applyRequest) + assert.NoError(t, err) + + manager.GetReplicationFSM().UpdateReplicationOpStatus(&api.ReplicationUpdateOpStateRequest{ + Id: 0, + Version: 0, + State: tt.status, + }) + + for _, param := range tt.hasOngoingReplicationParams { + actual := manager.GetReplicationFSM().HasOngoingReplication(param.collection, param.shard, param.replica) + assert.Equal(t, param.expected, actual) + } + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/replication/metrics/metrics.go b/platform/dbops/binaries/weaviate-src/cluster/replication/metrics/metrics.go new file mode 100644 index 0000000000000000000000000000000000000000..42d378a9007fdd9cb6bd85fbe07a5b4a8aa9bd6a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/replication/metrics/metrics.go @@ -0,0 +1,379 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package metrics + +import ( + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" +) + +// ReplicationEngineOpsCallbacks contains a set of callback functions that are invoked +// on different stages of a replication operation's lifecycle. +type ReplicationEngineOpsCallbacks struct { + onPrepareProcessing func(node string) + onOpPending func(node string) + onOpSkipped func(node string) + onOpStart func(node string) + onOpComplete func(node string) + onOpFailed func(node string) + onOpCancelled func(node string) +} + +// ReplicationEngineOpsCallbacksBuilder helps construct an ReplicationEngineOpsCallbacks instance with +// custom behavior for each stage of a replication operation. +type ReplicationEngineOpsCallbacksBuilder struct { + callbacks ReplicationEngineOpsCallbacks +} + +// NewReplicationEngineOpsCallbacksBuilder initializes a new ReplicationEngineOpsCallbacksBuilder with +// no-op default callbacks. +func NewReplicationEngineOpsCallbacksBuilder() *ReplicationEngineOpsCallbacksBuilder { + return &ReplicationEngineOpsCallbacksBuilder{ + callbacks: ReplicationEngineOpsCallbacks{ + onPrepareProcessing: func(node string) {}, + onOpPending: func(node string) {}, + onOpSkipped: func(node string) {}, + onOpStart: func(node string) {}, + onOpComplete: func(node string) {}, + onOpFailed: func(node string) {}, + onOpCancelled: func(node string) {}, + }, + } +} + +// WithPrepareProcessing sets a callback to be executed before starting to process replication operations +// for a given node. This can be used to initialize metrics like counters and gauges to ensure they are +// exposed with an initial value, avoiding gaps when the engine starts. 
+func (b *ReplicationEngineOpsCallbacksBuilder) WithPrepareProcessing(callback func(node string)) *ReplicationEngineOpsCallbacksBuilder { + b.callbacks.onPrepareProcessing = callback + return b +} + +// WithOpPendingCallback sets a callback to be executed when a replication +// operation becomes pending for the given node. +func (b *ReplicationEngineOpsCallbacksBuilder) WithOpPendingCallback(callback func(node string)) *ReplicationEngineOpsCallbacksBuilder { + b.callbacks.onOpPending = callback + return b +} + +// WithOpSkippedCallback sets a callback to be executed when a replication +// operation is skipped because already running or completed execution (successfully or with a failure) +func (b *ReplicationEngineOpsCallbacksBuilder) WithOpSkippedCallback(callback func(node string)) *ReplicationEngineOpsCallbacksBuilder { + b.callbacks.onOpSkipped = callback + return b +} + +// WithOpStartCallback sets a callback to be executed when a replication +// operation starts processing for the given node. +func (b *ReplicationEngineOpsCallbacksBuilder) WithOpStartCallback(callback func(node string)) *ReplicationEngineOpsCallbacksBuilder { + b.callbacks.onOpStart = callback + return b +} + +// WithOpCompleteCallback sets a callback to be executed when a replication +// operation completes successfully for the given node. +func (b *ReplicationEngineOpsCallbacksBuilder) WithOpCompleteCallback(callback func(node string)) *ReplicationEngineOpsCallbacksBuilder { + b.callbacks.onOpComplete = callback + return b +} + +// WithOpFailedCallback sets a callback to be executed when a replication +// operation fails for the given node. +func (b *ReplicationEngineOpsCallbacksBuilder) WithOpFailedCallback(callback func(node string)) *ReplicationEngineOpsCallbacksBuilder { + b.callbacks.onOpFailed = callback + return b +} + +// WithOpCancelledCallback sets a callback to be executed when a replication +// operation is cancelled for the given node. 
+func (b *ReplicationEngineOpsCallbacksBuilder) WithOpCancelledCallback(callback func(node string)) *ReplicationEngineOpsCallbacksBuilder { + b.callbacks.onOpCancelled = callback + return b +} + +// Build finalizes the configuration and returns the ReplicationEngineOpsCallbacks instance. +func (b *ReplicationEngineOpsCallbacksBuilder) Build() *ReplicationEngineOpsCallbacks { + return &b.callbacks +} + +func (m *ReplicationEngineOpsCallbacks) OnPrepareProcessing(node string) { + m.onPrepareProcessing(node) +} + +// OnOpPending invokes the configured callback for when a replication operation becomes pending. +func (m *ReplicationEngineOpsCallbacks) OnOpPending(node string) { + m.onOpPending(node) +} + +// OnOpSkipped invokes the configured callback when a replication operation is skipped +func (m *ReplicationEngineOpsCallbacks) OnOpSkipped(node string) { + m.onOpSkipped(node) +} + +// OnOpStart invokes the configured callback for when a replication operation starts. +func (m *ReplicationEngineOpsCallbacks) OnOpStart(node string) { + m.onOpStart(node) +} + +// OnOpComplete invokes the configured callback for when a replication operation completes successfully. +func (m *ReplicationEngineOpsCallbacks) OnOpComplete(node string) { + m.onOpComplete(node) +} + +// OnOpFailed invokes the configured callback for when a replication operation fails. +func (m *ReplicationEngineOpsCallbacks) OnOpFailed(node string) { + m.onOpFailed(node) +} + +// OnOpCancelled invokes the configured callback for when a replication operation is cancelled. +func (m *ReplicationEngineOpsCallbacks) OnOpCancelled(node string) { + m.onOpCancelled(node) +} + +// NewReplicationEngineOpsCallbacks creates and registers Prometheus metrics for tracking +// replication operations and returns a ReplicationEngineOpsCallbacks instance configured to update those metrics. 
+// +// The following metrics are registered with the provided registerer: +// - weaviate_replication_pending_operations (GaugeVec) +// - weaviate_replication_ongoing_operations (GaugeVec) +// - weaviate_replication_complete_operations (CounterVec) +// - weaviate_replication_failed_operations (CounterVec) +// - weaviate_replication_cancelled_operations (CounterVec) +// +// All metrics are labeled by node and automatically updated through the callback lifecycle. +// +// The operation lifecycle and corresponding metric updates are as follows: +// 1. When an operation is **registered as pending**, increment `replication_pending_operations`. +// 2. When (and if) an operations is **skipped** (cancelled before starting), decrement `replication_pending_operations`. +// 3. When an operation **starts**, decrement `replication_pending_operations` and increment `replication_ongoing_operations`. +// 4. When an operation **completes successfully**, decrement `replication_ongoing_operations` and increment `replication_complete_operations`. +// 5. When an operation **fails**, decrement `replication_ongoing_operations` and increment `replication_failed_operations`. +// +// This ensures that gauges (`pending`, `ongoing`) reflect the current number of active operations, +// while counters (`complete`, `failed`) accumulate totals over time. 
+func NewReplicationEngineOpsCallbacks(reg prometheus.Registerer) *ReplicationEngineOpsCallbacks { + pendingOps := promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "weaviate", + Name: "replication_pending_operations", + Help: "Number of replication operations pending processing", + }, []string{"node"}) + + ongoingOps := promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "weaviate", + Name: "replication_ongoing_operations", + Help: "Number of replication operations currently in progress", + }, []string{"node"}) + + completeOps := promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ + Namespace: "weaviate", + Name: "replication_complete_operations", + Help: "Number of successfully completed replication operations", + }, []string{"node"}) + + failedOps := promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ + Namespace: "weaviate", + Name: "replication_failed_operations", + Help: "Number of failed replication operations", + }, []string{"node"}) + + cancelledOps := promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ + Namespace: "weaviate", + Name: "replication_cancelled_operations", + Help: "Number of cancelled replication operations", + }, []string{"node"}) + + return NewReplicationEngineOpsCallbacksBuilder(). + WithPrepareProcessing(func(node string) { + // Add(0) is used to ensure that the metric exists for the given node label + // and will be scraped by Prometheus even before any real increment happens. + // This avoids gaps and allows queries like increase() and rate() to work correctly + // from startup. + pendingOps.WithLabelValues(node).Add(0) + ongoingOps.WithLabelValues(node).Add(0) + completeOps.WithLabelValues(node).Add(0) + failedOps.WithLabelValues(node).Add(0) + }). + WithOpPendingCallback(func(node string) { + pendingOps.WithLabelValues(node).Inc() + }). + WithOpSkippedCallback(func(node string) { + pendingOps.WithLabelValues(node).Dec() + }). 
+ WithOpStartCallback(func(node string) { + pendingOps.WithLabelValues(node).Dec() + ongoingOps.WithLabelValues(node).Inc() + }). + WithOpCompleteCallback(func(node string) { + ongoingOps.WithLabelValues(node).Dec() + completeOps.WithLabelValues(node).Inc() + }). + WithOpFailedCallback(func(node string) { + ongoingOps.WithLabelValues(node).Dec() + failedOps.WithLabelValues(node).Inc() + }). + WithOpCancelledCallback(func(node string) { + ongoingOps.WithLabelValues(node).Dec() + cancelledOps.WithLabelValues(node).Inc() + }). + Build() +} + +// ReplicationEngineCallbacks contains a set of callback functions that are invoked +// during the lifecycle of the replication engine and its internal components. +// +// These callbacks allow external systems to react to state transitions in the +// engine, producer, and consumer. +type ReplicationEngineCallbacks struct { + onEngineStart func(node string) + onEngineStop func(node string) + onProducerStart func(node string) + onProducerStop func(node string) + onConsumerStart func(node string) + onConsumerStop func(node string) +} + +// ReplicationEngineCallbacksBuilder helps construct an ReplicationEngineCallbacks instance +// by allowing selective customization of lifecycle hooks. +// +// All callbacks default to no-ops unless explicitly overridden. +type ReplicationEngineCallbacksBuilder struct { + callbacks ReplicationEngineCallbacks +} + +// NewReplicationEngineCallbacksBuilder initializes a new ReplicationEngineCallbacksBuilder with +// default no-op functions for all lifecycle callbacks. 
+func NewReplicationEngineCallbacksBuilder() *ReplicationEngineCallbacksBuilder { + return &ReplicationEngineCallbacksBuilder{ + callbacks: ReplicationEngineCallbacks{ + onEngineStart: func(node string) {}, + onEngineStop: func(node string) {}, + onProducerStart: func(node string) {}, + onProducerStop: func(node string) {}, + onConsumerStart: func(node string) {}, + onConsumerStop: func(node string) {}, + }, + } +} + +// WithEngineStartCallback sets the callback to be executed when the replication engine starts. +func (b *ReplicationEngineCallbacksBuilder) WithEngineStartCallback(callback func(node string)) *ReplicationEngineCallbacksBuilder { + b.callbacks.onEngineStart = callback + return b +} + +// WithEngineStopCallback sets the callback to be executed when the replication engine stops. +func (b *ReplicationEngineCallbacksBuilder) WithEngineStopCallback(callback func(node string)) *ReplicationEngineCallbacksBuilder { + b.callbacks.onEngineStop = callback + return b +} + +// WithProducerStartCallback sets the callback to be executed when the replication engine's producer starts. +func (b *ReplicationEngineCallbacksBuilder) WithProducerStartCallback(callback func(node string)) *ReplicationEngineCallbacksBuilder { + b.callbacks.onProducerStart = callback + return b +} + +// WithProducerStopCallback sets the callback to be executed when the replication engine's producer stops. +func (b *ReplicationEngineCallbacksBuilder) WithProducerStopCallback(callback func(node string)) *ReplicationEngineCallbacksBuilder { + b.callbacks.onProducerStop = callback + return b +} + +// WithConsumerStartCallback sets the callback to be executed when the replication engine's consumer starts. 
+func (b *ReplicationEngineCallbacksBuilder) WithConsumerStartCallback(callback func(node string)) *ReplicationEngineCallbacksBuilder { + b.callbacks.onConsumerStart = callback + return b +} + +// WithConsumerStopCallback sets the callback to be executed when the replication engine's consumer stops. +func (b *ReplicationEngineCallbacksBuilder) WithConsumerStopCallback(callback func(node string)) *ReplicationEngineCallbacksBuilder { + b.callbacks.onConsumerStop = callback + return b +} + +// Build finalizes the builder and returns the ReplicationEngineCallbacks instance. +func (b *ReplicationEngineCallbacksBuilder) Build() *ReplicationEngineCallbacks { + return &b.callbacks +} + +// OnEngineStart invokes the configured callback for when the engine starts. +func (m *ReplicationEngineCallbacks) OnEngineStart(node string) { + m.onEngineStart(node) +} + +// OnEngineStop invokes the configured callback for when the engine stops. +func (m *ReplicationEngineCallbacks) OnEngineStop(node string) { + m.onEngineStop(node) +} + +// OnProducerStart invokes the configured callback for when the producer starts. +func (m *ReplicationEngineCallbacks) OnProducerStart(node string) { + m.onProducerStart(node) +} + +// OnProducerStop invokes the configured callback for when the producer stops. +func (m *ReplicationEngineCallbacks) OnProducerStop(node string) { + m.onProducerStop(node) +} + +// OnConsumerStart invokes the configured callback for when the consumer starts. +func (m *ReplicationEngineCallbacks) OnConsumerStart(node string) { + m.onConsumerStart(node) +} + +// OnConsumerStop invokes the configured callback for when the consumer stops. +func (m *ReplicationEngineCallbacks) OnConsumerStop(node string) { + m.onConsumerStop(node) +} + +// NewReplicationEngineCallbacks creates and registers Prometheus metrics +// to track the lifecycle status of the replication engine and its internal components. 
+// +// It returns an ReplicationEngineCallbacks instance that updates the following metrics: +// - weaviate_replication_engine_running_status (GaugeVec) +// - weaviate_replication_engine_producer_running_status (GaugeVec) +// - weaviate_replication_engine_consumer_running_status (GaugeVec) +// +// All metrics are labeled by node and reflect the current running state: +// - 1 = running +// - 0 = not running +// +// This provides visibility into whether the engine, producer, or consumer +// is currently active on a per-node basis. +func NewReplicationEngineCallbacks(reg prometheus.Registerer) *ReplicationEngineCallbacks { + engineRunning := promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "weaviate", + Name: "replication_engine_running_status", + Help: "The Replication engine running status (0: not running, 1: running)", + }, []string{"node"}) + + producerRunning := promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "weaviate", + Name: "replication_engine_producer_running_status", + Help: "The replication engine producer running status (0: not running, 1: running)", + }, []string{"node"}) + + consumerRunning := promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "weaviate", + Name: "replication_engine_consumer_running_status", + Help: "The replication engine consumer running status (0: not running, 1: running)", + }, []string{"node"}) + + return NewReplicationEngineCallbacksBuilder(). + WithEngineStartCallback(func(node string) { engineRunning.WithLabelValues(node).Set(1) }). + WithEngineStopCallback(func(node string) { engineRunning.WithLabelValues(node).Set(0) }). + WithProducerStartCallback(func(node string) { producerRunning.WithLabelValues(node).Set(1) }). + WithProducerStopCallback(func(node string) { producerRunning.WithLabelValues(node).Set(0) }). + WithConsumerStartCallback(func(node string) { consumerRunning.WithLabelValues(node).Set(1) }). 
+ WithConsumerStopCallback(func(node string) { consumerRunning.WithLabelValues(node).Set(0) }). + Build() +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/replication/metrics/metrics_test.go b/platform/dbops/binaries/weaviate-src/cluster/replication/metrics/metrics_test.go new file mode 100644 index 0000000000000000000000000000000000000000..a48df59454c94eb0eb3e874390752759ca238384 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/replication/metrics/metrics_test.go @@ -0,0 +1,463 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package metrics_test + +import ( + "testing" + + "github.com/weaviate/weaviate/cluster/replication/metrics" + + "github.com/prometheus/client_golang/prometheus" + io_prometheus_client "github.com/prometheus/client_model/go" + "github.com/stretchr/testify/require" +) + +func TestOpCallbacks(t *testing.T) { + t.Run("default callbacks should be no-op", func(t *testing.T) { + callbacks := metrics.NewReplicationEngineOpsCallbacksBuilder().Build() + callbacks.OnPrepareProcessing("node1") + callbacks.OnOpPending("node1") + callbacks.OnOpSkipped("node1") + callbacks.OnOpStart("node1") + callbacks.OnOpComplete("node1") + callbacks.OnOpFailed("node1") + callbacks.OnOpCancelled("node1") + }) + + t.Run("custom callbacks should be called with correct parameters", func(t *testing.T) { + // GIVEN + var ( + prepareProcessingNode string + pendingNode string + skippedNode string + startNode string + completeNode string + failedNode string + cancelledNode string + ) + + callbacks := metrics.NewReplicationEngineOpsCallbacksBuilder(). + WithPrepareProcessing(func(node string) { + prepareProcessingNode = node + }). + WithOpPendingCallback(func(node string) { + pendingNode = node + }). 
+ WithOpSkippedCallback(func(node string) { + skippedNode = node + }). + WithOpStartCallback(func(node string) { + startNode = node + }). + WithOpCompleteCallback(func(node string) { + completeNode = node + }). + WithOpFailedCallback(func(node string) { + failedNode = node + }). + WithOpCancelledCallback(func(node string) { + cancelledNode = node + }). + Build() + + // WHEN + expectedNode := "test-node" + callbacks.OnPrepareProcessing(expectedNode) + callbacks.OnOpPending(expectedNode) + callbacks.OnOpSkipped(expectedNode) + callbacks.OnOpStart(expectedNode) + callbacks.OnOpComplete(expectedNode) + callbacks.OnOpFailed(expectedNode) + callbacks.OnOpCancelled(expectedNode) + + // THEN + require.Equal(t, expectedNode, prepareProcessingNode, "invalid prepare processing callback node") + require.Equal(t, expectedNode, pendingNode, "invalid pending callback node") + require.Equal(t, expectedNode, skippedNode, "invalid skipped callback node") + require.Equal(t, expectedNode, startNode, "invalid start callback node") + require.Equal(t, expectedNode, completeNode, "invalid complete callback node") + require.Equal(t, expectedNode, failedNode, "invalid failed callback node") + require.Equal(t, expectedNode, cancelledNode, "invalid cancelled callback node") + }) + + t.Run("only prepare processing", func(t *testing.T) { + // GIVEN + prepareProcessingCalled := false + callbacks := metrics.NewReplicationEngineOpsCallbacksBuilder(). + WithPrepareProcessing(func(node string) { + prepareProcessingCalled = true + }). + Build() + + // WHEN + callbacks.OnPrepareProcessing("node1") + + // THEN + require.True(t, prepareProcessingCalled, "expected prepare processing callback to be called") + }) + + t.Run("only op pending", func(t *testing.T) { + // GIVEN + pendingCalled := false + callbacks := metrics.NewReplicationEngineOpsCallbacksBuilder(). + WithOpPendingCallback(func(node string) { + pendingCalled = true + }). 
+ Build() + + // WHEN + callbacks.OnOpPending("node1") + + // THEN + require.True(t, pendingCalled, "expected pending callback to be called") + }) + + t.Run("only op skipped", func(t *testing.T) { + // GIVEN + skippedCalled := false + callbacks := metrics.NewReplicationEngineOpsCallbacksBuilder(). + WithOpSkippedCallback(func(node string) { + skippedCalled = true + }). + Build() + + // WHEN + callbacks.OnOpSkipped("node1") + + // THEN + require.True(t, skippedCalled, "expected skipped callback to be called") + }) + + t.Run("only op start", func(t *testing.T) { + // GIVEN + startCalled := false + callbacks := metrics.NewReplicationEngineOpsCallbacksBuilder(). + WithOpStartCallback(func(node string) { + startCalled = true + }). + Build() + + // WHEN + callbacks.OnOpStart("node1") + + // THEN + require.True(t, startCalled, "expected start callback to be called") + }) + + t.Run("only op complete", func(t *testing.T) { + // GIVEN + completeCalled := false + callbacks := metrics.NewReplicationEngineOpsCallbacksBuilder(). + WithOpCompleteCallback(func(node string) { + completeCalled = true + }). + Build() + + // WHEN + callbacks.OnOpComplete("node1") + + // THEN + require.True(t, completeCalled, "expected complete callback to be called") + }) + + t.Run("only op failed", func(t *testing.T) { + // GIVEN + failedCalled := false + callbacks := metrics.NewReplicationEngineOpsCallbacksBuilder(). + WithOpFailedCallback(func(node string) { + failedCalled = true + }). + Build() + + // WHEN + callbacks.OnOpFailed("node1") + + // THEN + require.True(t, failedCalled, "expected failed callback to be called") + }) + + t.Run("only op cancelled", func(t *testing.T) { + // GIVEN + cancelledCalled := false + callbacks := metrics.NewReplicationEngineOpsCallbacksBuilder(). + WithOpCancelledCallback(func(node string) { + cancelledCalled = true + }). 
+ Build() + + // WHEN + callbacks.OnOpCancelled("node1") + + // THEN + require.True(t, cancelledCalled, "expected cancelled callback to be called") + }) +} + +func TestMetricsCollection(t *testing.T) { + t.Run("metrics should track operations correctly", func(t *testing.T) { + // GIVEN + reg := prometheus.NewPedanticRegistry() + callbacks := metrics.NewReplicationEngineOpsCallbacks(reg) + node := "test-node" + + callbacks.OnPrepareProcessing(node) + + // Process first operation completing successfully + callbacks.OnOpPending(node) + callbacks.OnOpStart(node) + callbacks.OnOpComplete(node) + + // Process second operation completing with a failure + callbacks.OnOpPending(node) + callbacks.OnOpStart(node) + callbacks.OnOpFailed(node) // This one fails + + // Start a third operation but leave it running + callbacks.OnOpPending(node) + callbacks.OnOpStart(node) + + // Start a fourth operation but leave it pending + callbacks.OnOpPending(node) + + // Start a fifth operation but skip it + callbacks.OnOpPending(node) + callbacks.OnOpSkipped(node) + + // Start a sixth operation but cancel it + callbacks.OnOpPending(node) + callbacks.OnOpStart(node) + callbacks.OnOpCancelled(node) + + // WHEN + metricFamilies, err := reg.Gather() + require.NoError(t, err) + + metricsByName := make(map[string]*io_prometheus_client.MetricFamily) + for _, mf := range metricFamilies { + metricsByName[mf.GetName()] = mf + } + + // THEN + require.Equal(t, float64(1), metricsByName["weaviate_replication_pending_operations"].GetMetric()[0].GetGauge().GetValue()) + require.Equal(t, float64(1), metricsByName["weaviate_replication_ongoing_operations"].GetMetric()[0].GetGauge().GetValue()) + require.Equal(t, float64(1), metricsByName["weaviate_replication_complete_operations"].GetMetric()[0].GetCounter().GetValue()) + require.Equal(t, float64(1), metricsByName["weaviate_replication_failed_operations"].GetMetric()[0].GetCounter().GetValue()) + require.Equal(t, float64(1), 
metricsByName["weaviate_replication_cancelled_operations"].GetMetric()[0].GetCounter().GetValue()) + }) + + t.Run("metrics should be tracked separately for different nodes", func(t *testing.T) { + // GIVEN + reg := prometheus.NewPedanticRegistry() + callbacks := metrics.NewReplicationEngineOpsCallbacks(reg) + node1 := "node-1" + node2 := "node-2" + + callbacks.OnPrepareProcessing(node1) + callbacks.OnPrepareProcessing(node2) + + // Node 1 ops + callbacks.OnOpPending(node1) + callbacks.OnOpStart(node1) + callbacks.OnOpComplete(node1) + + callbacks.OnOpPending(node1) + callbacks.OnOpStart(node1) + callbacks.OnOpFailed(node1) + + // Node 2 ops + callbacks.OnOpPending(node2) + callbacks.OnOpStart(node2) + callbacks.OnOpComplete(node2) + + callbacks.OnOpPending(node2) + callbacks.OnOpStart(node2) + callbacks.OnOpComplete(node2) + + // Pending operation for node 2 + callbacks.OnOpPending(node2) + + // Pending operation for node 2 then skipped + callbacks.OnOpPending(node2) + callbacks.OnOpSkipped(node2) + + // WHEN + metricFamilies, err := reg.Gather() + require.NoError(t, err) + + pendingByNode := make(map[string]float64) + ongoingByNode := make(map[string]float64) + completeByNode := make(map[string]float64) + failedByNode := make(map[string]float64) + + for _, mf := range metricFamilies { + for _, m := range mf.GetMetric() { + var nodeLabel string + for _, labelPair := range m.GetLabel() { + if labelPair.GetName() == "node" { + nodeLabel = labelPair.GetValue() + break + } + } + + switch mf.GetName() { + case "weaviate_replication_pending_operations": + pendingByNode[nodeLabel] = m.GetGauge().GetValue() + case "weaviate_replication_ongoing_operations": + ongoingByNode[nodeLabel] = m.GetGauge().GetValue() + case "weaviate_replication_complete_operations": + completeByNode[nodeLabel] = m.GetCounter().GetValue() + case "weaviate_replication_failed_operations": + failedByNode[nodeLabel] = m.GetCounter().GetValue() + } + } + } + + // THEN (for node1) + require.Equal(t, 
float64(0), pendingByNode[node1], "invalid pending callback node") + require.Equal(t, float64(0), ongoingByNode[node1], "invalid ongoing callback node") + require.Equal(t, float64(1), completeByNode[node1], "invalid complete callback node") + require.Equal(t, float64(1), failedByNode[node1], "invalid failed callback node") + + // THEN (for node2) + require.Equal(t, float64(1), pendingByNode[node2], "invalid pending callback node") + require.Equal(t, float64(0), ongoingByNode[node2], "invalid ongoing callback node") + require.Equal(t, float64(2), completeByNode[node2], "invalid complete callback node") + require.Equal(t, float64(0), failedByNode[node2], "invalid failed callback node") + }) +} + +func TestEngineCallbacks(t *testing.T) { + t.Run("default callbacks should be no-op", func(t *testing.T) { + callbacks := metrics.NewReplicationEngineCallbacksBuilder().Build() + callbacks.OnEngineStart("node1") + callbacks.OnEngineStop("node1") + callbacks.OnProducerStart("node1") + callbacks.OnProducerStop("node1") + callbacks.OnConsumerStart("node1") + callbacks.OnConsumerStop("node1") + }) + + t.Run("custom callbacks should be called with correct parameters", func(t *testing.T) { + var ( + engineStartedNode, engineStoppedNode string + producerStartedNode, producerStoppedNode string + consumerStartedNode, consumerStoppedNode string + engineStartedCallbacksCounter, engineStoppedCallbacksCounter int + producerStartedCallbacksCounter, producerStoppedCallbacksCounter int + consumerStartedCallbacksCounter, consumerStoppedCallbacksCounter int + ) + + callbacks := metrics.NewReplicationEngineCallbacksBuilder(). + WithEngineStartCallback(func(node string) { + engineStartedNode = node + engineStartedCallbacksCounter++ + }). + WithEngineStopCallback(func(node string) { + engineStoppedNode = node + engineStoppedCallbacksCounter++ + }). + WithProducerStartCallback(func(node string) { + producerStartedNode = node + producerStartedCallbacksCounter++ + }). 
+ WithProducerStopCallback(func(node string) { + producerStoppedNode = node + producerStoppedCallbacksCounter++ + }). + WithConsumerStartCallback(func(node string) { + consumerStartedNode = node + consumerStartedCallbacksCounter++ + }). + WithConsumerStopCallback(func(node string) { + consumerStoppedNode = node + consumerStoppedCallbacksCounter++ + }). + Build() + + node := "node-test" + callbacks.OnEngineStart(node) + callbacks.OnEngineStop(node) + callbacks.OnProducerStart(node) + callbacks.OnProducerStop(node) + callbacks.OnConsumerStart(node) + callbacks.OnConsumerStop(node) + + require.Equal(t, node, engineStartedNode, "invalid node in engine start callback") + require.Equal(t, node, engineStoppedNode, "invalid node in engine stop callback") + require.Equal(t, node, producerStartedNode, "invalid node in producer start callback") + require.Equal(t, node, producerStoppedNode, "invalid node in producer stop callback") + require.Equal(t, node, consumerStartedNode, "invalid node in consumer start callback") + require.Equal(t, node, consumerStoppedNode, "invalid node in consumer stop callback") + require.Equal(t, 1, engineStartedCallbacksCounter, "invalid engine started callback counter") + require.Equal(t, 1, engineStoppedCallbacksCounter, "invalid engine stop callback counter") + require.Equal(t, 1, producerStartedCallbacksCounter, "invalid producer start callback counter") + require.Equal(t, 1, producerStoppedCallbacksCounter, "invalid producer stop callback counter") + require.Equal(t, 1, consumerStartedCallbacksCounter, "invalid consumer start callback counter") + require.Equal(t, 1, consumerStoppedCallbacksCounter, "invalid consumer stop callback counter") + }) +} + +func TestEngineMetricsCollection(t *testing.T) { + t.Run("engine lifecycle metrics are tracked correctly", func(t *testing.T) { + // GIVEN + reg := prometheus.NewPedanticRegistry() + callbacks := metrics.NewReplicationEngineCallbacks(reg) + node := "node1" + + // WHEN + 
callbacks.OnEngineStart(node) + callbacks.OnProducerStart(node) + callbacks.OnConsumerStart(node) + afterStartMetricFamilies, err := reg.Gather() + + // THEN + require.NoError(t, err) + afterStartMetrics := collectMetrics(afterStartMetricFamilies, node) + require.Equal(t, float64(1), afterStartMetrics["engine"], "invalid engine running status") + require.Equal(t, float64(1), afterStartMetrics["producer"], "invalid producer running status") + require.Equal(t, float64(1), afterStartMetrics["consumer"], "invalid consumer running status") + + // WHEN + callbacks.OnProducerStop(node) + callbacks.OnConsumerStop(node) + callbacks.OnEngineStop(node) + afterStopMetricFamilies, err := reg.Gather() + + // THEN + require.NoError(t, err) + afterStopMetrics := collectMetrics(afterStopMetricFamilies, node) + require.Equal(t, float64(0), afterStopMetrics["engine"], "invalid engine running status") + require.Equal(t, float64(0), afterStopMetrics["producer"], "invalid producer running status") + require.Equal(t, float64(0), afterStopMetrics["consumer"], "invalid consumer running status") + }) +} + +func collectMetrics(metricFamilies []*io_prometheus_client.MetricFamily, node string) map[string]float64 { + values := make(map[string]float64) + for _, mf := range metricFamilies { + for _, m := range mf.GetMetric() { + for _, label := range m.GetLabel() { + if label.GetName() == "node" && label.GetValue() == node { + switch mf.GetName() { + case "weaviate_replication_engine_running_status": + values["engine"] = m.GetGauge().GetValue() + case "weaviate_replication_engine_producer_running_status": + values["producer"] = m.GetGauge().GetValue() + case "weaviate_replication_engine_consumer_running_status": + values["consumer"] = m.GetGauge().GetValue() + } + } + } + } + } + return values +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/replication/mock_op_consumer.go b/platform/dbops/binaries/weaviate-src/cluster/replication/mock_op_consumer.go new file mode 100644 index 
0000000000000000000000000000000000000000..9c8e924bb1fe93367d3eeab14ff68f462e41f51e --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/replication/mock_op_consumer.go @@ -0,0 +1,94 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by mockery v2.53.2. DO NOT EDIT. + +package replication + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" +) + +// MockOpConsumer is an autogenerated mock type for the OpConsumer type +type MockOpConsumer struct { + mock.Mock +} + +type MockOpConsumer_Expecter struct { + mock *mock.Mock +} + +func (_m *MockOpConsumer) EXPECT() *MockOpConsumer_Expecter { + return &MockOpConsumer_Expecter{mock: &_m.Mock} +} + +// Consume provides a mock function with given fields: ctx, in +func (_m *MockOpConsumer) Consume(ctx context.Context, in <-chan ShardReplicationOpAndStatus) error { + ret := _m.Called(ctx, in) + + if len(ret) == 0 { + panic("no return value specified for Consume") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, <-chan ShardReplicationOpAndStatus) error); ok { + r0 = rf(ctx, in) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockOpConsumer_Consume_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Consume' +type MockOpConsumer_Consume_Call struct { + *mock.Call +} + +// Consume is a helper method to define mock.On call +// - ctx context.Context +// - in <-chan ShardReplicationOpAndStatus +func (_e *MockOpConsumer_Expecter) Consume(ctx interface{}, in interface{}) *MockOpConsumer_Consume_Call { + return &MockOpConsumer_Consume_Call{Call: _e.mock.On("Consume", ctx, in)} +} + +func (_c *MockOpConsumer_Consume_Call) Run(run func(ctx context.Context, in <-chan 
ShardReplicationOpAndStatus)) *MockOpConsumer_Consume_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(<-chan ShardReplicationOpAndStatus)) + }) + return _c +} + +func (_c *MockOpConsumer_Consume_Call) Return(_a0 error) *MockOpConsumer_Consume_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockOpConsumer_Consume_Call) RunAndReturn(run func(context.Context, <-chan ShardReplicationOpAndStatus) error) *MockOpConsumer_Consume_Call { + _c.Call.Return(run) + return _c +} + +// NewMockOpConsumer creates a new instance of MockOpConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMockOpConsumer(t interface { + mock.TestingT + Cleanup(func()) +}) *MockOpConsumer { + mock := &MockOpConsumer{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/replication/mock_op_producer.go b/platform/dbops/binaries/weaviate-src/cluster/replication/mock_op_producer.go new file mode 100644 index 0000000000000000000000000000000000000000..7adf85388ae3f0337e503a92b833c5bb6eccd231 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/replication/mock_op_producer.go @@ -0,0 +1,94 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by mockery v2.53.2. DO NOT EDIT. 
+ +package replication + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" +) + +// MockOpProducer is an autogenerated mock type for the OpProducer type +type MockOpProducer struct { + mock.Mock +} + +type MockOpProducer_Expecter struct { + mock *mock.Mock +} + +func (_m *MockOpProducer) EXPECT() *MockOpProducer_Expecter { + return &MockOpProducer_Expecter{mock: &_m.Mock} +} + +// Produce provides a mock function with given fields: ctx, out +func (_m *MockOpProducer) Produce(ctx context.Context, out chan<- ShardReplicationOpAndStatus) error { + ret := _m.Called(ctx, out) + + if len(ret) == 0 { + panic("no return value specified for Produce") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, chan<- ShardReplicationOpAndStatus) error); ok { + r0 = rf(ctx, out) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockOpProducer_Produce_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Produce' +type MockOpProducer_Produce_Call struct { + *mock.Call +} + +// Produce is a helper method to define mock.On call +// - ctx context.Context +// - out chan<- ShardReplicationOpAndStatus +func (_e *MockOpProducer_Expecter) Produce(ctx interface{}, out interface{}) *MockOpProducer_Produce_Call { + return &MockOpProducer_Produce_Call{Call: _e.mock.On("Produce", ctx, out)} +} + +func (_c *MockOpProducer_Produce_Call) Run(run func(ctx context.Context, out chan<- ShardReplicationOpAndStatus)) *MockOpProducer_Produce_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(chan<- ShardReplicationOpAndStatus)) + }) + return _c +} + +func (_c *MockOpProducer_Produce_Call) Return(_a0 error) *MockOpProducer_Produce_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockOpProducer_Produce_Call) RunAndReturn(run func(context.Context, chan<- ShardReplicationOpAndStatus) error) *MockOpProducer_Produce_Call { + _c.Call.Return(run) + return _c +} + +// 
NewMockOpProducer creates a new instance of MockOpProducer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMockOpProducer(t interface { + mock.TestingT + Cleanup(func()) +}) *MockOpProducer { + mock := &MockOpProducer{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/replication/mock_time_provider.go b/platform/dbops/binaries/weaviate-src/cluster/replication/mock_time_provider.go new file mode 100644 index 0000000000000000000000000000000000000000..17741969c9375259a32da38f457b3df8387528d7 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/replication/mock_time_provider.go @@ -0,0 +1,92 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by mockery v2.53.2. DO NOT EDIT. 
+ +package replication + +import ( + time "time" + + mock "github.com/stretchr/testify/mock" +) + +// MockTimeProvider is an autogenerated mock type for the TimeProvider type +type MockTimeProvider struct { + mock.Mock +} + +type MockTimeProvider_Expecter struct { + mock *mock.Mock +} + +func (_m *MockTimeProvider) EXPECT() *MockTimeProvider_Expecter { + return &MockTimeProvider_Expecter{mock: &_m.Mock} +} + +// Now provides a mock function with no fields +func (_m *MockTimeProvider) Now() time.Time { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Now") + } + + var r0 time.Time + if rf, ok := ret.Get(0).(func() time.Time); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(time.Time) + } + + return r0 +} + +// MockTimeProvider_Now_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Now' +type MockTimeProvider_Now_Call struct { + *mock.Call +} + +// Now is a helper method to define mock.On call +func (_e *MockTimeProvider_Expecter) Now() *MockTimeProvider_Now_Call { + return &MockTimeProvider_Now_Call{Call: _e.mock.On("Now")} +} + +func (_c *MockTimeProvider_Now_Call) Run(run func()) *MockTimeProvider_Now_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockTimeProvider_Now_Call) Return(_a0 time.Time) *MockTimeProvider_Now_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockTimeProvider_Now_Call) RunAndReturn(run func() time.Time) *MockTimeProvider_Now_Call { + _c.Call.Return(run) + return _c +} + +// NewMockTimeProvider creates a new instance of MockTimeProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewMockTimeProvider(t interface { + mock.TestingT + Cleanup(func()) +}) *MockTimeProvider { + mock := &MockTimeProvider{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/replication/mock_timer.go b/platform/dbops/binaries/weaviate-src/cluster/replication/mock_timer.go new file mode 100644 index 0000000000000000000000000000000000000000..99afa579622128b05d25589528f5d020353b086d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/replication/mock_timer.go @@ -0,0 +1,96 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by mockery v2.53.2. DO NOT EDIT. + +package replication + +import ( + time "time" + + mock "github.com/stretchr/testify/mock" +) + +// MockTimer is an autogenerated mock type for the Timer type +type MockTimer struct { + mock.Mock +} + +type MockTimer_Expecter struct { + mock *mock.Mock +} + +func (_m *MockTimer) EXPECT() *MockTimer_Expecter { + return &MockTimer_Expecter{mock: &_m.Mock} +} + +// AfterFunc provides a mock function with given fields: duration, fn +func (_m *MockTimer) AfterFunc(duration time.Duration, fn func()) *time.Timer { + ret := _m.Called(duration, fn) + + if len(ret) == 0 { + panic("no return value specified for AfterFunc") + } + + var r0 *time.Timer + if rf, ok := ret.Get(0).(func(time.Duration, func()) *time.Timer); ok { + r0 = rf(duration, fn) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*time.Timer) + } + } + + return r0 +} + +// MockTimer_AfterFunc_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AfterFunc' +type MockTimer_AfterFunc_Call struct { + *mock.Call +} + +// AfterFunc is a helper method to 
define mock.On call +// - duration time.Duration +// - fn func() +func (_e *MockTimer_Expecter) AfterFunc(duration interface{}, fn interface{}) *MockTimer_AfterFunc_Call { + return &MockTimer_AfterFunc_Call{Call: _e.mock.On("AfterFunc", duration, fn)} +} + +func (_c *MockTimer_AfterFunc_Call) Run(run func(duration time.Duration, fn func())) *MockTimer_AfterFunc_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(time.Duration), args[1].(func())) + }) + return _c +} + +func (_c *MockTimer_AfterFunc_Call) Return(_a0 *time.Timer) *MockTimer_AfterFunc_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockTimer_AfterFunc_Call) RunAndReturn(run func(time.Duration, func()) *time.Timer) *MockTimer_AfterFunc_Call { + _c.Call.Return(run) + return _c +} + +// NewMockTimer creates a new instance of MockTimer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMockTimer(t interface { + mock.TestingT + Cleanup(func()) +}) *MockTimer { + mock := &MockTimer{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/replication/producer.go b/platform/dbops/binaries/weaviate-src/cluster/replication/producer.go new file mode 100644 index 0000000000000000000000000000000000000000..e05185c34cd03a8bc6433e21db42eda2c4c0c246 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/replication/producer.go @@ -0,0 +1,131 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package replication + +import ( + "context" + "time" + + "github.com/sirupsen/logrus" +) + +// OpProducer is an interface for producing replication operations. +type OpProducer interface { + // Produce starts producing replication operations and sends them to the provided channel. + // A buffered channel is typically used for backpressure, but an unbounded channel may cause + // memory growth if the consumer falls behind. Errors during production should be returned. + Produce(ctx context.Context, out chan<- ShardReplicationOpAndStatus) error +} + +// FSMOpProducer is an implementation of the OpProducer interface that reads replication +// operations from a ShardReplicationFSM, which tracks the state of replication operations. +type FSMOpProducer struct { + logger *logrus.Entry + fsm *ShardReplicationFSM + pollingInterval time.Duration + nodeId string +} + +// NewFSMOpProducer creates a new FSMOpProducer instance, which periodically polls the +// ShardReplicationFSM for operations assigned to the given node and pushes them to +// a channel for consumption by the replication engine.The polling interval controls +// how often the FSM is queried for replication operations. +// +// Additional configuration can be applied using optional FSMProducerOption functions. +func NewFSMOpProducer(logger *logrus.Logger, fsm *ShardReplicationFSM, pollingInterval time.Duration, nodeId string) *FSMOpProducer { + return &FSMOpProducer{ + logger: logger.WithFields(logrus.Fields{"component": "replication_producer", "action": replicationEngineLogAction}), + fsm: fsm, + pollingInterval: pollingInterval, + nodeId: nodeId, + } +} + +// Produce implements the OpProducer interface and starts producing operations for the given node. +// +// It uses a polling mechanism based on time.Ticker to periodically fetch all replication operations +// that should be executed on the current node. 
These operations are then sent to the provided output +// channel to be consumed by the OpConsumer. +// +// The function respects backpressure by using a bounded output channel. If the channel is full +// (i.e., the consumer is slow or blocked), the producer blocks while trying to send operations. +// While blocked, any additional ticks from the time.Ticker are dropped, as time.Ticker does not +// buffer ticks. This means the polling interval is effectively paused while the system is under load. +// +// This behavior is intentional: the producer only generates new work when the system has capacity +// to process it. Missing some ticks during backpressure is acceptable and avoids accumulating +// unprocessed work or overloading the system. +func (p *FSMOpProducer) Produce(ctx context.Context, out chan<- ShardReplicationOpAndStatus) error { + p.logger.WithFields(logrus.Fields{"node": p.nodeId, "polling_interval": p.pollingInterval}).Info("starting replication engine FSM producer") + + ticker := time.NewTicker(p.pollingInterval) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + p.logger.Info("replication engine producer cancel request, stopping FSM producer") + return ctx.Err() + case <-ticker.C: + ops := p.allOpsForNode(p.nodeId) + if len(ops) <= 0 { + continue + } + + p.logger.WithFields(logrus.Fields{"number_of_ops": len(ops)}).Debug("preparing op replication") + + for _, op := range ops { + status, ok := p.fsm.GetOpState(op) // Get most recent state to narrow the window for state change races between producer and consumer + if !ok { + p.logger.WithField("op", op).Debug("skipping op as it has no state stored in FSM. It may have been deleted in the meantime.") + continue + } + if !status.ShouldConsumeOps() { + continue + } + select { + case <-ctx.Done(): + return ctx.Err() + case out <- NewShardReplicationOpAndStatus(op, status): // Write replication operation to channel. 
+ } + } + } + } +} + +// allOpsForNode filters and returns replication operations assigned to the specified node. +// +// This method implements the core of the pull-based replication mechanism: +// +// 1. Pull Model: Each node is responsible for pulling data TO itself FROM other nodes +// +// 2. Node Responsibility: +// - Target node: Handles all replication operations which are in REGISTERED or HYDRATING +// - Source node: Only handles DEHYDRATING operations as that state needs data to be deleted +// +// 3. Operation States: +// - All states except for ABORTED and READY are processes +// +// Returns only operations that should be actively processed by this node. +func (p *FSMOpProducer) allOpsForNode(nodeId string) []ShardReplicationOp { + allNodeAsTargetOps := p.fsm.GetOpsForTarget(nodeId) + + nodeOpsSubset := make([]ShardReplicationOp, 0, len(allNodeAsTargetOps)) + for _, op := range allNodeAsTargetOps { + if opState, ok := p.fsm.GetOpState(op); ok && opState.ShouldConsumeOps() { + nodeOpsSubset = append(nodeOpsSubset, op) + } else if !ok { + p.logger.WithField("op", op).Warn("skipping op as it has no state stored in FSM. It may have been deleted in the meantime.") + } + } + return nodeOpsSubset +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/replication/producer_consumer_test.go b/platform/dbops/binaries/weaviate-src/cluster/replication/producer_consumer_test.go new file mode 100644 index 0000000000000000000000000000000000000000..f9698e69ebcff705c7e1a6ec829d9bc3b1385139 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/replication/producer_consumer_test.go @@ -0,0 +1,456 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package replication_test + +import ( + "context" + "fmt" + "sync" + "testing" + "time" + + "github.com/cenkalti/backoff/v4" + "github.com/go-openapi/strfmt" + "github.com/google/uuid" + "github.com/prometheus/client_golang/prometheus" + "github.com/sirupsen/logrus" + logrustest "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/cluster/proto/api" + "github.com/weaviate/weaviate/cluster/replication" + "github.com/weaviate/weaviate/cluster/replication/metrics" + "github.com/weaviate/weaviate/cluster/replication/types" + "github.com/weaviate/weaviate/cluster/schema" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/config/runtime" + "github.com/weaviate/weaviate/usecases/fakes" + "github.com/weaviate/weaviate/usecases/sharding" +) + +// TestConsumerStateChangeOrder tests that the consumer correctly transitions the state of the operation +func TestConsumerStateChangeOrder(t *testing.T) { + t.Parallel() + + opId := 0 + + testCases := []struct { + name string + transferType api.ShardReplicationTransferType + setupMocksFunc func(wg *sync.WaitGroup, mockFSMUpdater *types.MockFSMUpdater, mockReplicaCopier *types.MockReplicaCopier) + }{ + { + name: "All operations are processed in order in copy mode", + transferType: api.COPY, + setupMocksFunc: func(wg *sync.WaitGroup, mockFSMUpdater *types.MockFSMUpdater, mockReplicaCopier *types.MockReplicaCopier) { + var once sync.Once + wg.Add(1) + mockFSMUpdater.EXPECT(). + WaitForUpdate(mock.Anything, mock.Anything). + Return(nil) + mockFSMUpdater.EXPECT(). + ReplicationGetReplicaOpStatus(mock.Anything, uint64(opId)). + Return(api.REGISTERED, nil). + Times(1) + mockFSMUpdater.EXPECT(). + ReplicationUpdateReplicaOpStatus(mock.Anything, uint64(opId), api.HYDRATING). + Return(nil) + mockFSMUpdater.EXPECT(). 
+ ReplicationUpdateReplicaOpStatus(mock.Anything, uint64(opId), api.FINALIZING). + Return(nil) + mockReplicaCopier.EXPECT(). + CopyReplicaFiles(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(nil) + mockReplicaCopier.EXPECT(). + LoadLocalShard(mock.Anything, mock.Anything, mock.Anything). + Return(nil) + mockReplicaCopier.EXPECT(). + InitAsyncReplicationLocally(mock.Anything, mock.Anything, mock.Anything). + Return(nil) + mockReplicaCopier.EXPECT(). + AddAsyncReplicationTargetNode(mock.Anything, mock.Anything, mock.Anything). + Return(nil) + mockReplicaCopier.EXPECT(). + RemoveAsyncReplicationTargetNode(mock.Anything, mock.Anything). + Return(nil).Maybe() + mockReplicaCopier.EXPECT(). + RevertAsyncReplicationLocally(mock.Anything, mock.Anything, mock.Anything). + Return(nil) + mockReplicaCopier.EXPECT(). + AsyncReplicationStatus(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + // Simulate that the async replication is already done + Return(models.AsyncReplicationStatus{StartDiffTimeUnixMillis: time.Now().Add(time.Second * 200).UnixMilli(), ObjectsPropagated: 0}, nil) + mockFSMUpdater.EXPECT(). + ReplicationAddReplicaToShard(mock.Anything, mock.Anything, mock.Anything, mock.Anything, uint64(opId)). + Return(uint64(0), nil) + mockFSMUpdater.EXPECT(). + SyncShard(mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(uint64(0), nil) + mockFSMUpdater.EXPECT(). + ReplicationUpdateReplicaOpStatus(mock.Anything, uint64(opId), api.READY). + Run(func(ctx context.Context, opId uint64, state api.ShardReplicationState) { + once.Do(wg.Done) + }).Return(nil).Maybe() + }, + }, + { + name: "consumer resumes on state change failure", + transferType: api.COPY, + setupMocksFunc: func(wg *sync.WaitGroup, mockFSMUpdater *types.MockFSMUpdater, mockReplicaCopier *types.MockReplicaCopier) { + var once sync.Once + wg.Add(1) + mockFSMUpdater.EXPECT(). + WaitForUpdate(mock.Anything, mock.Anything). 
+ Return(nil) + mockFSMUpdater.EXPECT(). + ReplicationGetReplicaOpStatus(mock.Anything, uint64(opId)). + Return(api.REGISTERED, nil). + Times(2) // equal to the op plus number of times the op failed + mockFSMUpdater.EXPECT(). + ReplicationUpdateReplicaOpStatus(mock.Anything, uint64(opId), api.HYDRATING). + Return(fmt.Errorf("failed to update state")). + Times(1) + mockFSMUpdater.EXPECT(). + ReplicationUpdateReplicaOpStatus(mock.Anything, uint64(opId), api.HYDRATING). + Return(nil) + mockFSMUpdater.EXPECT(). + ReplicationUpdateReplicaOpStatus(mock.Anything, uint64(opId), api.FINALIZING). + Return(nil) + mockReplicaCopier.EXPECT(). + CopyReplicaFiles(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(nil) + mockReplicaCopier.EXPECT(). + LoadLocalShard(mock.Anything, mock.Anything, mock.Anything). + Return(nil) + mockReplicaCopier.EXPECT(). + InitAsyncReplicationLocally(mock.Anything, mock.Anything, mock.Anything). + Return(nil) + mockReplicaCopier.EXPECT(). + AddAsyncReplicationTargetNode(mock.Anything, mock.Anything, mock.Anything). + Return(nil) + mockReplicaCopier.EXPECT(). + RemoveAsyncReplicationTargetNode(mock.Anything, mock.Anything). + Return(nil).Maybe() + mockReplicaCopier.EXPECT(). + RevertAsyncReplicationLocally(mock.Anything, mock.Anything, mock.Anything). + Return(nil) + mockReplicaCopier.EXPECT(). + AsyncReplicationStatus(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + // Simulate that the async replication is already done + Return(models.AsyncReplicationStatus{StartDiffTimeUnixMillis: time.Now().Add(time.Second * 200).UnixMilli(), ObjectsPropagated: 0}, nil) + mockFSMUpdater.EXPECT(). + ReplicationAddReplicaToShard(mock.Anything, mock.Anything, mock.Anything, mock.Anything, uint64(opId)). + Return(uint64(0), nil) + mockFSMUpdater.EXPECT(). + SyncShard(mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(uint64(0), nil) + mockFSMUpdater.EXPECT(). 
+ ReplicationUpdateReplicaOpStatus(mock.Anything, uint64(opId), api.READY). + Run(func(ctx context.Context, id uint64, state api.ShardReplicationState) { + once.Do(wg.Done) + }).Return(nil).Maybe() + }, + }, + { + name: "consumer resumes on replica copier failures", + transferType: api.COPY, + setupMocksFunc: func(wg *sync.WaitGroup, mockFSMUpdater *types.MockFSMUpdater, mockReplicaCopier *types.MockReplicaCopier) { + var once sync.Once + wg.Add(1) + mockFSMUpdater.EXPECT(). + WaitForUpdate(mock.Anything, mock.Anything). + Return(nil) + mockFSMUpdater.EXPECT(). + ReplicationGetReplicaOpStatus(mock.Anything, uint64(opId)). + Return(api.REGISTERED, nil). + Times(2) // equal to the op plus number of times the op failed + mockFSMUpdater.EXPECT(). + ReplicationUpdateReplicaOpStatus(mock.Anything, uint64(opId), api.HYDRATING). + Return(nil) + mockFSMUpdater.EXPECT(). + ReplicationUpdateReplicaOpStatus(mock.Anything, uint64(opId), api.FINALIZING). + Return(nil) + mockReplicaCopier.EXPECT(). + CopyReplicaFiles(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(fmt.Errorf("failed to copy replica")). + Times(1) + mockFSMUpdater.EXPECT(). + ReplicationRegisterError(mock.Anything, uint64(opId), fmt.Errorf("failed to copy replica").Error()). + Return(nil) + mockReplicaCopier.EXPECT(). + CopyReplicaFiles(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(nil) + mockReplicaCopier.EXPECT(). + LoadLocalShard(mock.Anything, mock.Anything, mock.Anything). + Return(nil) + mockReplicaCopier.EXPECT(). + InitAsyncReplicationLocally(mock.Anything, mock.Anything, mock.Anything). + Return(nil) + mockReplicaCopier.EXPECT(). + AddAsyncReplicationTargetNode(mock.Anything, mock.Anything, mock.Anything). + Return(nil) + mockReplicaCopier.EXPECT(). + RemoveAsyncReplicationTargetNode(mock.Anything, mock.Anything). + Return(nil).Maybe() + mockReplicaCopier.EXPECT(). 
+ RevertAsyncReplicationLocally(mock.Anything, mock.Anything, mock.Anything). + Return(nil) + mockReplicaCopier.EXPECT(). + AsyncReplicationStatus(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + // Simulate that the async replication is already done + Return(models.AsyncReplicationStatus{StartDiffTimeUnixMillis: time.Now().Add(time.Second * 200).UnixMilli(), ObjectsPropagated: 0}, nil) + mockFSMUpdater.EXPECT(). + ReplicationAddReplicaToShard(mock.Anything, mock.Anything, mock.Anything, mock.Anything, uint64(opId)). + Return(uint64(0), nil) + mockFSMUpdater.EXPECT(). + SyncShard(mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(uint64(0), nil) + mockFSMUpdater.EXPECT(). + ReplicationUpdateReplicaOpStatus(mock.Anything, uint64(opId), api.READY). + Run(func(ctx context.Context, opId uint64, state api.ShardReplicationState) { + once.Do(wg.Done) + }).Return(nil).Maybe() + }, + }, + { + name: "consumer resumes on async replication failures", + transferType: api.COPY, + setupMocksFunc: func(wg *sync.WaitGroup, mockFSMUpdater *types.MockFSMUpdater, mockReplicaCopier *types.MockReplicaCopier) { + var once sync.Once + wg.Add(1) + mockFSMUpdater.EXPECT(). + WaitForUpdate(mock.Anything, mock.Anything). + Return(nil) + mockFSMUpdater.EXPECT(). + ReplicationGetReplicaOpStatus(mock.Anything, uint64(opId)). + Return(api.REGISTERED, nil) + mockFSMUpdater.EXPECT(). + ReplicationUpdateReplicaOpStatus(mock.Anything, uint64(opId), api.HYDRATING). + Return(nil) + mockFSMUpdater.EXPECT(). + ReplicationUpdateReplicaOpStatus(mock.Anything, uint64(opId), api.FINALIZING). + Return(nil) + mockReplicaCopier.EXPECT(). + CopyReplicaFiles(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(nil) + mockReplicaCopier.EXPECT(). + LoadLocalShard(mock.Anything, mock.Anything, mock.Anything). + Return(nil) + mockReplicaCopier.EXPECT(). + InitAsyncReplicationLocally(mock.Anything, mock.Anything, mock.Anything). 
+ Return(fmt.Errorf("failed to initialize async replication")). + Times(1) + mockFSMUpdater.EXPECT(). + ReplicationRegisterError(mock.Anything, uint64(opId), fmt.Errorf("failed to initialize async replication").Error()). + Return(nil) + mockReplicaCopier.EXPECT(). + InitAsyncReplicationLocally(mock.Anything, mock.Anything, mock.Anything). + Return(nil) + mockReplicaCopier.EXPECT(). + AddAsyncReplicationTargetNode(mock.Anything, mock.Anything, mock.Anything). + Return(fmt.Errorf("failed to set async replication target node")). + Times(1) + mockReplicaCopier.EXPECT(). + RemoveAsyncReplicationTargetNode(mock.Anything, mock.Anything). + Return(nil).Maybe() + mockReplicaCopier.EXPECT(). + RevertAsyncReplicationLocally(mock.Anything, mock.Anything, mock.Anything). + Return(nil) + mockFSMUpdater.EXPECT(). + ReplicationRegisterError(mock.Anything, uint64(opId), fmt.Errorf("failed to set async replication target node").Error()). + Return(nil) + mockReplicaCopier.EXPECT(). + AddAsyncReplicationTargetNode(mock.Anything, mock.Anything, mock.Anything). + Return(nil) + // Async replication status triggers an internal retry and doesn't register an error + mockReplicaCopier.EXPECT(). + AsyncReplicationStatus(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(models.AsyncReplicationStatus{}, fmt.Errorf("failed to get async replication status")). + Times(1) + mockReplicaCopier.EXPECT(). + AsyncReplicationStatus(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + // Simulate that the async replication is already done + Return(models.AsyncReplicationStatus{StartDiffTimeUnixMillis: time.Now().Add(time.Second * 200).UnixMilli(), ObjectsPropagated: 0}, nil) + mockFSMUpdater.EXPECT(). + ReplicationAddReplicaToShard(mock.Anything, mock.Anything, mock.Anything, mock.Anything, uint64(opId)). + Return(uint64(0), nil) + mockFSMUpdater.EXPECT(). + SyncShard(mock.Anything, mock.Anything, mock.Anything, mock.Anything). 
+ Return(uint64(0), nil) + mockFSMUpdater.EXPECT(). + ReplicationUpdateReplicaOpStatus(mock.Anything, uint64(opId), api.READY). + Run(func(ctx context.Context, opId uint64, state api.ShardReplicationState) { + once.Do(wg.Done) + }).Return(nil).Maybe() + }, + }, + { + name: "All operations are processed in order in move mode", + transferType: api.MOVE, + setupMocksFunc: func(wg *sync.WaitGroup, mockFSMUpdater *types.MockFSMUpdater, mockReplicaCopier *types.MockReplicaCopier) { + var once sync.Once + wg.Add(1) + mockFSMUpdater.EXPECT(). + WaitForUpdate(mock.Anything, mock.Anything). + Return(nil) + mockFSMUpdater.EXPECT(). + ReplicationGetReplicaOpStatus(mock.Anything, uint64(opId)). + Return(api.REGISTERED, nil). + Times(1) + mockFSMUpdater.EXPECT(). + ReplicationUpdateReplicaOpStatus(mock.Anything, uint64(opId), api.HYDRATING). + Return(nil) + mockFSMUpdater.EXPECT(). + ReplicationUpdateReplicaOpStatus(mock.Anything, uint64(opId), api.FINALIZING). + Return(nil) + mockReplicaCopier.EXPECT(). + CopyReplicaFiles(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(nil) + mockReplicaCopier.EXPECT(). + LoadLocalShard(mock.Anything, mock.Anything, mock.Anything). + Return(nil) + mockReplicaCopier.EXPECT(). + InitAsyncReplicationLocally(mock.Anything, mock.Anything, mock.Anything). + Return(nil) + mockReplicaCopier.EXPECT(). + AddAsyncReplicationTargetNode(mock.Anything, mock.Anything, mock.Anything). + Return(nil) + mockReplicaCopier.EXPECT(). + RemoveAsyncReplicationTargetNode(mock.Anything, mock.Anything). + Return(nil).Maybe() + mockReplicaCopier.EXPECT(). + RevertAsyncReplicationLocally(mock.Anything, mock.Anything, mock.Anything). + Return(nil) + mockReplicaCopier.EXPECT(). + AsyncReplicationStatus(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). 
+ // Simulate that the async replication is already done + Return(models.AsyncReplicationStatus{StartDiffTimeUnixMillis: time.Now().Add(time.Second * 200).UnixMilli(), ObjectsPropagated: 0}, nil) + mockFSMUpdater.EXPECT(). + ReplicationAddReplicaToShard(mock.Anything, mock.Anything, mock.Anything, mock.Anything, uint64(opId)). + Return(uint64(0), nil) + mockFSMUpdater.EXPECT(). + SyncShard(mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(uint64(0), nil) + mockFSMUpdater.EXPECT(). + ReplicationUpdateReplicaOpStatus(mock.Anything, uint64(opId), api.DEHYDRATING). + Return(nil) + mockFSMUpdater.EXPECT(). + DeleteReplicaFromShard(mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(uint64(0), nil) + mockFSMUpdater.EXPECT(). + ReplicationUpdateReplicaOpStatus(mock.Anything, uint64(opId), api.READY). + Run(func(ctx context.Context, opId uint64, state api.ShardReplicationState) { + once.Do(wg.Done) + }).Return(nil).Maybe() + }, + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + var wg sync.WaitGroup + logger, _ := logrustest.NewNullLogger() + mockFSMUpdater := types.NewMockFSMUpdater(t) + mockReplicaCopier := types.NewMockReplicaCopier(t) + reg := prometheus.NewPedanticRegistry() + parser := fakes.NewMockParser() + parser.On("ParseClass", mock.Anything).Return(nil) + schemaManager := schema.NewSchemaManager("test-node", nil, parser, prometheus.NewPedanticRegistry(), logrus.New()) + schemaReader := schemaManager.NewSchemaReader() + manager := replication.NewManager(schemaReader, reg) + + ctx := t.Context() + replicateRequest := &api.ReplicationReplicateShardRequest{ + Uuid: strfmt.UUID(uuid.New().String()), + SourceCollection: "TestCollection", + SourceShard: "shard1", + SourceNode: "node1", + TargetNode: "node2", + TransferType: tc.transferType.String(), + } + + consumer := replication.NewCopyOpConsumer( + logger, + mockFSMUpdater, + mockReplicaCopier, + 
replicateRequest.TargetNode, + &backoff.StopBackOff{}, + replication.NewOpsCache(), + time.Second*20, + 1, + runtime.NewDynamicValue(time.Second*100), + metrics.NewReplicationEngineOpsCallbacksBuilder().Build(), + schemaReader, + ) + tc.setupMocksFunc(&wg, mockFSMUpdater, mockReplicaCopier) + + producer := replication.NewFSMOpProducer(logger, manager.GetReplicationFSM(), time.Second*1, replicateRequest.TargetNode) + + // Setup the class + shard in the schema + // We only use the manager + fsm + schema to "kickstart" the producer/consumer read loop, all the subsequent + // operations are triggered by the producer/consumer themselves and we use the mocks to verify the state changes + schemaManager.AddClass(buildApplyRequest(replicateRequest.SourceCollection, api.ApplyRequest_TYPE_ADD_CLASS, api.AddClassRequest{ + Class: &models.Class{Class: replicateRequest.SourceCollection, MultiTenancyConfig: &models.MultiTenancyConfig{Enabled: false}}, + State: &sharding.State{ + Physical: map[string]sharding.Physical{ + replicateRequest.SourceShard: {BelongsToNodes: []string{replicateRequest.SourceNode}}, + "shard2": {BelongsToNodes: []string{replicateRequest.TargetNode}}, + }, + }, + }), "node1", true, false) + // Start a replicate operation + err := manager.Replicate(0, buildApplyRequest(replicateRequest.SourceCollection, api.ApplyRequest_TYPE_REPLICATION_REPLICATE, replicateRequest)) + require.NoError(t, err) + + targetOpsChan := make(chan replication.ShardReplicationOpAndStatus, 1) + defer close(targetOpsChan) + ctx, cancel := context.WithCancel(ctx) + + consumerDoneChan := make(chan error, 1) + producerDoneChan := make(chan error, 1) + go func() { + producerDoneChan <- producer.Produce(ctx, targetOpsChan) + }() + go func() { + consumerDoneChan <- consumer.Consume(ctx, targetOpsChan) + }() + + // Ensure that we wait for the waitgroup up to a given amount of time + waitChan := make(chan struct{}) + go func() { + wg.Wait() + waitChan <- struct{}{} + }() + select { + case 
<-time.After(30 * time.Second): + cancel() + t.Fatal("Test timed out waiting for operation completion") + case <-waitChan: + cancel() + // This is here just to make sure the test does not run indefinitely + } + + err = <-producerDoneChan + require.ErrorIs(t, err, context.Canceled) + err = <-consumerDoneChan + require.ErrorIs(t, err, context.Canceled) + + // Assert that the mock expectations were met + mockFSMUpdater.AssertExpectations(t) + mockReplicaCopier.AssertExpectations(t) + require.True(t, mockFSMUpdater.AssertCalled(t, "ReplicationUpdateReplicaOpStatus", mock.Anything, uint64(opId), api.READY), "READY should be called at least once") + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/replication/shard_replication_apply.go b/platform/dbops/binaries/weaviate-src/cluster/replication/shard_replication_apply.go new file mode 100644 index 0000000000000000000000000000000000000000..e2b33f80eca5452edc94542fb6627ae42bf6caf1 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/replication/shard_replication_apply.go @@ -0,0 +1,546 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package replication + +import ( + "errors" + "fmt" + "slices" + "time" + + "github.com/go-openapi/strfmt" + "github.com/hashicorp/go-multierror" + "github.com/weaviate/weaviate/cluster/proto/api" + "github.com/weaviate/weaviate/cluster/replication/types" +) + +var ErrShardAlreadyReplicating = errors.New("replica is already being replicated") + +func (s *ShardReplicationFSM) Replicate(id uint64, c *api.ReplicationReplicateShardRequest) error { + s.opsLock.Lock() + defer s.opsLock.Unlock() + + op := ShardReplicationOp{ + ID: id, + UUID: c.Uuid, + SourceShard: newShardFQDN(c.SourceNode, c.SourceCollection, c.SourceShard), + TargetShard: newShardFQDN(c.TargetNode, c.SourceCollection, c.SourceShard), + TransferType: api.ShardReplicationTransferType(c.TransferType), + StartTimeUnixMs: time.Now().UnixMilli(), + } + return s.writeOpIntoFSM(op, NewShardReplicationStatus(api.REGISTERED)) +} + +func (s *ShardReplicationFSM) RegisterError(c *api.ReplicationRegisterErrorRequest) error { + s.opsLock.Lock() + defer s.opsLock.Unlock() + + op, ok := s.opsById[c.Id] + if !ok { + return fmt.Errorf("could not find op %d: %w", c.Id, types.ErrReplicationOperationNotFound) + } + status, ok := s.statusById[op.ID] + if !ok { + return fmt.Errorf("could not find op status for op %d", c.Id) + } + if err := status.AddError(c.Error, c.TimeUnixMs); err != nil { + return err + } + s.statusById[op.ID] = status + + return nil +} + +// writeOpIntoFSM writes the op with status into the FSM. 
It *does* not holds the lock onto the maps so the callee must make sure the lock +// is held +func (s *ShardReplicationFSM) writeOpIntoFSM(op ShardReplicationOp, status ShardReplicationOpStatus) error { + if _, ok := s.opsByTargetFQDN[op.TargetShard]; ok { + return fmt.Errorf("op %s in targetFQDN: %w", op.UUID, ErrShardAlreadyReplicating) + } + + if existingOps, ok := s.opsBySourceFQDN[op.SourceShard]; ok { + for _, existingOp := range existingOps { + // First check the status of the existing op. If it's READY or CANCELLED we can accept a new op + // If it's ongoing we need to check if it's a move, in which case we can't accept any new op + // Otherwise we can accept a copy if the existing op is also a copy + if existingOpStatus, ok := s.statusById[existingOp.ID]; !ok { + // This should never happen + return fmt.Errorf("could not find op status for op %d", existingOp.ID) + } else if existingOpStatus.GetCurrentState() == api.CANCELLED { + continue + } else if existingOpStatus.GetCurrentState() == api.READY && existingOp.TransferType == api.COPY { + continue + } + + // If any of the ops we're handling is a move we can't accept any new op + if existingOp.TransferType == api.MOVE { + return fmt.Errorf("existing op %s is a MOVE: %w", op.UUID, ErrShardAlreadyReplicating) + } + + // At this point we know the existing op is a copy, if our new op is a move we can't accept it + if op.TransferType == api.MOVE { + return fmt.Errorf("existing op %s is a COPY, but new op is a MOVE: %w", op.UUID, ErrShardAlreadyReplicating) + } + + // Existing op is an ongoing copy, our new op is also a copy, we can accept it + } + } + + s.idsByUuid[op.UUID] = op.ID + s.opsBySource[op.SourceShard.NodeId] = append(s.opsBySource[op.SourceShard.NodeId], op) + s.opsByTarget[op.TargetShard.NodeId] = append(s.opsByTarget[op.TargetShard.NodeId], op) + // Make sure the nested map exists and is initialized + if _, ok := s.opsByCollectionAndShard[op.SourceShard.CollectionId]; !ok { + 
s.opsByCollectionAndShard[op.SourceShard.CollectionId] = make(map[string][]ShardReplicationOp) + } + s.opsByCollectionAndShard[op.SourceShard.CollectionId][op.SourceShard.ShardId] = append(s.opsByCollectionAndShard[op.SourceShard.CollectionId][op.SourceShard.ShardId], op) + s.opsByCollection[op.SourceShard.CollectionId] = append(s.opsByCollection[op.SourceShard.CollectionId], op) + s.opsByTargetFQDN[op.TargetShard] = op + s.opsBySourceFQDN[op.SourceShard] = append(s.opsBySourceFQDN[op.SourceShard], op) + s.opsById[op.ID] = op + s.statusById[op.ID] = status + + s.opsByStateGauge.WithLabelValues(status.GetCurrentState().String()).Inc() + + return nil +} + +func (s *ShardReplicationFSM) UpdateReplicationOpStatus(c *api.ReplicationUpdateOpStateRequest) error { + s.opsLock.Lock() + defer s.opsLock.Unlock() + + op, ok := s.opsById[c.Id] + if !ok { + return fmt.Errorf("could not find op %d: %w", c.Id, types.ErrReplicationOperationNotFound) + } + status, ok := s.statusById[op.ID] + if !ok { + return fmt.Errorf("could not find op status for op %d", c.Id) + } + + if status.GetCurrentState() == api.CANCELLED { + return fmt.Errorf("cannot update op %d state, it is already cancelled", c.Id) + } + + s.opsByStateGauge.WithLabelValues(status.GetCurrentState().String()).Dec() + status.ChangeState(c.State) + s.statusById[op.ID] = status + s.opsByStateGauge.WithLabelValues(status.GetCurrentState().String()).Inc() + + return nil +} + +func (s *ShardReplicationFSM) StoreSchemaVersion(c *api.ReplicationStoreSchemaVersionRequest) error { + s.opsLock.Lock() + defer s.opsLock.Unlock() + + status, ok := s.statusById[c.Id] + if !ok { + return fmt.Errorf("could not find op status for op %d: %w", c.Id, types.ErrReplicationOperationNotFound) + } + status.SchemaVersion = c.SchemaVersion + s.statusById[c.Id] = status + + return nil +} + +func (s *ShardReplicationFSM) SetUnCancellable(id uint64) error { + s.opsLock.Lock() + defer s.opsLock.Unlock() + + status, ok := s.statusById[id] + if !ok { + 
return fmt.Errorf("could not find op status for op %d: %w", id, types.ErrReplicationOperationNotFound) + } + status.UnCancellable = true + s.statusById[id] = status + + return nil +} + +func (s *ShardReplicationFSM) GetReplicationOpUUIDFromId(id uint64) (strfmt.UUID, error) { + s.opsLock.RLock() + defer s.opsLock.RUnlock() + + op, ok := s.opsById[id] + if !ok { + return "", fmt.Errorf("%w: %d", types.ErrReplicationOperationNotFound, id) + } + return op.UUID, nil +} + +func (s *ShardReplicationFSM) CancelReplication(c *api.ReplicationCancelRequest) error { + s.opsLock.Lock() + defer s.opsLock.Unlock() + + id, ok := s.idsByUuid[c.Uuid] + if !ok { + return fmt.Errorf("%w: %s", types.ErrReplicationOperationNotFound, c.Uuid) + } + op, ok := s.opsById[id] + if !ok { + return fmt.Errorf("could not find op %d: %w", id, types.ErrReplicationOperationNotFound) + } + status, ok := s.statusById[op.ID] + if !ok { + return fmt.Errorf("could not find op status for op %d", id) + } + + // Only allow to cancel ops if they are cancellable (before being added to sharding state) + if status.UnCancellable { + return types.ErrCancellationImpossible + } + + status.TriggerCancellation() + s.statusById[op.ID] = status + + return nil +} + +func (s *ShardReplicationFSM) DeleteReplication(c *api.ReplicationDeleteRequest) error { + s.opsLock.Lock() + defer s.opsLock.Unlock() + + id, ok := s.idsByUuid[c.Uuid] + if !ok { + return fmt.Errorf("could not find op %s: %w", c.Uuid, types.ErrReplicationOperationNotFound) + } + op, ok := s.opsById[id] + if !ok { + return fmt.Errorf("could not find op %d: %w", id, types.ErrReplicationOperationNotFound) + } + status, ok := s.statusById[op.ID] + if !ok { + return fmt.Errorf("could not find op status for op %d", id) + } + + // Only allow to delete ops if they are cancellable (before being added to sharding state) and not READY + if status.UnCancellable && status.GetCurrentState() != api.READY { + return types.ErrDeletionImpossible + } + + 
status.TriggerDeletion() + s.statusById[op.ID] = status + + return nil +} + +func (s *ShardReplicationFSM) DeleteAllReplications(c *api.ReplicationDeleteAllRequest) error { + s.opsLock.Lock() + defer s.opsLock.Unlock() + + for id, status := range s.statusById { + if status.UnCancellable && status.GetCurrentState() != api.READY { + continue + } + status.TriggerDeletion() + s.statusById[id] = status + } + return nil +} + +func (s *ShardReplicationFSM) RemoveReplicationOp(c *api.ReplicationRemoveOpRequest) error { + s.opsLock.Lock() + defer s.opsLock.Unlock() + + return s.removeReplicationOp(c.Id) +} + +func (s *ShardReplicationFSM) CancellationComplete(c *api.ReplicationCancellationCompleteRequest) error { + s.opsLock.Lock() + defer s.opsLock.Unlock() + + op, ok := s.opsById[c.Id] + if !ok { + return fmt.Errorf("could not find op %d: %w", c.Id, types.ErrReplicationOperationNotFound) + } + status, ok := s.statusById[op.ID] + if !ok { + return fmt.Errorf("could not find op status for op %d", c.Id) + } + status.CompleteCancellation() + s.statusById[op.ID] = status + + return nil +} + +func (s *ShardReplicationFSM) DeleteReplicationsByCollection(collection string) error { + s.opsLock.Lock() + defer s.opsLock.Unlock() + + ops, ok := s.opsByCollection[collection] + if !ok { + return nil // nothing to do + } + + for _, op := range ops { + status, ok := s.statusById[op.ID] + if !ok { + return fmt.Errorf("could not find op status for op %d: %w", op.ID, types.ErrReplicationOperationNotFound) + } + status.TriggerDeletion() + s.statusById[op.ID] = status + } + + return nil +} + +func (s *ShardReplicationFSM) DeleteReplicationsByTenants(collection string, tenants []string) error { + s.opsLock.Lock() + defer s.opsLock.Unlock() + + ops := make([]ShardReplicationOp, 0) + for _, tenant := range tenants { + opsPerTenant, ok := s.opsByCollectionAndShard[collection][tenant] + if !ok { + continue + } + ops = append(ops, opsPerTenant...) 
+ } + if len(ops) == 0 { + return nil // nothing to do + } + + for _, op := range ops { + status, ok := s.statusById[op.ID] + if !ok { + return fmt.Errorf("could not find op status for op %d: %w", op.ID, types.ErrReplicationOperationNotFound) + } + status.TriggerDeletion() + s.statusById[op.ID] = status + } + + return nil +} + +func (s *ShardReplicationFSM) ForceDeleteAll() error { + s.opsLock.Lock() + defer s.opsLock.Unlock() + + for id := range s.opsById { + err := s.removeReplicationOp(id) + if err != nil { + return fmt.Errorf("could not remove op %d: %w", id, err) + } + } + + return nil +} + +func (s *ShardReplicationFSM) ForceDeleteByCollection(collection string) error { + s.opsLock.Lock() + defer s.opsLock.Unlock() + + ops, ok := s.opsByCollection[collection] + if !ok { + return nil // nothing to do + } + + for _, op := range ops { + err := s.removeReplicationOp(op.ID) + if err != nil { + return fmt.Errorf("could not remove op %d: %w", op.ID, err) + } + } + + return nil +} + +func (s *ShardReplicationFSM) ForceDeleteByCollectionAndShard(collection, shard string) error { + s.opsLock.Lock() + defer s.opsLock.Unlock() + + collectionOps, ok := s.opsByCollectionAndShard[collection] + if !ok { + return nil // nothing to do + } + + shardOps, ok := collectionOps[shard] + if !ok { + return nil // nothing to do + } + + for _, op := range shardOps { + err := s.removeReplicationOp(op.ID) + if err != nil { + return fmt.Errorf("could not remove op %d: %w", op.ID, err) + } + } + + return nil +} + +func (s *ShardReplicationFSM) ForceDeleteByTargetNode(node string) error { + s.opsLock.Lock() + defer s.opsLock.Unlock() + + ops, ok := s.opsByTarget[node] + if !ok { + return nil // nothing to do + } + + for _, op := range ops { + err := s.removeReplicationOp(op.ID) + if err != nil { + return fmt.Errorf("could not remove op %d: %w", op.ID, err) + } + } + + return nil +} + +func (s *ShardReplicationFSM) ForceDeleteByUuid(uuid strfmt.UUID) error { + s.opsLock.Lock() + defer 
s.opsLock.Unlock() + + id, ok := s.idsByUuid[uuid] + if !ok { + return fmt.Errorf("could not find op with uuid %s: %w", uuid, types.ErrReplicationOperationNotFound) + } + + if err := s.removeReplicationOp(id); err != nil { + return fmt.Errorf("could not remove op %d: %w", id, err) + } + + return nil +} + +func (s *ShardReplicationFSM) hasOngoingSourceReplication(sourceFQDN shardFQDN) bool { + ops, ok := s.opsBySourceFQDN[sourceFQDN] + if !ok { + return false + } + + for _, op := range ops { + status, ok := s.statusById[op.ID] + if !ok { + continue + } + + if status.ShouldConsumeOps() { + return true + } else { + continue + } + } + return false +} + +func (s *ShardReplicationFSM) hasOngoingTargetReplication(targetFQDN shardFQDN) bool { + op, ok := s.opsByTargetFQDN[targetFQDN] + if !ok { + return false + } + status, ok := s.statusById[op.ID] + if !ok { + return false + } + return status.ShouldConsumeOps() +} + +func (s *ShardReplicationFSM) HasOngoingReplication(collection string, shard string, replica string) bool { + s.opsLock.RLock() + defer s.opsLock.RUnlock() + + FQDN := newShardFQDN(replica, collection, shard) + return s.hasOngoingSourceReplication(FQDN) || s.hasOngoingTargetReplication(FQDN) +} + +// TODO: Improve the error handling in that function +func (s *ShardReplicationFSM) removeReplicationOp(id uint64) error { + var err error + op, ok := s.opsById[id] + if !ok { + return fmt.Errorf("could not find op %d: %w", id, types.ErrReplicationOperationNotFound) + } + + ops, ok := s.opsByTarget[op.TargetShard.NodeId] + if !ok { + err = multierror.Append(err, fmt.Errorf("could not find op %d in ops by target %s, this should not happen", op.ID, op.SourceShard.NodeId)) + } + opsReplace, ok := findAndDeleteOp(op.ID, ops) + if ok { + s.opsByTarget[op.TargetShard.NodeId] = opsReplace + } + + ops, ok = s.opsBySource[op.SourceShard.NodeId] + if !ok { + err = multierror.Append(err, fmt.Errorf("could not find op %d in ops by source %s, this should not happen", op.ID, 
op.TargetShard.NodeId)) + } + opsReplace, ok = findAndDeleteOp(op.ID, ops) + if ok { + s.opsBySource[op.SourceShard.NodeId] = opsReplace + } + + ops, ok = s.opsByCollection[op.SourceShard.CollectionId] + if !ok { + err = multierror.Append(err, fmt.Errorf("could not find op %d in ops by collection %s, this should not happen", op.ID, op.SourceShard.CollectionId)) + } + opsReplace, ok = findAndDeleteOp(op.ID, ops) + if ok { + s.opsByCollection[op.SourceShard.CollectionId] = opsReplace + } + + ops, ok = s.opsBySourceFQDN[op.SourceShard] + if !ok { + err = multierror.Append(err, fmt.Errorf("could not find op in ops by source fqdn, this should not happen")) + } + opsReplace, ok = findAndDeleteOp(op.ID, ops) + if ok { + s.opsBySourceFQDN[op.SourceShard] = opsReplace + } + + shardOps, ok := s.opsByCollectionAndShard[op.SourceShard.CollectionId] + if !ok { + err = multierror.Append(err, fmt.Errorf("could not find op in ops by collection and shard, this should not happen")) + } else { + ops, ok = shardOps[op.SourceShard.ShardId] + if !ok { + err = multierror.Append(err, fmt.Errorf("could not find op in ops by shard, this should not happen")) + } + opsReplace, ok = findAndDeleteOp(op.ID, ops) + if ok { + s.opsByCollectionAndShard[op.SourceShard.CollectionId][op.SourceShard.ShardId] = opsReplace + } + } + + status, ok := s.statusById[op.ID] + if !ok { + err = multierror.Append(err, fmt.Errorf("could not find op status for op %d", id)) + } else { + s.opsByStateGauge.WithLabelValues(status.GetCurrentState().String()).Dec() + } + + delete(s.idsByUuid, op.UUID) + delete(s.opsByTargetFQDN, op.TargetShard) + delete(s.opsById, op.ID) + delete(s.statusById, op.ID) + + return err +} + +func findAndDeleteOp(id uint64, ops []ShardReplicationOp) ([]ShardReplicationOp, bool) { + indexToDelete := 0 + ok := false + // Iterate by hand as the slices should be kept small enough & we can't use the `slices` package binary search as we have a custom type + // in the slice and the Comparable 
	// constraint only works on primitive type
	for i, op := range ops {
		if op.ID == id {
			ok = true
			indexToDelete = i
		}
	}
	if ok {
		ops = slices.Delete(ops, indexToDelete, indexToDelete+1)
	}
	return ops, ok
}
diff --git a/platform/dbops/binaries/weaviate-src/cluster/replication/shard_replication_engine.go b/platform/dbops/binaries/weaviate-src/cluster/replication/shard_replication_engine.go
new file mode 100644
index 0000000000000000000000000000000000000000..6234f1d5f981083fd344d6de344607adfe91f8a5
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/cluster/replication/shard_replication_engine.go
@@ -0,0 +1,270 @@
//                           _       _
//  __      _____  __ ___   ___  __ _| |_ ___
//  \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
//   \ V  V /  __/ (_| |\ V /| | (_| | || __/
//    \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
//
//  Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
//
//  CONTACT: hello@weaviate.io
//

package replication

import (
	"context"
	"errors"
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	"github.com/weaviate/weaviate/cluster/replication/metrics"

	"github.com/sirupsen/logrus"
	enterrors "github.com/weaviate/weaviate/entities/errors"
)

const (
	// replicationEngineLogAction tags every log line emitted by the engine.
	replicationEngineLogAction = "replication_engine"
)

// ShardReplicationEngine coordinates the replication of shard data between nodes in a distributed system.
//
// It uses a producer-consumer pattern where replication operations are pulled from a source (e.g., FSM)
// and dispatched to workers for execution, enabling parallel processing with built-in backpressure implemented by means
// of a bounded channel.
//
// The engine ensures that operations are processed concurrently, but with limits to avoid overloading the system. It also
// provides mechanisms for graceful shutdown and error handling. The replication engine is responsible for managing the
// lifecycle of both producer and consumer goroutines that work together to execute replication tasks.
//
// Key responsibilities of this engine include managing buffered channels for backpressure, starting and stopping
// the replication operation lifecycle, and ensuring that the engine handles concurrent workers without resource exhaustion.
//
// This engine is expected to run in a single node within a cluster, where it processes replication operations relevant
// to the node with a pull mechanism: an engine running on a certain node is responsible for running replication
// operations with that node as a target.
type ShardReplicationEngine struct {
	// nodeId uniquely identifies the node on which this engine instance is running.
	// It is used to filter replication operations that are relevant to this node, as each
	// replication engine works in pull mode and is responsible for a subset of replication
	// operations assigned to the node.
	nodeId string

	// logger provides structured logging throughout the lifecycle of the engine,
	// including producer, consumer, and worker activity. The logger tracks operations,
	// errors, and state transitions.
	logger *logrus.Entry

	// producer is responsible for generating replication operations that this node should execute.
	// These operations are typically retrieved from the cluster’s FSM stored in RAFT.
	// The producer pulls operations from the source and sends them to the opsChan for the consumer to process.
	producer OpProducer

	// consumer handles the execution of replication operations by processing them with a pool of workers.
	// It ensures bounded concurrent execution of multiple workers, performing the actual data replication.
	// The consumer listens on the opsChan and processes operations as they arrive.
	consumer OpConsumer

	// opBufferSize determines the size of the buffered channel between the producer and consumer.
	// It controls how many operations can be in-flight or waiting for processing, enabling backpressure.
	// If the channel is full, the producer will be blocked until space is available, propagating
	// backpressure from the consumer up to the producer.
	opBufferSize int

	// opsChan is the buffered channel used to pass operations from the producer to the consumer.
	// A bounded channel ensures that backpressure is applied when the consumer is overwhelmed or when
	// a certain number of concurrent workers are already busy processing replication operations.
	opsChan chan ShardReplicationOpAndStatus

	// stopChan is a signal-only channel used to trigger graceful shutdown of the engine.
	// It is closed when Stop() is invoked, prompting shutdown of producer and consumer goroutines.
	// This allows for a controlled and graceful shutdown of all active components.
	stopChan chan struct{}

	// isRunning is a flag that indicates whether the engine is currently running.
	// It prevents concurrent starts (multiple instances of the replication engine running simultaneously) or stops.
	// Ensures that the engine runs only once per each node.
	isRunning atomic.Bool

	// wg is a wait group that tracks producer and consumer goroutines.
	// It ensures graceful shutdown by waiting for all background goroutines to exit cleanly.
	// The wait group helps ensure that the engine doesn't terminate prematurely before all goroutines have finished.
	wg sync.WaitGroup

	// maxWorkers controls the maximum number of concurrent workers in the consumer pool.
	// It is used to limit the parallelism of replication operations, preventing the system from being overwhelmed by
	// too many concurrent tasks performing replication operations.
	maxWorkers int

	// shutdownTimeout is the maximum amount of time to wait for a graceful shutdown.
	// If the engine takes longer than this timeout to shut down, a warning is logged, and the process is forcibly stopped.
	// This ensures that the system doesn't hang indefinitely during shutdown.
	shutdownTimeout time.Duration

	// engineMetricCallbacks defines optional hooks for tracking engine lifecycle events
	// like start/stop of the engine, producer, and consumer via Prometheus metrics or custom logic.
	engineMetricCallbacks *metrics.ReplicationEngineCallbacks
}

// NewShardReplicationEngine creates a new replication engine for the given node.
// The returned engine is idle; call Start to begin processing operations.
func NewShardReplicationEngine(
	logger *logrus.Logger,
	nodeId string,
	producer OpProducer,
	consumer OpConsumer,
	opBufferSize int,
	maxWorkers int,
	shutdownTimeout time.Duration,
	engineMetricCallbacks *metrics.ReplicationEngineCallbacks,
) *ShardReplicationEngine {
	return &ShardReplicationEngine{
		nodeId:                nodeId,
		logger:                logger.WithFields(logrus.Fields{"action": replicationEngineLogAction, "node": nodeId}),
		producer:              producer,
		consumer:              consumer,
		opBufferSize:          opBufferSize,
		maxWorkers:            maxWorkers,
		shutdownTimeout:       shutdownTimeout,
		stopChan:              make(chan struct{}),
		engineMetricCallbacks: engineMetricCallbacks,
	}
}

// Start runs the replication engine's main loop, including the operation producer and consumer.
//
// It starts two goroutines: one for the OpProducer and one for the OpConsumer. These goroutines
// communicate through a buffered channel, and the engine coordinates their lifecycle. This method
// is safe to call only once; if the engine is already running, it logs a warning and returns.
//
// It returns an error if either the producer or consumer fails unexpectedly, or if the context is cancelled.
//
// It is safe to restart the replication engine using this method after it has been stopped.
func (e *ShardReplicationEngine) Start(ctx context.Context) error {
	if !e.isRunning.CompareAndSwap(false, true) {
		e.logger.Warnf("replication engine already running: %v", e)
		return nil
	}

	e.engineMetricCallbacks.OnEngineStart(e.nodeId)
	// Channels are created while starting (rather than in the constructor) so the
	// engine can go through repeated start/stop cycles.
+ e.opsChan = make(chan ShardReplicationOpAndStatus, e.opBufferSize) + e.stopChan = make(chan struct{}) + + engineCtx, engineCancel := context.WithCancel(ctx) + e.logger.WithFields(logrus.Fields{"engine": e}).Info("starting replication engine") + + // Channels for error reporting used by producer and consumer. + producerErrChan := make(chan error, 1) + consumerErrChan := make(chan error, 1) + + // Start one replication operations producer. + e.wg.Add(1) + enterrors.GoWrapper(func() { + defer e.wg.Done() + defer e.engineMetricCallbacks.OnProducerStop(e.nodeId) + e.engineMetricCallbacks.OnProducerStart(e.nodeId) + e.logger.WithField("producer", e.producer).Info("starting replication engine producer") + err := e.producer.Produce(engineCtx, e.opsChan) + if err != nil && !errors.Is(err, context.Canceled) { + e.logger.WithField("producer", e.producer).WithError(err).Error("stopping producer after failure") + producerErrChan <- err + } + e.logger.WithField("producer", e.producer).Info("replication engine producer stopped") + }, e.logger) + + // Start one replication operations consumer. + e.wg.Add(1) + enterrors.GoWrapper(func() { + defer e.wg.Done() + defer e.engineMetricCallbacks.OnConsumerStop(e.nodeId) + e.engineMetricCallbacks.OnConsumerStart(e.nodeId) + e.logger.WithField("consumer", e.consumer).Info("starting replication engine consumer") + err := e.consumer.Consume(engineCtx, e.opsChan) + if err != nil && !errors.Is(err, context.Canceled) { + e.logger.WithField("consumer", e.consumer).WithError(err).Error("stopping consumer after failure") + consumerErrChan <- err + } + e.logger.WithField("consumer", e.consumer).Info("replication engine consumer stopped") + }, e.logger) + + // Coordinate replication engine execution with producer and consumer lifecycle. 
+ var err error + select { + case <-ctx.Done(): + e.logger.WithField("engine", e).Info("replication engine cancel request, shutting down") + err = ctx.Err() + case <-e.stopChan: + e.logger.WithField("engine", e).Info("replication engine stop request, shutting down") + // Graceful shutdown executed when stopping the replication engine + case producerErr := <-producerErrChan: + if !errors.Is(producerErr, context.Canceled) { + e.logger.WithField("engine", e).WithError(producerErr).Error("stopping replication engine producer after failure") + err = fmt.Errorf("replication engine producer failed with: %w", producerErr) + } + case consumerErr := <-consumerErrChan: + e.logger.WithField("engine", e).WithError(consumerErr).Error("stopping replication engine consumer after failure") + err = fmt.Errorf("replication engine consumer failed with: %w", consumerErr) + } + + // Always cancel the replication engine context and wait for the producer and consumers to terminate to gracefully + // shut down the replication engine the both the producer and consumer. + engineCancel() + close(e.opsChan) + e.wg.Wait() + e.isRunning.Store(false) + return err +} + +// Stop signals the replication engine to shut down gracefully. +// +// It safely transitions the engine's running state to false and closes the internal stop channel, +// which unblocks the main loop in Start() and initiates the shutdown sequence. +// Calling Stop multiple times is safe; only the first call has an effect. +// Note that the ops channel is closed in the Start method after waiting for both the producer and consumers to +// terminate. +func (e *ShardReplicationEngine) Stop() { + if !e.isRunning.Load() { + return + } + + // Closing the stop channel notifies both the producer and consumer to shut down gracefully coordinating with the + // replication engine. 
	close(e.stopChan)
	e.wg.Wait()
	e.isRunning.Store(false)
	e.engineMetricCallbacks.OnEngineStop(e.nodeId)
}

// IsRunning reports whether the replication engine is currently running.
//
// It returns true if the engine has been started and has not yet shut down.
func (e *ShardReplicationEngine) IsRunning() bool {
	return e.isRunning.Load()
}

// OpChannelCap returns the capacity of the internal operation channel.
//
// This reflects the total number of replication operations the channel can queue
// before blocking the producer, implementing a backpressure mechanism.
func (e *ShardReplicationEngine) OpChannelCap() int {
	return cap(e.opsChan)
}

// OpChannelLen returns the current number of operations buffered in the internal channel.
//
// This can be used to monitor the backpressure between the producer and the consumer.
func (e *ShardReplicationEngine) OpChannelLen() int {
	return len(e.opsChan)
}

// String returns a string representation of the ShardReplicationEngine,
// including the node ID that uniquely identifies the engine for a specific node.
//
// The expectation is that each node runs one and only one replication engine,
// so the string output is helpful for logging or diagnostics to easily identify
// the engine associated with the node.
+func (e *ShardReplicationEngine) String() string { + return fmt.Sprintf("replication engine on node '%s'", e.nodeId) +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/replication/shard_replication_engine_test.go b/platform/dbops/binaries/weaviate-src/cluster/replication/shard_replication_engine_test.go new file mode 100644 index 0000000000000000000000000000000000000000..2981d49113835afb2caa0bc1179c1c9219cb7716 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/replication/shard_replication_engine_test.go @@ -0,0 +1,1376 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package replication_test + +import ( + "context" + "crypto/rand" + "fmt" + "sync" + "testing" + "time" + + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + logrustest "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/cluster/proto/api" + "github.com/weaviate/weaviate/cluster/replication" + "github.com/weaviate/weaviate/cluster/replication/metrics" +) + +func TestShardReplicationEngine(t *testing.T) { + t.Run("replication engine cancel graceful handling", func(t *testing.T) { + // GIVEN + mockProducer := replication.NewMockOpProducer(t) + mockConsumer := replication.NewMockOpConsumer(t) + + producerStartedChan := make(chan struct{}) + consumerStartedChan := make(chan struct{}) + + mockProducer.EXPECT(). + Produce(mock.Anything, mock.Anything). + Run(func(ctx context.Context, out chan<- replication.ShardReplicationOpAndStatus) { + producerStartedChan <- struct{}{} + <-ctx.Done() + }). + Once(). + Return(context.Canceled) + + mockConsumer.EXPECT(). + Consume(mock.Anything, mock.Anything). 
+ Run(func(ctx context.Context, in <-chan replication.ShardReplicationOpAndStatus) { + consumerStartedChan <- struct{}{} + <-ctx.Done() + }). + Once(). + Return(context.Canceled) + + logger, _ := logrustest.NewNullLogger() + + engine := replication.NewShardReplicationEngine( + logger, + "node1", + mockProducer, + mockConsumer, + 1, + 1, 1*time.Minute, + metrics.NewReplicationEngineCallbacks(prometheus.NewPedanticRegistry()), + ) + require.False(t, engine.IsRunning(), "engine should report not running before start") + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + var wg sync.WaitGroup + wg.Add(1) + var engineStartErr error + go func() { + defer wg.Done() + engineStartErr = engine.Start(ctx) + require.ErrorIs(t, engineStartErr, context.Canceled) + }() + + <-producerStartedChan + <-consumerStartedChan + + require.True(t, engine.IsRunning(), "engine should be running after producer and consumer started") + + // WHEN + cancel() + + wg.Wait() + + // THEN + require.ErrorIs(t, engineStartErr, context.Canceled, "engine should return context.Canceled") + require.False(t, engine.IsRunning(), "engine should not be running after context cancellation") + mockProducer.AssertExpectations(t) + mockConsumer.AssertExpectations(t) + }) + + t.Run("replication engine consumer failure", func(t *testing.T) { + // GIVEN + mockProducer := replication.NewMockOpProducer(t) + mockConsumer := replication.NewMockOpConsumer(t) + + producerStartedChan := make(chan struct{}) + + mockProducer.EXPECT(). + Produce(mock.Anything, mock.Anything). + Run(func(ctx context.Context, out chan<- replication.ShardReplicationOpAndStatus) { + producerStartedChan <- struct{}{} + <-ctx.Done() + }). + Return(context.Canceled) + + mockConsumer.EXPECT(). + Consume(mock.Anything, mock.Anything). 
+ Return(errors.New("unexpected consumer error")) + + logger, _ := logrustest.NewNullLogger() + + engine := replication.NewShardReplicationEngine(logger, + "node1", + mockProducer, + mockConsumer, + 1, + 1, + 1*time.Minute, + metrics.NewReplicationEngineCallbacks(prometheus.NewPedanticRegistry()), + ) + require.False(t, engine.IsRunning(), "engine should report not running before start") + + var wg sync.WaitGroup + wg.Add(1) + var engineStartErr error + go func() { + defer wg.Done() + engineStartErr = engine.Start(context.Background()) + }() + + // Wait for producer but not for the consumer which will err + <-producerStartedChan + + // Wait for engine start + wg.Wait() + + // THEN + require.Error(t, engineStartErr) + require.Contains(t, engineStartErr.Error(), "unexpected consumer error") + require.False(t, engine.IsRunning(), "engine should report not running after consumer error") + mockProducer.AssertExpectations(t) + mockConsumer.AssertExpectations(t) + }) + + t.Run("replication engine producer failure", func(t *testing.T) { + // GIVEN + mockProducer := replication.NewMockOpProducer(t) + mockConsumer := replication.NewMockOpConsumer(t) + + consumerStartedChan := make(chan struct{}) + + mockConsumer.EXPECT(). + Consume(mock.Anything, mock.Anything). + Run(func(ctx context.Context, in <-chan replication.ShardReplicationOpAndStatus) { + consumerStartedChan <- struct{}{} + <-ctx.Done() + }). + Return(context.Canceled) + + mockProducer.EXPECT(). + Produce(mock.Anything, mock.Anything). 
+ Return(errors.New("unexpected producer error")) + + logger, _ := logrustest.NewNullLogger() + + engine := replication.NewShardReplicationEngine( + logger, + "node1", + mockProducer, + mockConsumer, + 1, + 1, + 1*time.Minute, + metrics.NewReplicationEngineCallbacks(prometheus.NewPedanticRegistry()), + ) + require.False(t, engine.IsRunning(), "engine should report not running before start") + + var wg sync.WaitGroup + wg.Add(1) + var engineStartErr error + go func() { + defer wg.Done() + engineStartErr = engine.Start(context.Background()) + }() + + // Wait for consumer but not for the producer which will err + <-consumerStartedChan + + wg.Wait() + + // THEN + require.Error(t, engineStartErr) + require.Contains(t, engineStartErr.Error(), "unexpected producer error") + require.False(t, engine.IsRunning(), "engine should not be running after consumer error") + mockProducer.AssertExpectations(t) + mockConsumer.AssertExpectations(t) + }) + + t.Run("replication engine stop graceful handling", func(t *testing.T) { + // GIVEN + mockProducer := replication.NewMockOpProducer(t) + mockConsumer := replication.NewMockOpConsumer(t) + + producerStartedChan := make(chan struct{}) + consumerStartedChan := make(chan struct{}) + + mockProducer.EXPECT(). + Produce(mock.Anything, mock.Anything). + Run(func(ctx context.Context, out chan<- replication.ShardReplicationOpAndStatus) { + producerStartedChan <- struct{}{} // producer started event + <-ctx.Done() + }). + Once(). + Return(context.Canceled) + + mockConsumer.EXPECT(). + Consume(mock.Anything, mock.Anything). + Run(func(ctx context.Context, in <-chan replication.ShardReplicationOpAndStatus) { + consumerStartedChan <- struct{}{} // consumer started event + <-ctx.Done() + }). + Once(). 
+ Return(context.Canceled) + + logger, _ := logrustest.NewNullLogger() + engine := replication.NewShardReplicationEngine( + logger, + "node1", + mockProducer, + mockConsumer, + 1, + 1, + 1*time.Minute, + metrics.NewReplicationEngineCallbacks(prometheus.NewPedanticRegistry()), + ) + require.False(t, engine.IsRunning(), "engine should report not running before start") + + var wg sync.WaitGroup + wg.Add(1) + var engineStartErr error + go func() { + defer wg.Done() + engineStartErr = engine.Start(context.Background()) + }() + + // Wait for producer and consumer to start + <-producerStartedChan + <-consumerStartedChan + + // THEN + require.True(t, engine.IsRunning(), "engine should be running before Stop") + + engine.Stop() // stop while the engine is still running + wg.Wait() + + // THEN + require.NoError(t, engineStartErr, "engine should stop without error") + require.False(t, engine.IsRunning(), "engine should not be running after stop") + + mockProducer.AssertExpectations(t) + mockConsumer.AssertExpectations(t) + }) + + t.Run("replication engine started twice", func(t *testing.T) { + // GIVEN + mockProducer := replication.NewMockOpProducer(t) + mockConsumer := replication.NewMockOpConsumer(t) + + producerStarted := make(chan struct{}) + consumerStarted := make(chan struct{}) + + mockProducer.EXPECT(). + Produce(mock.Anything, mock.Anything). + Run(func(ctx context.Context, out chan<- replication.ShardReplicationOpAndStatus) { + producerStarted <- struct{}{} // producer started event + <-ctx.Done() + }). + Once(). + Return(context.Canceled) + + mockConsumer.EXPECT(). + Consume(mock.Anything, mock.Anything). + Run(func(ctx context.Context, in <-chan replication.ShardReplicationOpAndStatus) { + consumerStarted <- struct{}{} // consumer started event + <-ctx.Done() + }). + Once(). 
+ Return(context.Canceled) + + logger, _ := logrustest.NewNullLogger() + engine := replication.NewShardReplicationEngine( + logger, + "node1", + mockProducer, + mockConsumer, + 1, + 1, + 1*time.Minute, + metrics.NewReplicationEngineCallbacks(prometheus.NewPedanticRegistry()), + ) + require.False(t, engine.IsRunning(), "engine should not be running before start") + + var wg sync.WaitGroup + wg.Add(1) + var firstEngineStartErr error + go func() { + defer wg.Done() + firstEngineStartErr = engine.Start(context.Background()) + }() + + // Wait for producer and consumer to start + <-producerStarted + <-consumerStarted + + require.True(t, engine.IsRunning(), "engine should be running after first Start") + + secondEngineStartErr := engine.Start(context.Background()) + + // THEN + require.NoError(t, secondEngineStartErr, "second start should return nil when already running") + require.True(t, engine.IsRunning(), "engine should still be running after second Start") + + engine.Stop() + wg.Wait() + + require.NoError(t, firstEngineStartErr, "first start should complete without error") + require.False(t, engine.IsRunning(), "engine should no longer be running after Stop") + + mockProducer.AssertExpectations(t) + mockConsumer.AssertExpectations(t) + }) + + t.Run("two replication engines run independently on different nodes", func(t *testing.T) { + // GIVEN + mockProducer1 := replication.NewMockOpProducer(t) + mockConsumer1 := replication.NewMockOpConsumer(t) + mockProducer2 := replication.NewMockOpProducer(t) + mockConsumer2 := replication.NewMockOpConsumer(t) + + producer1StartedChan := make(chan struct{}) + consumer1StartedChan := make(chan struct{}) + producer2StartedChan := make(chan struct{}) + consumer2StartedChan := make(chan struct{}) + + mockProducer1.EXPECT(). + Produce(mock.Anything, mock.Anything). + Run(func(ctx context.Context, out chan<- replication.ShardReplicationOpAndStatus) { + producer1StartedChan <- struct{}{} + <-ctx.Done() + }). + Once(). 
+ Return(context.Canceled) + + mockConsumer1.EXPECT(). + Consume(mock.Anything, mock.Anything). + Run(func(ctx context.Context, in <-chan replication.ShardReplicationOpAndStatus) { + consumer1StartedChan <- struct{}{} + <-ctx.Done() + }). + Once(). + Return(context.Canceled) + + mockProducer2.EXPECT(). + Produce(mock.Anything, mock.Anything). + Run(func(ctx context.Context, out chan<- replication.ShardReplicationOpAndStatus) { + producer2StartedChan <- struct{}{} + <-ctx.Done() + }). + Once(). + Return(context.Canceled) + + mockConsumer2.EXPECT(). + Consume(mock.Anything, mock.Anything). + Run(func(ctx context.Context, in <-chan replication.ShardReplicationOpAndStatus) { + consumer2StartedChan <- struct{}{} + <-ctx.Done() + }). + Once(). + Return(context.Canceled) + + logger, _ := logrustest.NewNullLogger() + engine1 := replication.NewShardReplicationEngine(logger, + "node1", + mockProducer1, + mockConsumer1, + 1, + 1, + 1*time.Minute, + metrics.NewReplicationEngineCallbacks(prometheus.NewPedanticRegistry()), + ) + engine2 := replication.NewShardReplicationEngine(logger, + "node2", + mockProducer2, + mockConsumer2, + 1, + 1, + 1*time.Minute, + metrics.NewReplicationEngineCallbacks(prometheus.NewPedanticRegistry()), + ) + require.False(t, engine1.IsRunning(), "engine1 should not be running before start") + require.False(t, engine2.IsRunning(), "engine2 should not be running before start") + + // WHEN + var wg sync.WaitGroup + wg.Add(2) + + var engine1StartErr error + var engine2StartErr error + + go func() { + defer wg.Done() + engine1StartErr = engine1.Start(context.Background()) + }() + + go func() { + defer wg.Done() + engine2StartErr = engine2.Start(context.Background()) + }() + + <-producer1StartedChan + <-consumer1StartedChan + <-producer2StartedChan + <-consumer2StartedChan + + // THEN + require.True(t, engine1.IsRunning(), "engine1 should be running") + require.True(t, engine2.IsRunning(), "engine2 should be running") + + engine1.Stop() + engine2.Stop() + + 
// Wait for both engines to complete + wg.Wait() + + require.NoError(t, engine1StartErr, "engine1 should stop without error") + require.NoError(t, engine2StartErr, "engine2 should stop without error") + require.False(t, engine1.IsRunning(), "engine1 should not be running after stop") + require.False(t, engine2.IsRunning(), "engine2 should not be running after stop") + mockProducer1.AssertExpectations(t) + mockConsumer1.AssertExpectations(t) + mockProducer2.AssertExpectations(t) + mockConsumer2.AssertExpectations(t) + }) + + t.Run("replication engine stop is idempotent", func(t *testing.T) { + // GIVEN + mockProducer := replication.NewMockOpProducer(t) + mockConsumer := replication.NewMockOpConsumer(t) + + producerStarted := make(chan struct{}) + consumerStarted := make(chan struct{}) + + mockProducer.EXPECT(). + Produce(mock.Anything, mock.Anything). + Run(func(ctx context.Context, out chan<- replication.ShardReplicationOpAndStatus) { + producerStarted <- struct{}{} + <-ctx.Done() + }). + Once(). + Return(context.Canceled) + + mockConsumer.EXPECT(). + Consume(mock.Anything, mock.Anything). + Run(func(ctx context.Context, in <-chan replication.ShardReplicationOpAndStatus) { + consumerStarted <- struct{}{} + <-ctx.Done() + }). + Once(). 
+ Return(context.Canceled) + + logger, _ := logrustest.NewNullLogger() + engine := replication.NewShardReplicationEngine( + logger, + "node1", + mockProducer, + mockConsumer, + 1, + 1, + 1*time.Minute, + metrics.NewReplicationEngineCallbacks(prometheus.NewPedanticRegistry()), + ) + require.False(t, engine.IsRunning(), "engine should not be running before start") + + // WHEN + var wg sync.WaitGroup + wg.Add(1) + var engineStartErr error + go func() { + defer wg.Done() + engineStartErr = engine.Start(context.Background()) + }() + + <-producerStarted + <-consumerStarted + require.True(t, engine.IsRunning(), "engine should report running after start") + + // THEN + engine.Stop() + engine.Stop() // second stop should be idempotent (no-op) + wg.Wait() + + // THEN + require.NoError(t, engineStartErr, "engine should stop without error") + require.False(t, engine.IsRunning(), "engine should not be running after stop") + + engine.Stop() // third stop after already stopped is still idempotent (no-op) + mockProducer.AssertExpectations(t) + mockConsumer.AssertExpectations(t) + }) + + t.Run("replication engine start-stop-start-stop works correctly", func(t *testing.T) { + // GIVEN + mockProducer := replication.NewMockOpProducer(t) + mockConsumer := replication.NewMockOpConsumer(t) + + // First start/stop cycle + producer1StartedChan := make(chan struct{}) + consumer1StartedChan := make(chan struct{}) + + mockProducer.EXPECT(). + Produce(mock.Anything, mock.Anything). + Run(func(ctx context.Context, out chan<- replication.ShardReplicationOpAndStatus) { + producer1StartedChan <- struct{}{} + <-ctx.Done() + }). + Once(). + Return(context.Canceled) + + mockConsumer.EXPECT(). + Consume(mock.Anything, mock.Anything). + Run(func(ctx context.Context, in <-chan replication.ShardReplicationOpAndStatus) { + consumer1StartedChan <- struct{}{} + <-ctx.Done() + }). + Once(). 
+ Return(context.Canceled) + + // Second start/stop cycle + producer2StartedChan := make(chan struct{}) + consumer2StartedChan := make(chan struct{}) + + mockProducer.EXPECT(). + Produce(mock.Anything, mock.Anything). + Run(func(ctx context.Context, out chan<- replication.ShardReplicationOpAndStatus) { + producer2StartedChan <- struct{}{} + <-ctx.Done() + }). + Once(). + Return(context.Canceled) + + mockConsumer.EXPECT(). + Consume(mock.Anything, mock.Anything). + Run(func(ctx context.Context, in <-chan replication.ShardReplicationOpAndStatus) { + consumer2StartedChan <- struct{}{} + <-ctx.Done() + }). + Once(). + Return(context.Canceled) + + logger, _ := logrustest.NewNullLogger() + + engine := replication.NewShardReplicationEngine( + logger, + "node1", + mockProducer, + mockConsumer, + 1, + 1, + 1*time.Minute, + metrics.NewReplicationEngineCallbacks(prometheus.NewPedanticRegistry()), + ) + + require.False(t, engine.IsRunning(), "engine should not be running before start") + + // WHEN (start first cycle) + var wg sync.WaitGroup + wg.Add(1) + var firstCycleErr error + + go func() { + defer wg.Done() + firstCycleErr = engine.Start(context.Background()) + }() + + // Wait for producer and consumer to start + <-producer1StartedChan + <-consumer1StartedChan + + require.True(t, engine.IsRunning(), "engine should be running in first cycle") + + engine.Stop() + + // Wait for first cycle to complete + wg.Wait() + + require.NoError(t, firstCycleErr, "first cycle should complete without error") + require.False(t, engine.IsRunning(), "engine should not be running after first stop") + + // WHEN (start second cycle) + wg.Add(1) + var secondCycleErr error + + go func() { + defer wg.Done() + secondCycleErr = engine.Start(context.Background()) + }() + + // Wait for producer and consumer to start again + <-producer2StartedChan + <-consumer2StartedChan + + require.True(t, engine.IsRunning(), "engine should be running in second cycle") + + engine.Stop() + + // Wait for second cycle to 
complete + wg.Wait() + + require.NoError(t, secondCycleErr, "second cycle should complete without error") + require.False(t, engine.IsRunning(), "engine should not be running after second stop") + mockProducer.AssertNumberOfCalls(t, "Produce", 2) + mockConsumer.AssertNumberOfCalls(t, "Consume", 2) + mockProducer.AssertExpectations(t) + mockConsumer.AssertExpectations(t) + }) + + t.Run("replication engine supports multiple start/stop cycles", func(t *testing.T) { + // GIVEN + mockProducer := replication.NewMockOpProducer(t) + mockConsumer := replication.NewMockOpConsumer(t) + + logger, _ := logrustest.NewNullLogger() + + engine := replication.NewShardReplicationEngine( + logger, + "node1", + mockProducer, + mockConsumer, + 1, + 1, + 1*time.Minute, + metrics.NewReplicationEngineCallbacks(prometheus.NewPedanticRegistry()), + ) + + require.False(t, engine.IsRunning(), "engine should not be running before start") + + // Run multiple start/stop cycles + cycles, err := randInt(t, 5, 10) + require.NoError(t, err, "unexpected error when generating rando value") + + for cycle := 1; cycle <= cycles; cycle++ { + producerStartedChan := make(chan struct{}) + consumerStartedChan := make(chan struct{}) + + mockProducer.EXPECT(). + Produce(mock.Anything, mock.Anything). + Run(func(ctx context.Context, out chan<- replication.ShardReplicationOpAndStatus) { + producerStartedChan <- struct{}{} + <-ctx.Done() + }). + Once(). + Return(context.Canceled) + + mockConsumer.EXPECT(). + Consume(mock.Anything, mock.Anything). + Run(func(ctx context.Context, in <-chan replication.ShardReplicationOpAndStatus) { + consumerStartedChan <- struct{}{} + <-ctx.Done() + }). + Once(). 
+ Return(context.Canceled) + + var wg sync.WaitGroup + wg.Add(1) + var cycleErr error + + go func() { + defer wg.Done() + cycleErr = engine.Start(context.Background()) + }() + + // Wait for producer and consumer to start + <-producerStartedChan + <-consumerStartedChan + + require.True(t, engine.IsRunning(), "engine should be running in cycle %d", cycle) + + engine.Stop() + + // Wait for cycle to complete + wg.Wait() + + require.NoError(t, cycleErr, "cycle %d should complete without error", cycle) + require.False(t, engine.IsRunning(), "engine should not be running after cycle %d", cycle) + mockProducer.AssertExpectations(t) + mockConsumer.AssertExpectations(t) + } + mockProducer.AssertNumberOfCalls(t, "Produce", cycles) + mockConsumer.AssertNumberOfCalls(t, "Consume", cycles) + }) + + t.Run("replication engine stop without start is a no-op", func(t *testing.T) { + // GIVEN + mockProducer := replication.NewMockOpProducer(t) + mockConsumer := replication.NewMockOpConsumer(t) + + logger, _ := logrustest.NewNullLogger() + + engine := replication.NewShardReplicationEngine( + logger, + "node1", + mockProducer, + mockConsumer, + 1, + 1, + 1*time.Minute, + metrics.NewReplicationEngineCallbacks(prometheus.NewPedanticRegistry()), + ) + + require.False(t, engine.IsRunning(), "engine should not be running initially") + + // WHEN + engine.Stop() // Stop without ever starting + + // THEN + require.False(t, engine.IsRunning(), "engine should still not be running after Stop") + mockProducer.AssertNotCalled(t, "Produce") + mockConsumer.AssertNotCalled(t, "Consume") + }) + + t.Run("replication engine custom op channel size", func(t *testing.T) { + // GIVEN + mockProducer := replication.NewMockOpProducer(t) + mockConsumer := replication.NewMockOpConsumer(t) + + producerStartedChan := make(chan struct{}) + consumerStartedChan := make(chan struct{}) + + mockProducer.EXPECT(). + Produce(mock.Anything, mock.Anything). 
+ Run(func(ctx context.Context, out chan<- replication.ShardReplicationOpAndStatus) { + producerStartedChan <- struct{}{} // producer started event + <-ctx.Done() + }). + Once(). + Return(context.Canceled) + + mockConsumer.EXPECT(). + Consume(mock.Anything, mock.Anything). + Run(func(ctx context.Context, in <-chan replication.ShardReplicationOpAndStatus) { + consumerStartedChan <- struct{}{} // consumer started event + <-ctx.Done() + }). + Once(). + Return(context.Canceled) + + logger, _ := logrustest.NewNullLogger() + randomOpBufferSize, err := randInt(t, 16, 128) + require.NoError(t, err, "error generating random operation buffer") + engine := replication.NewShardReplicationEngine( + logger, + "node1", + mockProducer, + mockConsumer, + randomOpBufferSize, + 1, + 1*time.Minute, + metrics.NewReplicationEngineCallbacks(prometheus.NewPedanticRegistry()), + ) + require.False(t, engine.IsRunning(), "engine should report not running before start") + + // WHEN + var wg sync.WaitGroup + wg.Add(1) + var engineStartErr error + go func() { + defer wg.Done() + engineStartErr = engine.Start(context.Background()) + }() + + // Wait for producer and consumer to start + <-producerStartedChan + <-consumerStartedChan + + // THEN + require.True(t, engine.IsRunning(), "engine should be running after start") + require.Equal(t, randomOpBufferSize, engine.OpChannelCap(), "channel capacity should match the configured size") + require.Equal(t, 0, engine.OpChannelLen(), "channel length should be 0 when no ops are queued") + + engine.Stop() + wg.Wait() + + require.NoError(t, engineStartErr, "engine should stop without error") + require.False(t, engine.IsRunning(), "engine should not be running after stop") + mockProducer.AssertExpectations(t) + mockConsumer.AssertExpectations(t) + }) + + t.Run("producer creates and consumer processes random operations", func(t *testing.T) { + logger, _ := logrustest.NewNullLogger() + opsCount, err := randInt(t, 20, 30) + require.NoError(t, err, "error 
generating random operation count") + + producedOpsChan := make(chan replication.ShardReplicationOp, opsCount) + consumedOpsChan := make(chan uint64, opsCount) + completedOpsChan := make(chan uint64, opsCount) + doneChan := make(chan struct{}) + + opIds, err := randomOpIds(t, opsCount) + require.NoError(t, err, "error generating operation IDs") + + var producerWg sync.WaitGroup + producerWg.Add(1) + + mockProducer := replication.NewMockOpProducer(t) + mockProducer.EXPECT(). + Produce(mock.Anything, mock.Anything). + Run(func(ctx context.Context, out chan<- replication.ShardReplicationOpAndStatus) { + defer producerWg.Done() + for _, opId := range opIds { + randomSleepTime, e := randInt(t, 10, 50) + require.NoErrorf(t, e, "error generating random sleep time") + time.Sleep(time.Millisecond * time.Duration(randomSleepTime)) + op := replication.NewShardReplicationOp(opId, "node1", "node2", "TestCollection", "shard1", api.COPY) + + select { + case out <- replication.NewShardReplicationOpAndStatus(op, replication.NewShardReplicationStatus(api.REGISTERED)): + producedOpsChan <- op + case <-ctx.Done(): + return + } + } + }). + Return(nil) + + mockConsumer := replication.NewMockOpConsumer(t) + mockConsumer.EXPECT(). + Consume(mock.Anything, mock.Anything). + Run(func(ctx context.Context, in <-chan replication.ShardReplicationOpAndStatus) { + processedOps := 0 + for { + select { + case <-ctx.Done(): + return + case op, ok := <-in: + if !ok { + return + } + + randomSleepTime, e := randInt(t, 10, 50) + require.NoErrorf(t, e, "error generating random sleep time") + time.Sleep(time.Millisecond * time.Duration(randomSleepTime)) + + consumedOpsChan <- op.Op.ID + completedOpsChan <- op.Op.ID + + processedOps++ + if processedOps == opsCount { + close(doneChan) + return + } + } + } + }). 
+ Return(nil) + + engine := replication.NewShardReplicationEngine( + logger, + "node2", + mockProducer, + mockConsumer, + opsCount, + 1, + 1*time.Minute, + metrics.NewReplicationEngineCallbacks(prometheus.NewPedanticRegistry()), + ) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + var wg sync.WaitGroup + wg.Add(1) + var engineStartErr error + go func() { + defer wg.Done() + engineStartErr = engine.Start(ctx) + }() + + var producedOps, consumedOps, completedOps []uint64 + + select { + case <-doneChan: + case <-time.After(1 * time.Minute): // this is here just to prevent the test from running indefinitely for too long + t.Fatal("timeout waiting for operations to complete") + } + + engine.Stop() + producerWg.Wait() + + close(producedOpsChan) + close(consumedOpsChan) + close(completedOpsChan) + + for op := range producedOpsChan { + producedOps = append(producedOps, op.ID) + } + for opID := range consumedOpsChan { + consumedOps = append(consumedOps, opID) + } + for opID := range completedOpsChan { + completedOps = append(completedOps, opID) + } + + engine.Stop() + wg.Wait() + + require.NoError(t, engineStartErr, "engine should start without error") + require.Equal(t, opsCount, len(producedOps), "all operations should be produced") + require.Equal(t, opsCount, len(consumedOps), "all operations should be consumed") + require.Equal(t, opsCount, len(completedOps), "all operations should be completed") + require.ElementsMatch(t, producedOps, consumedOps, "produced and consumed operations should match") + require.ElementsMatch(t, producedOps, completedOps, "produced and completed operations should match") + mockProducer.AssertExpectations(t) + mockConsumer.AssertExpectations(t) + }) + + t.Run("producer error during operation is handled gracefully and engine can restart", func(t *testing.T) { + // GIVEN + logger, _ := logrustest.NewNullLogger() + + producerStartedChan := make(chan struct{}, 1) + producerErrorChan := make(chan 
struct{}, 1) + engineStoppedChan := make(chan struct{}, 1) + producerRestartChan := make(chan struct{}, 1) + consumerStartedChan := make(chan struct{}, 1) + + opId, err := randInt(t, 1000, 2000) + require.NoErrorf(t, err, "error generating random op id") + expectedErr := errors.New(fmt.Sprintf("producer error after sending operation %d", uint64(opId))) + + // First attempt - producer sends one operation then errors + mockProducer := replication.NewMockOpProducer(t) + mockProducer.EXPECT(). + Produce(mock.Anything, mock.Anything). + Run(func(ctx context.Context, out chan<- replication.ShardReplicationOpAndStatus) { + producerStartedChan <- struct{}{} + + op := replication.NewShardReplicationOp(uint64(opId), "node1", "node2", "collection1", "shard1", api.COPY) + select { + case <-ctx.Done(): + return + case out <- replication.NewShardReplicationOpAndStatus(op, replication.NewShardReplicationStatus(api.REGISTERED)): + // Error after sending a valid op + producerErrorChan <- struct{}{} + } + }). + Once(). + Return(expectedErr) + + // Second attempt - producer runs normally until canceled + mockProducer.EXPECT(). + Produce(mock.Anything, mock.Anything). + Run(func(ctx context.Context, out chan<- replication.ShardReplicationOpAndStatus) { + producerRestartChan <- struct{}{} + <-ctx.Done() + }). + Once(). + Return(context.Canceled) + + // Consumer runs normally processing operations + mockConsumer := replication.NewMockOpConsumer(t) + mockConsumer.EXPECT(). + Consume(mock.Anything, mock.Anything). + Run(func(ctx context.Context, in <-chan replication.ShardReplicationOpAndStatus) { + consumerStartedChan <- struct{}{} + <-ctx.Done() + }). + Return(context.Canceled). 
+ Twice() + + randomBufferSize, err := randInt(t, 10, 20) + require.NoErrorf(t, err, "error generating random buffer size") + + randomWorkers, err := randInt(t, 2, 5) + require.NoErrorf(t, err, "error generating random workers") + + engine := replication.NewShardReplicationEngine( + logger, + "node1", + mockProducer, + mockConsumer, + randomBufferSize, + randomWorkers, + 1*time.Minute, + metrics.NewReplicationEngineCallbacks(prometheus.NewPedanticRegistry()), + ) + + // WHEN - First attempt fails due to producer facing an unexpected error + var wg sync.WaitGroup + wg.Add(1) + var firstEngineStartErr error + + go func() { + defer wg.Done() + firstEngineStartErr = engine.Start(context.Background()) + // Wait for the engine to stop after the producer fails + engineStoppedChan <- struct{}{} + }() + + <-producerStartedChan + <-consumerStartedChan + <-producerErrorChan + + // Wait for engine to stop as a result of producer error + <-engineStoppedChan + wg.Wait() + + // THEN - First attempt should have failed with expected error + require.Error(t, firstEngineStartErr, "first attempt should return error") + require.Contains(t, firstEngineStartErr.Error(), expectedErr.Error(), + "error should contain expected message") + require.False(t, engine.IsRunning(), "engine should not be running after error") + + // WHEN - Second attempt + wg.Add(1) + ctx, cancel := context.WithCancel(context.Background()) + + var engineRestartErr error + go func() { + defer wg.Done() + engineRestartErr = engine.Start(ctx) + }() + + // Wait for producer and consumer to start again after restarting the engine + <-producerRestartChan + <-consumerStartedChan + + // THEN + require.NoError(t, engineRestartErr, "engine should restart after error") + require.True(t, engine.IsRunning(), "engine should be running on second attempt") + + cancel() + wg.Wait() + + require.False(t, engine.IsRunning(), "engine should not be running after stop") + + mockProducer.AssertExpectations(t) + 
mockConsumer.AssertExpectations(t) + }) + + t.Run("consumer error during operation is handled gracefully and engine can restart", func(t *testing.T) { + // GIVEN + logger, _ := logrustest.NewNullLogger() + + producerStartedChan := make(chan struct{}, 1) + consumerStartedChan := make(chan struct{}, 1) + consumerErrorChan := make(chan struct{}, 1) + engineStoppedChan := make(chan struct{}, 1) + producerRestartChan := make(chan struct{}, 1) + consumerRestartChan := make(chan struct{}, 1) + + opId, err := randInt(t, 1000, 2000) + require.NoErrorf(t, err, "error generating random op id") + expectedErr := errors.New(fmt.Sprintf("consumer error while processing operation %d", opId)) + + mockProducer := replication.NewMockOpProducer(t) + + // First attempt - producer sends operation and waits for cancellation + mockProducer.EXPECT(). + Produce(mock.Anything, mock.Anything). + Run(func(ctx context.Context, out chan<- replication.ShardReplicationOpAndStatus) { + producerStartedChan <- struct{}{} + + op := replication.NewShardReplicationOp(uint64(opId), "node1", "node2", "collection1", "shard1", api.COPY) + select { + case <-ctx.Done(): + return + case out <- replication.NewShardReplicationOpAndStatus(op, replication.NewShardReplicationStatus(api.REGISTERED)): + } + + // Wait for cancellation + <-ctx.Done() + }). + Once(). + Return(context.Canceled) + + // Second attempt - producer runs normally again after restarting the engine + mockProducer.EXPECT(). + Produce(mock.Anything, mock.Anything). + Run(func(ctx context.Context, out chan<- replication.ShardReplicationOpAndStatus) { + producerRestartChan <- struct{}{} + <-ctx.Done() + }). + Once(). + Return(context.Canceled) + + mockConsumer := replication.NewMockOpConsumer(t) + + // First consumer attempt - fails with error + mockConsumer.EXPECT(). + Consume(mock.Anything, mock.Anything). 
+ Run(func(ctx context.Context, in <-chan replication.ShardReplicationOpAndStatus) { + consumerStartedChan <- struct{}{} + + // Process one operation then fail + select { + case <-ctx.Done(): + return + case <-in: + consumerErrorChan <- struct{}{} + return + } + }). + Once(). + Return(expectedErr) + + // Second consumer attempt - succeeds after restarting the engine + mockConsumer.EXPECT(). + Consume(mock.Anything, mock.Anything). + Run(func(ctx context.Context, in <-chan replication.ShardReplicationOpAndStatus) { + consumerRestartChan <- struct{}{} + <-ctx.Done() + }). + Once(). + Return(context.Canceled) + + randomBufferSize, err := randInt(t, 10, 20) + require.NoErrorf(t, err, "error generating random buffer size") + + randomWorkers, err := randInt(t, 2, 5) + require.NoErrorf(t, err, "error generating random workers") + + engine := replication.NewShardReplicationEngine( + logger, + "node1", + mockProducer, + mockConsumer, + randomBufferSize, + randomWorkers, + 1*time.Minute, + metrics.NewReplicationEngineCallbacks(prometheus.NewPedanticRegistry()), + ) + + // WHEN - First attempt fails due to consumer error + var wg sync.WaitGroup + wg.Add(1) + + var firstEngineStartErr error + go func() { + defer wg.Done() + firstEngineStartErr = engine.Start(context.Background()) + engineStoppedChan <- struct{}{} + }() + + <-producerStartedChan + <-consumerStartedChan + <-consumerErrorChan + + // Wait for engine to stop as a result of consumer error + <-engineStoppedChan + wg.Wait() + + // THEN + require.Error(t, firstEngineStartErr, "first attempt should return error") + require.Contains(t, firstEngineStartErr.Error(), expectedErr.Error(), + "error should contain expected message") + require.False(t, engine.IsRunning(), "engine should not be running after error") + + wg.Add(1) + ctx, cancel := context.WithCancel(context.Background()) + + var engineRestartErr error + go func() { + defer wg.Done() + engineRestartErr = engine.Start(ctx) + }() + + <-producerRestartChan + 
<-consumerRestartChan + + // THEN + require.True(t, engine.IsRunning(), "engine should be running on second attempt") + + cancel() + wg.Wait() + + require.ErrorIs(t, engineRestartErr, context.Canceled, "engine should stop with context.Canceled") + require.False(t, engine.IsRunning(), "engine should not be running after stop") + + mockProducer.AssertExpectations(t) + mockConsumer.AssertExpectations(t) + }) +} + +func TestEngineWithCallbacks(t *testing.T) { + t.Run("should trigger engine/producer/consumer start and stop callbacks", func(t *testing.T) { + // GIVEN + logger, _ := logrustest.NewNullLogger() + mockProducer := replication.NewMockOpProducer(t) + mockConsumer := replication.NewMockOpConsumer(t) + + // Use channels to track callback execution + engineStarted := make(chan struct{}, 1) + engineStopped := make(chan struct{}, 1) + producerStarted := make(chan struct{}, 1) + producerStopped := make(chan struct{}, 1) + consumerStarted := make(chan struct{}, 1) + consumerStopped := make(chan struct{}, 1) + + engineStartCount := 0 + engineStopCount := 0 + producerStartCount := 0 + producerStopCount := 0 + consumerStartCount := 0 + consumerStopCount := 0 + + callbacks := metrics.NewReplicationEngineCallbacksBuilder(). + WithEngineStartCallback(func(node string) { + require.Equal(t, "node1", node) + engineStartCount++ + engineStarted <- struct{}{} + }). + WithEngineStopCallback(func(node string) { + require.Equal(t, "node1", node) + engineStopCount++ + engineStopped <- struct{}{} + }). + WithProducerStartCallback(func(node string) { + require.Equal(t, "node1", node) + producerStartCount++ + producerStarted <- struct{}{} + }). + WithProducerStopCallback(func(node string) { + require.Equal(t, "node1", node) + producerStopCount++ + producerStopped <- struct{}{} + }). + WithConsumerStartCallback(func(node string) { + consumerStartCount++ + require.Equal(t, "node1", node) + consumerStarted <- struct{}{} + }). 
+ WithConsumerStopCallback(func(node string) { + consumerStopCount++ + require.Equal(t, "node1", node) + consumerStopped <- struct{}{} + }). + Build() + + mockProducer.EXPECT(). + Produce(mock.Anything, mock.Anything). + Return(nil).Maybe() + mockConsumer.EXPECT(). + Consume(mock.Anything, mock.Anything). + Return(nil).Maybe() + + engine := replication.NewShardReplicationEngine( + logger, + "node1", + mockProducer, + mockConsumer, + 1, + 1, + 1*time.Second, + callbacks, + ) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + var engineStartErr error + var engineStartWG sync.WaitGroup + engineStartWG.Add(1) + + // WHEN + go func() { + defer engineStartWG.Done() + engineStartErr = engine.Start(ctx) + }() + + // Wait for all start callbacks + select { + case <-engineStarted: + case <-time.After(5 * time.Second): + t.Fatal("timeout waiting for engine start callback") + } + select { + case <-producerStarted: + case <-time.After(5 * time.Second): + t.Fatal("timeout waiting for producer start callback") + } + select { + case <-consumerStarted: + case <-time.After(5 * time.Second): + t.Fatal("timeout waiting for consumer start callback") + } + + engine.Stop() + + // Wait for all stop callbacks + select { + case <-engineStopped: + case <-time.After(5 * time.Second): + t.Fatal("timeout waiting for engine stop callback") + } + select { + case <-producerStopped: + case <-time.After(5 * time.Second): + t.Fatal("timeout waiting for producer stop callback") + } + select { + case <-consumerStopped: + case <-time.After(5 * time.Second): + t.Fatal("timeout waiting for consumer stop callback") + } + + engineStartWG.Wait() + + // THEN + require.NoErrorf(t, engineStartErr, "engine start should not return error") + require.Equal(t, engineStartCount, 1, "engine start count should be 1") + require.Equal(t, engineStopCount, 1, "engine stop count should be 1") + require.Equal(t, producerStartCount, 1, "producer start count should be 1") + require.Equal(t, 
producerStopCount, 1, "producer stop count should be 1") + require.Equal(t, consumerStartCount, 1, "consumer start count should be 1") + require.Equal(t, consumerStopCount, 1, "consumer stop count should be 1") + }) +} + +func randomOpIds(t *testing.T, count int) ([]uint64, error) { + t.Helper() + startId, err := randInt(t, 1000, 10000) + if err != nil { + return nil, err + } + + opIds := make([]uint64, count) + currId := uint64(startId) + + for i := 0; i < count; i++ { + opIds[i] = currId + currId += 1 + } + + return opIds, nil +} + +func randInt(t *testing.T, min, max int) (int, error) { + t.Helper() + var randValue [1]byte + _, err := rand.Read(randValue[:]) + if err != nil { + return 0, err + } + return min + int(randValue[0])%(max-min+1), nil +} + +func randomBoolean(t *testing.T) bool { + t.Helper() + var b [1]byte + _, err := rand.Read(b[:]) + if err != nil { + t.Fatalf("failed to generate random boolean: %v", err) + } + return b[0]%2 == 0 +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/replication/shard_replication_fqdn.go b/platform/dbops/binaries/weaviate-src/cluster/replication/shard_replication_fqdn.go new file mode 100644 index 0000000000000000000000000000000000000000..92e11e239bf69671d2388a708671320834197470 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/replication/shard_replication_fqdn.go @@ -0,0 +1,38 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package replication + +import ( + "fmt" +) + +// shardFQDN uniquely identify a shard in a weaviate cluster +type shardFQDN struct { + // nodeId is the node containing the shard + NodeId string + // collectionId is the collection containing the shard + CollectionId string + // shardId is the id of the shard + ShardId string +} + +func newShardFQDN(nodeId, collectionId, shardId string) shardFQDN { + return shardFQDN{ + NodeId: nodeId, + CollectionId: collectionId, + ShardId: shardId, + } +} + +func (s shardFQDN) String() string { + return fmt.Sprintf("%s/%s/%s", s.NodeId, s.CollectionId, s.ShardId) +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/replication/shard_replication_fsm.go b/platform/dbops/binaries/weaviate-src/cluster/replication/shard_replication_fsm.go new file mode 100644 index 0000000000000000000000000000000000000000..5e2a4db27dd9298d8f68f9029945a29f3473603b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/replication/shard_replication_fsm.go @@ -0,0 +1,404 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package replication + +import ( + "encoding/json" + "fmt" + "sync" + + "github.com/go-openapi/strfmt" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "golang.org/x/exp/maps" + + "github.com/weaviate/weaviate/cluster/proto/api" +) + +type ShardReplicationOp struct { + ID uint64 + UUID strfmt.UUID + + // Targeting information of the replication operation + SourceShard shardFQDN + TargetShard shardFQDN + + TransferType api.ShardReplicationTransferType + StartTimeUnixMs int64 // Unix timestamp when the operation started +} + +func (s ShardReplicationOp) MarshalText() (text []byte, err error) { + // We have to implement MarshalText to be able to use this struct as a key for a map + // We have to trick go to avoid an infinite recursion here as we still want to use the default json marshal/unmarshal + // code + type shardReplicationOpCopy ShardReplicationOp + return json.Marshal(shardReplicationOpCopy(s)) +} + +func (s *ShardReplicationOp) UnmarshalText(text []byte) error { + type shardReplicationOpCopy ShardReplicationOp + return json.Unmarshal(text, (*shardReplicationOpCopy)(s)) +} + +func NewShardReplicationOp(id uint64, sourceNode, targetNode, collectionId, shardId string, transferType api.ShardReplicationTransferType) ShardReplicationOp { + return ShardReplicationOp{ + ID: id, + SourceShard: newShardFQDN(sourceNode, collectionId, shardId), + TargetShard: newShardFQDN(targetNode, collectionId, shardId), + TransferType: transferType, + } +} + +type ShardReplicationFSM struct { + opsLock sync.RWMutex + + // idsByUuiid stores user-facing UUID -> repo-facing raft log index + idsByUuid map[strfmt.UUID]uint64 + // opsByTarget stores the array of ShardReplicationOp for each "target" node + opsByTarget map[string][]ShardReplicationOp + // opsBySource stores the array of ShardReplicationOp for each "source" node + opsBySource map[string][]ShardReplicationOp + // 
opsByCollection stores the array of ShardReplicationOp for each collection + opsByCollection map[string][]ShardReplicationOp + // opsByCollectionAndShard stores the array of ShardReplicationOp for each collection and shard + opsByCollectionAndShard map[string]map[string][]ShardReplicationOp + // opsByTargetFQDN stores the registered ShardReplicationOp (if any) for each destination replica + opsByTargetFQDN map[shardFQDN]ShardReplicationOp + // opsBySourceFQDN stores the registered ShardReplicationOp (if any) for each source replica + opsBySourceFQDN map[shardFQDN][]ShardReplicationOp + // opsById stores opId -> replicationOp + opsById map[uint64]ShardReplicationOp + // opsStatus stores op -> opStatus + statusById map[uint64]ShardReplicationOpStatus + + opsByStateGauge *prometheus.GaugeVec +} + +func NewShardReplicationFSM(reg prometheus.Registerer) *ShardReplicationFSM { + fsm := &ShardReplicationFSM{ + idsByUuid: make(map[strfmt.UUID]uint64), + opsByTarget: make(map[string][]ShardReplicationOp), + opsBySource: make(map[string][]ShardReplicationOp), + opsByCollection: make(map[string][]ShardReplicationOp), + opsByCollectionAndShard: make(map[string]map[string][]ShardReplicationOp), + opsByTargetFQDN: make(map[shardFQDN]ShardReplicationOp), + opsBySourceFQDN: make(map[shardFQDN][]ShardReplicationOp), + opsById: make(map[uint64]ShardReplicationOp), + statusById: make(map[uint64]ShardReplicationOpStatus), + } + + fsm.opsByStateGauge = promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "weaviate", + Name: "replication_operation_fsm_ops_by_state", + Help: "Current number of replication operations in each state of the FSM lifecycle", + }, []string{"state"}) + + return fsm +} + +type snapshot struct { + Ops map[ShardReplicationOp]ShardReplicationOpStatus +} + +func (s *ShardReplicationFSM) Snapshot() ([]byte, error) { + s.opsLock.RLock() + ops := make(map[ShardReplicationOp]ShardReplicationOpStatus, len(s.statusById)) + for id, status := range s.statusById 
{ + op, ok := s.opsById[id] + if !ok { + s.opsLock.RUnlock() + return nil, fmt.Errorf("op %d not found in opsById", op.ID) + } + ops[op] = status + } + s.opsLock.RUnlock() + + return json.Marshal(&snapshot{Ops: ops}) +} + +func (s *ShardReplicationFSM) Restore(bytes []byte) error { + var snap snapshot + if err := json.Unmarshal(bytes, &snap); err != nil { + return fmt.Errorf("unmarshal snapshot: %w", err) + } + + s.opsLock.Lock() + defer s.opsLock.Unlock() + + s.resetState() + + for op, status := range snap.Ops { + if err := s.writeOpIntoFSM(op, status); err != nil { + return err + } + } + + return nil +} + +// resetState reset the state of the FSM to empty. This is used when restoring a snapshot to ensure we restore a snapshot +// into a clean FSM +// The lock onto the underlying data is *not acquired* by this function the callee must ensure the lock is held +func (s *ShardReplicationFSM) resetState() { + // Reset data + maps.Clear(s.idsByUuid) + maps.Clear(s.opsByTarget) + maps.Clear(s.opsBySource) + maps.Clear(s.opsByCollection) + maps.Clear(s.opsByCollectionAndShard) + maps.Clear(s.opsByTargetFQDN) + maps.Clear(s.opsBySourceFQDN) + maps.Clear(s.opsById) + maps.Clear(s.statusById) + + s.opsByStateGauge.Reset() +} + +func (s *ShardReplicationFSM) GetOpByUuid(uuid strfmt.UUID) (ShardReplicationOpAndStatus, bool) { + s.opsLock.RLock() + defer s.opsLock.RUnlock() + id, ok := s.idsByUuid[uuid] + if !ok { + return ShardReplicationOpAndStatus{}, false + } + op, ok := s.opsById[id] + if !ok { + return ShardReplicationOpAndStatus{}, false + } + status, ok := s.statusById[id] + if !ok { + return ShardReplicationOpAndStatus{}, false + } + return NewShardReplicationOpAndStatus(op, status), true +} + +func (s *ShardReplicationFSM) GetOpById(id uint64) (ShardReplicationOpAndStatus, bool) { + s.opsLock.RLock() + defer s.opsLock.RUnlock() + op, ok := s.opsById[id] + if !ok { + return ShardReplicationOpAndStatus{}, false + } + status, ok := s.statusById[id] + if !ok { + return 
ShardReplicationOpAndStatus{}, false + } + return NewShardReplicationOpAndStatus(op, status), true +} + +func (s *ShardReplicationFSM) GetOpsForTarget(node string) []ShardReplicationOp { + s.opsLock.RLock() + defer s.opsLock.RUnlock() + return s.opsByTarget[node] +} + +func (s *ShardReplicationFSM) GetOpsForCollection(collection string) ([]ShardReplicationOpAndStatus, bool) { + s.opsLock.RLock() + defer s.opsLock.RUnlock() + ops, ok := s.opsByCollection[collection] + if !ok { + return nil, false + } + return s.getOpsWithStatus(ops), true +} + +func (s *ShardReplicationFSM) GetOpsForCollectionAndShard(collection string, shard string) ([]ShardReplicationOpAndStatus, bool) { + s.opsLock.RLock() + defer s.opsLock.RUnlock() + shardOps, ok := s.opsByCollectionAndShard[collection] + if !ok { + return nil, false + } + ops, ok := shardOps[shard] + if !ok { + return nil, false + } + return s.getOpsWithStatus(ops), true +} + +func (s *ShardReplicationFSM) getOpsWithStatus(ops []ShardReplicationOp) []ShardReplicationOpAndStatus { + opsWithStatus := make([]ShardReplicationOpAndStatus, 0, len(ops)) + for _, op := range ops { + status, ok := s.statusById[op.ID] + if !ok { + continue + } + opsWithStatus = append(opsWithStatus, NewShardReplicationOpAndStatus(op, status)) + } + return opsWithStatus +} + +func (s *ShardReplicationFSM) GetOpsForTargetNode(node string) ([]ShardReplicationOpAndStatus, bool) { + s.opsLock.RLock() + defer s.opsLock.RUnlock() + ops, ok := s.opsByTarget[node] + return s.getOpsWithStatus(ops), ok +} + +func (s *ShardReplicationFSM) GetStatusByOps() map[ShardReplicationOp]ShardReplicationOpStatus { + s.opsLock.RLock() + defer s.opsLock.RUnlock() + opsStatus := make(map[ShardReplicationOp]ShardReplicationOpStatus, len(s.statusById)) + for id, status := range s.statusById { + op, ok := s.opsById[id] + if !ok { + continue + } + opsStatus[op] = status + } + return opsStatus +} + +// ShouldConsumeOps returns true if the operation should be consumed by the consumer 
+// +// It checks the following two conditions: +// +// 1. The operation is neither cancelled nor ready, meaning that it is still in progress performing some long-running op like hydrating/finalizing +// +// 2. The operation is cancelled or ready and should be deleted, meaning that the operation is finished and should be removed from the FSM +func (s ShardReplicationOpStatus) ShouldConsumeOps() bool { + state := s.GetCurrentState() + return ( + // Check if op is not in cancelled or ready state -> we schedule it + (state != api.CANCELLED && state != api.READY) || + // If op is in cancelled or ready state, only schedule it if it should be deleted + (state == api.CANCELLED || state == api.READY) && s.ShouldDelete) +} + +func (s *ShardReplicationFSM) GetOpState(op ShardReplicationOp) (ShardReplicationOpStatus, bool) { + s.opsLock.RLock() + defer s.opsLock.RUnlock() + v, ok := s.statusById[op.ID] + return v, ok +} + +func (s *ShardReplicationFSM) FilterOneShardReplicasRead(collection string, shard string, shardReplicasLocation []string) []string { + s.opsLock.RLock() + defer s.opsLock.RUnlock() + + // Check if the specified shard is current undergoing replication at all. + // If not we can return early as all replicas can be used for reads + byCollection, ok := s.opsByCollectionAndShard[collection] + if !ok { + return shardReplicasLocation + } + _, ok = byCollection[shard] + if !ok { + return shardReplicasLocation + } + readReplicas, _ := s.readWriteReplicas(collection, shard, shardReplicasLocation) + return readReplicas +} + +func (s *ShardReplicationFSM) FilterOneShardReplicasWrite(collection string, shard string, shardReplicasLocation []string) ([]string, []string) { + s.opsLock.RLock() + defer s.opsLock.RUnlock() + + // Check if the specified shard is current undergoing replication at all. 
+ // If not we can return early as all replicas can be used for writes + byCollection, ok := s.opsByCollectionAndShard[collection] + if !ok { + return shardReplicasLocation, []string{} + } + ops, ok := byCollection[shard] + if !ok { + return shardReplicasLocation, []string{} + } + + _, writeReplicas := s.readWriteReplicas(collection, shard, shardReplicasLocation) + + additionalWriteReplicas := []string{} + for _, op := range ops { + opState, ok := s.statusById[op.ID] + if !ok { + continue + } + if opState.GetCurrentState() == api.FINALIZING { + additionalWriteReplicas = append(additionalWriteReplicas, op.TargetShard.NodeId) + } + } + return writeReplicas, additionalWriteReplicas +} + +func (s *ShardReplicationFSM) readWriteReplicas(collection, shard string, shardReplicasLocation []string) ([]string, []string) { + readReplicas := make([]string, 0, len(shardReplicasLocation)) + writeReplicas := make([]string, 0, len(shardReplicasLocation)) + for _, shardReplicaLocation := range shardReplicasLocation { + readOk, writeOk := s.filterOneReplicaReadWrite(shardReplicaLocation, collection, shard) + if readOk { + readReplicas = append(readReplicas, shardReplicaLocation) + } + if writeOk { + writeReplicas = append(writeReplicas, shardReplicaLocation) + } + } + return readReplicas, writeReplicas +} + +// filterOneReplicaAsTargetReadWrite returns whether the replica node for collection and shard is usable for read and write +// It returns a tuple of boolean (readOk, writeOk) +func (s *ShardReplicationFSM) filterOneReplicaReadWrite(node string, collection string, shard string) (bool, bool) { + replicaFQDN := newShardFQDN(node, collection, shard) + op, ok := s.opsByTargetFQDN[replicaFQDN] + // No target replication ops for that replica, ensure we check if it's a source + if !ok { + return s.filterOneReplicaAsSourceReadWrite(node, collection, shard) + } + + opState, ok := s.statusById[op.ID] + if !ok { + // TODO: This should never happens + return true, true + } + + // Filter 
read/write based on the state of the replica op + readOk := false + writeOk := false + switch opState.GetCurrentState() { + case api.READY: + readOk = true + writeOk = true + case api.DEHYDRATING: + readOk = true + writeOk = true + default: + } + return readOk, writeOk +} + +// filterOneReplicaAsSourceReadWrite returns a tuple of boolean (found, readOk, writeOk) +// if found is true it means there's a source replication op for that replica and readOk and writeOk should be considered +func (s *ShardReplicationFSM) filterOneReplicaAsSourceReadWrite(node string, collection string, shard string) (bool, bool) { + replicaFQDN := newShardFQDN(node, collection, shard) + ops, ok := s.opsBySourceFQDN[replicaFQDN] + // No source replication ops for that replica it can be used for both read and writes + if !ok { + return true, true + } + + readOk := true + writeOk := true + for _, op := range ops { + opState, ok := s.statusById[op.ID] + if !ok { + // This should never happen + continue + } + switch opState.GetCurrentState() { + case api.DEHYDRATING: + readOk = false + writeOk = false + default: + } + } + return readOk, writeOk +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/replication/shard_replication_op_state.go b/platform/dbops/binaries/weaviate-src/cluster/replication/shard_replication_op_state.go new file mode 100644 index 0000000000000000000000000000000000000000..86a50ce07aedbbc6f19f7d124a97af5c8e433b56 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/replication/shard_replication_op_state.go @@ -0,0 +1,166 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package replication + +import ( + "errors" + "time" + + "github.com/weaviate/weaviate/cluster/proto/api" +) + +var ErrMaxErrorsReached = errors.New("max errors reached") + +const ( + MaxErrors = 50 +) + +// State is the status of a shard replication operation +type State struct { + // State is the current state of the shard replication operation + State api.ShardReplicationState + // Errors is the list of errors that occurred during this state + Errors []api.ReplicationDetailsError + // Ms is the Unix timestamp in milliseconds when the state was first entered + StartTimeUnixMs int64 +} + +// StateHistory is the history of the state changes of the shard replication operation +// Defining this as a type allows us to define methods on it +type StateHistory []State + +// ShardReplicationOpStatus is the status of a shard replication operation as well as the history of the state changes and their associated errors (if any) +type ShardReplicationOpStatus struct { + // SchemaVersion is the minimum schema version that the shard replication operation can safely proceed with + // It's necessary to track this because the schema version is not always the same across multiple nodes due to EC issues with RAFT. + // By communicating it with remote nodes, we can ensure that they will wait for the schema version to be the same or greater before proceeding with the operation. + SchemaVersion uint64 + + // Current is the current state of the shard replication operation + Current State + + // ShouldCancel is a flag indicating that the operation should be cancelled at the earliest possible time + ShouldCancel bool + // ShouldDelete is a flag indicating that the operation should be cancelled at the earliest possible time and then deleted + ShouldDelete bool + // UnCancellable is a flag indicating that an operation is not capable of being cancelled. 
+ // E.g., an op is not cancellable if it is in the DEHYDRATING state after the replica has been added to the sharding state. + UnCancellable bool + + // History is the history of the state changes of the shard replication operation + History StateHistory +} + +// NewShardReplicationStatus creates a new ShardReplicationOpStatus initialized with the given state and en empty history +func NewShardReplicationStatus(state api.ShardReplicationState) ShardReplicationOpStatus { + return ShardReplicationOpStatus{ + Current: State{ + State: state, + }, + History: []State{}, + } +} + +// AddError adds an error to the current state of the shard replication operation +func (s *ShardReplicationOpStatus) AddError(error string, timeUnixMs int64) error { + if len(s.Current.Errors) >= MaxErrors { + return ErrMaxErrorsReached + } + s.Current.Errors = append(s.Current.Errors, api.ReplicationDetailsError{ + Message: error, + ErroredTimeUnixMs: timeUnixMs, + }) + return nil +} + +// ChangeState changes the state of the shard replication operation to the next state and keeps the previous state in the history +func (s *ShardReplicationOpStatus) ChangeState(nextState api.ShardReplicationState) { + s.History = append(s.History, s.Current) + s.Current = State{ + State: nextState, + Errors: []api.ReplicationDetailsError{}, + StartTimeUnixMs: time.Now().UnixMilli(), + } +} + +// GetCurrent returns the current state and errors of the shard replication operation +func (s *ShardReplicationOpStatus) GetCurrent() State { + return s.Current +} + +// GetCurrentState returns the current state of the shard replication operation +func (s *ShardReplicationOpStatus) GetCurrentState() api.ShardReplicationState { + return s.Current.State +} + +func (s *ShardReplicationOpStatus) TriggerCancellation() { + s.ShouldCancel = true + s.ShouldDelete = false +} + +func (s *ShardReplicationOpStatus) CompleteCancellation() { + s.ShouldCancel = false + s.ShouldDelete = false + s.ChangeState(api.CANCELLED) +} + +func 
(s *ShardReplicationOpStatus) TriggerDeletion() { + s.ShouldCancel = true + s.ShouldDelete = true +} + +// OnlyCancellation returns true if ShouldCancel is true and ShouldDelete is false +func (s *ShardReplicationOpStatus) OnlyCancellation() bool { + return s.ShouldCancel && !s.ShouldDelete +} + +// ShouldCleanup returns true if the current state is not READY +func (s *ShardReplicationOpStatus) ShouldCleanup() bool { + return s.GetCurrentState() != api.READY && s.GetCurrentState() != api.DEHYDRATING +} + +// GetHistory returns the history of the state changes of the shard replication operation +func (s *ShardReplicationOpStatus) GetHistory() StateHistory { + return s.History +} + +// ToAPIFormat converts the State to the API format +func (s State) ToAPIFormat() api.ReplicationDetailsState { + return api.ReplicationDetailsState{ + State: s.State.String(), + Errors: s.Errors, + StartTimeUnixMs: s.StartTimeUnixMs, + } +} + +// ToAPIFormat converts the StateHistory to the API format +func (sh StateHistory) ToAPIFormat() []api.ReplicationDetailsState { + states := make([]api.ReplicationDetailsState, len(sh)) + for i, s := range sh { + states[i] = s.ToAPIFormat() + } + return states +} + +// ShardReplicationOpAndStatus is a struct that contains a ShardReplicationOp and a ShardReplicationOpStatus +type ShardReplicationOpAndStatus struct { + Op ShardReplicationOp + Status ShardReplicationOpStatus +} + +// NewShardReplicationOpAndStatus creates a new ShardReplicationOpAndStatus from op and status +func NewShardReplicationOpAndStatus(op ShardReplicationOp, status ShardReplicationOpStatus) ShardReplicationOpAndStatus { + return ShardReplicationOpAndStatus{ + Op: op, + Status: status, + } +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/replication/types/err.go b/platform/dbops/binaries/weaviate-src/cluster/replication/types/err.go new file mode 100644 index 0000000000000000000000000000000000000000..6cd6efaad25fab9c2d9426d23821a6ada327053d --- /dev/null +++ 
b/platform/dbops/binaries/weaviate-src/cluster/replication/types/err.go @@ -0,0 +1,27 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package types + +import "errors" + +var ( + ErrInvalidRequest = errors.New("invalid request") + ErrCancellationImpossible = errors.New("cancellation impossible") + ErrDeletionImpossible = errors.New("deletion impossible") + ErrReplicationOperationNotFound = errors.New("replication operation not found") + // ErrNotFound is a custom error that is used to indicate that a resource was not found. + // We use it to return a specific error code from the RPC layer to ensure we don't retry an operation + // returning an error indicating that the resource was not found. + // We add E00001 to the error string to ensure it's unique and can be checked for specifically. + // Otherwise it could be matched against any "not found" error. + ErrNotFound = errors.New("E00001: not found") +) diff --git a/platform/dbops/binaries/weaviate-src/cluster/replication/types/manager.go b/platform/dbops/binaries/weaviate-src/cluster/replication/types/manager.go new file mode 100644 index 0000000000000000000000000000000000000000..0f612e00b39923cbbe814048186426665c222626 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/replication/types/manager.go @@ -0,0 +1,162 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package types + +import ( + context "context" + + "github.com/go-openapi/strfmt" + "github.com/weaviate/weaviate/cluster/proto/api" +) + +type Manager interface { + QueryShardingStateByCollection(ctx context.Context, collection string) (api.ShardingState, error) + QueryShardingStateByCollectionAndShard(ctx context.Context, collection string, shard string) (api.ShardingState, error) + + ReplicationReplicateReplica(ctx context.Context, opId strfmt.UUID, sourceNode string, sourceCollection string, sourceShard string, targetNode string, transferType string) error + + // GetReplicationDetailsByReplicationId retrieves the details of a replication operation by its UUID. + // + // Parameters: + // - uuid: The unique identifier for the replication operation (strfmt.UUID). + // + // Returns: + // - api.ReplicationDetailsResponse: Contains the details of the requested replication operation. + // - error: Returns ErrReplicationOperationNotFound if the operation doesn't exist, + // or another error explaining why retrieving the replication operation details failed. + GetReplicationDetailsByReplicationId(ctx context.Context, uuid strfmt.UUID) (api.ReplicationDetailsResponse, error) + + // GetReplicationDetailsByCollection retrieves the details of all replication operations for a given collection. + // + // Parameters: + // - collection: The name of the collection to retrieve replication details for. + // + // Returns: + // - []api.ReplicationDetailsResponse: A list of replication details for the given collection. Returns an empty list if there are no replication operations for the given collection. + // - error: Returns an error if fetching the replication details failed. + GetReplicationDetailsByCollection(ctx context.Context, collection string) ([]api.ReplicationDetailsResponse, error) + + // GetReplicationDetailsByCollectionAndShard retrieves the details of all replication operations for a given collection and shard. 
+ // + // Parameters: + // - collection: The name of the collection to retrieve replication details for. + // - shard: The name of the shard to retrieve replication details for. + // + // Returns: + // - []api.ReplicationDetailsResponse: A list of replication details for the given collection and shard. Returns an empty list if there are no replication operations for the given collection and shard. + // - error: Returns an error if fetching the replication details failed. + GetReplicationDetailsByCollectionAndShard(ctx context.Context, collection string, shard string) ([]api.ReplicationDetailsResponse, error) + + // GetReplicationDetailsByTargetNode retrieves the details of all replication operations for a given target node. + // + // Parameters: + // - node: The name of the target node to retrieve replication details for. + // + // Returns: + // - []api.ReplicationDetailsResponse: A list of replication details for the given target node. Returns an empty list if there are no replication operations for the given target node. + // - error: Returns an error if fetching the replication details failed. + GetReplicationDetailsByTargetNode(ctx context.Context, node string) ([]api.ReplicationDetailsResponse, error) + + // GetAllReplicationDetails retrieves the details of all replication operations. + // + // Returns: + // - []api.ReplicationDetailsResponse: A list of replication details for the given target node. Returns an empty list if there are no replication operations for the given target node. + // - error: Returns an error if fetching the replication details failed. + GetAllReplicationDetails(ctx context.Context) ([]api.ReplicationDetailsResponse, error) + + // CancelReplication cancels a replication operation meaning that the operation is stopped, cleaned-up on the target, and moved to the CANCELLED state. + // + // Parameters: + // - uuid: The unique identifier for the replication operation (strfmt.UUID). 
+ // Returns: + // - error: Returns ErrReplicationOperationNotFound if the operation doesn't exist, + // or another error explaining why cancelling the replication operation failed. + CancelReplication(ctx context.Context, uuid strfmt.UUID) error + // DeleteReplication removes a replication operation from the FSM. If it's in progress, it will be cancelled first. + // + // Parameters: + // - uuid: The unique identifier for the replication operation (strfmt.UUID). + // Returns: + // - error: Returns ErrReplicationOperationNotFound if the operation doesn't exist, + // or another error explaining why cancelling the replication operation failed. + DeleteReplication(ctx context.Context, uuid strfmt.UUID) error + + // DeleteReplicationsByCollection removes all replication operations for a specific collection. + // + // This is required when a collection is deleted, and all replication operations for that collection should be removed including in-flight operations that must be cancelled first. + // + // Parameters: + // - collection: The name of the collection for which to delete replication operations. + // Returns: + // - error: Returns an error if the deletion of replication operations fails. + DeleteReplicationsByCollection(ctx context.Context, collection string) error + // DeleteReplicationsByTenants removes all replication operations for specified tenants in a specific collection. + // + // This is required when tenants are deleted, and all replication operations for those tenants should be removed including in-flight operations that must be cancelled first. + // + // Parameters: + // - collection: The name of the collection for which to delete replication operations. + // - tenants: The list of tenants for which to delete replication operations. + // Returns: + // - error: Returns an error if the deletion of replication operations fails. 
+ DeleteReplicationsByTenants(ctx context.Context, collection string, tenants []string) error + // DeleteAllReplications removes all replication operation from the FSM. + // If they are in progress, then they are cancelled first. + // + // Returns: + // - error: any error explaining why cancelling the replication operation failed. + DeleteAllReplications(ctx context.Context) error + + // ForceDeleteReplicationByReplicationId forcefully deletes a replication operation by its UUID. + // This operation does not cancel the replication operation, it simply removes it from the FSM. + // + // Parameters: + // - uuid: The unique identifier for the replication operation (strfmt.UUID). + // + // Returns: + // - error: Returns an error if force deleting the replication operation failed. + ForceDeleteReplicationByUuid(ctx context.Context, uuid strfmt.UUID) error + // ForceDeleteReplicationByCollection forcefully deletes all replication operations for a given collection. + // This operation does not cancel the replication operations, it simply removes it from the FSM. + // + // Parameters: + // - collection: The name of the collection to force delete replication operations for. + // + // Returns: + // - error: Returns an error if force deleting the replication operation failed. + ForceDeleteReplicationsByCollection(ctx context.Context, collection string) error + // ForceDeleteReplicationByCollectionAndShard forcefully deletes all replication operations for a given collection and shard. + // This operation does not cancel the replication operations, it simply removes it from the FSM. + // + // Parameters: + // - collection: The name of the collection to force delete replication operations for. + // - shard: The name of the shard to force delete replication operations for. + // + // Returns: + // - error: Returns an error if force deleting the replication operation failed. 
+ ForceDeleteReplicationsByCollectionAndShard(ctx context.Context, collection string, shard string) error + // ForceDeleteReplicationByTargetNode forcefully deletes all replication operations for a given target node. + // This operation does not cancel the replication operations, it simply removes it from the FSM. + // + // Parameters: + // - node: The name of the target node to force delete replication operations for. + // + // Returns: + // - error: Returns an error if force deleting the replication operation failed. + ForceDeleteReplicationsByTargetNode(ctx context.Context, node string) error + // ForceDeleteAllReplication forcefully deletes all replication operations. + // This operation does not cancel the replication operations, it simply removes it from the FSM. + // + // Returns: + // - error: Returns an error if force deleting the replication operation failed. + ForceDeleteAllReplications(ctx context.Context) error +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/replication/types/mock_fsm_updater.go b/platform/dbops/binaries/weaviate-src/cluster/replication/types/mock_fsm_updater.go new file mode 100644 index 0000000000000000000000000000000000000000..70cbfdaeda26d018248283fdac1c398b127da661 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/replication/types/mock_fsm_updater.go @@ -0,0 +1,627 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by mockery v2.53.2. DO NOT EDIT. 
+ +package types + +import ( + context "context" + + api "github.com/weaviate/weaviate/cluster/proto/api" + + mock "github.com/stretchr/testify/mock" +) + +// MockFSMUpdater is an autogenerated mock type for the FSMUpdater type +type MockFSMUpdater struct { + mock.Mock +} + +type MockFSMUpdater_Expecter struct { + mock *mock.Mock +} + +func (_m *MockFSMUpdater) EXPECT() *MockFSMUpdater_Expecter { + return &MockFSMUpdater_Expecter{mock: &_m.Mock} +} + +// DeleteReplicaFromShard provides a mock function with given fields: ctx, collection, shard, nodeId +func (_m *MockFSMUpdater) DeleteReplicaFromShard(ctx context.Context, collection string, shard string, nodeId string) (uint64, error) { + ret := _m.Called(ctx, collection, shard, nodeId) + + if len(ret) == 0 { + panic("no return value specified for DeleteReplicaFromShard") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, string) (uint64, error)); ok { + return rf(ctx, collection, shard, nodeId) + } + if rf, ok := ret.Get(0).(func(context.Context, string, string, string) uint64); ok { + r0 = rf(ctx, collection, shard, nodeId) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context, string, string, string) error); ok { + r1 = rf(ctx, collection, shard, nodeId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockFSMUpdater_DeleteReplicaFromShard_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteReplicaFromShard' +type MockFSMUpdater_DeleteReplicaFromShard_Call struct { + *mock.Call +} + +// DeleteReplicaFromShard is a helper method to define mock.On call +// - ctx context.Context +// - collection string +// - shard string +// - nodeId string +func (_e *MockFSMUpdater_Expecter) DeleteReplicaFromShard(ctx interface{}, collection interface{}, shard interface{}, nodeId interface{}) *MockFSMUpdater_DeleteReplicaFromShard_Call { + return 
&MockFSMUpdater_DeleteReplicaFromShard_Call{Call: _e.mock.On("DeleteReplicaFromShard", ctx, collection, shard, nodeId)} +} + +func (_c *MockFSMUpdater_DeleteReplicaFromShard_Call) Run(run func(ctx context.Context, collection string, shard string, nodeId string)) *MockFSMUpdater_DeleteReplicaFromShard_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].(string), args[3].(string)) + }) + return _c +} + +func (_c *MockFSMUpdater_DeleteReplicaFromShard_Call) Return(_a0 uint64, _a1 error) *MockFSMUpdater_DeleteReplicaFromShard_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockFSMUpdater_DeleteReplicaFromShard_Call) RunAndReturn(run func(context.Context, string, string, string) (uint64, error)) *MockFSMUpdater_DeleteReplicaFromShard_Call { + _c.Call.Return(run) + return _c +} + +// ReplicationAddReplicaToShard provides a mock function with given fields: ctx, collection, shard, nodeId, opId +func (_m *MockFSMUpdater) ReplicationAddReplicaToShard(ctx context.Context, collection string, shard string, nodeId string, opId uint64) (uint64, error) { + ret := _m.Called(ctx, collection, shard, nodeId, opId) + + if len(ret) == 0 { + panic("no return value specified for ReplicationAddReplicaToShard") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, string, uint64) (uint64, error)); ok { + return rf(ctx, collection, shard, nodeId, opId) + } + if rf, ok := ret.Get(0).(func(context.Context, string, string, string, uint64) uint64); ok { + r0 = rf(ctx, collection, shard, nodeId, opId) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context, string, string, string, uint64) error); ok { + r1 = rf(ctx, collection, shard, nodeId, opId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockFSMUpdater_ReplicationAddReplicaToShard_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 
'ReplicationAddReplicaToShard' +type MockFSMUpdater_ReplicationAddReplicaToShard_Call struct { + *mock.Call +} + +// ReplicationAddReplicaToShard is a helper method to define mock.On call +// - ctx context.Context +// - collection string +// - shard string +// - nodeId string +// - opId uint64 +func (_e *MockFSMUpdater_Expecter) ReplicationAddReplicaToShard(ctx interface{}, collection interface{}, shard interface{}, nodeId interface{}, opId interface{}) *MockFSMUpdater_ReplicationAddReplicaToShard_Call { + return &MockFSMUpdater_ReplicationAddReplicaToShard_Call{Call: _e.mock.On("ReplicationAddReplicaToShard", ctx, collection, shard, nodeId, opId)} +} + +func (_c *MockFSMUpdater_ReplicationAddReplicaToShard_Call) Run(run func(ctx context.Context, collection string, shard string, nodeId string, opId uint64)) *MockFSMUpdater_ReplicationAddReplicaToShard_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].(string), args[3].(string), args[4].(uint64)) + }) + return _c +} + +func (_c *MockFSMUpdater_ReplicationAddReplicaToShard_Call) Return(_a0 uint64, _a1 error) *MockFSMUpdater_ReplicationAddReplicaToShard_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockFSMUpdater_ReplicationAddReplicaToShard_Call) RunAndReturn(run func(context.Context, string, string, string, uint64) (uint64, error)) *MockFSMUpdater_ReplicationAddReplicaToShard_Call { + _c.Call.Return(run) + return _c +} + +// ReplicationCancellationComplete provides a mock function with given fields: ctx, id +func (_m *MockFSMUpdater) ReplicationCancellationComplete(ctx context.Context, id uint64) error { + ret := _m.Called(ctx, id) + + if len(ret) == 0 { + panic("no return value specified for ReplicationCancellationComplete") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, uint64) error); ok { + r0 = rf(ctx, id) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockFSMUpdater_ReplicationCancellationComplete_Call 
is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ReplicationCancellationComplete' +type MockFSMUpdater_ReplicationCancellationComplete_Call struct { + *mock.Call +} + +// ReplicationCancellationComplete is a helper method to define mock.On call +// - ctx context.Context +// - id uint64 +func (_e *MockFSMUpdater_Expecter) ReplicationCancellationComplete(ctx interface{}, id interface{}) *MockFSMUpdater_ReplicationCancellationComplete_Call { + return &MockFSMUpdater_ReplicationCancellationComplete_Call{Call: _e.mock.On("ReplicationCancellationComplete", ctx, id)} +} + +func (_c *MockFSMUpdater_ReplicationCancellationComplete_Call) Run(run func(ctx context.Context, id uint64)) *MockFSMUpdater_ReplicationCancellationComplete_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64)) + }) + return _c +} + +func (_c *MockFSMUpdater_ReplicationCancellationComplete_Call) Return(_a0 error) *MockFSMUpdater_ReplicationCancellationComplete_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockFSMUpdater_ReplicationCancellationComplete_Call) RunAndReturn(run func(context.Context, uint64) error) *MockFSMUpdater_ReplicationCancellationComplete_Call { + _c.Call.Return(run) + return _c +} + +// ReplicationGetReplicaOpStatus provides a mock function with given fields: ctx, id +func (_m *MockFSMUpdater) ReplicationGetReplicaOpStatus(ctx context.Context, id uint64) (api.ShardReplicationState, error) { + ret := _m.Called(ctx, id) + + if len(ret) == 0 { + panic("no return value specified for ReplicationGetReplicaOpStatus") + } + + var r0 api.ShardReplicationState + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64) (api.ShardReplicationState, error)); ok { + return rf(ctx, id) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64) api.ShardReplicationState); ok { + r0 = rf(ctx, id) + } else { + r0 = ret.Get(0).(api.ShardReplicationState) + } + + if rf, ok := 
ret.Get(1).(func(context.Context, uint64) error); ok { + r1 = rf(ctx, id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockFSMUpdater_ReplicationGetReplicaOpStatus_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ReplicationGetReplicaOpStatus' +type MockFSMUpdater_ReplicationGetReplicaOpStatus_Call struct { + *mock.Call +} + +// ReplicationGetReplicaOpStatus is a helper method to define mock.On call +// - ctx context.Context +// - id uint64 +func (_e *MockFSMUpdater_Expecter) ReplicationGetReplicaOpStatus(ctx interface{}, id interface{}) *MockFSMUpdater_ReplicationGetReplicaOpStatus_Call { + return &MockFSMUpdater_ReplicationGetReplicaOpStatus_Call{Call: _e.mock.On("ReplicationGetReplicaOpStatus", ctx, id)} +} + +func (_c *MockFSMUpdater_ReplicationGetReplicaOpStatus_Call) Run(run func(ctx context.Context, id uint64)) *MockFSMUpdater_ReplicationGetReplicaOpStatus_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64)) + }) + return _c +} + +func (_c *MockFSMUpdater_ReplicationGetReplicaOpStatus_Call) Return(_a0 api.ShardReplicationState, _a1 error) *MockFSMUpdater_ReplicationGetReplicaOpStatus_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockFSMUpdater_ReplicationGetReplicaOpStatus_Call) RunAndReturn(run func(context.Context, uint64) (api.ShardReplicationState, error)) *MockFSMUpdater_ReplicationGetReplicaOpStatus_Call { + _c.Call.Return(run) + return _c +} + +// ReplicationRegisterError provides a mock function with given fields: ctx, id, errorToRegister +func (_m *MockFSMUpdater) ReplicationRegisterError(ctx context.Context, id uint64, errorToRegister string) error { + ret := _m.Called(ctx, id, errorToRegister) + + if len(ret) == 0 { + panic("no return value specified for ReplicationRegisterError") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, string) error); ok { + r0 = rf(ctx, id, errorToRegister) + } else { + 
r0 = ret.Error(0) + } + + return r0 +} + +// MockFSMUpdater_ReplicationRegisterError_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ReplicationRegisterError' +type MockFSMUpdater_ReplicationRegisterError_Call struct { + *mock.Call +} + +// ReplicationRegisterError is a helper method to define mock.On call +// - ctx context.Context +// - id uint64 +// - errorToRegister string +func (_e *MockFSMUpdater_Expecter) ReplicationRegisterError(ctx interface{}, id interface{}, errorToRegister interface{}) *MockFSMUpdater_ReplicationRegisterError_Call { + return &MockFSMUpdater_ReplicationRegisterError_Call{Call: _e.mock.On("ReplicationRegisterError", ctx, id, errorToRegister)} +} + +func (_c *MockFSMUpdater_ReplicationRegisterError_Call) Run(run func(ctx context.Context, id uint64, errorToRegister string)) *MockFSMUpdater_ReplicationRegisterError_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64), args[2].(string)) + }) + return _c +} + +func (_c *MockFSMUpdater_ReplicationRegisterError_Call) Return(_a0 error) *MockFSMUpdater_ReplicationRegisterError_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockFSMUpdater_ReplicationRegisterError_Call) RunAndReturn(run func(context.Context, uint64, string) error) *MockFSMUpdater_ReplicationRegisterError_Call { + _c.Call.Return(run) + return _c +} + +// ReplicationRemoveReplicaOp provides a mock function with given fields: ctx, id +func (_m *MockFSMUpdater) ReplicationRemoveReplicaOp(ctx context.Context, id uint64) error { + ret := _m.Called(ctx, id) + + if len(ret) == 0 { + panic("no return value specified for ReplicationRemoveReplicaOp") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, uint64) error); ok { + r0 = rf(ctx, id) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockFSMUpdater_ReplicationRemoveReplicaOp_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 
'ReplicationRemoveReplicaOp' +type MockFSMUpdater_ReplicationRemoveReplicaOp_Call struct { + *mock.Call +} + +// ReplicationRemoveReplicaOp is a helper method to define mock.On call +// - ctx context.Context +// - id uint64 +func (_e *MockFSMUpdater_Expecter) ReplicationRemoveReplicaOp(ctx interface{}, id interface{}) *MockFSMUpdater_ReplicationRemoveReplicaOp_Call { + return &MockFSMUpdater_ReplicationRemoveReplicaOp_Call{Call: _e.mock.On("ReplicationRemoveReplicaOp", ctx, id)} +} + +func (_c *MockFSMUpdater_ReplicationRemoveReplicaOp_Call) Run(run func(ctx context.Context, id uint64)) *MockFSMUpdater_ReplicationRemoveReplicaOp_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64)) + }) + return _c +} + +func (_c *MockFSMUpdater_ReplicationRemoveReplicaOp_Call) Return(_a0 error) *MockFSMUpdater_ReplicationRemoveReplicaOp_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockFSMUpdater_ReplicationRemoveReplicaOp_Call) RunAndReturn(run func(context.Context, uint64) error) *MockFSMUpdater_ReplicationRemoveReplicaOp_Call { + _c.Call.Return(run) + return _c +} + +// ReplicationStoreSchemaVersion provides a mock function with given fields: ctx, id, schemaVersion +func (_m *MockFSMUpdater) ReplicationStoreSchemaVersion(ctx context.Context, id uint64, schemaVersion uint64) error { + ret := _m.Called(ctx, id, schemaVersion) + + if len(ret) == 0 { + panic("no return value specified for ReplicationStoreSchemaVersion") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64) error); ok { + r0 = rf(ctx, id, schemaVersion) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockFSMUpdater_ReplicationStoreSchemaVersion_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ReplicationStoreSchemaVersion' +type MockFSMUpdater_ReplicationStoreSchemaVersion_Call struct { + *mock.Call +} + +// ReplicationStoreSchemaVersion is a helper method to define mock.On 
call +// - ctx context.Context +// - id uint64 +// - schemaVersion uint64 +func (_e *MockFSMUpdater_Expecter) ReplicationStoreSchemaVersion(ctx interface{}, id interface{}, schemaVersion interface{}) *MockFSMUpdater_ReplicationStoreSchemaVersion_Call { + return &MockFSMUpdater_ReplicationStoreSchemaVersion_Call{Call: _e.mock.On("ReplicationStoreSchemaVersion", ctx, id, schemaVersion)} +} + +func (_c *MockFSMUpdater_ReplicationStoreSchemaVersion_Call) Run(run func(ctx context.Context, id uint64, schemaVersion uint64)) *MockFSMUpdater_ReplicationStoreSchemaVersion_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64), args[2].(uint64)) + }) + return _c +} + +func (_c *MockFSMUpdater_ReplicationStoreSchemaVersion_Call) Return(_a0 error) *MockFSMUpdater_ReplicationStoreSchemaVersion_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockFSMUpdater_ReplicationStoreSchemaVersion_Call) RunAndReturn(run func(context.Context, uint64, uint64) error) *MockFSMUpdater_ReplicationStoreSchemaVersion_Call { + _c.Call.Return(run) + return _c +} + +// ReplicationUpdateReplicaOpStatus provides a mock function with given fields: ctx, id, state +func (_m *MockFSMUpdater) ReplicationUpdateReplicaOpStatus(ctx context.Context, id uint64, state api.ShardReplicationState) error { + ret := _m.Called(ctx, id, state) + + if len(ret) == 0 { + panic("no return value specified for ReplicationUpdateReplicaOpStatus") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, api.ShardReplicationState) error); ok { + r0 = rf(ctx, id, state) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockFSMUpdater_ReplicationUpdateReplicaOpStatus_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ReplicationUpdateReplicaOpStatus' +type MockFSMUpdater_ReplicationUpdateReplicaOpStatus_Call struct { + *mock.Call +} + +// ReplicationUpdateReplicaOpStatus is a helper method to define mock.On call 
+// - ctx context.Context +// - id uint64 +// - state api.ShardReplicationState +func (_e *MockFSMUpdater_Expecter) ReplicationUpdateReplicaOpStatus(ctx interface{}, id interface{}, state interface{}) *MockFSMUpdater_ReplicationUpdateReplicaOpStatus_Call { + return &MockFSMUpdater_ReplicationUpdateReplicaOpStatus_Call{Call: _e.mock.On("ReplicationUpdateReplicaOpStatus", ctx, id, state)} +} + +func (_c *MockFSMUpdater_ReplicationUpdateReplicaOpStatus_Call) Run(run func(ctx context.Context, id uint64, state api.ShardReplicationState)) *MockFSMUpdater_ReplicationUpdateReplicaOpStatus_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64), args[2].(api.ShardReplicationState)) + }) + return _c +} + +func (_c *MockFSMUpdater_ReplicationUpdateReplicaOpStatus_Call) Return(_a0 error) *MockFSMUpdater_ReplicationUpdateReplicaOpStatus_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockFSMUpdater_ReplicationUpdateReplicaOpStatus_Call) RunAndReturn(run func(context.Context, uint64, api.ShardReplicationState) error) *MockFSMUpdater_ReplicationUpdateReplicaOpStatus_Call { + _c.Call.Return(run) + return _c +} + +// SyncShard provides a mock function with given fields: ctx, collection, shard, nodeId +func (_m *MockFSMUpdater) SyncShard(ctx context.Context, collection string, shard string, nodeId string) (uint64, error) { + ret := _m.Called(ctx, collection, shard, nodeId) + + if len(ret) == 0 { + panic("no return value specified for SyncShard") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, string) (uint64, error)); ok { + return rf(ctx, collection, shard, nodeId) + } + if rf, ok := ret.Get(0).(func(context.Context, string, string, string) uint64); ok { + r0 = rf(ctx, collection, shard, nodeId) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context, string, string, string) error); ok { + r1 = rf(ctx, collection, shard, nodeId) + } else { + 
r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockFSMUpdater_SyncShard_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SyncShard' +type MockFSMUpdater_SyncShard_Call struct { + *mock.Call +} + +// SyncShard is a helper method to define mock.On call +// - ctx context.Context +// - collection string +// - shard string +// - nodeId string +func (_e *MockFSMUpdater_Expecter) SyncShard(ctx interface{}, collection interface{}, shard interface{}, nodeId interface{}) *MockFSMUpdater_SyncShard_Call { + return &MockFSMUpdater_SyncShard_Call{Call: _e.mock.On("SyncShard", ctx, collection, shard, nodeId)} +} + +func (_c *MockFSMUpdater_SyncShard_Call) Run(run func(ctx context.Context, collection string, shard string, nodeId string)) *MockFSMUpdater_SyncShard_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].(string), args[3].(string)) + }) + return _c +} + +func (_c *MockFSMUpdater_SyncShard_Call) Return(_a0 uint64, _a1 error) *MockFSMUpdater_SyncShard_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockFSMUpdater_SyncShard_Call) RunAndReturn(run func(context.Context, string, string, string) (uint64, error)) *MockFSMUpdater_SyncShard_Call { + _c.Call.Return(run) + return _c +} + +// UpdateTenants provides a mock function with given fields: ctx, class, req +func (_m *MockFSMUpdater) UpdateTenants(ctx context.Context, class string, req *api.UpdateTenantsRequest) (uint64, error) { + ret := _m.Called(ctx, class, req) + + if len(ret) == 0 { + panic("no return value specified for UpdateTenants") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, *api.UpdateTenantsRequest) (uint64, error)); ok { + return rf(ctx, class, req) + } + if rf, ok := ret.Get(0).(func(context.Context, string, *api.UpdateTenantsRequest) uint64); ok { + r0 = rf(ctx, class, req) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := 
ret.Get(1).(func(context.Context, string, *api.UpdateTenantsRequest) error); ok { + r1 = rf(ctx, class, req) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockFSMUpdater_UpdateTenants_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateTenants' +type MockFSMUpdater_UpdateTenants_Call struct { + *mock.Call +} + +// UpdateTenants is a helper method to define mock.On call +// - ctx context.Context +// - class string +// - req *api.UpdateTenantsRequest +func (_e *MockFSMUpdater_Expecter) UpdateTenants(ctx interface{}, class interface{}, req interface{}) *MockFSMUpdater_UpdateTenants_Call { + return &MockFSMUpdater_UpdateTenants_Call{Call: _e.mock.On("UpdateTenants", ctx, class, req)} +} + +func (_c *MockFSMUpdater_UpdateTenants_Call) Run(run func(ctx context.Context, class string, req *api.UpdateTenantsRequest)) *MockFSMUpdater_UpdateTenants_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].(*api.UpdateTenantsRequest)) + }) + return _c +} + +func (_c *MockFSMUpdater_UpdateTenants_Call) Return(_a0 uint64, _a1 error) *MockFSMUpdater_UpdateTenants_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockFSMUpdater_UpdateTenants_Call) RunAndReturn(run func(context.Context, string, *api.UpdateTenantsRequest) (uint64, error)) *MockFSMUpdater_UpdateTenants_Call { + _c.Call.Return(run) + return _c +} + +// WaitForUpdate provides a mock function with given fields: ctx, schemaVersion +func (_m *MockFSMUpdater) WaitForUpdate(ctx context.Context, schemaVersion uint64) error { + ret := _m.Called(ctx, schemaVersion) + + if len(ret) == 0 { + panic("no return value specified for WaitForUpdate") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, uint64) error); ok { + r0 = rf(ctx, schemaVersion) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockFSMUpdater_WaitForUpdate_Call is a *mock.Call that shadows Run/Return methods with 
type explicit version for method 'WaitForUpdate' +type MockFSMUpdater_WaitForUpdate_Call struct { + *mock.Call +} + +// WaitForUpdate is a helper method to define mock.On call +// - ctx context.Context +// - schemaVersion uint64 +func (_e *MockFSMUpdater_Expecter) WaitForUpdate(ctx interface{}, schemaVersion interface{}) *MockFSMUpdater_WaitForUpdate_Call { + return &MockFSMUpdater_WaitForUpdate_Call{Call: _e.mock.On("WaitForUpdate", ctx, schemaVersion)} +} + +func (_c *MockFSMUpdater_WaitForUpdate_Call) Run(run func(ctx context.Context, schemaVersion uint64)) *MockFSMUpdater_WaitForUpdate_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64)) + }) + return _c +} + +func (_c *MockFSMUpdater_WaitForUpdate_Call) Return(_a0 error) *MockFSMUpdater_WaitForUpdate_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockFSMUpdater_WaitForUpdate_Call) RunAndReturn(run func(context.Context, uint64) error) *MockFSMUpdater_WaitForUpdate_Call { + _c.Call.Return(run) + return _c +} + +// NewMockFSMUpdater creates a new instance of MockFSMUpdater. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewMockFSMUpdater(t interface { + mock.TestingT + Cleanup(func()) +}) *MockFSMUpdater { + mock := &MockFSMUpdater{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/replication/types/mock_manager.go b/platform/dbops/binaries/weaviate-src/cluster/replication/types/mock_manager.go new file mode 100644 index 0000000000000000000000000000000000000000..fc19a4b24a141f92e1ad9a4b40a3ec60c853f63d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/replication/types/mock_manager.go @@ -0,0 +1,981 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by mockery v2.53.2. DO NOT EDIT. + +package types + +import ( + context "context" + + api "github.com/weaviate/weaviate/cluster/proto/api" + + mock "github.com/stretchr/testify/mock" + + strfmt "github.com/go-openapi/strfmt" +) + +// MockManager is an autogenerated mock type for the Manager type +type MockManager struct { + mock.Mock +} + +type MockManager_Expecter struct { + mock *mock.Mock +} + +func (_m *MockManager) EXPECT() *MockManager_Expecter { + return &MockManager_Expecter{mock: &_m.Mock} +} + +// CancelReplication provides a mock function with given fields: ctx, uuid +func (_m *MockManager) CancelReplication(ctx context.Context, uuid strfmt.UUID) error { + ret := _m.Called(ctx, uuid) + + if len(ret) == 0 { + panic("no return value specified for CancelReplication") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, strfmt.UUID) error); ok { + r0 = rf(ctx, uuid) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockManager_CancelReplication_Call is a *mock.Call that shadows Run/Return methods with type explicit version for 
method 'CancelReplication' +type MockManager_CancelReplication_Call struct { + *mock.Call +} + +// CancelReplication is a helper method to define mock.On call +// - ctx context.Context +// - uuid strfmt.UUID +func (_e *MockManager_Expecter) CancelReplication(ctx interface{}, uuid interface{}) *MockManager_CancelReplication_Call { + return &MockManager_CancelReplication_Call{Call: _e.mock.On("CancelReplication", ctx, uuid)} +} + +func (_c *MockManager_CancelReplication_Call) Run(run func(ctx context.Context, uuid strfmt.UUID)) *MockManager_CancelReplication_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(strfmt.UUID)) + }) + return _c +} + +func (_c *MockManager_CancelReplication_Call) Return(_a0 error) *MockManager_CancelReplication_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockManager_CancelReplication_Call) RunAndReturn(run func(context.Context, strfmt.UUID) error) *MockManager_CancelReplication_Call { + _c.Call.Return(run) + return _c +} + +// DeleteAllReplications provides a mock function with given fields: ctx +func (_m *MockManager) DeleteAllReplications(ctx context.Context) error { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for DeleteAllReplications") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockManager_DeleteAllReplications_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteAllReplications' +type MockManager_DeleteAllReplications_Call struct { + *mock.Call +} + +// DeleteAllReplications is a helper method to define mock.On call +// - ctx context.Context +func (_e *MockManager_Expecter) DeleteAllReplications(ctx interface{}) *MockManager_DeleteAllReplications_Call { + return &MockManager_DeleteAllReplications_Call{Call: _e.mock.On("DeleteAllReplications", ctx)} +} + +func (_c 
*MockManager_DeleteAllReplications_Call) Run(run func(ctx context.Context)) *MockManager_DeleteAllReplications_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *MockManager_DeleteAllReplications_Call) Return(_a0 error) *MockManager_DeleteAllReplications_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockManager_DeleteAllReplications_Call) RunAndReturn(run func(context.Context) error) *MockManager_DeleteAllReplications_Call { + _c.Call.Return(run) + return _c +} + +// DeleteReplication provides a mock function with given fields: ctx, uuid +func (_m *MockManager) DeleteReplication(ctx context.Context, uuid strfmt.UUID) error { + ret := _m.Called(ctx, uuid) + + if len(ret) == 0 { + panic("no return value specified for DeleteReplication") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, strfmt.UUID) error); ok { + r0 = rf(ctx, uuid) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockManager_DeleteReplication_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteReplication' +type MockManager_DeleteReplication_Call struct { + *mock.Call +} + +// DeleteReplication is a helper method to define mock.On call +// - ctx context.Context +// - uuid strfmt.UUID +func (_e *MockManager_Expecter) DeleteReplication(ctx interface{}, uuid interface{}) *MockManager_DeleteReplication_Call { + return &MockManager_DeleteReplication_Call{Call: _e.mock.On("DeleteReplication", ctx, uuid)} +} + +func (_c *MockManager_DeleteReplication_Call) Run(run func(ctx context.Context, uuid strfmt.UUID)) *MockManager_DeleteReplication_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(strfmt.UUID)) + }) + return _c +} + +func (_c *MockManager_DeleteReplication_Call) Return(_a0 error) *MockManager_DeleteReplication_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockManager_DeleteReplication_Call) 
RunAndReturn(run func(context.Context, strfmt.UUID) error) *MockManager_DeleteReplication_Call { + _c.Call.Return(run) + return _c +} + +// DeleteReplicationsByCollection provides a mock function with given fields: ctx, collection +func (_m *MockManager) DeleteReplicationsByCollection(ctx context.Context, collection string) error { + ret := _m.Called(ctx, collection) + + if len(ret) == 0 { + panic("no return value specified for DeleteReplicationsByCollection") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { + r0 = rf(ctx, collection) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockManager_DeleteReplicationsByCollection_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteReplicationsByCollection' +type MockManager_DeleteReplicationsByCollection_Call struct { + *mock.Call +} + +// DeleteReplicationsByCollection is a helper method to define mock.On call +// - ctx context.Context +// - collection string +func (_e *MockManager_Expecter) DeleteReplicationsByCollection(ctx interface{}, collection interface{}) *MockManager_DeleteReplicationsByCollection_Call { + return &MockManager_DeleteReplicationsByCollection_Call{Call: _e.mock.On("DeleteReplicationsByCollection", ctx, collection)} +} + +func (_c *MockManager_DeleteReplicationsByCollection_Call) Run(run func(ctx context.Context, collection string)) *MockManager_DeleteReplicationsByCollection_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *MockManager_DeleteReplicationsByCollection_Call) Return(_a0 error) *MockManager_DeleteReplicationsByCollection_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockManager_DeleteReplicationsByCollection_Call) RunAndReturn(run func(context.Context, string) error) *MockManager_DeleteReplicationsByCollection_Call { + _c.Call.Return(run) + return _c +} + +// DeleteReplicationsByTenants provides a 
mock function with given fields: ctx, collection, tenants +func (_m *MockManager) DeleteReplicationsByTenants(ctx context.Context, collection string, tenants []string) error { + ret := _m.Called(ctx, collection, tenants) + + if len(ret) == 0 { + panic("no return value specified for DeleteReplicationsByTenants") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, []string) error); ok { + r0 = rf(ctx, collection, tenants) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockManager_DeleteReplicationsByTenants_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteReplicationsByTenants' +type MockManager_DeleteReplicationsByTenants_Call struct { + *mock.Call +} + +// DeleteReplicationsByTenants is a helper method to define mock.On call +// - ctx context.Context +// - collection string +// - tenants []string +func (_e *MockManager_Expecter) DeleteReplicationsByTenants(ctx interface{}, collection interface{}, tenants interface{}) *MockManager_DeleteReplicationsByTenants_Call { + return &MockManager_DeleteReplicationsByTenants_Call{Call: _e.mock.On("DeleteReplicationsByTenants", ctx, collection, tenants)} +} + +func (_c *MockManager_DeleteReplicationsByTenants_Call) Run(run func(ctx context.Context, collection string, tenants []string)) *MockManager_DeleteReplicationsByTenants_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].([]string)) + }) + return _c +} + +func (_c *MockManager_DeleteReplicationsByTenants_Call) Return(_a0 error) *MockManager_DeleteReplicationsByTenants_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockManager_DeleteReplicationsByTenants_Call) RunAndReturn(run func(context.Context, string, []string) error) *MockManager_DeleteReplicationsByTenants_Call { + _c.Call.Return(run) + return _c +} + +// ForceDeleteAllReplications provides a mock function with given fields: ctx +func (_m *MockManager) 
ForceDeleteAllReplications(ctx context.Context) error { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for ForceDeleteAllReplications") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockManager_ForceDeleteAllReplications_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ForceDeleteAllReplications' +type MockManager_ForceDeleteAllReplications_Call struct { + *mock.Call +} + +// ForceDeleteAllReplications is a helper method to define mock.On call +// - ctx context.Context +func (_e *MockManager_Expecter) ForceDeleteAllReplications(ctx interface{}) *MockManager_ForceDeleteAllReplications_Call { + return &MockManager_ForceDeleteAllReplications_Call{Call: _e.mock.On("ForceDeleteAllReplications", ctx)} +} + +func (_c *MockManager_ForceDeleteAllReplications_Call) Run(run func(ctx context.Context)) *MockManager_ForceDeleteAllReplications_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *MockManager_ForceDeleteAllReplications_Call) Return(_a0 error) *MockManager_ForceDeleteAllReplications_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockManager_ForceDeleteAllReplications_Call) RunAndReturn(run func(context.Context) error) *MockManager_ForceDeleteAllReplications_Call { + _c.Call.Return(run) + return _c +} + +// ForceDeleteReplicationByUuid provides a mock function with given fields: ctx, uuid +func (_m *MockManager) ForceDeleteReplicationByUuid(ctx context.Context, uuid strfmt.UUID) error { + ret := _m.Called(ctx, uuid) + + if len(ret) == 0 { + panic("no return value specified for ForceDeleteReplicationByUuid") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, strfmt.UUID) error); ok { + r0 = rf(ctx, uuid) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// 
MockManager_ForceDeleteReplicationByUuid_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ForceDeleteReplicationByUuid' +type MockManager_ForceDeleteReplicationByUuid_Call struct { + *mock.Call +} + +// ForceDeleteReplicationByUuid is a helper method to define mock.On call +// - ctx context.Context +// - uuid strfmt.UUID +func (_e *MockManager_Expecter) ForceDeleteReplicationByUuid(ctx interface{}, uuid interface{}) *MockManager_ForceDeleteReplicationByUuid_Call { + return &MockManager_ForceDeleteReplicationByUuid_Call{Call: _e.mock.On("ForceDeleteReplicationByUuid", ctx, uuid)} +} + +func (_c *MockManager_ForceDeleteReplicationByUuid_Call) Run(run func(ctx context.Context, uuid strfmt.UUID)) *MockManager_ForceDeleteReplicationByUuid_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(strfmt.UUID)) + }) + return _c +} + +func (_c *MockManager_ForceDeleteReplicationByUuid_Call) Return(_a0 error) *MockManager_ForceDeleteReplicationByUuid_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockManager_ForceDeleteReplicationByUuid_Call) RunAndReturn(run func(context.Context, strfmt.UUID) error) *MockManager_ForceDeleteReplicationByUuid_Call { + _c.Call.Return(run) + return _c +} + +// ForceDeleteReplicationsByCollection provides a mock function with given fields: ctx, collection +func (_m *MockManager) ForceDeleteReplicationsByCollection(ctx context.Context, collection string) error { + ret := _m.Called(ctx, collection) + + if len(ret) == 0 { + panic("no return value specified for ForceDeleteReplicationsByCollection") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { + r0 = rf(ctx, collection) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockManager_ForceDeleteReplicationsByCollection_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ForceDeleteReplicationsByCollection' +type 
MockManager_ForceDeleteReplicationsByCollection_Call struct { + *mock.Call +} + +// ForceDeleteReplicationsByCollection is a helper method to define mock.On call +// - ctx context.Context +// - collection string +func (_e *MockManager_Expecter) ForceDeleteReplicationsByCollection(ctx interface{}, collection interface{}) *MockManager_ForceDeleteReplicationsByCollection_Call { + return &MockManager_ForceDeleteReplicationsByCollection_Call{Call: _e.mock.On("ForceDeleteReplicationsByCollection", ctx, collection)} +} + +func (_c *MockManager_ForceDeleteReplicationsByCollection_Call) Run(run func(ctx context.Context, collection string)) *MockManager_ForceDeleteReplicationsByCollection_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *MockManager_ForceDeleteReplicationsByCollection_Call) Return(_a0 error) *MockManager_ForceDeleteReplicationsByCollection_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockManager_ForceDeleteReplicationsByCollection_Call) RunAndReturn(run func(context.Context, string) error) *MockManager_ForceDeleteReplicationsByCollection_Call { + _c.Call.Return(run) + return _c +} + +// ForceDeleteReplicationsByCollectionAndShard provides a mock function with given fields: ctx, collection, shard +func (_m *MockManager) ForceDeleteReplicationsByCollectionAndShard(ctx context.Context, collection string, shard string) error { + ret := _m.Called(ctx, collection, shard) + + if len(ret) == 0 { + panic("no return value specified for ForceDeleteReplicationsByCollectionAndShard") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, string) error); ok { + r0 = rf(ctx, collection, shard) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockManager_ForceDeleteReplicationsByCollectionAndShard_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ForceDeleteReplicationsByCollectionAndShard' +type 
MockManager_ForceDeleteReplicationsByCollectionAndShard_Call struct { + *mock.Call +} + +// ForceDeleteReplicationsByCollectionAndShard is a helper method to define mock.On call +// - ctx context.Context +// - collection string +// - shard string +func (_e *MockManager_Expecter) ForceDeleteReplicationsByCollectionAndShard(ctx interface{}, collection interface{}, shard interface{}) *MockManager_ForceDeleteReplicationsByCollectionAndShard_Call { + return &MockManager_ForceDeleteReplicationsByCollectionAndShard_Call{Call: _e.mock.On("ForceDeleteReplicationsByCollectionAndShard", ctx, collection, shard)} +} + +func (_c *MockManager_ForceDeleteReplicationsByCollectionAndShard_Call) Run(run func(ctx context.Context, collection string, shard string)) *MockManager_ForceDeleteReplicationsByCollectionAndShard_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].(string)) + }) + return _c +} + +func (_c *MockManager_ForceDeleteReplicationsByCollectionAndShard_Call) Return(_a0 error) *MockManager_ForceDeleteReplicationsByCollectionAndShard_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockManager_ForceDeleteReplicationsByCollectionAndShard_Call) RunAndReturn(run func(context.Context, string, string) error) *MockManager_ForceDeleteReplicationsByCollectionAndShard_Call { + _c.Call.Return(run) + return _c +} + +// ForceDeleteReplicationsByTargetNode provides a mock function with given fields: ctx, node +func (_m *MockManager) ForceDeleteReplicationsByTargetNode(ctx context.Context, node string) error { + ret := _m.Called(ctx, node) + + if len(ret) == 0 { + panic("no return value specified for ForceDeleteReplicationsByTargetNode") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { + r0 = rf(ctx, node) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockManager_ForceDeleteReplicationsByTargetNode_Call is a *mock.Call that shadows Run/Return methods with type explicit 
version for method 'ForceDeleteReplicationsByTargetNode' +type MockManager_ForceDeleteReplicationsByTargetNode_Call struct { + *mock.Call +} + +// ForceDeleteReplicationsByTargetNode is a helper method to define mock.On call +// - ctx context.Context +// - node string +func (_e *MockManager_Expecter) ForceDeleteReplicationsByTargetNode(ctx interface{}, node interface{}) *MockManager_ForceDeleteReplicationsByTargetNode_Call { + return &MockManager_ForceDeleteReplicationsByTargetNode_Call{Call: _e.mock.On("ForceDeleteReplicationsByTargetNode", ctx, node)} +} + +func (_c *MockManager_ForceDeleteReplicationsByTargetNode_Call) Run(run func(ctx context.Context, node string)) *MockManager_ForceDeleteReplicationsByTargetNode_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *MockManager_ForceDeleteReplicationsByTargetNode_Call) Return(_a0 error) *MockManager_ForceDeleteReplicationsByTargetNode_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockManager_ForceDeleteReplicationsByTargetNode_Call) RunAndReturn(run func(context.Context, string) error) *MockManager_ForceDeleteReplicationsByTargetNode_Call { + _c.Call.Return(run) + return _c +} + +// GetAllReplicationDetails provides a mock function with given fields: ctx +func (_m *MockManager) GetAllReplicationDetails(ctx context.Context) ([]api.ReplicationDetailsResponse, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GetAllReplicationDetails") + } + + var r0 []api.ReplicationDetailsResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) ([]api.ReplicationDetailsResponse, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) []api.ReplicationDetailsResponse); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]api.ReplicationDetailsResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = 
rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockManager_GetAllReplicationDetails_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetAllReplicationDetails' +type MockManager_GetAllReplicationDetails_Call struct { + *mock.Call +} + +// GetAllReplicationDetails is a helper method to define mock.On call +// - ctx context.Context +func (_e *MockManager_Expecter) GetAllReplicationDetails(ctx interface{}) *MockManager_GetAllReplicationDetails_Call { + return &MockManager_GetAllReplicationDetails_Call{Call: _e.mock.On("GetAllReplicationDetails", ctx)} +} + +func (_c *MockManager_GetAllReplicationDetails_Call) Run(run func(ctx context.Context)) *MockManager_GetAllReplicationDetails_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *MockManager_GetAllReplicationDetails_Call) Return(_a0 []api.ReplicationDetailsResponse, _a1 error) *MockManager_GetAllReplicationDetails_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockManager_GetAllReplicationDetails_Call) RunAndReturn(run func(context.Context) ([]api.ReplicationDetailsResponse, error)) *MockManager_GetAllReplicationDetails_Call { + _c.Call.Return(run) + return _c +} + +// GetReplicationDetailsByCollection provides a mock function with given fields: ctx, collection +func (_m *MockManager) GetReplicationDetailsByCollection(ctx context.Context, collection string) ([]api.ReplicationDetailsResponse, error) { + ret := _m.Called(ctx, collection) + + if len(ret) == 0 { + panic("no return value specified for GetReplicationDetailsByCollection") + } + + var r0 []api.ReplicationDetailsResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) ([]api.ReplicationDetailsResponse, error)); ok { + return rf(ctx, collection) + } + if rf, ok := ret.Get(0).(func(context.Context, string) []api.ReplicationDetailsResponse); ok { + r0 = rf(ctx, collection) + } else { + if 
ret.Get(0) != nil { + r0 = ret.Get(0).([]api.ReplicationDetailsResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, collection) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockManager_GetReplicationDetailsByCollection_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetReplicationDetailsByCollection' +type MockManager_GetReplicationDetailsByCollection_Call struct { + *mock.Call +} + +// GetReplicationDetailsByCollection is a helper method to define mock.On call +// - ctx context.Context +// - collection string +func (_e *MockManager_Expecter) GetReplicationDetailsByCollection(ctx interface{}, collection interface{}) *MockManager_GetReplicationDetailsByCollection_Call { + return &MockManager_GetReplicationDetailsByCollection_Call{Call: _e.mock.On("GetReplicationDetailsByCollection", ctx, collection)} +} + +func (_c *MockManager_GetReplicationDetailsByCollection_Call) Run(run func(ctx context.Context, collection string)) *MockManager_GetReplicationDetailsByCollection_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *MockManager_GetReplicationDetailsByCollection_Call) Return(_a0 []api.ReplicationDetailsResponse, _a1 error) *MockManager_GetReplicationDetailsByCollection_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockManager_GetReplicationDetailsByCollection_Call) RunAndReturn(run func(context.Context, string) ([]api.ReplicationDetailsResponse, error)) *MockManager_GetReplicationDetailsByCollection_Call { + _c.Call.Return(run) + return _c +} + +// GetReplicationDetailsByCollectionAndShard provides a mock function with given fields: ctx, collection, shard +func (_m *MockManager) GetReplicationDetailsByCollectionAndShard(ctx context.Context, collection string, shard string) ([]api.ReplicationDetailsResponse, error) { + ret := _m.Called(ctx, collection, shard) + 
+ if len(ret) == 0 { + panic("no return value specified for GetReplicationDetailsByCollectionAndShard") + } + + var r0 []api.ReplicationDetailsResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, string) ([]api.ReplicationDetailsResponse, error)); ok { + return rf(ctx, collection, shard) + } + if rf, ok := ret.Get(0).(func(context.Context, string, string) []api.ReplicationDetailsResponse); ok { + r0 = rf(ctx, collection, shard) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]api.ReplicationDetailsResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok { + r1 = rf(ctx, collection, shard) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockManager_GetReplicationDetailsByCollectionAndShard_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetReplicationDetailsByCollectionAndShard' +type MockManager_GetReplicationDetailsByCollectionAndShard_Call struct { + *mock.Call +} + +// GetReplicationDetailsByCollectionAndShard is a helper method to define mock.On call +// - ctx context.Context +// - collection string +// - shard string +func (_e *MockManager_Expecter) GetReplicationDetailsByCollectionAndShard(ctx interface{}, collection interface{}, shard interface{}) *MockManager_GetReplicationDetailsByCollectionAndShard_Call { + return &MockManager_GetReplicationDetailsByCollectionAndShard_Call{Call: _e.mock.On("GetReplicationDetailsByCollectionAndShard", ctx, collection, shard)} +} + +func (_c *MockManager_GetReplicationDetailsByCollectionAndShard_Call) Run(run func(ctx context.Context, collection string, shard string)) *MockManager_GetReplicationDetailsByCollectionAndShard_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].(string)) + }) + return _c +} + +func (_c *MockManager_GetReplicationDetailsByCollectionAndShard_Call) Return(_a0 []api.ReplicationDetailsResponse, _a1 error) 
*MockManager_GetReplicationDetailsByCollectionAndShard_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockManager_GetReplicationDetailsByCollectionAndShard_Call) RunAndReturn(run func(context.Context, string, string) ([]api.ReplicationDetailsResponse, error)) *MockManager_GetReplicationDetailsByCollectionAndShard_Call { + _c.Call.Return(run) + return _c +} + +// GetReplicationDetailsByReplicationId provides a mock function with given fields: ctx, uuid +func (_m *MockManager) GetReplicationDetailsByReplicationId(ctx context.Context, uuid strfmt.UUID) (api.ReplicationDetailsResponse, error) { + ret := _m.Called(ctx, uuid) + + if len(ret) == 0 { + panic("no return value specified for GetReplicationDetailsByReplicationId") + } + + var r0 api.ReplicationDetailsResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, strfmt.UUID) (api.ReplicationDetailsResponse, error)); ok { + return rf(ctx, uuid) + } + if rf, ok := ret.Get(0).(func(context.Context, strfmt.UUID) api.ReplicationDetailsResponse); ok { + r0 = rf(ctx, uuid) + } else { + r0 = ret.Get(0).(api.ReplicationDetailsResponse) + } + + if rf, ok := ret.Get(1).(func(context.Context, strfmt.UUID) error); ok { + r1 = rf(ctx, uuid) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockManager_GetReplicationDetailsByReplicationId_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetReplicationDetailsByReplicationId' +type MockManager_GetReplicationDetailsByReplicationId_Call struct { + *mock.Call +} + +// GetReplicationDetailsByReplicationId is a helper method to define mock.On call +// - ctx context.Context +// - uuid strfmt.UUID +func (_e *MockManager_Expecter) GetReplicationDetailsByReplicationId(ctx interface{}, uuid interface{}) *MockManager_GetReplicationDetailsByReplicationId_Call { + return &MockManager_GetReplicationDetailsByReplicationId_Call{Call: _e.mock.On("GetReplicationDetailsByReplicationId", ctx, uuid)} +} + +func (_c 
*MockManager_GetReplicationDetailsByReplicationId_Call) Run(run func(ctx context.Context, uuid strfmt.UUID)) *MockManager_GetReplicationDetailsByReplicationId_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(strfmt.UUID)) + }) + return _c +} + +func (_c *MockManager_GetReplicationDetailsByReplicationId_Call) Return(_a0 api.ReplicationDetailsResponse, _a1 error) *MockManager_GetReplicationDetailsByReplicationId_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockManager_GetReplicationDetailsByReplicationId_Call) RunAndReturn(run func(context.Context, strfmt.UUID) (api.ReplicationDetailsResponse, error)) *MockManager_GetReplicationDetailsByReplicationId_Call { + _c.Call.Return(run) + return _c +} + +// GetReplicationDetailsByTargetNode provides a mock function with given fields: ctx, node +func (_m *MockManager) GetReplicationDetailsByTargetNode(ctx context.Context, node string) ([]api.ReplicationDetailsResponse, error) { + ret := _m.Called(ctx, node) + + if len(ret) == 0 { + panic("no return value specified for GetReplicationDetailsByTargetNode") + } + + var r0 []api.ReplicationDetailsResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) ([]api.ReplicationDetailsResponse, error)); ok { + return rf(ctx, node) + } + if rf, ok := ret.Get(0).(func(context.Context, string) []api.ReplicationDetailsResponse); ok { + r0 = rf(ctx, node) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]api.ReplicationDetailsResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, node) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockManager_GetReplicationDetailsByTargetNode_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetReplicationDetailsByTargetNode' +type MockManager_GetReplicationDetailsByTargetNode_Call struct { + *mock.Call +} + +// GetReplicationDetailsByTargetNode is a helper method 
to define mock.On call +// - ctx context.Context +// - node string +func (_e *MockManager_Expecter) GetReplicationDetailsByTargetNode(ctx interface{}, node interface{}) *MockManager_GetReplicationDetailsByTargetNode_Call { + return &MockManager_GetReplicationDetailsByTargetNode_Call{Call: _e.mock.On("GetReplicationDetailsByTargetNode", ctx, node)} +} + +func (_c *MockManager_GetReplicationDetailsByTargetNode_Call) Run(run func(ctx context.Context, node string)) *MockManager_GetReplicationDetailsByTargetNode_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *MockManager_GetReplicationDetailsByTargetNode_Call) Return(_a0 []api.ReplicationDetailsResponse, _a1 error) *MockManager_GetReplicationDetailsByTargetNode_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockManager_GetReplicationDetailsByTargetNode_Call) RunAndReturn(run func(context.Context, string) ([]api.ReplicationDetailsResponse, error)) *MockManager_GetReplicationDetailsByTargetNode_Call { + _c.Call.Return(run) + return _c +} + +// QueryShardingStateByCollection provides a mock function with given fields: ctx, collection +func (_m *MockManager) QueryShardingStateByCollection(ctx context.Context, collection string) (api.ShardingState, error) { + ret := _m.Called(ctx, collection) + + if len(ret) == 0 { + panic("no return value specified for QueryShardingStateByCollection") + } + + var r0 api.ShardingState + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) (api.ShardingState, error)); ok { + return rf(ctx, collection) + } + if rf, ok := ret.Get(0).(func(context.Context, string) api.ShardingState); ok { + r0 = rf(ctx, collection) + } else { + r0 = ret.Get(0).(api.ShardingState) + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, collection) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockManager_QueryShardingStateByCollection_Call is a 
*mock.Call that shadows Run/Return methods with type explicit version for method 'QueryShardingStateByCollection' +type MockManager_QueryShardingStateByCollection_Call struct { + *mock.Call +} + +// QueryShardingStateByCollection is a helper method to define mock.On call +// - ctx context.Context +// - collection string +func (_e *MockManager_Expecter) QueryShardingStateByCollection(ctx interface{}, collection interface{}) *MockManager_QueryShardingStateByCollection_Call { + return &MockManager_QueryShardingStateByCollection_Call{Call: _e.mock.On("QueryShardingStateByCollection", ctx, collection)} +} + +func (_c *MockManager_QueryShardingStateByCollection_Call) Run(run func(ctx context.Context, collection string)) *MockManager_QueryShardingStateByCollection_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *MockManager_QueryShardingStateByCollection_Call) Return(_a0 api.ShardingState, _a1 error) *MockManager_QueryShardingStateByCollection_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockManager_QueryShardingStateByCollection_Call) RunAndReturn(run func(context.Context, string) (api.ShardingState, error)) *MockManager_QueryShardingStateByCollection_Call { + _c.Call.Return(run) + return _c +} + +// QueryShardingStateByCollectionAndShard provides a mock function with given fields: ctx, collection, shard +func (_m *MockManager) QueryShardingStateByCollectionAndShard(ctx context.Context, collection string, shard string) (api.ShardingState, error) { + ret := _m.Called(ctx, collection, shard) + + if len(ret) == 0 { + panic("no return value specified for QueryShardingStateByCollectionAndShard") + } + + var r0 api.ShardingState + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, string) (api.ShardingState, error)); ok { + return rf(ctx, collection, shard) + } + if rf, ok := ret.Get(0).(func(context.Context, string, string) api.ShardingState); ok { + r0 = 
rf(ctx, collection, shard) + } else { + r0 = ret.Get(0).(api.ShardingState) + } + + if rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok { + r1 = rf(ctx, collection, shard) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockManager_QueryShardingStateByCollectionAndShard_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'QueryShardingStateByCollectionAndShard' +type MockManager_QueryShardingStateByCollectionAndShard_Call struct { + *mock.Call +} + +// QueryShardingStateByCollectionAndShard is a helper method to define mock.On call +// - ctx context.Context +// - collection string +// - shard string +func (_e *MockManager_Expecter) QueryShardingStateByCollectionAndShard(ctx interface{}, collection interface{}, shard interface{}) *MockManager_QueryShardingStateByCollectionAndShard_Call { + return &MockManager_QueryShardingStateByCollectionAndShard_Call{Call: _e.mock.On("QueryShardingStateByCollectionAndShard", ctx, collection, shard)} +} + +func (_c *MockManager_QueryShardingStateByCollectionAndShard_Call) Run(run func(ctx context.Context, collection string, shard string)) *MockManager_QueryShardingStateByCollectionAndShard_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].(string)) + }) + return _c +} + +func (_c *MockManager_QueryShardingStateByCollectionAndShard_Call) Return(_a0 api.ShardingState, _a1 error) *MockManager_QueryShardingStateByCollectionAndShard_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockManager_QueryShardingStateByCollectionAndShard_Call) RunAndReturn(run func(context.Context, string, string) (api.ShardingState, error)) *MockManager_QueryShardingStateByCollectionAndShard_Call { + _c.Call.Return(run) + return _c +} + +// ReplicationReplicateReplica provides a mock function with given fields: ctx, opId, sourceNode, sourceCollection, sourceShard, targetNode, transferType +func (_m *MockManager) 
ReplicationReplicateReplica(ctx context.Context, opId strfmt.UUID, sourceNode string, sourceCollection string, sourceShard string, targetNode string, transferType string) error { + ret := _m.Called(ctx, opId, sourceNode, sourceCollection, sourceShard, targetNode, transferType) + + if len(ret) == 0 { + panic("no return value specified for ReplicationReplicateReplica") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, strfmt.UUID, string, string, string, string, string) error); ok { + r0 = rf(ctx, opId, sourceNode, sourceCollection, sourceShard, targetNode, transferType) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockManager_ReplicationReplicateReplica_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ReplicationReplicateReplica' +type MockManager_ReplicationReplicateReplica_Call struct { + *mock.Call +} + +// ReplicationReplicateReplica is a helper method to define mock.On call +// - ctx context.Context +// - opId strfmt.UUID +// - sourceNode string +// - sourceCollection string +// - sourceShard string +// - targetNode string +// - transferType string +func (_e *MockManager_Expecter) ReplicationReplicateReplica(ctx interface{}, opId interface{}, sourceNode interface{}, sourceCollection interface{}, sourceShard interface{}, targetNode interface{}, transferType interface{}) *MockManager_ReplicationReplicateReplica_Call { + return &MockManager_ReplicationReplicateReplica_Call{Call: _e.mock.On("ReplicationReplicateReplica", ctx, opId, sourceNode, sourceCollection, sourceShard, targetNode, transferType)} +} + +func (_c *MockManager_ReplicationReplicateReplica_Call) Run(run func(ctx context.Context, opId strfmt.UUID, sourceNode string, sourceCollection string, sourceShard string, targetNode string, transferType string)) *MockManager_ReplicationReplicateReplica_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(strfmt.UUID), args[2].(string), 
args[3].(string), args[4].(string), args[5].(string), args[6].(string)) + }) + return _c +} + +func (_c *MockManager_ReplicationReplicateReplica_Call) Return(_a0 error) *MockManager_ReplicationReplicateReplica_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockManager_ReplicationReplicateReplica_Call) RunAndReturn(run func(context.Context, strfmt.UUID, string, string, string, string, string) error) *MockManager_ReplicationReplicateReplica_Call { + _c.Call.Return(run) + return _c +} + +// NewMockManager creates a new instance of MockManager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMockManager(t interface { + mock.TestingT + Cleanup(func()) +}) *MockManager { + mock := &MockManager{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/replication/types/mock_replica_copier.go b/platform/dbops/binaries/weaviate-src/cluster/replication/types/mock_replica_copier.go new file mode 100644 index 0000000000000000000000000000000000000000..a2dc8608075399d0d79d5c16f229507822d4434e --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/replication/types/mock_replica_copier.go @@ -0,0 +1,400 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by mockery v2.53.2. DO NOT EDIT. 
+ +package types + +import ( + context "context" + + additional "github.com/weaviate/weaviate/entities/additional" + + mock "github.com/stretchr/testify/mock" + + models "github.com/weaviate/weaviate/entities/models" +) + +// MockReplicaCopier is an autogenerated mock type for the ReplicaCopier type +type MockReplicaCopier struct { + mock.Mock +} + +type MockReplicaCopier_Expecter struct { + mock *mock.Mock +} + +func (_m *MockReplicaCopier) EXPECT() *MockReplicaCopier_Expecter { + return &MockReplicaCopier_Expecter{mock: &_m.Mock} +} + +// AddAsyncReplicationTargetNode provides a mock function with given fields: ctx, targetNodeOverride, schemaVersion +func (_m *MockReplicaCopier) AddAsyncReplicationTargetNode(ctx context.Context, targetNodeOverride additional.AsyncReplicationTargetNodeOverride, schemaVersion uint64) error { + ret := _m.Called(ctx, targetNodeOverride, schemaVersion) + + if len(ret) == 0 { + panic("no return value specified for AddAsyncReplicationTargetNode") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, additional.AsyncReplicationTargetNodeOverride, uint64) error); ok { + r0 = rf(ctx, targetNodeOverride, schemaVersion) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockReplicaCopier_AddAsyncReplicationTargetNode_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AddAsyncReplicationTargetNode' +type MockReplicaCopier_AddAsyncReplicationTargetNode_Call struct { + *mock.Call +} + +// AddAsyncReplicationTargetNode is a helper method to define mock.On call +// - ctx context.Context +// - targetNodeOverride additional.AsyncReplicationTargetNodeOverride +// - schemaVersion uint64 +func (_e *MockReplicaCopier_Expecter) AddAsyncReplicationTargetNode(ctx interface{}, targetNodeOverride interface{}, schemaVersion interface{}) *MockReplicaCopier_AddAsyncReplicationTargetNode_Call { + return &MockReplicaCopier_AddAsyncReplicationTargetNode_Call{Call: 
_e.mock.On("AddAsyncReplicationTargetNode", ctx, targetNodeOverride, schemaVersion)} +} + +func (_c *MockReplicaCopier_AddAsyncReplicationTargetNode_Call) Run(run func(ctx context.Context, targetNodeOverride additional.AsyncReplicationTargetNodeOverride, schemaVersion uint64)) *MockReplicaCopier_AddAsyncReplicationTargetNode_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(additional.AsyncReplicationTargetNodeOverride), args[2].(uint64)) + }) + return _c +} + +func (_c *MockReplicaCopier_AddAsyncReplicationTargetNode_Call) Return(_a0 error) *MockReplicaCopier_AddAsyncReplicationTargetNode_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockReplicaCopier_AddAsyncReplicationTargetNode_Call) RunAndReturn(run func(context.Context, additional.AsyncReplicationTargetNodeOverride, uint64) error) *MockReplicaCopier_AddAsyncReplicationTargetNode_Call { + _c.Call.Return(run) + return _c +} + +// AsyncReplicationStatus provides a mock function with given fields: ctx, srcNodeId, targetNodeId, collectionName, shardName +func (_m *MockReplicaCopier) AsyncReplicationStatus(ctx context.Context, srcNodeId string, targetNodeId string, collectionName string, shardName string) (models.AsyncReplicationStatus, error) { + ret := _m.Called(ctx, srcNodeId, targetNodeId, collectionName, shardName) + + if len(ret) == 0 { + panic("no return value specified for AsyncReplicationStatus") + } + + var r0 models.AsyncReplicationStatus + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, string, string) (models.AsyncReplicationStatus, error)); ok { + return rf(ctx, srcNodeId, targetNodeId, collectionName, shardName) + } + if rf, ok := ret.Get(0).(func(context.Context, string, string, string, string) models.AsyncReplicationStatus); ok { + r0 = rf(ctx, srcNodeId, targetNodeId, collectionName, shardName) + } else { + r0 = ret.Get(0).(models.AsyncReplicationStatus) + } + + if rf, ok := ret.Get(1).(func(context.Context, string, 
string, string, string) error); ok { + r1 = rf(ctx, srcNodeId, targetNodeId, collectionName, shardName) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockReplicaCopier_AsyncReplicationStatus_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AsyncReplicationStatus' +type MockReplicaCopier_AsyncReplicationStatus_Call struct { + *mock.Call +} + +// AsyncReplicationStatus is a helper method to define mock.On call +// - ctx context.Context +// - srcNodeId string +// - targetNodeId string +// - collectionName string +// - shardName string +func (_e *MockReplicaCopier_Expecter) AsyncReplicationStatus(ctx interface{}, srcNodeId interface{}, targetNodeId interface{}, collectionName interface{}, shardName interface{}) *MockReplicaCopier_AsyncReplicationStatus_Call { + return &MockReplicaCopier_AsyncReplicationStatus_Call{Call: _e.mock.On("AsyncReplicationStatus", ctx, srcNodeId, targetNodeId, collectionName, shardName)} +} + +func (_c *MockReplicaCopier_AsyncReplicationStatus_Call) Run(run func(ctx context.Context, srcNodeId string, targetNodeId string, collectionName string, shardName string)) *MockReplicaCopier_AsyncReplicationStatus_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].(string), args[3].(string), args[4].(string)) + }) + return _c +} + +func (_c *MockReplicaCopier_AsyncReplicationStatus_Call) Return(_a0 models.AsyncReplicationStatus, _a1 error) *MockReplicaCopier_AsyncReplicationStatus_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockReplicaCopier_AsyncReplicationStatus_Call) RunAndReturn(run func(context.Context, string, string, string, string) (models.AsyncReplicationStatus, error)) *MockReplicaCopier_AsyncReplicationStatus_Call { + _c.Call.Return(run) + return _c +} + +// CopyReplicaFiles provides a mock function with given fields: ctx, sourceNode, sourceCollection, sourceShard, schemaVersion +func (_m *MockReplicaCopier) 
CopyReplicaFiles(ctx context.Context, sourceNode string, sourceCollection string, sourceShard string, schemaVersion uint64) error { + ret := _m.Called(ctx, sourceNode, sourceCollection, sourceShard, schemaVersion) + + if len(ret) == 0 { + panic("no return value specified for CopyReplicaFiles") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, string, uint64) error); ok { + r0 = rf(ctx, sourceNode, sourceCollection, sourceShard, schemaVersion) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockReplicaCopier_CopyReplicaFiles_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CopyReplicaFiles' +type MockReplicaCopier_CopyReplicaFiles_Call struct { + *mock.Call +} + +// CopyReplicaFiles is a helper method to define mock.On call +// - ctx context.Context +// - sourceNode string +// - sourceCollection string +// - sourceShard string +// - schemaVersion uint64 +func (_e *MockReplicaCopier_Expecter) CopyReplicaFiles(ctx interface{}, sourceNode interface{}, sourceCollection interface{}, sourceShard interface{}, schemaVersion interface{}) *MockReplicaCopier_CopyReplicaFiles_Call { + return &MockReplicaCopier_CopyReplicaFiles_Call{Call: _e.mock.On("CopyReplicaFiles", ctx, sourceNode, sourceCollection, sourceShard, schemaVersion)} +} + +func (_c *MockReplicaCopier_CopyReplicaFiles_Call) Run(run func(ctx context.Context, sourceNode string, sourceCollection string, sourceShard string, schemaVersion uint64)) *MockReplicaCopier_CopyReplicaFiles_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].(string), args[3].(string), args[4].(uint64)) + }) + return _c +} + +func (_c *MockReplicaCopier_CopyReplicaFiles_Call) Return(_a0 error) *MockReplicaCopier_CopyReplicaFiles_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockReplicaCopier_CopyReplicaFiles_Call) RunAndReturn(run func(context.Context, string, string, string, uint64) error) 
*MockReplicaCopier_CopyReplicaFiles_Call { + _c.Call.Return(run) + return _c +} + +// InitAsyncReplicationLocally provides a mock function with given fields: ctx, collectionName, shardName +func (_m *MockReplicaCopier) InitAsyncReplicationLocally(ctx context.Context, collectionName string, shardName string) error { + ret := _m.Called(ctx, collectionName, shardName) + + if len(ret) == 0 { + panic("no return value specified for InitAsyncReplicationLocally") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, string) error); ok { + r0 = rf(ctx, collectionName, shardName) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockReplicaCopier_InitAsyncReplicationLocally_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'InitAsyncReplicationLocally' +type MockReplicaCopier_InitAsyncReplicationLocally_Call struct { + *mock.Call +} + +// InitAsyncReplicationLocally is a helper method to define mock.On call +// - ctx context.Context +// - collectionName string +// - shardName string +func (_e *MockReplicaCopier_Expecter) InitAsyncReplicationLocally(ctx interface{}, collectionName interface{}, shardName interface{}) *MockReplicaCopier_InitAsyncReplicationLocally_Call { + return &MockReplicaCopier_InitAsyncReplicationLocally_Call{Call: _e.mock.On("InitAsyncReplicationLocally", ctx, collectionName, shardName)} +} + +func (_c *MockReplicaCopier_InitAsyncReplicationLocally_Call) Run(run func(ctx context.Context, collectionName string, shardName string)) *MockReplicaCopier_InitAsyncReplicationLocally_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].(string)) + }) + return _c +} + +func (_c *MockReplicaCopier_InitAsyncReplicationLocally_Call) Return(_a0 error) *MockReplicaCopier_InitAsyncReplicationLocally_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockReplicaCopier_InitAsyncReplicationLocally_Call) RunAndReturn(run func(context.Context, 
string, string) error) *MockReplicaCopier_InitAsyncReplicationLocally_Call { + _c.Call.Return(run) + return _c +} + +// LoadLocalShard provides a mock function with given fields: ctx, collectionName, shardName +func (_m *MockReplicaCopier) LoadLocalShard(ctx context.Context, collectionName string, shardName string) error { + ret := _m.Called(ctx, collectionName, shardName) + + if len(ret) == 0 { + panic("no return value specified for LoadLocalShard") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, string) error); ok { + r0 = rf(ctx, collectionName, shardName) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockReplicaCopier_LoadLocalShard_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LoadLocalShard' +type MockReplicaCopier_LoadLocalShard_Call struct { + *mock.Call +} + +// LoadLocalShard is a helper method to define mock.On call +// - ctx context.Context +// - collectionName string +// - shardName string +func (_e *MockReplicaCopier_Expecter) LoadLocalShard(ctx interface{}, collectionName interface{}, shardName interface{}) *MockReplicaCopier_LoadLocalShard_Call { + return &MockReplicaCopier_LoadLocalShard_Call{Call: _e.mock.On("LoadLocalShard", ctx, collectionName, shardName)} +} + +func (_c *MockReplicaCopier_LoadLocalShard_Call) Run(run func(ctx context.Context, collectionName string, shardName string)) *MockReplicaCopier_LoadLocalShard_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].(string)) + }) + return _c +} + +func (_c *MockReplicaCopier_LoadLocalShard_Call) Return(_a0 error) *MockReplicaCopier_LoadLocalShard_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockReplicaCopier_LoadLocalShard_Call) RunAndReturn(run func(context.Context, string, string) error) *MockReplicaCopier_LoadLocalShard_Call { + _c.Call.Return(run) + return _c +} + +// RemoveAsyncReplicationTargetNode provides a mock function with 
given fields: ctx, targetNodeOverride +func (_m *MockReplicaCopier) RemoveAsyncReplicationTargetNode(ctx context.Context, targetNodeOverride additional.AsyncReplicationTargetNodeOverride) error { + ret := _m.Called(ctx, targetNodeOverride) + + if len(ret) == 0 { + panic("no return value specified for RemoveAsyncReplicationTargetNode") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, additional.AsyncReplicationTargetNodeOverride) error); ok { + r0 = rf(ctx, targetNodeOverride) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockReplicaCopier_RemoveAsyncReplicationTargetNode_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RemoveAsyncReplicationTargetNode' +type MockReplicaCopier_RemoveAsyncReplicationTargetNode_Call struct { + *mock.Call +} + +// RemoveAsyncReplicationTargetNode is a helper method to define mock.On call +// - ctx context.Context +// - targetNodeOverride additional.AsyncReplicationTargetNodeOverride +func (_e *MockReplicaCopier_Expecter) RemoveAsyncReplicationTargetNode(ctx interface{}, targetNodeOverride interface{}) *MockReplicaCopier_RemoveAsyncReplicationTargetNode_Call { + return &MockReplicaCopier_RemoveAsyncReplicationTargetNode_Call{Call: _e.mock.On("RemoveAsyncReplicationTargetNode", ctx, targetNodeOverride)} +} + +func (_c *MockReplicaCopier_RemoveAsyncReplicationTargetNode_Call) Run(run func(ctx context.Context, targetNodeOverride additional.AsyncReplicationTargetNodeOverride)) *MockReplicaCopier_RemoveAsyncReplicationTargetNode_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(additional.AsyncReplicationTargetNodeOverride)) + }) + return _c +} + +func (_c *MockReplicaCopier_RemoveAsyncReplicationTargetNode_Call) Return(_a0 error) *MockReplicaCopier_RemoveAsyncReplicationTargetNode_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockReplicaCopier_RemoveAsyncReplicationTargetNode_Call) RunAndReturn(run 
func(context.Context, additional.AsyncReplicationTargetNodeOverride) error) *MockReplicaCopier_RemoveAsyncReplicationTargetNode_Call { + _c.Call.Return(run) + return _c +} + +// RevertAsyncReplicationLocally provides a mock function with given fields: ctx, collectionName, shardName +func (_m *MockReplicaCopier) RevertAsyncReplicationLocally(ctx context.Context, collectionName string, shardName string) error { + ret := _m.Called(ctx, collectionName, shardName) + + if len(ret) == 0 { + panic("no return value specified for RevertAsyncReplicationLocally") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, string) error); ok { + r0 = rf(ctx, collectionName, shardName) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockReplicaCopier_RevertAsyncReplicationLocally_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RevertAsyncReplicationLocally' +type MockReplicaCopier_RevertAsyncReplicationLocally_Call struct { + *mock.Call +} + +// RevertAsyncReplicationLocally is a helper method to define mock.On call +// - ctx context.Context +// - collectionName string +// - shardName string +func (_e *MockReplicaCopier_Expecter) RevertAsyncReplicationLocally(ctx interface{}, collectionName interface{}, shardName interface{}) *MockReplicaCopier_RevertAsyncReplicationLocally_Call { + return &MockReplicaCopier_RevertAsyncReplicationLocally_Call{Call: _e.mock.On("RevertAsyncReplicationLocally", ctx, collectionName, shardName)} +} + +func (_c *MockReplicaCopier_RevertAsyncReplicationLocally_Call) Run(run func(ctx context.Context, collectionName string, shardName string)) *MockReplicaCopier_RevertAsyncReplicationLocally_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].(string)) + }) + return _c +} + +func (_c *MockReplicaCopier_RevertAsyncReplicationLocally_Call) Return(_a0 error) *MockReplicaCopier_RevertAsyncReplicationLocally_Call { + 
_c.Call.Return(_a0) + return _c +} + +func (_c *MockReplicaCopier_RevertAsyncReplicationLocally_Call) RunAndReturn(run func(context.Context, string, string) error) *MockReplicaCopier_RevertAsyncReplicationLocally_Call { + _c.Call.Return(run) + return _c +} + +// NewMockReplicaCopier creates a new instance of MockReplicaCopier. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMockReplicaCopier(t interface { + mock.TestingT + Cleanup(func()) +}) *MockReplicaCopier { + mock := &MockReplicaCopier{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/replication/types/mock_replication_fsm_reader.go b/platform/dbops/binaries/weaviate-src/cluster/replication/types/mock_replication_fsm_reader.go new file mode 100644 index 0000000000000000000000000000000000000000..414cc577d48e913b23d216395d72a6c1050e40eb --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/replication/types/mock_replication_fsm_reader.go @@ -0,0 +1,155 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by mockery v2.53.2. DO NOT EDIT. 
+ +package types + +import mock "github.com/stretchr/testify/mock" + +// MockReplicationFSMReader is an autogenerated mock type for the ReplicationFSMReader type +type MockReplicationFSMReader struct { + mock.Mock +} + +type MockReplicationFSMReader_Expecter struct { + mock *mock.Mock +} + +func (_m *MockReplicationFSMReader) EXPECT() *MockReplicationFSMReader_Expecter { + return &MockReplicationFSMReader_Expecter{mock: &_m.Mock} +} + +// FilterOneShardReplicasRead provides a mock function with given fields: collection, shard, shardReplicasLocation +func (_m *MockReplicationFSMReader) FilterOneShardReplicasRead(collection string, shard string, shardReplicasLocation []string) []string { + ret := _m.Called(collection, shard, shardReplicasLocation) + + if len(ret) == 0 { + panic("no return value specified for FilterOneShardReplicasRead") + } + + var r0 []string + if rf, ok := ret.Get(0).(func(string, string, []string) []string); ok { + r0 = rf(collection, shard, shardReplicasLocation) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]string) + } + } + + return r0 +} + +// MockReplicationFSMReader_FilterOneShardReplicasRead_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'FilterOneShardReplicasRead' +type MockReplicationFSMReader_FilterOneShardReplicasRead_Call struct { + *mock.Call +} + +// FilterOneShardReplicasRead is a helper method to define mock.On call +// - collection string +// - shard string +// - shardReplicasLocation []string +func (_e *MockReplicationFSMReader_Expecter) FilterOneShardReplicasRead(collection interface{}, shard interface{}, shardReplicasLocation interface{}) *MockReplicationFSMReader_FilterOneShardReplicasRead_Call { + return &MockReplicationFSMReader_FilterOneShardReplicasRead_Call{Call: _e.mock.On("FilterOneShardReplicasRead", collection, shard, shardReplicasLocation)} +} + +func (_c *MockReplicationFSMReader_FilterOneShardReplicasRead_Call) Run(run func(collection string, shard string, 
shardReplicasLocation []string)) *MockReplicationFSMReader_FilterOneShardReplicasRead_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string), args[1].(string), args[2].([]string)) + }) + return _c +} + +func (_c *MockReplicationFSMReader_FilterOneShardReplicasRead_Call) Return(_a0 []string) *MockReplicationFSMReader_FilterOneShardReplicasRead_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockReplicationFSMReader_FilterOneShardReplicasRead_Call) RunAndReturn(run func(string, string, []string) []string) *MockReplicationFSMReader_FilterOneShardReplicasRead_Call { + _c.Call.Return(run) + return _c +} + +// FilterOneShardReplicasWrite provides a mock function with given fields: collection, shard, shardReplicasLocation +func (_m *MockReplicationFSMReader) FilterOneShardReplicasWrite(collection string, shard string, shardReplicasLocation []string) ([]string, []string) { + ret := _m.Called(collection, shard, shardReplicasLocation) + + if len(ret) == 0 { + panic("no return value specified for FilterOneShardReplicasWrite") + } + + var r0 []string + var r1 []string + if rf, ok := ret.Get(0).(func(string, string, []string) ([]string, []string)); ok { + return rf(collection, shard, shardReplicasLocation) + } + if rf, ok := ret.Get(0).(func(string, string, []string) []string); ok { + r0 = rf(collection, shard, shardReplicasLocation) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]string) + } + } + + if rf, ok := ret.Get(1).(func(string, string, []string) []string); ok { + r1 = rf(collection, shard, shardReplicasLocation) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).([]string) + } + } + + return r0, r1 +} + +// MockReplicationFSMReader_FilterOneShardReplicasWrite_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'FilterOneShardReplicasWrite' +type MockReplicationFSMReader_FilterOneShardReplicasWrite_Call struct { + *mock.Call +} + +// FilterOneShardReplicasWrite is a helper method to define 
mock.On call +// - collection string +// - shard string +// - shardReplicasLocation []string +func (_e *MockReplicationFSMReader_Expecter) FilterOneShardReplicasWrite(collection interface{}, shard interface{}, shardReplicasLocation interface{}) *MockReplicationFSMReader_FilterOneShardReplicasWrite_Call { + return &MockReplicationFSMReader_FilterOneShardReplicasWrite_Call{Call: _e.mock.On("FilterOneShardReplicasWrite", collection, shard, shardReplicasLocation)} +} + +func (_c *MockReplicationFSMReader_FilterOneShardReplicasWrite_Call) Run(run func(collection string, shard string, shardReplicasLocation []string)) *MockReplicationFSMReader_FilterOneShardReplicasWrite_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string), args[1].(string), args[2].([]string)) + }) + return _c +} + +func (_c *MockReplicationFSMReader_FilterOneShardReplicasWrite_Call) Return(_a0 []string, _a1 []string) *MockReplicationFSMReader_FilterOneShardReplicasWrite_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockReplicationFSMReader_FilterOneShardReplicasWrite_Call) RunAndReturn(run func(string, string, []string) ([]string, []string)) *MockReplicationFSMReader_FilterOneShardReplicasWrite_Call { + _c.Call.Return(run) + return _c +} + +// NewMockReplicationFSMReader creates a new instance of MockReplicationFSMReader. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewMockReplicationFSMReader(t interface { + mock.TestingT + Cleanup(func()) +}) *MockReplicationFSMReader { + mock := &MockReplicationFSMReader{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/replication/types/replica_copier.go b/platform/dbops/binaries/weaviate-src/cluster/replication/types/replica_copier.go new file mode 100644 index 0000000000000000000000000000000000000000..a032e2dc1078fe62bd79eb9552b84a8e5c529cfd --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/replication/types/replica_copier.go @@ -0,0 +1,43 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package types + +import ( + "context" + + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/models" +) + +// ReplicaCopier see cluster/replication/copier.Copier +type ReplicaCopier interface { + // CopyReplicaFiles see cluster/replication/copier.Copier.CopyReplicaFiles + CopyReplicaFiles(ctx context.Context, sourceNode string, sourceCollection string, sourceShard string, schemaVersion uint64) error + + // LoadLocalShard see cluster/replication/copier.Copier.LoadLocalShard + LoadLocalShard(ctx context.Context, collectionName, shardName string) error + + // InitAsyncReplicationLocally see cluster/replication/copier.Copier.InitAsyncReplicationLocally + InitAsyncReplicationLocally(ctx context.Context, collectionName, shardName string) error + + // RevertAsyncReplicationLocally see cluster/replication/copier.Copier.RevertAsyncReplicationLocally + RevertAsyncReplicationLocally(ctx context.Context, collectionName, shardName string) error + + // AddAsyncReplicationTargetNode see 
cluster/replication/copier.Copier.AddAsyncReplicationTargetNode
+	AddAsyncReplicationTargetNode(ctx context.Context, targetNodeOverride additional.AsyncReplicationTargetNodeOverride, schemaVersion uint64) error
+
+	// RemoveAsyncReplicationTargetNode see cluster/replication/copier.Copier.RemoveAsyncReplicationTargetNode
+	RemoveAsyncReplicationTargetNode(ctx context.Context, targetNodeOverride additional.AsyncReplicationTargetNodeOverride) error
+
+	// AsyncReplicationStatus see cluster/replication/copier.Copier.AsyncReplicationStatus
+	AsyncReplicationStatus(ctx context.Context, srcNodeId, targetNodeId, collectionName, shardName string) (models.AsyncReplicationStatus, error)
+}
diff --git a/platform/dbops/binaries/weaviate-src/cluster/replication/types/replication_fsm_reader.go b/platform/dbops/binaries/weaviate-src/cluster/replication/types/replication_fsm_reader.go
new file mode 100644
index 0000000000000000000000000000000000000000..76c5f97bdb4caa0adb70643bf2e04ff098f00a92
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/cluster/replication/types/replication_fsm_reader.go
@@ -0,0 +1,21 @@
+//                           _       _
+// __      _____  __ ___   ___  __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+//  \ V  V /  __/ (_| |\ V /| | (_| | ||  __/
+//   \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+//  Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+//  CONTACT: hello@weaviate.io
+//
+
+package types
+
+type ReplicationFSMReader interface {
+	// FilterOneShardReplicasRead returns the read replicas for a given shard
+	// It returns a slice of readReplicas
+	FilterOneShardReplicasRead(collection string, shard string, shardReplicasLocation []string) []string
+	// FilterOneShardReplicasWrite returns the write replicas for a given shard
+	// It returns a tuple of (writeReplicas, additionalWriteReplicas)
+	FilterOneShardReplicasWrite(collection string, shard string, shardReplicasLocation []string) ([]string, []string)
+}
diff --git a/platform/dbops/binaries/weaviate-src/cluster/replication/types/replication_fsm_updater.go b/platform/dbops/binaries/weaviate-src/cluster/replication/types/replication_fsm_updater.go
new file mode 100644
index 0000000000000000000000000000000000000000..c944908b1d4e7bbd82521cbec1eeac1838fd4f00
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/cluster/replication/types/replication_fsm_updater.go
@@ -0,0 +1,32 @@
+//                           _       _
+// __      _____  __ ___   ___  __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+//  \ V  V /  __/ (_| |\ V /| | (_| | ||  __/
+//   \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+//  Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+// +// CONTACT: hello@weaviate.io +// + +package types + +import ( + "context" + + "github.com/weaviate/weaviate/cluster/proto/api" +) + +type FSMUpdater interface { + ReplicationAddReplicaToShard(ctx context.Context, collection string, shard string, nodeId string, opId uint64) (uint64, error) + DeleteReplicaFromShard(ctx context.Context, collection string, shard string, nodeId string) (uint64, error) + SyncShard(ctx context.Context, collection string, shard string, nodeId string) (uint64, error) + ReplicationUpdateReplicaOpStatus(ctx context.Context, id uint64, state api.ShardReplicationState) error + ReplicationRegisterError(ctx context.Context, id uint64, errorToRegister string) error + ReplicationRemoveReplicaOp(ctx context.Context, id uint64) error + ReplicationCancellationComplete(ctx context.Context, id uint64) error + ReplicationGetReplicaOpStatus(ctx context.Context, id uint64) (api.ShardReplicationState, error) + ReplicationStoreSchemaVersion(ctx context.Context, id uint64, schemaVersion uint64) error + UpdateTenants(ctx context.Context, class string, req *api.UpdateTenantsRequest) (uint64, error) + WaitForUpdate(ctx context.Context, schemaVersion uint64) error +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/replication/utils.go b/platform/dbops/binaries/weaviate-src/cluster/replication/utils.go new file mode 100644 index 0000000000000000000000000000000000000000..3377a9e0f51e56416078e59dfdecdb324329d474 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/replication/utils.go @@ -0,0 +1,38 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package replication + +import "github.com/sirupsen/logrus" + +func logFieldsForOp(op ShardReplicationOp) logrus.Fields { + return logrus.Fields{ + "op_uuid": op.UUID, + "op_id": op.ID, + "source_node": op.SourceShard.NodeId, + "target_node": op.TargetShard.NodeId, + "source_shard": op.SourceShard.ShardId, + "target_shard": op.TargetShard.ShardId, + "source_collection": op.SourceShard.CollectionId, + "target_collection": op.TargetShard.CollectionId, + "transfer_type": op.TransferType, + } +} + +func logFieldsForStatus(state ShardReplicationOpStatus) logrus.Fields { + return logrus.Fields{ + "state": state.GetCurrentState(), + } +} + +func getLoggerForOpAndStatus(logger *logrus.Entry, op ShardReplicationOp, state ShardReplicationOpStatus) *logrus.Entry { + return logger.WithFields(logFieldsForOp(op)).WithFields(logFieldsForStatus(state)) +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/replication/validate.go b/platform/dbops/binaries/weaviate-src/cluster/replication/validate.go new file mode 100644 index 0000000000000000000000000000000000000000..bdb64d4a263a14375cc06fc6e2288eaca36e3a64 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/replication/validate.go @@ -0,0 +1,67 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package replication + +import ( + "errors" + "fmt" + + "github.com/weaviate/weaviate/cluster/proto/api" + "github.com/weaviate/weaviate/cluster/schema" +) + +var ( + ErrAlreadyExists = errors.New("already exists") + ErrNodeNotFound = errors.New("node not found") + ErrClassNotFound = errors.New("class not found") + ErrShardNotFound = errors.New("shard not found") +) + +// ValidateReplicationReplicateShard validates that c is valid given the current state of the schema read using schemaReader +func ValidateReplicationReplicateShard(schemaReader schema.SchemaReader, c *api.ReplicationReplicateShardRequest) error { + if c.Uuid == "" { + return fmt.Errorf("uuid is required: %w", ErrBadRequest) + } + if c.SourceNode == c.TargetNode { + return fmt.Errorf("source and target node are the same: %w", ErrBadRequest) + } + + classInfo := schemaReader.ClassInfo(c.SourceCollection) + // ClassInfo doesn't return an error, so the only way to know if the class exist is to check if the Exists + // boolean is not set to default value + if !classInfo.Exists { + return fmt.Errorf("collection %s does not exists: %w", c.SourceCollection, ErrClassNotFound) + } + + // Ensure source shard replica exists and target replica doesn't already exist + nodes, err := schemaReader.ShardReplicas(c.SourceCollection, c.SourceShard) + if err != nil { + return err + } + var foundSource bool + var foundTarget bool + for _, n := range nodes { + if n == c.SourceNode { + foundSource = true + } + if n == c.TargetNode { + foundTarget = true + } + } + if !foundSource { + return fmt.Errorf("could not find shard %s for collection %s on source node %s: %w", c.SourceShard, c.SourceCollection, c.SourceNode, ErrNodeNotFound) + } + if foundTarget { + return fmt.Errorf("shard %s already exist for collection %s on target node %s: %w", c.SourceShard, c.SourceCollection, c.SourceNode, ErrAlreadyExists) + } + return nil +} diff --git 
a/platform/dbops/binaries/weaviate-src/cluster/resolver/raft.go b/platform/dbops/binaries/weaviate-src/cluster/resolver/raft.go new file mode 100644 index 0000000000000000000000000000000000000000..f260a3c37c9b040143a0e2dd9a4a47d1956496a5 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/resolver/raft.go @@ -0,0 +1,109 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package resolver + +import ( + "fmt" + "net" + "sync" + "time" + + raftImpl "github.com/hashicorp/raft" + "github.com/sirupsen/logrus" + "github.com/weaviate/weaviate/cluster/log" +) + +const ( + // RaftTcpMaxPool controls how many connections raft transport will pool + raftTcpMaxPool = 3 + // RaftTcpTimeout is used to apply I/O deadlines. + raftTcpTimeout = 10 * time.Second +) + +type raft struct { + // ClusterStateReader allows the raft to also be used to the current cluster state + ClusterStateReader + + // RaftPort is the configured RAFT port in the cluster that the resolver will append to the node id. + RaftPort int + // IsLocalCluster is the cluster running on a single host machine. This is necessary to ensure that we don't use the + // same port multiple time when we only have a single underlying machine. + IsLocalCluster bool + // NodeNameToPortMap maps a given node name ot a given port. This is useful when running locally so that we can + // keep in memory which node uses which port. 
+	NodeNameToPortMap map[string]int
+
+	nodesLock        sync.Mutex
+	notResolvedNodes map[raftImpl.ServerID]struct{}
+}
+
+func NewRaft(cfg RaftConfig) *raft {
+	return &raft{
+		ClusterStateReader: cfg.ClusterStateReader,
+		RaftPort:           cfg.RaftPort,
+		IsLocalCluster:     cfg.IsLocalHost,
+		NodeNameToPortMap:  cfg.NodeNameToPortMap,
+		notResolvedNodes:   make(map[raftImpl.ServerID]struct{}),
+	}
+}
+
+// ServerAddr resolves server ID to a RAFT address
+func (a *raft) ServerAddr(id raftImpl.ServerID) (raftImpl.ServerAddress, error) {
+	// Get the address from the node id
+	addr := a.ClusterStateReader.NodeAddress(string(id))
+
+	// Update the internal notResolvedNodes if the addr is empty, otherwise delete it from the map
+	a.nodesLock.Lock()
+	defer a.nodesLock.Unlock()
+	if addr == "" {
+		a.notResolvedNodes[id] = struct{}{}
+		return raftImpl.ServerAddress(invalidAddr), nil
+	}
+	delete(a.notResolvedNodes, id)
+
+	// If we are not running a local cluster we can immediately return, otherwise we need to lookup the port of the node
+	// as we can't use the default raft port locally.
+	if !a.IsLocalCluster {
+		return raftImpl.ServerAddress(fmt.Sprintf("%s:%d", addr, a.RaftPort)), nil
+	}
+	return raftImpl.ServerAddress(fmt.Sprintf("%s:%d", addr, a.NodeNameToPortMap[string(id)])), nil
+}
+
+// NewTCPTransport returns a new raft.NetworkTransportConfig that utilizes
+// this resolver to resolve addresses based on server IDs.
+// This is particularly crucial as K8s assigns new IPs on each node restart.
+func (a *raft) NewTCPTransport( + bindAddr string, + advertise net.Addr, + maxPool int, + timeout time.Duration, + logger *logrus.Logger, +) (*raftImpl.NetworkTransport, error) { + cfg := &raftImpl.NetworkTransportConfig{ + ServerAddressProvider: a, + MaxPool: raftTcpMaxPool, + Timeout: raftTcpTimeout, + Logger: log.NewHCLogrusLogger("raft-net", logger), + } + return raftImpl.NewTCPTransportWithConfig(bindAddr, advertise, cfg) +} + +func (a *raft) NotResolvedNodes() map[raftImpl.ServerID]struct{} { + a.nodesLock.Lock() + defer a.nodesLock.Unlock() + + newMap := make(map[raftImpl.ServerID]struct{}) + for k, v := range a.notResolvedNodes { + newMap[k] = v + } + return newMap +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/resolver/rpc.go b/platform/dbops/binaries/weaviate-src/cluster/resolver/rpc.go new file mode 100644 index 0000000000000000000000000000000000000000..2c57e4a0017a498a5b5132de0ea39f5231e0cb5f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/resolver/rpc.go @@ -0,0 +1,53 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package resolver + +import ( + "fmt" + "net" + "strconv" +) + +// Rpc implements resolving raft address to their RPC address depending on the configured rpcPort and whether or +// not this is a local cluster. 
+type Rpc struct { + isLocalCluster bool + rpcPort int +} + +// NewRpc returns an implementation of resolver +// isLocalHost is used to determine which remote port to expect given an address (See: rpcResolver.rpcAddressFromRAFT()) +// rpcPort is used as the default port on the returned rpcAddresses (see: rpcResolver.Address()) +func NewRpc(isLocalHost bool, rpcPort int) *Rpc { + return &Rpc{isLocalCluster: isLocalHost, rpcPort: rpcPort} +} + +// rpcAddressFromRAFT returns the rpc address (rpcAddr:rpcPort) based on raftAddr (raftAddr:raftPort). +// In a real cluster, the RPC port is the same for all nodes. In a local environment, the RPC ports need to be +// different. Specifically, we calculate the RPC port as the RAFT port + 1. +// Returns an error if raftAddr is not parseable. +// Returns an error if raftAddr port is not parse-able as an integer. +func (cl *Rpc) Address(raftAddr string) (string, error) { + host, port, err := net.SplitHostPort(raftAddr) + if err != nil { + return "", err + } + if !cl.isLocalCluster { + return fmt.Sprintf("%s:%d", host, cl.rpcPort), nil + } + iPort, err := strconv.Atoi(port) + if err != nil { + return "", err + } + + return fmt.Sprintf("%s:%d", host, iPort+1), nil +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/resolver/rpc_test.go b/platform/dbops/binaries/weaviate-src/cluster/resolver/rpc_test.go new file mode 100644 index 0000000000000000000000000000000000000000..9d399a5453f6f13309583670ae0897a474f532b6 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/resolver/rpc_test.go @@ -0,0 +1,40 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package resolver + +import ( + "fmt" + "net" + "strconv" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestRPCResolver(t *testing.T) { + rpcPort := 8081 + _, err := NewRpc(false, rpcPort).Address("localhost123") + addErr := &net.AddrError{} + assert.ErrorAs(t, err, &addErr) + + rAddr, err := NewRpc(false, rpcPort).Address("localhost:123") + assert.Nil(t, err) + assert.Equal(t, rAddr, fmt.Sprintf("localhost:%d", rpcPort)) + + _, err = NewRpc(true, rpcPort).Address("localhost:not_a_port") + numErr := &strconv.NumError{} + assert.ErrorAs(t, err, &numErr) + + rAddr, err = NewRpc(true, rpcPort).Address("localhost:123") + assert.Nil(t, err) + assert.Equal(t, "localhost:124", rAddr) +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/resolver/types.go b/platform/dbops/binaries/weaviate-src/cluster/resolver/types.go new file mode 100644 index 0000000000000000000000000000000000000000..dc919c25cf885043d38af233347d65b0eba208ff --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/resolver/types.go @@ -0,0 +1,40 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package resolver + +const ( + invalidAddr = "256.256.256.256:99999999" +) + +// ClusterStateReader allows the resolver to compute node-id to ip addresses. +type ClusterStateReader interface { + // NodeAddress resolves node id into an ip address without the port. 
+ NodeAddress(id string) string + // NodeHostname resolves a node id into an ip address with internal cluster api port + NodeHostname(nodeName string) (string, bool) + // LocalName returns the local node name + LocalName() string +} + +type RaftConfig struct { + ClusterStateReader ClusterStateReader + RaftPort int + IsLocalHost bool + NodeNameToPortMap map[string]int +} + +type FQDNConfig struct { + RaftPort int + IsLocalHost bool + NodeNameToPortMap map[string]int + TLD string +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/router/executor/executor.go b/platform/dbops/binaries/weaviate-src/cluster/router/executor/executor.go new file mode 100644 index 0000000000000000000000000000000000000000..658dcf6925225ac25a4cf20c6c1d66732a898960 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/router/executor/executor.go @@ -0,0 +1,78 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package executor + +import ( + "fmt" + + "github.com/weaviate/weaviate/cluster/router/types" +) + +type Operation func(types.Replica) error + +func validateExecutors(localOperation Operation, remoteOperation Operation) error { + if localOperation == nil { + return fmt.Errorf("local executor cannot be nil") + } + if remoteOperation == nil { + return fmt.Errorf("remote executor cannot be nil") + } + return nil +} + +func ExecuteForEachShard(plan types.ReadRoutingPlan, localOperation Operation, remoteOperation Operation) error { + if err := validateExecutors(localOperation, remoteOperation); err != nil { + return err + } + + shardSet := make(map[string]struct{}) + for _, replica := range plan.ReplicaSet.Replicas { + if _, ok := shardSet[replica.ShardName]; ok { + continue + } + shardSet[replica.ShardName] = struct{}{} + + if plan.LocalHostname == replica.NodeName { + if err := localOperation(replica); err != nil { + return fmt.Errorf("failed to locally execute read plan on replica %s: %w", replica.NodeName, err) + } + } else { + if err := remoteOperation(replica); err != nil { + return fmt.Errorf("failed to remotely execute read plan on replica %s at addr %s: %w", replica.NodeName, replica.HostAddr, err) + } + } + } + return nil +} + +func ExecuteForEachReplicaOfShard(plan types.ReadRoutingPlan, shardName string, localOperation Operation, remoteOperation Operation) error { + if err := validateExecutors(localOperation, remoteOperation); err != nil { + return err + } + + for _, replica := range plan.ReplicaSet.Replicas { + if replica.ShardName != shardName { + continue + } + + if plan.LocalHostname == replica.NodeName { + if err := localOperation(replica); err != nil { + return fmt.Errorf("failed to locally execute read plan on replica %s: %w", replica.NodeName, err) + } + } else { + if err := remoteOperation(replica); err != nil { + return fmt.Errorf("failed to remotely execute read plan on replica %s at addr %s: %w", 
replica.NodeName, replica.HostAddr, err) + } + } + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/router/executor/executor_test.go b/platform/dbops/binaries/weaviate-src/cluster/router/executor/executor_test.go new file mode 100644 index 0000000000000000000000000000000000000000..4fedb08d07783d94f54119d0e040145dbed0193c --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/router/executor/executor_test.go @@ -0,0 +1,356 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package executor_test + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/cluster/router/executor" + "github.com/weaviate/weaviate/cluster/router/types" +) + +// mockExecutor creates a mock executor that tracks calls and can simulate errors +func mockExecutor(expectedCalls map[string]int, expectedErrors map[string]bool) executor.Operation { + return func(replica types.Replica) error { + expectedCalls[replica.NodeName]++ + if expectedErrors[replica.NodeName] { + return fmt.Errorf("mock error for node %s", replica.NodeName) + } + return nil + } +} + +// createTestPlan creates a test routing plan with the specified replicas +func createTestPlan(replicas []types.Replica) types.ReadRoutingPlan { + return types.ReadRoutingPlan{ + LocalHostname: "node1", // Default local hostname for tests + ReplicaSet: types.ReadReplicaSet{ + Replicas: replicas, + }, + } +} + +// createTestReplica creates a test replica +func createTestReplica(nodeName, shardName, hostAddr string) types.Replica { + return types.Replica{ + NodeName: nodeName, + ShardName: shardName, + HostAddr: hostAddr, + } +} + +func TestExecuteForEachShard(t *testing.T) { + type testCase struct { + name string + plan 
types.ReadRoutingPlan + expectedError string + expectedLocal map[string]int // nodeName -> expected local call count + expectedRemote map[string]int // nodeName -> expected remote call count + expectedErrors map[string]bool // nodeName -> whether error was expected + } + + tests := []testCase{ + { + name: "single local replica", + plan: createTestPlan([]types.Replica{ + createTestReplica("node1", "shard1", "node1:8080"), + }), + expectedLocal: map[string]int{"node1": 1}, + expectedRemote: map[string]int{}, + }, + { + name: "single remote replica", + plan: createTestPlan([]types.Replica{ + createTestReplica("node2", "shard1", "node2:8080"), + }), + expectedLocal: map[string]int{}, + expectedRemote: map[string]int{"node2": 1}, + }, + { + name: "multiple replicas same shard", + plan: createTestPlan([]types.Replica{ + createTestReplica("node1", "shard1", "node1:8080"), + createTestReplica("node2", "shard1", "node2:8080"), + createTestReplica("node3", "shard1", "node3:8080"), + }), + expectedLocal: map[string]int{"node1": 1}, // Only first replica of shard should be called + expectedRemote: map[string]int{}, + }, + { + name: "multiple shards", + plan: createTestPlan([]types.Replica{ + createTestReplica("node1", "shard1", "node1:8080"), + createTestReplica("node2", "shard2", "node2:8080"), + createTestReplica("node3", "shard3", "node3:8080"), + }), + expectedLocal: map[string]int{"node1": 1}, + expectedRemote: map[string]int{"node2": 1, "node3": 1}, + }, + { + name: "local executor error", + plan: createTestPlan([]types.Replica{ + createTestReplica("node1", "shard1", "node1:8080"), + createTestReplica("node2", "shard2", "node2:8080"), + }), + expectedLocal: map[string]int{"node1": 1}, + expectedRemote: map[string]int{}, + expectedErrors: map[string]bool{"node1": true}, + expectedError: "failed to locally execute read plan on replica node1: mock error for node node1", + }, + { + name: "remote executor error", + plan: createTestPlan([]types.Replica{ + 
createTestReplica("node2", "shard1", "node2:8080"), + createTestReplica("node3", "shard2", "node3:8080"), + }), + expectedLocal: map[string]int{}, + expectedRemote: map[string]int{"node2": 1}, + expectedErrors: map[string]bool{"node2": true}, + expectedError: "failed to remotely execute read plan on replica node2 at addr node2:8080: mock error for node node2", + }, + { + name: "empty replica set", + plan: createTestPlan([]types.Replica{}), + expectedLocal: map[string]int{}, + expectedRemote: map[string]int{}, + }, + { + name: "mixed local and remote replicas", + plan: createTestPlan([]types.Replica{ + createTestReplica("node1", "shard1", "node1:8080"), + createTestReplica("node2", "shard1", "node2:8080"), + createTestReplica("node3", "shard2", "node3:8080"), + }), + expectedLocal: map[string]int{"node1": 1}, + expectedRemote: map[string]int{"node3": 1}, + }, + { + name: "empty local hostname", + plan: types.ReadRoutingPlan{ + LocalHostname: "", + ReplicaSet: types.ReadReplicaSet{ + Replicas: []types.Replica{ + createTestReplica("node1", "shard1", "node1:8080"), + }, + }, + }, + expectedLocal: map[string]int{}, + expectedRemote: map[string]int{"node1": 1}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create mock executors + localCalls := make(map[string]int) + remoteCalls := make(map[string]int) + + localExecutor := mockExecutor(localCalls, tt.expectedErrors) + remoteExecutor := mockExecutor(remoteCalls, tt.expectedErrors) + + // Execute the function + err := executor.ExecuteForEachShard(tt.plan, localExecutor, remoteExecutor) + + // Verify results + if tt.expectedError != "" { + require.Error(t, err) + require.Contains(t, err.Error(), tt.expectedError) + } else { + require.NoError(t, err) + } + + // Verify local call counts + if tt.expectedLocal != nil { + for nodeName, expectedCount := range tt.expectedLocal { + actualCount := localCalls[nodeName] + require.Equal(t, expectedCount, actualCount, + "node %s: expected %d local 
calls, got %d", nodeName, expectedCount, actualCount) + } + } + + // Verify remote call counts + if tt.expectedRemote != nil { + for nodeName, expectedCount := range tt.expectedRemote { + actualCount := remoteCalls[nodeName] + require.Equal(t, expectedCount, actualCount, + "node %s: expected %d remote calls, got %d", nodeName, expectedCount, actualCount) + } + } + + // Verify no unexpected calls + for nodeName, actualLocalCount := range localCalls { + if expectedLocalCount, exists := tt.expectedLocal[nodeName]; !exists || expectedLocalCount != actualLocalCount { + require.Fail(t, "unexpected local calls", "node %s: got %d local calls but expected %d", + nodeName, actualLocalCount, expectedLocalCount) + } + } + for nodeName, actualRemoteCount := range remoteCalls { + if expectedRemoteCount, exists := tt.expectedRemote[nodeName]; !exists || expectedRemoteCount != actualRemoteCount { + require.Fail(t, "unexpected remote calls", "node %s: got %d remote calls but expected %d", + nodeName, actualRemoteCount, expectedRemoteCount) + } + } + }) + } +} + +func TestExecuteForEachReplicaOfShard(t *testing.T) { + type testCase struct { + name string + plan types.ReadRoutingPlan + shardName string + expectedError string + expectedLocal map[string]int // nodeName -> expected local call count + expectedRemote map[string]int // nodeName -> expected remote call count + expectedErrors map[string]bool // nodeName -> whether error was expected + } + tests := []testCase{ + { + name: "single local replica matching shard", + plan: createTestPlan([]types.Replica{ + createTestReplica("node1", "shard1", "node1:8080"), + }), + shardName: "shard1", + expectedLocal: map[string]int{"node1": 1}, + expectedRemote: map[string]int{}, + }, + { + name: "single remote replica matching shard", + plan: createTestPlan([]types.Replica{ + createTestReplica("node2", "shard1", "node2:8080"), + }), + shardName: "shard1", + expectedLocal: map[string]int{}, + expectedRemote: map[string]int{"node2": 1}, + }, + { 
+ name: "multiple replicas matching shard", + plan: createTestPlan([]types.Replica{ + createTestReplica("node1", "shard1", "node1:8080"), + createTestReplica("node2", "shard1", "node2:8080"), + createTestReplica("node3", "shard1", "node3:8080"), + }), + shardName: "shard1", + expectedLocal: map[string]int{"node1": 1}, + expectedRemote: map[string]int{"node2": 1, "node3": 1}, + }, + { + name: "replicas from different shards", + plan: createTestPlan([]types.Replica{ + createTestReplica("node1", "shard1", "node1:8080"), + createTestReplica("node2", "shard2", "node2:8080"), + createTestReplica("node3", "shard3", "node3:8080"), + }), + shardName: "shard1", + expectedLocal: map[string]int{"node1": 1}, + expectedRemote: map[string]int{}, + }, + { + name: "no replicas matching shard", + plan: createTestPlan([]types.Replica{ + createTestReplica("node1", "shard1", "node1:8080"), + createTestReplica("node2", "shard2", "node2:8080"), + }), + shardName: "shard3", + expectedLocal: map[string]int{}, + expectedRemote: map[string]int{}, + }, + { + name: "remote executor error with stop on error", + plan: createTestPlan([]types.Replica{ + createTestReplica("node2", "shard1", "node2:8080"), + createTestReplica("node3", "shard1", "node3:8080"), + }), + shardName: "shard1", + expectedLocal: map[string]int{}, + expectedRemote: map[string]int{"node2": 1}, + expectedErrors: map[string]bool{"node2": true}, + expectedError: "failed to remotely execute read plan on replica node2 at addr node2:8080: mock error for node node2", + }, + { + name: "empty replica set", + plan: createTestPlan([]types.Replica{}), + shardName: "shard1", + expectedLocal: map[string]int{}, + expectedRemote: map[string]int{}, + }, + { + name: "empty local hostname", + plan: types.ReadRoutingPlan{ + LocalHostname: "", + ReplicaSet: types.ReadReplicaSet{ + Replicas: []types.Replica{ + createTestReplica("node1", "shard1", "node1:8080"), + }, + }, + }, + shardName: "shard1", + expectedLocal: map[string]int{}, + 
expectedRemote: map[string]int{"node1": 1}, // Empty local hostname means all replicas are treated as remote + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create mock executors + localCalls := make(map[string]int) + remoteCalls := make(map[string]int) + localExecutor := mockExecutor(localCalls, tt.expectedErrors) + remoteExecutor := mockExecutor(remoteCalls, tt.expectedErrors) + + // Execute the function + err := executor.ExecuteForEachReplicaOfShard(tt.plan, tt.shardName, localExecutor, remoteExecutor) + + // Verify results + if tt.expectedError != "" { + require.Error(t, err) + require.Contains(t, err.Error(), tt.expectedError) + } else { + require.NoError(t, err) + } + + // Verify local call counts + if tt.expectedLocal != nil { + for nodeName, expectedCount := range tt.expectedLocal { + actualCount := localCalls[nodeName] + require.Equal(t, expectedCount, actualCount, + "node %s: expected %d local calls, got %d", nodeName, expectedCount, actualCount) + } + } + + // Verify remote call counts + if tt.expectedRemote != nil { + for nodeName, expectedCount := range tt.expectedRemote { + actualCount := remoteCalls[nodeName] + require.Equal(t, expectedCount, actualCount, + "node %s: expected %d remote calls, got %d", nodeName, expectedCount, actualCount) + } + } + + // Verify no unexpected calls + for nodeName, actualLocalCount := range localCalls { + if expectedLocalCount, exists := tt.expectedLocal[nodeName]; !exists || expectedLocalCount != actualLocalCount { + require.Fail(t, "unexpected local calls", "node %s: got %d local calls but expected %d", + nodeName, actualLocalCount, expectedLocalCount) + } + } + for nodeName, actualRemoteCount := range remoteCalls { + if expectedRemoteCount, exists := tt.expectedRemote[nodeName]; !exists || expectedRemoteCount != actualRemoteCount { + require.Fail(t, "unexpected remote calls", "node %s: got %d remote calls but expected %d", + nodeName, actualRemoteCount, expectedRemoteCount) + } + } 
+ }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/router/router.go b/platform/dbops/binaries/weaviate-src/cluster/router/router.go new file mode 100644 index 0000000000000000000000000000000000000000..4e9b8b1c0d737c979671199fa4ac2c284d382302 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/router/router.go @@ -0,0 +1,650 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Package router provides an abstraction for determining the optimal routing plans +// for reads and writes within a Weaviate cluster. It handles logic around sharding, +// replication, and consistency, helping determine which nodes and shards (replicas) +// should be queried for a given operation. +// +// The Router interface is implemented by single-tenant and multi-tenant routers, +// depending on the system's configuration. Use NewBuilder to create the +// appropriate router based on whether partitioning is enabled. +package router + +import ( + "context" + "fmt" + + enterrors "github.com/weaviate/weaviate/entities/errors" + + "github.com/weaviate/weaviate/usecases/objects" + + replicationTypes "github.com/weaviate/weaviate/cluster/replication/types" + + "github.com/weaviate/weaviate/entities/models" + + "github.com/weaviate/weaviate/usecases/schema" + + "github.com/weaviate/weaviate/cluster/router/types" + "github.com/weaviate/weaviate/usecases/cluster" +) + +// Builder provides a builder for creating router instances based on configuration. +// Use NewBuilder() with all required parameters, then call Build() to get the appropriate Router implementation, +// either a multi-tenant router or a single tenant router. 
The multi-tenant router will use the tenant name as the +// partitioning key to identify a specific tenant's partitioning. +type Builder struct { + collection string + partitioningEnabled bool + nodeSelector cluster.NodeSelector + schemaGetter schema.SchemaGetter + schemaReader schema.SchemaReader + replicationFSMReader replicationTypes.ReplicationFSMReader +} + +// NewBuilder creates a new Builder with the provided configuration. +// +// Parameters: +// - collection: the name of the collection that this router will handle. +// - partitioningEnabled: true for multi-tenant mode, false for single-tenant mode. +// - nodeSelector: provides cluster node state information and hostnames. +// - schemaGetter: provides collection schemas, sharding states, and tenant information. +// - schemaReader: provides shard replica (or node names) metadata. +// - replicationFSMReader: provides replica state information for replication consistency. +// +// Returns: +// - *Builder: a new builder instance ready to build the appropriate router. +func NewBuilder( + collection string, + partitioningEnabled bool, + nodeSelector cluster.NodeSelector, + schemaGetter schema.SchemaGetter, + schemaReader schema.SchemaReader, + replicationFSMReader replicationTypes.ReplicationFSMReader, +) *Builder { + return &Builder{ + collection: collection, + partitioningEnabled: partitioningEnabled, + nodeSelector: nodeSelector, + schemaGetter: schemaGetter, + schemaReader: schemaReader, + replicationFSMReader: replicationFSMReader, + } +} + +// Build builds and returns the appropriate router implementation based on the partitioning configuration. +// +// Returns: +// - Router: a concrete router implementation (*multiTenantRouter or *singleTenantRouter) that implements the Router interface. 
+func (b *Builder) Build() types.Router { + if b.partitioningEnabled { + return &multiTenantRouter{ + collection: b.collection, + schemaGetter: b.schemaGetter, + schemaReader: b.schemaReader, + replicationFSMReader: b.replicationFSMReader, + nodeSelector: b.nodeSelector, + } + } + return &singleTenantRouter{ + collection: b.collection, + schemaReader: b.schemaReader, + replicationFSMReader: b.replicationFSMReader, + nodeSelector: b.nodeSelector, + } +} + +// singleTenantRouter is the implementation of Router for single-tenant collections. +// In single-tenant mode, data is distributed across multiple physical shards without +// tenant-based partitioning. All data belongs to a single logical tenant (empty tenant +// or no partitioning key). +type singleTenantRouter struct { + collection string + schemaReader schema.SchemaReader + replicationFSMReader replicationTypes.ReplicationFSMReader + nodeSelector cluster.NodeSelector +} + +// multiTenantRouter is the implementation of Router for multi-tenant collections. +// In multi-tenant mode, tenant isolation is achieved through partitioning using +// the tenant name as the partitioning key. Each tenant effectively becomes its own shard. +type multiTenantRouter struct { + collection string + schemaGetter schema.SchemaGetter + schemaReader schema.SchemaReader + replicationFSMReader replicationTypes.ReplicationFSMReader + nodeSelector cluster.NodeSelector +} + +// Interface compliance check at compile time. +var ( + _ types.Router = (*multiTenantRouter)(nil) + _ types.Router = (*singleTenantRouter)(nil) +) + +// sort orders replicas with the preferred node first, followed by the remaining replicas. 
+func sort(replicas []types.Replica, preferredNodeName string) []types.Replica { + if len(replicas) == 0 { + return replicas + } + + var orderedReplicas []types.Replica + var otherReplicas []types.Replica + + for _, replica := range replicas { + if replica.NodeName == preferredNodeName { + orderedReplicas = append(orderedReplicas, replica) + } else { + otherReplicas = append(otherReplicas, replica) + } + } + + return append(orderedReplicas, otherReplicas...) +} + +// preferredNode determines the preferred node for replica ordering by selecting +// the direct candidate if specified, otherwise falling back to the local node. +func preferredNode(directCandidate string, localNodeName string) string { + if directCandidate != "" { + return directCandidate + } + return localNodeName +} + +// buildReplicas constructs a slice of replicas from node names, resolving hostnames +// for each node and filtering out nodes that cannot be resolved. +func buildReplicas(nodeNames []string, shard string, hostnameResolver func(nodeName string) (string, bool)) []types.Replica { + if len(nodeNames) == 0 { + return []types.Replica{} + } + + replicas := make([]types.Replica, 0, len(nodeNames)) + for _, nodeName := range nodeNames { + if hostAddr, ok := hostnameResolver(nodeName); ok { + replicas = append(replicas, types.Replica{ + NodeName: nodeName, + ShardName: shard, + HostAddr: hostAddr, + }) + } + } + return replicas +} + +// validateTenant for a single-tenant router checks the tenant is empty and returns an error if it is not. +func (r *singleTenantRouter) validateTenant(tenant string) error { + if tenant != "" { + return objects.NewErrMultiTenancy(fmt.Errorf("class %s has multi-tenancy disabled, but request was with tenant", r.collection)) + } + return nil +} + +// NodeHostname returns the hostname for the given node name in single-tenant collections. 
+func (r *singleTenantRouter) NodeHostname(nodeName string) (string, bool) { + return r.nodeSelector.NodeHostname(nodeName) +} + +// AllHostnames returns all known hostnames in the cluster for single-tenant collections. +func (r *singleTenantRouter) AllHostnames() []string { + return r.nodeSelector.AllHostnames() +} + +// GetReadWriteReplicasLocation returns read and write replicas for single-tenant collections. +func (r *singleTenantRouter) GetReadWriteReplicasLocation(collection string, tenant string, shard string) (types.ReadReplicaSet, types.WriteReplicaSet, error) { + if err := r.validateTenant(tenant); err != nil { + return types.ReadReplicaSet{}, types.WriteReplicaSet{}, err + } + + readReplicas, err := r.getReadReplicasLocation(collection, tenant, shard) + if err != nil { + return types.ReadReplicaSet{}, types.WriteReplicaSet{}, err + } + writeReplicas, err := r.getWriteReplicasLocation(collection, tenant, shard) + if err != nil { + return types.ReadReplicaSet{}, types.WriteReplicaSet{}, err + } + return readReplicas, writeReplicas, nil +} + +// GetWriteReplicasLocation returns write replicas for single-tenant collections. +func (r *singleTenantRouter) GetWriteReplicasLocation(collection string, tenant string, shard string) (types.WriteReplicaSet, error) { + if err := r.validateTenant(tenant); err != nil { + return types.WriteReplicaSet{}, err + } + writeReplicas, err := r.getWriteReplicasLocation(collection, tenant, shard) + if err != nil { + return types.WriteReplicaSet{}, err + } + return writeReplicas, nil +} + +// GetReadReplicasLocation returns read replicas for single-tenant collections. 
+func (r *singleTenantRouter) GetReadReplicasLocation(collection string, tenant string, shard string) (types.ReadReplicaSet, error) { + if err := r.validateTenant(tenant); err != nil { + return types.ReadReplicaSet{}, err + } + readReplicas, err := r.getReadReplicasLocation(collection, tenant, shard) + if err != nil { + return types.ReadReplicaSet{}, err + } + return readReplicas, nil +} + +// getReadReplicasLocation returns only read replicas for single-tenant collections. +func (r *singleTenantRouter) getReadReplicasLocation(collection string, tenant string, shard string) (types.ReadReplicaSet, error) { + targetShards, err := r.targetShards(collection, shard) + if err != nil { + return types.ReadReplicaSet{}, err + } + + var replicas []types.Replica + + for _, shardName := range targetShards { + readReplica, err := r.readReplicasForShard(collection, tenant, shardName) + if err != nil { + return types.ReadReplicaSet{}, err + } + + replicas = append(replicas, readReplica...) + } + + return types.ReadReplicaSet{Replicas: replicas}, nil +} + +// getWriteReplicasLocation returns only write replicas for single-tenant collections. +func (r *singleTenantRouter) getWriteReplicasLocation(collection string, tenant string, shard string) (types.WriteReplicaSet, error) { + targetShards, err := r.targetShards(collection, shard) + if err != nil { + return types.WriteReplicaSet{}, err + } + + var replicas []types.Replica + var additionalReplicas []types.Replica + + for _, shardName := range targetShards { + writeReplica, additionalReplica, err := r.writeReplicasForShard(collection, tenant, shardName) + if err != nil { + return types.WriteReplicaSet{}, err + } + + replicas = append(replicas, writeReplica...) + additionalReplicas = append(additionalReplicas, additionalReplica...) 
+ } + + return types.WriteReplicaSet{Replicas: replicas, AdditionalReplicas: additionalReplicas}, nil +} + +// targetShards returns either all shards or a single one, depending on the value of the shard parameter. +func (r *singleTenantRouter) targetShards(collection, shardName string) ([]string, error) { + shards, err := r.schemaReader.Shards(collection) + if err != nil { + return nil, err + } + if shardName == "" { + return shards, nil + } + + found := false + for _, shard := range shards { + if shard == shardName { + found = true + break + } + } + if !found { + return nil, fmt.Errorf("error while trying to find shard: %s in collection: %s", shardName, collection) + } + return []string{shardName}, nil +} + +// readReplicasForShard gathers only read replicas for one shard. +func (r *singleTenantRouter) readReplicasForShard(collection, tenant, shard string) ([]types.Replica, error) { + replicas, err := r.schemaReader.ShardReplicas(collection, shard) + if err != nil { + return nil, fmt.Errorf("error while getting replicas for collection %q shard %q: %w", collection, shard, err) + } + + readNodeNames := r.replicationFSMReader.FilterOneShardReplicasRead(collection, shard, replicas) + return buildReplicas(readNodeNames, shard, r.nodeSelector.NodeHostname), nil +} + +// writeReplicasForShard gathers only write and additional write replicas for one shard. 
+func (r *singleTenantRouter) writeReplicasForShard(collection, tenant, shard string) (write, additional []types.Replica, err error) { + replicas, err := r.schemaReader.ShardReplicas(collection, shard) + if err != nil { + return nil, nil, fmt.Errorf("error while getting replicas for collection %q shard %q: %w", collection, shard, err) + } + + writeNodeNames, additionalWriteNodeNames := r.replicationFSMReader.FilterOneShardReplicasWrite(collection, shard, replicas) + + write = buildReplicas(writeNodeNames, shard, r.nodeSelector.NodeHostname) + additional = buildReplicas(additionalWriteNodeNames, shard, r.nodeSelector.NodeHostname) + + return write, additional, nil +} + +// BuildReadRoutingPlan constructs a read routing plan for single-tenant collections. +func (r *singleTenantRouter) BuildReadRoutingPlan(params types.RoutingPlanBuildOptions) (types.ReadRoutingPlan, error) { + if err := r.validateTenant(params.Tenant); err != nil { + return types.ReadRoutingPlan{}, err + } + return r.buildReadRoutingPlan(params) +} + +// buildReadRoutingPlan constructs a read routing plan for single-tenant collections. 
+func (r *singleTenantRouter) buildReadRoutingPlan(params types.RoutingPlanBuildOptions) (types.ReadRoutingPlan, error) { + readReplicas, err := r.getReadReplicasLocation(r.collection, params.Tenant, params.Shard) + if err != nil { + return types.ReadRoutingPlan{}, err + } + + if len(readReplicas.Replicas) == 0 { + return types.ReadRoutingPlan{}, fmt.Errorf("no read replica found") + } + + cl, err := readReplicas.ValidateConsistencyLevel(params.ConsistencyLevel) + if err != nil { + return types.ReadRoutingPlan{}, err + } + + orderedReplicas := sort(readReplicas.Replicas, preferredNode(params.DirectCandidateNode, r.nodeSelector.LocalName())) + + plan := types.ReadRoutingPlan{ + LocalHostname: r.nodeSelector.LocalName(), + Shard: params.Shard, + Tenant: params.Tenant, + ReplicaSet: types.ReadReplicaSet{ + Replicas: orderedReplicas, + }, + ConsistencyLevel: params.ConsistencyLevel, + IntConsistencyLevel: cl, + } + + return plan, nil +} + +// BuildWriteRoutingPlan constructs a write routing plan for single-tenant collections. +func (r *singleTenantRouter) BuildWriteRoutingPlan(params types.RoutingPlanBuildOptions) (types.WriteRoutingPlan, error) { + if err := r.validateTenant(params.Tenant); err != nil { + return types.WriteRoutingPlan{}, err + } + return r.buildWriteRoutingPlan(params) +} + +// buildWriteRoutingPlan constructs a write routing plan for single-tenant collections. 
+func (r *singleTenantRouter) buildWriteRoutingPlan(params types.RoutingPlanBuildOptions) (types.WriteRoutingPlan, error) { + writeReplicas, err := r.getWriteReplicasLocation(r.collection, params.Tenant, params.Shard) + if err != nil { + return types.WriteRoutingPlan{}, err + } + + if len(writeReplicas.Replicas) == 0 { + return types.WriteRoutingPlan{}, fmt.Errorf("no write replica found") + } + + cl, err := writeReplicas.ValidateConsistencyLevel(params.ConsistencyLevel) + if err != nil { + return types.WriteRoutingPlan{}, err + } + + sortedWriteReplicas := sort(writeReplicas.Replicas, preferredNode(params.DirectCandidateNode, r.nodeSelector.LocalName())) + + plan := types.WriteRoutingPlan{ + Shard: params.Shard, + Tenant: params.Tenant, + ReplicaSet: types.WriteReplicaSet{ + Replicas: sortedWriteReplicas, + AdditionalReplicas: writeReplicas.AdditionalReplicas, + }, + ConsistencyLevel: params.ConsistencyLevel, + IntConsistencyLevel: cl, + } + + return plan, nil +} + +// BuildRoutingPlanOptions constructs routing plan options for single-tenant collections. +func (r *singleTenantRouter) BuildRoutingPlanOptions(_, shard string, cl types.ConsistencyLevel, directCandidate string) types.RoutingPlanBuildOptions { + return types.RoutingPlanBuildOptions{ + Shard: shard, + Tenant: "", + ConsistencyLevel: cl, + DirectCandidateNode: directCandidate, + } +} + +// validateTenant for a multi-tenant router checks the tenant is not empty and returns an error if it is. +func (r *multiTenantRouter) validateTenant(tenant string) error { + if tenant == "" { + return objects.NewErrMultiTenancy(fmt.Errorf("class %s has multi-tenancy enabled, but request was without tenant", r.collection)) + } + return nil +} + +// NodeHostname returns the hostname for the given node name in multi-tenant collections. 
+func (r *multiTenantRouter) NodeHostname(nodeName string) (string, bool) { + return r.nodeSelector.NodeHostname(nodeName) +} + +// AllHostnames returns all known hostnames in the cluster for multi-tenant collections. +func (r *multiTenantRouter) AllHostnames() []string { + return r.nodeSelector.AllHostnames() +} + +// GetReadWriteReplicasLocation returns read and write replicas for multi-tenant collections. +func (r *multiTenantRouter) GetReadWriteReplicasLocation(collection string, tenant string, shard string) (types.ReadReplicaSet, types.WriteReplicaSet, error) { + shard = tenantShard(shard, tenant) + if err := r.validateTenant(tenant); err != nil { + return types.ReadReplicaSet{}, types.WriteReplicaSet{}, err + } + if err := r.validateTenantShard(tenant, shard); err != nil { + return types.ReadReplicaSet{}, types.WriteReplicaSet{}, err + } + + readReplicas, err := r.getReadReplicasLocation(collection, tenant, shard) + if err != nil { + return types.ReadReplicaSet{}, types.WriteReplicaSet{}, err + } + writeReplicas, err := r.getWriteReplicasLocation(collection, tenant, shard) + if err != nil { + return types.ReadReplicaSet{}, types.WriteReplicaSet{}, err + } + return readReplicas, writeReplicas, nil +} + +// GetWriteReplicasLocation returns write replicas for multi-tenant collections. +func (r *multiTenantRouter) GetWriteReplicasLocation(collection string, tenant string, shard string) (types.WriteReplicaSet, error) { + shard = tenantShard(shard, tenant) + if err := r.validateTenant(tenant); err != nil { + return types.WriteReplicaSet{}, err + } + if err := r.validateTenantShard(tenant, shard); err != nil { + return types.WriteReplicaSet{}, err + } + return r.getWriteReplicasLocation(collection, tenant, shard) +} + +// GetReadReplicasLocation returns read replicas for multi-tenant collections. 
+func (r *multiTenantRouter) GetReadReplicasLocation(collection string, tenant string, shard string) (types.ReadReplicaSet, error) { + shard = tenantShard(shard, tenant) + if err := r.validateTenant(tenant); err != nil { + return types.ReadReplicaSet{}, err + } + if err := r.validateTenantShard(tenant, shard); err != nil { + return types.ReadReplicaSet{}, err + } + return r.getReadReplicasLocation(collection, tenant, shard) +} + +// getReadReplicasLocation returns only read replicas for multi-tenant collections. +func (r *multiTenantRouter) getReadReplicasLocation(collection string, tenant, shard string) (types.ReadReplicaSet, error) { + tenantStatus, err := r.schemaGetter.OptimisticTenantStatus(context.TODO(), collection, tenant) + if err != nil { + return types.ReadReplicaSet{}, objects.NewErrMultiTenancy(err) + } + + if err = r.tenantExistsAndIsActive(tenantStatus, tenant); err != nil { + return types.ReadReplicaSet{}, err + } + + replicas, err := r.schemaReader.ShardReplicas(collection, shard) + if err != nil { + return types.ReadReplicaSet{}, err + } + + readNodeNames := r.replicationFSMReader.FilterOneShardReplicasRead(collection, shard, replicas) + readReplicas := buildReplicas(readNodeNames, shard, r.nodeSelector.NodeHostname) + + return types.ReadReplicaSet{Replicas: readReplicas}, nil +} + +// getWriteReplicasLocation returns only write replicas for multi-tenant collections. 
+func (r *multiTenantRouter) getWriteReplicasLocation(collection string, tenant, shard string) (types.WriteReplicaSet, error) { + tenantStatus, err := r.schemaGetter.OptimisticTenantStatus(context.TODO(), collection, tenant) + if err != nil { + return types.WriteReplicaSet{}, objects.NewErrMultiTenancy(err) + } + + if err = r.tenantExistsAndIsActive(tenantStatus, tenant); err != nil { + return types.WriteReplicaSet{}, err + } + + replicas, err := r.schemaReader.ShardReplicas(collection, shard) + if err != nil { + return types.WriteReplicaSet{}, err + } + + writeNodeNames, additionalWriteNodeNames := r.replicationFSMReader.FilterOneShardReplicasWrite(collection, shard, replicas) + writeReplicas := buildReplicas(writeNodeNames, shard, r.nodeSelector.NodeHostname) + additionalWriteReplicas := buildReplicas(additionalWriteNodeNames, shard, r.nodeSelector.NodeHostname) + + return types.WriteReplicaSet{Replicas: writeReplicas, AdditionalReplicas: additionalWriteReplicas}, nil +} + +// tenantExistsAndIsActive validates that the tenant exists and is in HOT status. +func (r *multiTenantRouter) tenantExistsAndIsActive(tenantStatus map[string]string, tenant string) error { + status, ok := tenantStatus[tenant] + if !ok { + return objects.NewErrMultiTenancy(fmt.Errorf("%w: %q", enterrors.ErrTenantNotFound, tenant)) + } + if status != models.TenantActivityStatusHOT { + return objects.NewErrMultiTenancy(fmt.Errorf("%w: '%s'", enterrors.ErrTenantNotActive, tenant)) + } + return nil +} + +// BuildWriteRoutingPlan constructs a write routing plan for multi-tenant collections. +func (r *multiTenantRouter) BuildWriteRoutingPlan(params types.RoutingPlanBuildOptions) (types.WriteRoutingPlan, error) { + params.Shard = tenantShard(params.Shard, params.Tenant) + if err := r.validateTenant(params.Tenant); err != nil { + return types.WriteRoutingPlan{}, err + } + return r.buildWriteRoutingPlan(params) +} + +// buildWriteRoutingPlan constructs a write routing plan for multi-tenant collections. 
+func (r *multiTenantRouter) buildWriteRoutingPlan(params types.RoutingPlanBuildOptions) (types.WriteRoutingPlan, error) { + writeReplicas, err := r.getWriteReplicasLocation(r.collection, params.Tenant, params.Shard) + if err != nil { + return types.WriteRoutingPlan{}, err + } + + if len(writeReplicas.Replicas) == 0 { + return types.WriteRoutingPlan{}, fmt.Errorf("no write replica found") + } + + cl, err := writeReplicas.ValidateConsistencyLevel(params.ConsistencyLevel) + if err != nil { + return types.WriteRoutingPlan{}, err + } + + orderedReplicas := sort(writeReplicas.Replicas, preferredNode(params.DirectCandidateNode, r.nodeSelector.LocalName())) + + plan := types.WriteRoutingPlan{ + Shard: params.Shard, + Tenant: params.Tenant, + ReplicaSet: types.WriteReplicaSet{ + Replicas: orderedReplicas, + AdditionalReplicas: writeReplicas.AdditionalReplicas, + }, + ConsistencyLevel: params.ConsistencyLevel, + IntConsistencyLevel: cl, + } + + return plan, nil +} + +// BuildReadRoutingPlan constructs a read routing plan for multi-tenant collections. +func (r *multiTenantRouter) BuildReadRoutingPlan(params types.RoutingPlanBuildOptions) (types.ReadRoutingPlan, error) { + params.Shard = tenantShard(params.Shard, params.Tenant) + if err := r.validateTenant(params.Tenant); err != nil { + return types.ReadRoutingPlan{}, err + } + return r.buildReadRoutingPlan(params) +} + +// buildReadRoutingPlan constructs a read routing plan for multi-tenant collections. 
+func (r *multiTenantRouter) buildReadRoutingPlan(params types.RoutingPlanBuildOptions) (types.ReadRoutingPlan, error) { + readReplicas, err := r.getReadReplicasLocation(r.collection, params.Tenant, params.Shard) + if err != nil { + return types.ReadRoutingPlan{}, err + } + + if len(readReplicas.Replicas) == 0 { + return types.ReadRoutingPlan{}, fmt.Errorf("no read replica found") + } + + cl, err := readReplicas.ValidateConsistencyLevel(params.ConsistencyLevel) + if err != nil { + return types.ReadRoutingPlan{}, err + } + + orderedReplicas := sort(readReplicas.Replicas, preferredNode(params.DirectCandidateNode, r.nodeSelector.LocalName())) + + return types.ReadRoutingPlan{ + LocalHostname: r.nodeSelector.LocalName(), + Shard: params.Shard, + Tenant: params.Tenant, + ReplicaSet: types.ReadReplicaSet{ + Replicas: orderedReplicas, + }, + ConsistencyLevel: params.ConsistencyLevel, + IntConsistencyLevel: cl, + }, nil +} + +// validateTenantShard validates that the tenant and shard are consistent. +func (r *multiTenantRouter) validateTenantShard(tenant, shard string) error { + if shard != "" && tenant != "" && shard != tenant { + return fmt.Errorf("invalid tenant shard %q, expected %q", shard, tenant) + } + + return nil +} + +// BuildRoutingPlanOptions constructs routing plan options for multi-tenant collections. +func (r *multiTenantRouter) BuildRoutingPlanOptions(tenant, shard string, cl types.ConsistencyLevel, directCandidate string) types.RoutingPlanBuildOptions { + return types.RoutingPlanBuildOptions{ + Shard: shard, + Tenant: tenant, + ConsistencyLevel: cl, + DirectCandidateNode: directCandidate, + } +} + +// tenantShard normalizes the shard parameter by using the tenant name as the shard +// if no explicit shard is provided, as required in multi-tenant mode. 
+func tenantShard(shard string, tenant string) string { + if shard == "" { + shard = tenant + } + return shard +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/router/router_fsm_test.go b/platform/dbops/binaries/weaviate-src/cluster/router/router_fsm_test.go new file mode 100644 index 0000000000000000000000000000000000000000..0f0b2b0a16785917c3e70ef2bf381ff3b1c68b4e --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/router/router_fsm_test.go @@ -0,0 +1,374 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package router_test + +import ( + "math/rand" + "strconv" + "testing" + + "github.com/weaviate/weaviate/usecases/sharding" + "github.com/weaviate/weaviate/usecases/sharding/config" + + "github.com/weaviate/weaviate/entities/models" + + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/cluster/proto/api" + "github.com/weaviate/weaviate/cluster/replication" + "github.com/weaviate/weaviate/cluster/router" + "github.com/weaviate/weaviate/cluster/router/types" + clusterMocks "github.com/weaviate/weaviate/usecases/cluster/mocks" + "github.com/weaviate/weaviate/usecases/schema" +) + +func TestReadRoutingWithFSM(t *testing.T) { + testCases := []struct { + name string + partitioningEnabled bool + allShardNodes []string + opStatus api.ShardReplicationState + preRoutingPlanAction func(fsm *replication.ShardReplicationFSM) + directCandidate string + localNodeName string + expectedReplicas types.ReadReplicaSet + expectedErrorStr string + }{ + { + name: "registered", + partitioningEnabled: rand.Uint64()%2 == 0, + allShardNodes: []string{"node1", "node2"}, + opStatus: api.REGISTERED, + expectedReplicas: 
types.ReadReplicaSet{Replicas: []types.Replica{{NodeName: "node1", ShardName: "shard1", HostAddr: "node1"}}}, + directCandidate: "node1", + localNodeName: "node1", + }, + { + name: "hydrating", + partitioningEnabled: rand.Uint64()%2 == 0, + allShardNodes: []string{"node1", "node2"}, + opStatus: api.HYDRATING, + expectedReplicas: types.ReadReplicaSet{Replicas: []types.Replica{{NodeName: "node1", ShardName: "shard1", HostAddr: "node1"}}}, + directCandidate: "node1", + localNodeName: "node1", + }, + { + name: "finalizing", + partitioningEnabled: rand.Uint64()%2 == 0, + allShardNodes: []string{"node1", "node2"}, + opStatus: api.FINALIZING, + expectedReplicas: types.ReadReplicaSet{Replicas: []types.Replica{{NodeName: "node1", ShardName: "shard1", HostAddr: "node1"}}}, + directCandidate: "node1", + localNodeName: "node1", + }, + { + name: "ready", + partitioningEnabled: rand.Uint64()%2 == 0, + allShardNodes: []string{"node1", "node2"}, + opStatus: api.READY, + expectedReplicas: types.ReadReplicaSet{Replicas: []types.Replica{{NodeName: "node1", ShardName: "shard1", HostAddr: "node1"}, {NodeName: "node2", ShardName: "shard1", HostAddr: "node2"}}}, + directCandidate: "node1", + localNodeName: "node1", + }, + { + name: "dehydrating", + partitioningEnabled: rand.Uint64()%2 == 0, + allShardNodes: []string{"node1", "node2"}, + opStatus: api.DEHYDRATING, + expectedReplicas: types.ReadReplicaSet{Replicas: []types.Replica{{NodeName: "node2", ShardName: "shard1", HostAddr: "node2"}}}, + directCandidate: "node1", + localNodeName: "node1", + }, + { + name: "cancelled", + partitioningEnabled: rand.Uint64()%2 == 0, + allShardNodes: []string{"node1", "node2"}, + opStatus: api.CANCELLED, + expectedReplicas: types.ReadReplicaSet{Replicas: []types.Replica{{NodeName: "node1", ShardName: "shard1", HostAddr: "node1"}}}, + directCandidate: "node1", + localNodeName: "node1", + }, + { + name: "ready deleted", + partitioningEnabled: rand.Uint64()%2 == 0, + allShardNodes: []string{"node1", 
"node2"}, + opStatus: api.READY, + preRoutingPlanAction: func(fsm *replication.ShardReplicationFSM) { + fsm.CancelReplication(&api.ReplicationCancelRequest{ + Version: api.ReplicationCommandVersionV0, + Uuid: "00000000-0000-0000-0000-000000000000", + }) + }, + expectedReplicas: types.ReadReplicaSet{Replicas: []types.Replica{{NodeName: "node1", ShardName: "shard1", HostAddr: "node1"}, {NodeName: "node2", ShardName: "shard1", HostAddr: "node2"}}}, + directCandidate: "node1", + localNodeName: "node1", + }, + { + name: "registered extra node", + partitioningEnabled: rand.Uint64()%2 == 0, + allShardNodes: []string{"node1", "node2", "node3"}, + opStatus: api.REGISTERED, + expectedReplicas: types.ReadReplicaSet{Replicas: []types.Replica{{NodeName: "node1", ShardName: "shard1", HostAddr: "node1"}, {NodeName: "node3", ShardName: "shard1", HostAddr: "node3"}}}, + directCandidate: "node1", + localNodeName: "node1", + }, + } + for _, testCase := range testCases { + t.Run(testCase.name+"_partitioning_enabled_"+strconv.FormatBool(testCase.partitioningEnabled), func(t *testing.T) { + reg := prometheus.NewRegistry() + shardReplicationFSM := replication.NewShardReplicationFSM(reg) + clusterState := clusterMocks.NewMockNodeSelector(testCase.allShardNodes...) 
+ schemaReaderMock := schema.NewMockSchemaReader(t) + schemaGetterMock := schema.NewMockSchemaGetter(t) + schemaGetterMock.EXPECT().OptimisticTenantStatus(mock.Anything, "collection1", "shard1").Return( + map[string]string{ + "shard1": models.TenantActivityStatusHOT, + }, nil).Maybe() + state := &sharding.State{ + IndexID: "index-001", + Config: config.Config{ + VirtualPerPhysical: 0, + DesiredCount: 1, + ActualCount: 1, + DesiredVirtualCount: 0, + ActualVirtualCount: 0, + Key: "", + Strategy: "", + Function: "", + }, + Physical: map[string]sharding.Physical{ + "shard1": { + Name: "shard1", + OwnsVirtual: []string{}, + OwnsPercentage: 100, + LegacyBelongsToNodeForBackwardCompat: "", + BelongsToNodes: testCase.expectedReplicas.NodeNames(), + Status: testCase.opStatus.String(), + }, + }, + Virtual: []sharding.Virtual{}, + PartitioningEnabled: false, + ReplicationFactor: 1, + } + schemaReaderMock.EXPECT().Shards(mock.Anything).Return(state.AllPhysicalShards(), nil).Maybe() + schemaReaderMock.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, state) + }).Maybe() + schemaReaderMock.On("ShardReplicas", mock.Anything, mock.Anything).Return(func(class string, shard string) ([]string, error) { + return testCase.allShardNodes, nil + }) + myRouter := router.NewBuilder("collection1", testCase.partitioningEnabled, clusterState, schemaGetterMock, schemaReaderMock, shardReplicationFSM).Build() + + // Setup the FSM with the right state + shardReplicationFSM.Replicate(1, &api.ReplicationReplicateShardRequest{ + Version: api.ReplicationCommandVersionV0, + SourceNode: "node1", + SourceCollection: "collection1", + SourceShard: "shard1", + TargetNode: "node2", + Uuid: "00000000-0000-0000-0000-000000000000", + }) + err := shardReplicationFSM.UpdateReplicationOpStatus(&api.ReplicationUpdateOpStateRequest{ + Version: 
api.ReplicationCommandVersionV0, + Id: 1, + State: testCase.opStatus, + }) + require.NoError(t, err) + if testCase.preRoutingPlanAction != nil { + testCase.preRoutingPlanAction(shardReplicationFSM) + } + + tenant := "" + if testCase.partitioningEnabled { + tenant = "shard1" + } + // Build the routing plan + readPlan, err := myRouter.BuildReadRoutingPlan(types.RoutingPlanBuildOptions{ + Shard: "shard1", + Tenant: tenant, + }) + if testCase.expectedErrorStr != "" { + require.Error(t, err) + require.Contains(t, err.Error(), testCase.expectedErrorStr) + } else { + require.NoError(t, err) + require.Equal(t, testCase.expectedReplicas, readPlan.ReplicaSet, "test case: %s", testCase.name) + } + }) + } +} + +func TestWriteRoutingWithFSM(t *testing.T) { + testCases := []struct { + name string + partitioningEnabled bool + allShardNodes []string + opStatus api.ShardReplicationState + preRoutingPlanAction func(fsm *replication.ShardReplicationFSM) + directCandidate string + localNodeName string + expectedReplicas []types.Replica + expectedErrorStr string + }{ + { + name: "registered", + partitioningEnabled: rand.Uint64()%2 == 0, + allShardNodes: []string{"node1", "node2"}, + opStatus: api.REGISTERED, + expectedReplicas: []types.Replica{{NodeName: "node1", ShardName: "shard1", HostAddr: "node1"}}, + directCandidate: "node1", + localNodeName: "node1", + }, + { + name: "hydrating", + partitioningEnabled: true, + allShardNodes: []string{"node1", "node2"}, + opStatus: api.HYDRATING, + expectedReplicas: []types.Replica{{NodeName: "node1", ShardName: "shard1", HostAddr: "node1"}}, + directCandidate: "node1", + localNodeName: "node1", + }, + { + name: "finalizing", + partitioningEnabled: rand.Uint64()%2 == 0, + allShardNodes: []string{"node1", "node2"}, + opStatus: api.FINALIZING, + expectedReplicas: []types.Replica{{NodeName: "node1", ShardName: "shard1", HostAddr: "node1"}}, + directCandidate: "node1", + localNodeName: "node1", + }, + { + name: "ready", + partitioningEnabled: 
rand.Uint64()%2 == 0, + allShardNodes: []string{"node1", "node2"}, + opStatus: api.READY, + expectedReplicas: []types.Replica{{NodeName: "node1", ShardName: "shard1", HostAddr: "node1"}, {NodeName: "node2", ShardName: "shard1", HostAddr: "node2"}}, + directCandidate: "node1", + localNodeName: "node1", + }, + { + name: "dehydrating", + partitioningEnabled: rand.Uint64()%2 == 0, + allShardNodes: []string{"node1", "node2"}, + opStatus: api.DEHYDRATING, + expectedReplicas: []types.Replica{{NodeName: "node2", ShardName: "shard1", HostAddr: "node2"}}, + directCandidate: "node1", + localNodeName: "node1", + }, + { + name: "cancelled", + partitioningEnabled: rand.Uint64()%2 == 0, + allShardNodes: []string{"node1", "node2"}, + opStatus: api.CANCELLED, + expectedReplicas: []types.Replica{{NodeName: "node1", ShardName: "shard1", HostAddr: "node1"}}, + directCandidate: "node1", + localNodeName: "node1", + }, + { + name: "ready deleted", + partitioningEnabled: rand.Uint64()%2 == 0, + allShardNodes: []string{"node1", "node2"}, + opStatus: api.READY, + preRoutingPlanAction: func(fsm *replication.ShardReplicationFSM) { + fsm.CancelReplication(&api.ReplicationCancelRequest{ + Version: api.ReplicationCommandVersionV0, + Uuid: "00000000-0000-0000-0000-000000000000", + }) + }, + expectedReplicas: []types.Replica{{NodeName: "node1", ShardName: "shard1", HostAddr: "node1"}, {NodeName: "node2", ShardName: "shard1", HostAddr: "node2"}}, + directCandidate: "node1", + localNodeName: "node1", + }, + { + name: "registered extra node", + partitioningEnabled: rand.Uint64()%2 == 0, + allShardNodes: []string{"node1", "node2", "node3"}, + opStatus: api.REGISTERED, + expectedReplicas: []types.Replica{{NodeName: "node1", ShardName: "shard1", HostAddr: "node1"}, {NodeName: "node3", ShardName: "shard1", HostAddr: "node3"}}, + directCandidate: "node1", + localNodeName: "node1", + }, + } + for _, testCase := range testCases { + 
t.Run(testCase.name+"_partitioning_enabled_"+strconv.FormatBool(testCase.partitioningEnabled), func(t *testing.T) { + reg := prometheus.NewRegistry() + shardReplicationFSM := replication.NewShardReplicationFSM(reg) + clusterState := clusterMocks.NewMockNodeSelector(testCase.allShardNodes...) + schemaReaderMock := schema.NewMockSchemaReader(t) + schemaGetterMock := schema.NewMockSchemaGetter(t) + schemaGetterMock.EXPECT().OptimisticTenantStatus(mock.Anything, "collection1", "shard1").Return( + map[string]string{ + "shard1": models.TenantActivityStatusHOT, + }, nil).Maybe() + state := &sharding.State{ + IndexID: "index-001", + Config: config.Config{}, + Physical: map[string]sharding.Physical{ + "shard1": { + Name: "shard1", + OwnsVirtual: []string{}, + OwnsPercentage: 100, + LegacyBelongsToNodeForBackwardCompat: "", + BelongsToNodes: testCase.allShardNodes, + Status: testCase.opStatus.String(), + }, + }, + Virtual: []sharding.Virtual{}, + PartitioningEnabled: false, + ReplicationFactor: 1, + } + schemaReaderMock.EXPECT().Shards(mock.Anything).Return(state.AllPhysicalShards(), nil).Maybe() + schemaReaderMock.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, state) + }).Maybe() + schemaReaderMock.On("ShardReplicas", mock.Anything, mock.Anything).Return(func(class string, shard string) ([]string, error) { + return testCase.allShardNodes, nil + }) + myRouter := router.NewBuilder("collection1", testCase.partitioningEnabled, clusterState, schemaGetterMock, schemaReaderMock, shardReplicationFSM).Build() + + // Setup the FSM with the right state + shardReplicationFSM.Replicate(1, &api.ReplicationReplicateShardRequest{ + Version: api.ReplicationCommandVersionV0, + SourceNode: "node1", + SourceCollection: "collection1", + SourceShard: "shard1", + TargetNode: "node2", + Uuid: "00000000-0000-0000-0000-000000000000", + }) + 
err := shardReplicationFSM.UpdateReplicationOpStatus(&api.ReplicationUpdateOpStateRequest{ + Version: api.ReplicationCommandVersionV0, + Id: 1, + State: testCase.opStatus, + }) + require.NoError(t, err) + if testCase.preRoutingPlanAction != nil { + testCase.preRoutingPlanAction(shardReplicationFSM) + } + + tenant := "" + if testCase.partitioningEnabled { + tenant = "shard1" + } + ws, err := myRouter.GetWriteReplicasLocation("collection1", tenant, "shard1") + if testCase.expectedErrorStr != "" { + require.Error(t, err) + require.Contains(t, err.Error(), testCase.expectedErrorStr) + } else { + require.NoError(t, err) + require.Equal(t, testCase.expectedReplicas, ws.Replicas, "test case: %s", testCase.name) + } + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/router/router_test.go b/platform/dbops/binaries/weaviate-src/cluster/router/router_test.go new file mode 100644 index 0000000000000000000000000000000000000000..3c0db2c9c27f36ce0701f5f7679900aaa96ee583 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/router/router_test.go @@ -0,0 +1,2133 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package router_test + +import ( + "fmt" + "math/rand" + "sort" + "testing" + + "github.com/stretchr/testify/mock" + "github.com/weaviate/weaviate/entities/models" + + "github.com/pkg/errors" + "github.com/stretchr/testify/require" + replicationTypes "github.com/weaviate/weaviate/cluster/replication/types" + "github.com/weaviate/weaviate/cluster/router" + "github.com/weaviate/weaviate/cluster/router/types" + + "github.com/weaviate/weaviate/usecases/cluster" + "github.com/weaviate/weaviate/usecases/cluster/mocks" + "github.com/weaviate/weaviate/usecases/schema" + "github.com/weaviate/weaviate/usecases/sharding" + "github.com/weaviate/weaviate/usecases/sharding/config" +) + +func createShardingStateWithShards(shards []string) *sharding.State { + state := &sharding.State{ + Physical: make(map[string]sharding.Physical), + Config: config.Config{}, + } + + for _, shard := range shards { + state.Physical[shard] = sharding.Physical{ + Name: shard, + BelongsToNodes: []string{"node1", "node2"}, + OwnsPercentage: 1.0 / float64(len(shards)), + } + } + + return state +} + +func TestSingleTenantRouter_GetReadWriteReplicasLocation_NoShards(t *testing.T) { + mockSchemaGetter := schema.NewMockSchemaGetter(t) + mockSchemaReader := schema.NewMockSchemaReader(t) + mockReplicationFSM := replicationTypes.NewMockReplicationFSMReader(t) + mockNodeSelector := mocks.NewMockNodeSelector("node1", "node2") + + emptyState := createShardingStateWithShards([]string{}) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(emptyState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, emptyState) + }).Maybe() + + r := router.NewBuilder( + "TestClass", + false, + mockNodeSelector, + mockSchemaGetter, + mockSchemaReader, + mockReplicationFSM, + 
).Build() + + rs, ws, err := r.GetReadWriteReplicasLocation("TestClass", "", "") + + require.NoError(t, err, "unexpected error while getting read/write replica locations") + require.Empty(t, rs.Replicas, "read replica locations should be empty") + require.Empty(t, ws.Replicas, "write replica locations should be empty") + require.Empty(t, ws.AdditionalReplicas, "additional write replica locations should be empty") +} + +func TestSingleTenantRouter_GetReadWriteReplicasLocation_OneShard(t *testing.T) { + mockSchemaGetter := schema.NewMockSchemaGetter(t) + mockSchemaReader := schema.NewMockSchemaReader(t) + mockReplicationFSM := replicationTypes.NewMockReplicationFSMReader(t) + mockNodeSelector := mocks.NewMockNodeSelector("node1", "node2", "node3") + + state := createShardingStateWithShards([]string{"shard1"}) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(state.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, state) + }).Maybe() + + mockSchemaReader.EXPECT().ShardReplicas("TestClass", "shard1").Return([]string{"node1", "node2"}, nil) + + mockReplicationFSM.EXPECT().FilterOneShardReplicasRead("TestClass", "shard1", []string{"node1", "node2"}). + Return([]string{"node1", "node2"}) + mockReplicationFSM.EXPECT().FilterOneShardReplicasWrite("TestClass", "shard1", []string{"node1", "node2"}). 
+ Return([]string{"node1"}, []string{"node2"}) + + r := router.NewBuilder( + "TestClass", + false, + mockNodeSelector, + mockSchemaGetter, + mockSchemaReader, + mockReplicationFSM, + ).Build() + + rs, ws, err := r.GetReadWriteReplicasLocation("TestClass", "", "") + + expectedReadReplicas := types.ReadReplicaSet{ + Replicas: []types.Replica{ + { + NodeName: "node1", + ShardName: "shard1", + HostAddr: "node1", + }, + { + NodeName: "node2", + ShardName: "shard1", + HostAddr: "node2", + }, + }, + } + + expectedWriteReplicas := types.WriteReplicaSet{ + Replicas: []types.Replica{ + { + NodeName: "node1", + ShardName: "shard1", + HostAddr: "node1", + }, + }, + AdditionalReplicas: []types.Replica{ + { + NodeName: "node2", + ShardName: "shard1", + HostAddr: "node2", + }, + }, + } + + require.NoError(t, err, "unexpected error while getting read/write replica locations") + require.Equal(t, expectedReadReplicas, rs) + require.Equal(t, expectedWriteReplicas, ws) +} + +func TestSingleTenantRouter_GetReadWriteReplicasLocation_MultipleShards(t *testing.T) { + mockSchemaGetter := schema.NewMockSchemaGetter(t) + mockSchemaReader := schema.NewMockSchemaReader(t) + mockReplicationFSM := replicationTypes.NewMockReplicationFSMReader(t) + mockNodeSelector := mocks.NewMockNodeSelector("node1", "node2", "node3") + + state := createShardingStateWithShards([]string{"shard1", "shard2", "shard3"}) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(state.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, state) + }).Maybe() + + mockSchemaReader.EXPECT().ShardReplicas("TestClass", "shard1").Return([]string{"node1", "node2"}, nil) + mockSchemaReader.EXPECT().ShardReplicas("TestClass", "shard2").Return([]string{"node2", "node3"}, nil) + 
mockSchemaReader.EXPECT().ShardReplicas("TestClass", "shard3").Return([]string{"node3", "node1"}, nil) + + mockReplicationFSM.EXPECT().FilterOneShardReplicasRead("TestClass", "shard1", []string{"node1", "node2"}). + Return([]string{"node1", "node2"}) + mockReplicationFSM.EXPECT().FilterOneShardReplicasWrite("TestClass", "shard1", []string{"node1", "node2"}). + Return([]string{"node1"}, []string{"node2"}) + mockReplicationFSM.EXPECT().FilterOneShardReplicasRead("TestClass", "shard2", []string{"node2", "node3"}). + Return([]string{"node2", "node3"}) + mockReplicationFSM.EXPECT().FilterOneShardReplicasWrite("TestClass", "shard2", []string{"node2", "node3"}). + Return([]string{"node2"}, []string{"node3"}) + mockReplicationFSM.EXPECT().FilterOneShardReplicasRead("TestClass", "shard3", []string{"node3", "node1"}). + Return([]string{"node3", "node1"}) + mockReplicationFSM.EXPECT().FilterOneShardReplicasWrite("TestClass", "shard3", []string{"node3", "node1"}). + Return([]string{"node3"}, []string{"node1"}) + + r := router.NewBuilder( + "TestClass", + false, + mockNodeSelector, + mockSchemaGetter, + mockSchemaReader, + mockReplicationFSM, + ).Build() + + rs, ws, err := r.GetReadWriteReplicasLocation("TestClass", "", "") + + expectedReadReplicas := []types.Replica{ + {NodeName: "node1", ShardName: "shard1", HostAddr: "node1"}, + {NodeName: "node2", ShardName: "shard1", HostAddr: "node2"}, + {NodeName: "node2", ShardName: "shard2", HostAddr: "node2"}, + {NodeName: "node3", ShardName: "shard2", HostAddr: "node3"}, + {NodeName: "node3", ShardName: "shard3", HostAddr: "node3"}, + {NodeName: "node1", ShardName: "shard3", HostAddr: "node1"}, + } + + expectedWriteReplicas := []types.Replica{ + {NodeName: "node1", ShardName: "shard1", HostAddr: "node1"}, + {NodeName: "node2", ShardName: "shard2", HostAddr: "node2"}, + {NodeName: "node3", ShardName: "shard3", HostAddr: "node3"}, + } + + expectedAdditionalWriteReplicas := []types.Replica{ + {NodeName: "node2", ShardName: "shard1", 
HostAddr: "node2"}, + {NodeName: "node3", ShardName: "shard2", HostAddr: "node3"}, + {NodeName: "node1", ShardName: "shard3", HostAddr: "node1"}, + } + + require.NoError(t, err, "unexpected error while getting read/write replica locations") + require.ElementsMatch(t, expectedReadReplicas, rs.Replicas) + require.ElementsMatch(t, expectedWriteReplicas, ws.Replicas) + require.ElementsMatch(t, expectedAdditionalWriteReplicas, ws.AdditionalReplicas) +} + +func TestSingleTenantRouter_GetWriteReplicasLocation(t *testing.T) { + mockSchemaGetter := schema.NewMockSchemaGetter(t) + mockSchemaReader := schema.NewMockSchemaReader(t) + mockReplicationFSM := replicationTypes.NewMockReplicationFSMReader(t) + mockNodeSelector := mocks.NewMockNodeSelector("node1", "node2") + + state := createShardingStateWithShards([]string{"shard1"}) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(state.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, state) + }).Maybe() + mockSchemaReader.EXPECT().ShardReplicas("TestClass", "shard1").Return([]string{"node1", "node2"}, nil) + + mockReplicationFSM.EXPECT(). + FilterOneShardReplicasWrite("TestClass", "shard1", []string{"node1", "node2"}). 
+ Return([]string{"node1"}, []string{"node2"}) + + r := router.NewBuilder( + "TestClass", + false, + mockNodeSelector, + mockSchemaGetter, + mockSchemaReader, + mockReplicationFSM, + ).Build() + + ws, err := r.GetWriteReplicasLocation("TestClass", "", "") + require.NoError(t, err, "unexpected error while getting write replicas") + + expectedWriteReplicas := []types.Replica{ + {NodeName: "node1", ShardName: "shard1", HostAddr: "node1"}, + } + + expectedAdditionalWriteReplicas := []types.Replica{ + {NodeName: "node2", ShardName: "shard1", HostAddr: "node2"}, + } + + require.ElementsMatch(t, expectedWriteReplicas, ws.Replicas) + require.ElementsMatch(t, expectedAdditionalWriteReplicas, ws.AdditionalReplicas) +} + +func TestSingleTenantRouter_GetReadReplicasLocation(t *testing.T) { + mockSchemaGetter := schema.NewMockSchemaGetter(t) + mockSchemaReader := schema.NewMockSchemaReader(t) + mockReplicationFSM := replicationTypes.NewMockReplicationFSMReader(t) + mockNodeSelector := mocks.NewMockNodeSelector("node1", "node2") + + state := createShardingStateWithShards([]string{"shard1"}) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(state.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, state) + }).Maybe() + + mockSchemaReader.EXPECT().ShardReplicas("TestClass", "shard1").Return([]string{"node1", "node2"}, nil) + mockReplicationFSM.EXPECT().FilterOneShardReplicasRead("TestClass", "shard1", []string{"node1", "node2"}). 
+ Return([]string{"node1", "node2"}) + + r := router.NewBuilder( + "TestClass", + false, + mockNodeSelector, + mockSchemaGetter, + mockSchemaReader, + mockReplicationFSM, + ).Build() + + readReplicas, err := r.GetReadReplicasLocation("TestClass", "", "") + expectedReadReplicas := []types.Replica{ + {NodeName: "node1", ShardName: "shard1", HostAddr: "node1"}, + {NodeName: "node2", ShardName: "shard1", HostAddr: "node2"}, + } + + require.NoError(t, err) + require.ElementsMatch(t, expectedReadReplicas, readReplicas.Replicas) +} + +func TestSingleTenantRouter_ErrorInMiddleOfShardProcessing(t *testing.T) { + mockSchemaGetter := schema.NewMockSchemaGetter(t) + mockSchemaReader := schema.NewMockSchemaReader(t) + mockReplicationFSM := replicationTypes.NewMockReplicationFSMReader(t) + mockNodeSelector := mocks.NewMockNodeSelector("node1") + + state := createShardingStateWithShards([]string{"shard1", "shard2", "shard3"}) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(state.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, state) + }).Maybe() + + // First shard success + mockSchemaReader.EXPECT().ShardReplicas("TestClass", "shard1").Return([]string{"node1"}, nil) + mockReplicationFSM.EXPECT().FilterOneShardReplicasRead("TestClass", "shard1", []string{"node1"}). 
+ Return([]string{"node1"}) + // Second shard failure + mockSchemaReader.EXPECT().ShardReplicas("TestClass", "shard2").Return([]string{}, errors.New("shard2 error")) + + r := router.NewBuilder( + "TestClass", + false, + mockNodeSelector, + mockSchemaGetter, + mockSchemaReader, + mockReplicationFSM, + ).Build() + + rs, ws, err := r.GetReadWriteReplicasLocation("TestClass", "", "") + + require.Error(t, err) + require.Contains(t, err.Error(), "shard2 error") + require.Empty(t, rs.Replicas) + require.Empty(t, ws.Replicas) + require.Empty(t, ws.AdditionalReplicas) +} + +func TestMultiTenantRouter_GetReadWriteReplicasLocation_Success(t *testing.T) { + mockSchemaGetter := schema.NewMockSchemaGetter(t) + mockReplicationFSM := replicationTypes.NewMockReplicationFSMReader(t) + mockNodeSelector := mocks.NewMockNodeSelector("node1", "node2") + mockSchemaReader := schema.NewMockSchemaReader(t) + + mockSchemaReader.EXPECT(). + ShardReplicas("TestClass", "luke"). + Return([]string{"node1"}, nil) + + tenantStatus := map[string]string{ + "luke": models.TenantActivityStatusHOT, + } + mockSchemaGetter.EXPECT(). + OptimisticTenantStatus(mock.Anything, "TestClass", "luke"). + Return(tenantStatus, nil) + + mockReplicationFSM.EXPECT(). + FilterOneShardReplicasRead("TestClass", "luke", []string{"node1"}). + Return([]string{"node1", "node2"}) + + mockReplicationFSM.EXPECT(). + FilterOneShardReplicasWrite("TestClass", "luke", []string{"node1"}). 
+ Return([]string{"node1"}, []string{"node2"}) + + r := router.NewBuilder( + "TestClass", + true, + mockNodeSelector, + mockSchemaGetter, + mockSchemaReader, + mockReplicationFSM, + ).Build() + + rs, ws, err := r.GetReadWriteReplicasLocation("TestClass", "luke", "") + + expectedReadReplicas := []types.Replica{ + {NodeName: "node1", ShardName: "luke", HostAddr: "node1"}, + {NodeName: "node2", ShardName: "luke", HostAddr: "node2"}, + } + expectedWriteReplicas := []types.Replica{ + {NodeName: "node1", ShardName: "luke", HostAddr: "node1"}, + } + expectedAdditionalWriteReplicas := []types.Replica{ + {NodeName: "node2", ShardName: "luke", HostAddr: "node2"}, + } + + require.NoError(t, err) + require.ElementsMatch(t, expectedReadReplicas, rs.Replicas) + require.ElementsMatch(t, expectedWriteReplicas, ws.Replicas) + require.ElementsMatch(t, expectedAdditionalWriteReplicas, ws.AdditionalReplicas) +} + +func TestMultiTenantRouter_GetReadWriteReplicasLocation_TenantNotFound(t *testing.T) { + mockSchemaGetter := schema.NewMockSchemaGetter(t) + mockReplicationFSM := replicationTypes.NewMockReplicationFSMReader(t) + mockNodeSelector := mocks.NewMockNodeSelector("node1", "node2") + mockSchemaReader := schema.NewMockSchemaReader(t) + + tenantStatus := map[string]string{} + mockSchemaGetter.EXPECT().OptimisticTenantStatus(mock.Anything, "TestClass", "luke"). 
+ Return(tenantStatus, errors.New("tenant not found: \"luke\"")) + + r := router.NewBuilder( + "TestClass", + true, + mockNodeSelector, + mockSchemaGetter, + mockSchemaReader, + mockReplicationFSM, + ).Build() + + rs, ws, err := r.GetReadWriteReplicasLocation("TestClass", "luke", "") + + require.Error(t, err) + require.Contains(t, err.Error(), "tenant not found: \"luke\"") + require.Empty(t, rs.Replicas) + require.Empty(t, ws.Replicas) + require.Empty(t, ws.AdditionalReplicas) +} + +func TestMultiTenantRouter_GetReadWriteReplicasLocation_TenantNotActive(t *testing.T) { + mockSchemaGetter := schema.NewMockSchemaGetter(t) + mockReplicationFSM := replicationTypes.NewMockReplicationFSMReader(t) + mockNodeSelector := mocks.NewMockNodeSelector("node1", "node2") + mockSchemaReader := schema.NewMockSchemaReader(t) + + tenantStatus := map[string]string{ + "luke": models.TenantActivityStatusCOLD, + } + mockSchemaGetter.EXPECT().OptimisticTenantStatus(mock.Anything, "TestClass", "luke"). + Return(tenantStatus, nil) + + r := router.NewBuilder( + "TestClass", + true, + mockNodeSelector, + mockSchemaGetter, + mockSchemaReader, + mockReplicationFSM, + ).Build() + + rs, ws, err := r.GetReadWriteReplicasLocation("TestClass", "luke", "") + + require.Error(t, err) + require.Contains(t, err.Error(), "tenant not active: 'luke'") + require.Empty(t, rs.Replicas) + require.Empty(t, ws.Replicas) + require.Empty(t, ws.AdditionalReplicas) +} + +func TestMultiTenantRouter_GetReadWriteReplicasLocation_NonTenantRequestForMultiTenant(t *testing.T) { + mockSchemaGetter := schema.NewMockSchemaGetter(t) + mockReplicationFSM := replicationTypes.NewMockReplicationFSMReader(t) + mockNodeSelector := mocks.NewMockNodeSelector("node1", "node2") + metadataReader := schema.NewMockSchemaReader(t) + + r := router.NewBuilder( + "TestClass", + true, + mockNodeSelector, + mockSchemaGetter, + metadataReader, + mockReplicationFSM, + ).Build() + + rs, ws, err := r.GetReadWriteReplicasLocation("TestClass", "", "") 
+ + require.Error(t, err) + require.Contains(t, err.Error(), "class TestClass has multi-tenancy enabled, but request was without tenant") + require.Empty(t, rs.Replicas) + require.Empty(t, ws.Replicas) + require.Empty(t, ws.AdditionalReplicas) +} + +func TestMultiTenantRouter_GetWriteReplicasLocation(t *testing.T) { + mockSchemaGetter := schema.NewMockSchemaGetter(t) + mockReplicationFSM := replicationTypes.NewMockReplicationFSMReader(t) + mockNodeSelector := mocks.NewMockNodeSelector("node1", "node2") + mockSchemaReader := schema.NewMockSchemaReader(t) + + mockSchemaReader.EXPECT().ShardReplicas("TestClass", "luke").Return([]string{"node1"}, nil) + + tenantStatus := map[string]string{ + "luke": models.TenantActivityStatusHOT, + } + mockSchemaGetter.EXPECT(). + OptimisticTenantStatus(mock.Anything, "TestClass", "luke"). + Return(tenantStatus, nil) + mockReplicationFSM.EXPECT(). + FilterOneShardReplicasWrite("TestClass", "luke", []string{"node1"}). + Return([]string{"node1"}, []string{"node2"}) + + r := router.NewBuilder( + "TestClass", + true, + mockNodeSelector, + mockSchemaGetter, + mockSchemaReader, + mockReplicationFSM, + ).Build() + + ws, err := r.GetWriteReplicasLocation("TestClass", "luke", "") + require.NoError(t, err) + + expectedWriteReplicas := []types.Replica{ + {NodeName: "node1", ShardName: "luke", HostAddr: "node1"}, + } + expectedAdditionalWriteReplicas := []types.Replica{ + {NodeName: "node2", ShardName: "luke", HostAddr: "node2"}, + } + + require.Equal(t, expectedWriteReplicas, ws.Replicas) + require.Equal(t, expectedAdditionalWriteReplicas, ws.AdditionalReplicas) +} + +func TestMultiTenantRouter_GetReadReplicasLocation(t *testing.T) { + mockSchemaGetter := schema.NewMockSchemaGetter(t) + mockReplicationFSM := replicationTypes.NewMockReplicationFSMReader(t) + mockNodeSelector := mocks.NewMockNodeSelector("node1", "node2") + mockSchemaReader := schema.NewMockSchemaReader(t) + + mockSchemaReader.EXPECT().ShardReplicas("TestClass", 
"luke").Return([]string{"node1"}, nil) + tenantStatus := map[string]string{ + "luke": models.TenantActivityStatusHOT, + } + mockSchemaGetter.EXPECT(). + OptimisticTenantStatus(mock.Anything, "TestClass", "luke"). + Return(tenantStatus, nil) + mockReplicationFSM.EXPECT(). + FilterOneShardReplicasRead("TestClass", "luke", []string{"node1"}). + Return([]string{"node1"}) + + r := router.NewBuilder( + "TestClass", + true, + mockNodeSelector, + mockSchemaGetter, + mockSchemaReader, + mockReplicationFSM, + ).Build() + + readReplicas, err := r.GetReadReplicasLocation("TestClass", "luke", "") + require.NoError(t, err) + + expected := []types.Replica{ + {NodeName: "node1", ShardName: "luke", HostAddr: "node1"}, + } + require.Equal(t, expected, readReplicas.Replicas) +} + +func TestMultiTenantRouter_TenantStatusChangeDuringOperation(t *testing.T) { + mockSchemaGetter := schema.NewMockSchemaGetter(t) + mockReplicationFSM := replicationTypes.NewMockReplicationFSMReader(t) + mockNodeSelector := mocks.NewMockNodeSelector("node1") + mockSchemaReader := schema.NewMockSchemaReader(t) + + mockSchemaReader.EXPECT().ShardReplicas("TestClass", "luke").Return([]string{"node1"}, nil) + tenantStatusFirst := map[string]string{ + "luke": models.TenantActivityStatusHOT, + } + tenantStatusSecond := map[string]string{ + "luke": models.TenantActivityStatusFREEZING, + } + + mockSchemaGetter.EXPECT(). + OptimisticTenantStatus(mock.Anything, "TestClass", "luke"). + Return(tenantStatusFirst, nil).Once() // first tenant read replicas + mockSchemaGetter.EXPECT(). + OptimisticTenantStatus(mock.Anything, "TestClass", "luke"). + Return(tenantStatusFirst, nil).Once() // first tenant write replicas + mockSchemaGetter.EXPECT(). + OptimisticTenantStatus(mock.Anything, "TestClass", "luke"). + Return(tenantStatusSecond, nil).Once() // second tenant read replica (error) + + mockReplicationFSM.EXPECT(). + FilterOneShardReplicasRead("TestClass", "luke", []string{"node1"}). 
+ Return([]string{"node1"}).Once() + mockReplicationFSM.EXPECT(). + FilterOneShardReplicasWrite("TestClass", "luke", []string{"node1"}). + Return([]string{"node1"}, []string{}).Once() + + r := router.NewBuilder( + "TestClass", + true, + mockNodeSelector, + mockSchemaGetter, + mockSchemaReader, + mockReplicationFSM, + ).Build() + + rs, ws, err := r.GetReadWriteReplicasLocation("TestClass", "luke", "") + require.NoError(t, err) + + expected := []types.Replica{ + {NodeName: "node1", ShardName: "luke", HostAddr: "node1"}, + } + require.Equal(t, expected, rs.Replicas) + require.Equal(t, expected, ws.Replicas) + require.Empty(t, ws.AdditionalReplicas) + + rs, ws, err = r.GetReadWriteReplicasLocation("TestClass", "luke", "") + require.Error(t, err) + require.Contains(t, err.Error(), "tenant not active: 'luke'") + require.Empty(t, rs.Replicas) + require.Empty(t, ws.Replicas) + require.Empty(t, ws.AdditionalReplicas) +} + +func TestMultiTenantRouter_VariousTenantStatuses(t *testing.T) { + statusTests := []struct { + status string + shouldErr bool + errMsg string + }{ + {models.TenantActivityStatusHOT, false, ""}, + {models.TenantActivityStatusCOLD, true, "tenant not active"}, + {models.TenantActivityStatusFROZEN, true, "tenant not active"}, + {models.TenantActivityStatusFREEZING, true, "tenant not active"}, + {"UNKNOWN_STATUS", true, "tenant not active"}, + } + + for _, test := range statusTests { + t.Run("status_"+test.status, func(t *testing.T) { + mockSchemaGetter := schema.NewMockSchemaGetter(t) + mockReplicationFSM := replicationTypes.NewMockReplicationFSMReader(t) + mockNodeSelector := mocks.NewMockNodeSelector("node1") + mockSchemaReader := schema.NewMockSchemaReader(t) + + tenantStatus := map[string]string{ + "luke": test.status, + } + mockSchemaGetter.EXPECT().OptimisticTenantStatus(mock.Anything, "TestClass", "luke"). 
+ Return(tenantStatus, nil) + + var expectedReplicas []types.Replica + if !test.shouldErr { + mockSchemaReader.EXPECT().ShardReplicas("TestClass", "luke").Return([]string{"node1"}, nil) + mockReplicationFSM.EXPECT(). + FilterOneShardReplicasRead("TestClass", "luke", []string{"node1"}). + Return([]string{"node1"}) + mockReplicationFSM.EXPECT(). + FilterOneShardReplicasWrite("TestClass", "luke", []string{"node1"}). + Return([]string{"node1"}, []string{}) + expectedReplicas = []types.Replica{ + {NodeName: "node1", ShardName: "luke", HostAddr: "node1"}, + } + } + + r := router.NewBuilder( + "TestClass", + true, + mockNodeSelector, + mockSchemaGetter, + mockSchemaReader, + mockReplicationFSM, + ).Build() + + rs, ws, err := r.GetReadWriteReplicasLocation("TestClass", "luke", "") + + if test.shouldErr { + require.Error(t, err) + require.Contains(t, err.Error(), test.errMsg) + require.Empty(t, rs.Replicas) + require.Empty(t, ws.Replicas) + require.Empty(t, ws.AdditionalReplicas) + } else { + require.NoError(t, err) + require.Equal(t, expectedReplicas, rs.Replicas) + require.Equal(t, expectedReplicas, ws.Replicas) + require.Empty(t, ws.AdditionalReplicas) + } + }) + } +} + +func TestSingleTenantRouter_BuildReadRoutingPlan_NoReplicas(t *testing.T) { + mockSchemaGetter := schema.NewMockSchemaGetter(t) + mockSchemaReader := schema.NewMockSchemaReader(t) + mockReplicationFSM := replicationTypes.NewMockReplicationFSMReader(t) + mockNodeSelector := mocks.NewMockNodeSelector("node1") + emptyState := createShardingStateWithShards([]string{"shard1"}) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(emptyState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, emptyState) + }).Maybe() + mockSchemaReader.EXPECT().ShardReplicas("TestClass", "shard1").Return([]string{}, 
nil) + mockReplicationFSM.EXPECT(). + FilterOneShardReplicasRead("TestClass", "shard1", []string{}). + Return([]string{}) + + r := router.NewBuilder( + "TestClass", + false, + mockNodeSelector, + mockSchemaGetter, + mockSchemaReader, + mockReplicationFSM, + ).Build() + rs, err := r.GetReadReplicasLocation("TestClass", "", "shard1") + + require.NoError(t, err) + require.Equal(t, []types.Replica(nil), rs.Replicas) +} + +func TestMultiTenantRouter_BuildReadRoutingPlan_NoReplicas(t *testing.T) { + mockSchemaGetter := schema.NewMockSchemaGetter(t) + mockSchemaReader := schema.NewMockSchemaReader(t) + mockReplicationFSM := replicationTypes.NewMockReplicationFSMReader(t) + mockNodeSelector := mocks.NewMockNodeSelector("node1") + + mockSchemaReader.EXPECT().ShardReplicas("TestClass", "luke").Return([]string{}, nil) + tenantStatus := map[string]string{ + "luke": models.TenantActivityStatusHOT, + } + mockSchemaGetter.EXPECT(). + OptimisticTenantStatus(mock.Anything, "TestClass", "luke"). + Return(tenantStatus, nil) + mockReplicationFSM.EXPECT(). + FilterOneShardReplicasRead("TestClass", "luke", []string{}). 
+ Return([]string{}) + + r := router.NewBuilder( + "TestClass", + true, + mockNodeSelector, + mockSchemaGetter, + mockSchemaReader, + mockReplicationFSM, + ).Build() + rs, err := r.GetReadReplicasLocation("TestClass", "luke", "") + require.NoError(t, err) + require.Empty(t, rs.Replicas, "should have empty replicas when no replicas available") +} + +func TestMultiTenantRouter_BuildReadRoutingPlan_Success(t *testing.T) { + mockSchemaGetter := schema.NewMockSchemaGetter(t) + mockReplicationFSM := replicationTypes.NewMockReplicationFSMReader(t) + mockNodeSelector := mocks.NewMockNodeSelector("node1", "node2") + mockSchemaReader := schema.NewMockSchemaReader(t) + + mockSchemaReader.EXPECT().ShardReplicas("TestClass", "luke").Return([]string{"node1"}, nil) + tenantStatus := map[string]string{ + "luke": models.TenantActivityStatusHOT, + } + mockSchemaGetter.EXPECT().OptimisticTenantStatus(mock.Anything, "TestClass", "luke"). + Return(tenantStatus, nil) + + mockReplicationFSM.EXPECT().FilterOneShardReplicasRead("TestClass", "luke", []string{"node1"}). + Return([]string{"node1"}) + + r := router.NewBuilder( + "TestClass", + true, + mockNodeSelector, + mockSchemaGetter, + mockSchemaReader, + mockReplicationFSM, + ).Build() + rs, err := r.GetReadReplicasLocation("TestClass", "luke", "") + require.NoError(t, err) + require.Equal(t, []types.Replica{{ + NodeName: "node1", + ShardName: "luke", + HostAddr: "node1", + }}, rs.Replicas) +} + +func TestMultiTenantRouter_BuildRoutingPlan_TenantNotFoundDuringBuild(t *testing.T) { + mockSchemaGetter := schema.NewMockSchemaGetter(t) + mockReplicationFSM := replicationTypes.NewMockReplicationFSMReader(t) + mockNodeSelector := mocks.NewMockNodeSelector("node1", "node2") + metadataReader := schema.NewMockSchemaReader(t) + + tenantStatus := map[string]string{} + mockSchemaGetter.EXPECT().OptimisticTenantStatus(mock.Anything, "TestClass", "nonexistent"). 
+ Return(tenantStatus, nil) + + r := router.NewBuilder( + "TestClass", + true, + mockNodeSelector, + mockSchemaGetter, + metadataReader, + mockReplicationFSM, + ).Build() + rs, err := r.GetReadReplicasLocation("TestClass", "nonexistent", "") + require.Error(t, err) + require.Contains(t, err.Error(), "tenant not found: \"nonexistent\"") + require.Empty(t, rs.Replicas) +} + +func TestRouter_NodeHostname(t *testing.T) { + tests := []struct { + name string + partitioning bool + }{ + {"single-tenant", false}, + {"multi-tenant", true}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockSchemaGetter := schema.NewMockSchemaGetter(t) + mockReplicationFSM := replicationTypes.NewMockReplicationFSMReader(t) + mockNodeSelector.EXPECT().NodeHostname("node1").Return("host1.example.com", true) + mockNodeSelector.EXPECT().NodeHostname("node2").Return("", false) + + var mockSchemaReader schema.SchemaReader + if !tt.partitioning { + mockSchemaReader = schema.NewMockSchemaReader(t) + } + + r := router.NewBuilder( + "TestClass", + tt.partitioning, + mockNodeSelector, + mockSchemaGetter, + mockSchemaReader, + mockReplicationFSM, + ).Build() + + hostname, ok := r.NodeHostname("node1") + require.True(t, ok) + require.Equal(t, "host1.example.com", hostname) + + hostname, ok = r.NodeHostname("node2") + require.False(t, ok) + require.Empty(t, hostname) + }) + } +} + +func TestRouter_AllHostnames(t *testing.T) { + tests := []struct { + name string + partitioning bool + }{ + {"single-tenant", false}, + {"multi-tenant", true}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mockNodeSelector := mocks.NewMockNodeSelector("node1", "node2", "node3") + mockSchemaGetter := schema.NewMockSchemaGetter(t) + mockReplicationFSM := replicationTypes.NewMockReplicationFSMReader(t) + + var mockSchemaReader schema.SchemaReader + if !tt.partitioning { + mockSchemaReader = schema.NewMockSchemaReader(t) + } 
+ + expectedHostnames := []string{"node1", "node2", "node3"} + + r := router.NewBuilder( + "TestClass", + tt.partitioning, + mockNodeSelector, + mockSchemaGetter, + mockSchemaReader, + mockReplicationFSM, + ).Build() + + hostnames := r.AllHostnames() + require.Equal(t, expectedHostnames, hostnames) + }) + } +} + +func TestMultiTenantRouter_MultipleTenantsSameCollection(t *testing.T) { + mockSchemaGetter := schema.NewMockSchemaGetter(t) + mockReplicationFSM := replicationTypes.NewMockReplicationFSMReader(t) + mockNodeSelector := mocks.NewMockNodeSelector("node1", "node2", "node3", "node4") + mockSchemaReader := schema.NewMockSchemaReader(t) + + tenants := map[string][]string{ + "alice": {"node1", "node2"}, + "bob": {"node2", "node3"}, + "charlie": {"node1", "node3"}, + "diana": {"node3", "node4"}, + } + + for tenant, replicas := range tenants { + tenantStatus := map[string]string{tenant: models.TenantActivityStatusHOT} + mockSchemaGetter.EXPECT().OptimisticTenantStatus(mock.Anything, "TestClass", tenant). + Return(tenantStatus, nil) + mockSchemaReader.EXPECT().ShardReplicas("TestClass", tenant).Return(replicas, nil) + mockReplicationFSM.EXPECT().FilterOneShardReplicasRead("TestClass", tenant, replicas). + Return(replicas) + mockReplicationFSM.EXPECT().FilterOneShardReplicasWrite("TestClass", tenant, replicas). 
+ Return([]string{replicas[0]}, replicas[1:]) + } + + r := router.NewBuilder("TestClass", true, mockNodeSelector, + mockSchemaGetter, mockSchemaReader, mockReplicationFSM).Build() + + for tenant, expected := range tenants { + rs, ws, err := r.GetReadWriteReplicasLocation("TestClass", tenant, "") + + require.NoError(t, err, "unexpected error for tenant %s", tenant) + + require.Equal(t, sorted(expected), sorted(rs.NodeNames()), "read replicas mismatch for tenant %s", tenant) + require.Equal(t, []string{expected[0]}, ws.NodeNames(), "write replicas mismatch for tenant %s", tenant) + + if len(expected) > 1 { + require.Equal(t, sorted(expected[1:]), sorted(ws.AdditionalNodeNames()), "additional writes mismatch for tenant %s", tenant) + } else { + require.Empty(t, ws.AdditionalReplicas, "additional writes should be empty for tenant %s", tenant) + } + } +} + +// sorted returns a sorted copy of a string slice +func sorted(input []string) []string { + cp := append([]string(nil), input...) + sort.Strings(cp) + return cp +} + +func TestMultiTenantRouter_MixedTenantStates(t *testing.T) { + tenants := map[string]struct { + status string + shouldWork bool + description string + }{ + "active-tenant-1": {models.TenantActivityStatusHOT, true, "router error for active-tenant-1"}, + "active-tenant-2": {models.TenantActivityStatusHOT, true, "router error for active-tenant-2"}, + "cold-tenant": {models.TenantActivityStatusCOLD, false, "tenant not active"}, + "frozen-tenant": {models.TenantActivityStatusFROZEN, false, "tenant not active"}, + "freezing-tenant": {models.TenantActivityStatusFREEZING, false, "tenant not active"}, + } + + mockSchemaGetter := schema.NewMockSchemaGetter(t) + mockReplicationFSM := replicationTypes.NewMockReplicationFSMReader(t) + mockNodeSelector := mocks.NewMockNodeSelector("node1", "node2", "node3") + mockSchemaReader := schema.NewMockSchemaReader(t) + + for tenantName, tenantsStatus := range tenants { + tenantStatus := map[string]string{tenantName: 
tenantsStatus.status} + mockSchemaGetter.EXPECT().OptimisticTenantStatus(mock.Anything, "TestClass", tenantName). + Return(tenantStatus, nil) + + if tenantsStatus.shouldWork { + mockSchemaReader.EXPECT().ShardReplicas("TestClass", tenantName).Return([]string{"node1", "node2"}, nil) + mockReplicationFSM.EXPECT().FilterOneShardReplicasRead("TestClass", tenantName, []string{"node1", "node2"}). + Return([]string{"node1", "node2"}) + mockReplicationFSM.EXPECT().FilterOneShardReplicasWrite("TestClass", tenantName, []string{"node1", "node2"}). + Return([]string{"node1"}, []string{"node2"}) + } + } + + r := router.NewBuilder("TestClass", true, mockNodeSelector, + mockSchemaGetter, mockSchemaReader, mockReplicationFSM).Build() + + for tenantName, tenantsStatus := range tenants { + t.Run(tenantName, func(t *testing.T) { + rs, ws, err := r.GetReadWriteReplicasLocation("TestClass", tenantName, "") + + if tenantsStatus.shouldWork { + require.NoError(t, err, "%s: should work", tenantsStatus.description) + require.ElementsMatch(t, []string{"node1", "node2"}, rs.NodeNames()) + require.Equal(t, []string{"node1"}, ws.NodeNames()) + require.Equal(t, []string{"node2"}, ws.AdditionalNodeNames()) + } else { + require.Error(t, err, "%s: should fail", tenantsStatus.description) + require.Contains(t, err.Error(), "tenant not active", "error should mention tenant not active") + require.Empty(t, rs.Replicas) + require.Empty(t, ws.Replicas) + require.Empty(t, ws.AdditionalReplicas) + } + }) + } +} + +func TestMultiTenantRouter_SameTenantDifferentCollections(t *testing.T) { + collections := []string{"Articles", "Users", "Products"} + tenantName := "alice" + + for _, collection := range collections { + t.Run(collection, func(t *testing.T) { + mockSchemaGetter := schema.NewMockSchemaGetter(t) + mockReplicationFSM := replicationTypes.NewMockReplicationFSMReader(t) + mockNodeSelector := mocks.NewMockNodeSelector("node1", "node2", "node3") + mockSchemaReader := schema.NewMockSchemaReader(t) + + var 
expectedReplicas []string + switch collection { + case "Articles": + expectedReplicas = []string{"node1", "node2"} + case "Users": + expectedReplicas = []string{"node2", "node3"} + case "Products": + expectedReplicas = []string{"node1", "node3"} + } + + tenantStatus := map[string]string{tenantName: models.TenantActivityStatusHOT} + mockSchemaGetter.EXPECT().OptimisticTenantStatus(mock.Anything, collection, tenantName). + Return(tenantStatus, nil) + mockSchemaReader.EXPECT().ShardReplicas(collection, tenantName).Return(expectedReplicas, nil) + mockReplicationFSM.EXPECT().FilterOneShardReplicasRead(collection, tenantName, expectedReplicas). + Return(expectedReplicas) + mockReplicationFSM.EXPECT().FilterOneShardReplicasWrite(collection, tenantName, expectedReplicas). + Return([]string{expectedReplicas[0]}, expectedReplicas[1:]) + + r := router.NewBuilder(collection, true, mockNodeSelector, + mockSchemaGetter, mockSchemaReader, mockReplicationFSM).Build() + + rs, ws, err := r.GetReadWriteReplicasLocation(collection, tenantName, "") + + require.NoError(t, err, "unexpected error for collection %s", collection) + require.ElementsMatch(t, expectedReplicas, rs.NodeNames(), "read replicas mismatch for collection %s", collection) + require.Equal(t, []string{expectedReplicas[0]}, ws.NodeNames(), "write replicas mismatch for collection %s", collection) + require.ElementsMatch(t, expectedReplicas[1:], ws.AdditionalNodeNames(), "additional writes mismatch for collection %s", collection) + }) + } +} + +func TestSingleTenantRouter_GetReadWriteReplicasLocation_SpecificRandomShard(t *testing.T) { + mockSchemaGetter := schema.NewMockSchemaGetter(t) + mockSchemaReader := schema.NewMockSchemaReader(t) + mockReplicationFSM := replicationTypes.NewMockReplicationFSMReader(t) + mockNodeSelector := mocks.NewMockNodeSelector("node1", "node2", "node3", "node4", "node5") + + allShards := []string{"shard1", "shard2", "shard3", "shard4", "shard5"} + state := 
createShardingStateWithShards(allShards) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(state.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, state) + }).Maybe() + targetShard := allShards[rand.Intn(len(allShards))] + shardToNodes := map[string][]string{ + "shard1": {"node1", "node2"}, + "shard2": {"node2", "node3"}, + "shard3": {"node3", "node4"}, + "shard4": {"node4", "node5"}, + "shard5": {"node5", "node1"}, + } + + targetNodes := shardToNodes[targetShard] + mockSchemaReader.EXPECT().ShardReplicas("TestClass", targetShard).Return(targetNodes, nil) + mockReplicationFSM.EXPECT().FilterOneShardReplicasRead("TestClass", targetShard, targetNodes). + Return(targetNodes) + mockReplicationFSM.EXPECT().FilterOneShardReplicasWrite("TestClass", targetShard, targetNodes). + Return([]string{targetNodes[0]}, targetNodes[1:]) + + r := router.NewBuilder( + "TestClass", + false, + mockNodeSelector, + mockSchemaGetter, + mockSchemaReader, + mockReplicationFSM, + ).Build() + + rs, ws, err := r.GetReadWriteReplicasLocation("TestClass", "", targetShard) + + var expectedReadReplicas []types.Replica + var expectedWriteReplicas []types.Replica + var expectedAdditionalWriteReplicas []types.Replica + + for _, node := range targetNodes { + expectedReadReplicas = append(expectedReadReplicas, types.Replica{ + NodeName: node, ShardName: targetShard, HostAddr: node, + }) + } + + expectedWriteReplicas = []types.Replica{ + {NodeName: targetNodes[0], ShardName: targetShard, HostAddr: targetNodes[0]}, + } + + for _, node := range targetNodes[1:] { + expectedAdditionalWriteReplicas = append(expectedAdditionalWriteReplicas, types.Replica{ + NodeName: node, ShardName: targetShard, HostAddr: node, + }) + } + + require.NoError(t, err) + require.Equal(t, expectedReadReplicas, 
rs.Replicas) + require.Equal(t, expectedWriteReplicas, ws.Replicas) + require.Equal(t, expectedAdditionalWriteReplicas, ws.AdditionalReplicas) +} + +func TestSingleTenantRouter_GetReadWriteReplicasLocation_InvalidShard(t *testing.T) { + mockSchemaGetter := schema.NewMockSchemaGetter(t) + mockSchemaReader := schema.NewMockSchemaReader(t) + mockReplicationFSM := replicationTypes.NewMockReplicationFSMReader(t) + mockNodeSelector := mocks.NewMockNodeSelector("node1", "node2") + + state := createShardingStateWithShards([]string{"shard1", "shard2"}) + mockSchemaReader.EXPECT().Shards(mock.Anything).RunAndReturn(func(class string) ([]string, error) { + return []string{"foo", "bar"}, nil + }) + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, state) + }).Maybe() + + r := router.NewBuilder( + "TestClass", + false, + mockNodeSelector, + mockSchemaGetter, + mockSchemaReader, + mockReplicationFSM, + ).Build() + + rs, ws, err := r.GetReadWriteReplicasLocation("TestClass", "", "invalid_shard") + + require.Error(t, err) + require.Contains(t, err.Error(), "error while trying to find shard: invalid_shard in collection: TestClass") + require.Empty(t, rs.Replicas) + require.Empty(t, ws.Replicas) + require.Empty(t, ws.AdditionalReplicas) +} + +func TestSingleTenantRouter_BroadcastVsTargeted(t *testing.T) { + allShards := []string{"shard1", "shard2", "shard3", "shard4", "shard5"} + randomShard := allShards[rand.Intn(len(allShards))] + + testCases := []struct { + name string + shard string + expectShards []string + description string + }{ + { + name: "broadcast_empty_shard", + shard: "", + expectShards: allShards, + description: "empty shard should target all shards", + }, + { + name: "targeted_random_shard", + shard: randomShard, + expectShards: []string{randomShard}, + description: fmt.Sprintf("specific shard 
%s should target only that shard", randomShard), + }, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + mockSchemaGetter := schema.NewMockSchemaGetter(t) + mockSchemaReader := schema.NewMockSchemaReader(t) + mockReplicationFSM := replicationTypes.NewMockReplicationFSMReader(t) + mockNodeSelector := cluster.NewMockNodeSelector(t) + + state := createShardingStateWithShards(allShards) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(state.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, state) + }).Maybe() + + for _, shard := range testCase.expectShards { + mockSchemaReader.EXPECT().ShardReplicas("TestClass", shard).Return([]string{"node1"}, nil) + mockReplicationFSM.EXPECT().FilterOneShardReplicasRead("TestClass", shard, []string{"node1"}). 
+ Return([]string{"node1"}) + mockNodeSelector.EXPECT().NodeHostname("node1").Return("host1.example.com", true) + } + + r := router.NewBuilder( + "TestClass", + false, + mockNodeSelector, + mockSchemaGetter, + mockSchemaReader, + mockReplicationFSM, + ).Build() + rs, err := r.GetReadReplicasLocation("TestClass", "", testCase.shard) + require.NoError(t, err, "unexpected error for %s", testCase.description) + actualShards := rs.Shards() + require.ElementsMatch(t, testCase.expectShards, actualShards, "shard targeting mismatch for %s", testCase.description) + }) + } +} + +func TestSingleTenantRouter_BuildWriteRoutingPlan_NoWriteReplicas(t *testing.T) { + mockSchemaGetter := schema.NewMockSchemaGetter(t) + mockSchemaReader := schema.NewMockSchemaReader(t) + mockReplicationFSM := replicationTypes.NewMockReplicationFSMReader(t) + mockNodeSelector := cluster.NewMockNodeSelector(t) + + emptyState := createShardingStateWithShards([]string{"shard1"}) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(emptyState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, emptyState) + }).Maybe() + mockSchemaReader.EXPECT().ShardReplicas("TestClass", "shard1").Return([]string{"node1", "node2", "node3"}, nil) + mockReplicationFSM.EXPECT().FilterOneShardReplicasWrite("TestClass", "shard1", []string{"node1", "node2", "node3"}). 
+ Return([]string{}, []string{}) + + r := router.NewBuilder( + "TestClass", + false, + mockNodeSelector, + mockSchemaGetter, + mockSchemaReader, + mockReplicationFSM, + ).Build() + ws, err := r.GetWriteReplicasLocation("TestClass", "", "shard1") + require.NoError(t, err) + require.Empty(t, ws.Replicas) + require.Empty(t, ws.AdditionalReplicas) +} + +func TestSingleTenantRouter_BuildWriteRoutingPlan_MultipleShards(t *testing.T) { + mockSchemaGetter := schema.NewMockSchemaGetter(t) + mockSchemaReader := schema.NewMockSchemaReader(t) + mockReplicationFSM := replicationTypes.NewMockReplicationFSMReader(t) + mockNodeSelector := cluster.NewMockNodeSelector(t) + + state := createShardingStateWithShards([]string{"shard1", "shard2", "shard3"}) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(state.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, state) + }).Maybe() + mockSchemaReader.EXPECT().ShardReplicas("TestClass", "shard1").Return([]string{"node1", "node2"}, nil) + mockReplicationFSM.EXPECT().FilterOneShardReplicasWrite("TestClass", "shard1", []string{"node1", "node2"}). 
+ Return([]string{"node1"}, []string{"node2"}) + + for _, node := range []string{"node1", "node2", "node3"} { + mockNodeSelector.EXPECT().NodeHostname(node).Return(node+".example.com", true).Maybe() + } + + r := router.NewBuilder( + "TestClass", + false, + mockNodeSelector, + mockSchemaGetter, + mockSchemaReader, + mockReplicationFSM, + ).Build() + ws, err := r.GetWriteReplicasLocation("TestClass", "", "shard1") + require.NoError(t, err) + + // Should contain write replicas from all shards + expectedWriteReplicas := []types.Replica{ + {NodeName: "node1", ShardName: "shard1", HostAddr: "node1.example.com"}, + } + + expectedAdditionalReplicas := []types.Replica{ + {NodeName: "node2", ShardName: "shard1", HostAddr: "node2.example.com"}, + } + + require.ElementsMatch(t, expectedWriteReplicas, ws.Replicas) + require.ElementsMatch(t, expectedAdditionalReplicas, ws.AdditionalReplicas) +} + +func TestMultiTenantRouter_BuildWriteRoutingPlan_Success(t *testing.T) { + mockSchemaGetter := schema.NewMockSchemaGetter(t) + mockReplicationFSM := replicationTypes.NewMockReplicationFSMReader(t) + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockSchemaReader := schema.NewMockSchemaReader(t) + + mockSchemaReader.EXPECT().ShardReplicas("TestClass", "alice").Return([]string{"node1", "node2"}, nil) + tenantStatus := map[string]string{ + "alice": models.TenantActivityStatusHOT, + } + mockSchemaGetter.EXPECT().OptimisticTenantStatus(mock.Anything, "TestClass", "alice"). + Return(tenantStatus, nil) + mockReplicationFSM.EXPECT().FilterOneShardReplicasWrite("TestClass", "alice", []string{"node1", "node2"}). 
+ Return([]string{"node1"}, []string{"node2"}) + mockNodeSelector.EXPECT().NodeHostname("node1").Return("host1.example.com", true) + mockNodeSelector.EXPECT().NodeHostname("node2").Return("host2.example.com", true) + + r := router.NewBuilder( + "TestClass", + true, + mockNodeSelector, + mockSchemaGetter, + mockSchemaReader, + mockReplicationFSM, + ).Build() + ws, err := r.GetWriteReplicasLocation("TestClass", "alice", "") + require.NoError(t, err) + expectedWriteReplicas := []types.Replica{ + {NodeName: "node1", ShardName: "alice", HostAddr: "host1.example.com"}, + } + expectedAdditionalWriteReplicas := []types.Replica{ + {NodeName: "node2", ShardName: "alice", HostAddr: "host2.example.com"}, + } + require.Equal(t, expectedWriteReplicas, ws.Replicas) + require.Equal(t, expectedAdditionalWriteReplicas, ws.AdditionalReplicas) +} + +func TestMultiTenantRouter_BuildWriteRoutingPlan_NoWriteReplicas(t *testing.T) { + mockSchemaGetter := schema.NewMockSchemaGetter(t) + mockSchemaReader := schema.NewMockSchemaReader(t) + mockReplicationFSM := replicationTypes.NewMockReplicationFSMReader(t) + mockNodeSelector := mocks.NewMockNodeSelector("node1") + + mockSchemaReader.EXPECT().ShardReplicas("TestClass", "alice").Return([]string{}, nil) + tenantStatus := map[string]string{ + "alice": models.TenantActivityStatusHOT, + } + mockSchemaGetter.EXPECT().OptimisticTenantStatus(mock.Anything, "TestClass", "alice"). + Return(tenantStatus, nil) + mockReplicationFSM.EXPECT().FilterOneShardReplicasWrite("TestClass", "alice", []string{}). 
+ Return([]string{}, []string{}) + + r := router.NewBuilder( + "TestClass", + true, + mockNodeSelector, + mockSchemaGetter, + mockSchemaReader, + mockReplicationFSM, + ).Build() + ws, err := r.GetWriteReplicasLocation("TestClass", "alice", "") + require.NoError(t, err) + require.Empty(t, ws.Replicas) + require.Empty(t, ws.AdditionalReplicas) +} + +func TestMultiTenantRouter_BuildWriteRoutingPlan_TenantValidation(t *testing.T) { + mockSchemaGetter := schema.NewMockSchemaGetter(t) + mockReplicationFSM := replicationTypes.NewMockReplicationFSMReader(t) + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockSchemaReader := schema.NewMockSchemaReader(t) + + r := router.NewBuilder( + "TestClass", + true, + mockNodeSelector, + mockSchemaGetter, + mockSchemaReader, + mockReplicationFSM, + ).Build() + ws, err := r.GetWriteReplicasLocation("TestClass", "", "") + require.Error(t, err) + require.Contains(t, err.Error(), "class TestClass has multi-tenancy enabled, but request was without tenant") + require.Empty(t, ws.Replicas) + require.Empty(t, ws.AdditionalReplicas) +} + +func TestMultiTenantRouter_BuildWriteRoutingPlan_TenantNotActive(t *testing.T) { + mockSchemaGetter := schema.NewMockSchemaGetter(t) + mockReplicationFSM := replicationTypes.NewMockReplicationFSMReader(t) + mockNodeSelector := mocks.NewMockNodeSelector("node1", "node2") + mockSchemaReader := schema.NewMockSchemaReader(t) + + tenantStatus := map[string]string{ + "alice": models.TenantActivityStatusCOLD, + } + mockSchemaGetter.EXPECT().OptimisticTenantStatus(mock.Anything, "TestClass", "alice"). 
+ Return(tenantStatus, nil) + + r := router.NewBuilder( + "TestClass", + true, + mockNodeSelector, + mockSchemaGetter, + mockSchemaReader, + mockReplicationFSM, + ).Build() + ws, err := r.GetWriteReplicasLocation("TestClass", "alice", "") + require.Error(t, err) + require.Contains(t, err.Error(), "tenant not active") + require.Empty(t, ws.Replicas) + require.Empty(t, ws.AdditionalReplicas) +} + +func TestSingleTenantRouter_BuildWriteRoutingPlan_SpecifiedShard(t *testing.T) { + mockSchemaGetter := schema.NewMockSchemaGetter(t) + mockSchemaReader := schema.NewMockSchemaReader(t) + mockReplFSM := replicationTypes.NewMockReplicationFSMReader(t) + mockNodeSelector := cluster.NewMockNodeSelector(t) + + state := createShardingStateWithShards([]string{"shardA"}) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(state.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, state) + }).Maybe() + mockSchemaReader.EXPECT().ShardReplicas("TestClass", "shardA"). + Return([]string{"node1", "node2"}, nil) + + mockReplFSM.EXPECT(). + FilterOneShardReplicasWrite("TestClass", "shardA", []string{"node1", "node2"}). 
+ Return([]string{"node1"}, []string{"node2"}) + + mockNodeSelector.EXPECT().NodeHostname("node1").Return("host1", true) + mockNodeSelector.EXPECT().NodeHostname("node2").Return("host2", true) + mockNodeSelector.EXPECT().LocalName().Return("node1") + + r := router.NewBuilder("TestClass", false, mockNodeSelector, + mockSchemaGetter, mockSchemaReader, mockReplFSM).Build() + + opts := types.RoutingPlanBuildOptions{ + Tenant: "", + Shard: "shardA", + ConsistencyLevel: types.ConsistencyLevelOne, + DirectCandidateNode: "", + } + + plan, err := r.BuildWriteRoutingPlan(opts) + require.NoError(t, err) + + want := []types.Replica{{NodeName: "node1", ShardName: "shardA", HostAddr: "host1"}} + require.Equal(t, want, plan.ReplicaSet.Replicas) + require.Equal(t, []types.Replica{{NodeName: "node2", ShardName: "shardA", HostAddr: "host2"}}, + plan.ReplicaSet.AdditionalReplicas) +} + +func TestMultiTenantRouter_BuildWriteRoutingPlan_DefaultShard(t *testing.T) { + mockSchemaGetter := schema.NewMockSchemaGetter(t) + mockSchemaReader := schema.NewMockSchemaReader(t) + mockReplFSM := replicationTypes.NewMockReplicationFSMReader(t) + mockNodeSelector := cluster.NewMockNodeSelector(t) + + tenant := "luke" + + mockSchemaGetter.EXPECT(). + OptimisticTenantStatus(mock.Anything, "TestClass", tenant). + Return(map[string]string{tenant: models.TenantActivityStatusHOT}, nil) + + mockSchemaReader.EXPECT().ShardReplicas("TestClass", tenant). + Return([]string{"node1", "node2"}, nil) + + mockReplFSM.EXPECT(). + FilterOneShardReplicasWrite("TestClass", tenant, []string{"node1", "node2"}). 
+ Return([]string{"node1"}, []string{"node2"}) + + mockNodeSelector.EXPECT().NodeHostname("node1").Return("host1", true) + mockNodeSelector.EXPECT().NodeHostname("node2").Return("host2", true) + mockNodeSelector.EXPECT().LocalName().Return("node1") + + r := router.NewBuilder("TestClass", true, mockNodeSelector, + mockSchemaGetter, mockSchemaReader, mockReplFSM).Build() + + opts := types.RoutingPlanBuildOptions{ + Tenant: tenant, + Shard: "", + ConsistencyLevel: types.ConsistencyLevelOne, + DirectCandidateNode: "", + } + + plan, err := r.BuildWriteRoutingPlan(opts) + require.NoError(t, err) + + want := []types.Replica{{NodeName: "node1", ShardName: tenant, HostAddr: "host1"}} + require.Equal(t, want, plan.ReplicaSet.Replicas) + require.Equal(t, []types.Replica{{NodeName: "node2", ShardName: tenant, HostAddr: "host2"}}, + plan.ReplicaSet.AdditionalReplicas) +} + +func TestSingleTenantRouter_BuildWriteRoutingPlan_NoReplicas(t *testing.T) { + mockSchemaGetter := schema.NewMockSchemaGetter(t) + mockSchemaReader := schema.NewMockSchemaReader(t) + mockReplicationFSM := replicationTypes.NewMockReplicationFSMReader(t) + mockNodeSelector := mocks.NewMockNodeSelector("node1") + + emptyState := createShardingStateWithShards([]string{"shard1"}) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(emptyState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, emptyState) + }).Maybe() + mockSchemaReader.EXPECT().ShardReplicas("TestClass", "shard1").Return([]string{"node1"}, nil) + mockReplicationFSM.EXPECT().FilterOneShardReplicasWrite("TestClass", "shard1", []string{"node1"}). 
+ Return([]string{}, []string{}) // No write replicas + + r := router.NewBuilder( + "TestClass", + false, + mockNodeSelector, + mockSchemaGetter, + mockSchemaReader, + mockReplicationFSM, + ).Build() + + opts := types.RoutingPlanBuildOptions{ + Tenant: "", + Shard: "shard1", + ConsistencyLevel: types.ConsistencyLevelOne, + } + + plan, err := r.BuildWriteRoutingPlan(opts) + require.Error(t, err) + require.Contains(t, err.Error(), "no write replica found") + require.Empty(t, plan.ReplicaSet.Replicas) +} + +func TestSingleTenantRouter_BuildWriteRoutingPlan_TenantValidation(t *testing.T) { + mockSchemaGetter := schema.NewMockSchemaGetter(t) + mockSchemaReader := schema.NewMockSchemaReader(t) + mockReplicationFSM := replicationTypes.NewMockReplicationFSMReader(t) + mockNodeSelector := mocks.NewMockNodeSelector("node1") + + r := router.NewBuilder( + "TestClass", + false, + mockNodeSelector, + mockSchemaGetter, + mockSchemaReader, + mockReplicationFSM, + ).Build() + + opts := types.RoutingPlanBuildOptions{ + Tenant: "some-tenant", + Shard: "shard1", + ConsistencyLevel: types.ConsistencyLevelOne, + } + + plan, err := r.BuildWriteRoutingPlan(opts) + require.Error(t, err) + require.Contains(t, err.Error(), "class TestClass has multi-tenancy disabled, but request was with tenant") + require.Empty(t, plan.ReplicaSet.Replicas) +} + +func TestSingleTenantRouter_BuildWriteRoutingPlan_ConsistencyLevelValidation(t *testing.T) { + mockSchemaGetter := schema.NewMockSchemaGetter(t) + mockSchemaReader := schema.NewMockSchemaReader(t) + mockReplicationFSM := replicationTypes.NewMockReplicationFSMReader(t) + mockNodeSelector := mocks.NewMockNodeSelector("node1", "node2") + + state := createShardingStateWithShards([]string{"shard1"}) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(state.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + 
class := &models.Class{Class: className} + return readFunc(class, state) + }).Maybe() + mockSchemaReader.EXPECT().ShardReplicas("TestClass", "shard1").Return([]string{"node1", "node2"}, nil) + mockReplicationFSM.EXPECT().FilterOneShardReplicasWrite("TestClass", "shard1", []string{"node1", "node2"}). + Return([]string{"node1"}, []string{"node2"}) + + r := router.NewBuilder( + "TestClass", + false, + mockNodeSelector, + mockSchemaGetter, + mockSchemaReader, + mockReplicationFSM, + ).Build() + + opts := types.RoutingPlanBuildOptions{ + Tenant: "", + Shard: "shard1", + ConsistencyLevel: "INVALID_LEVEL", // Invalid consistency level + } + + plan, err := r.BuildWriteRoutingPlan(opts) + require.NoError(t, err) + require.Equal(t, 1, plan.IntConsistencyLevel) +} + +func TestSingleTenantRouter_BuildWriteRoutingPlan_ReplicaOrdering(t *testing.T) { + mockSchemaGetter := schema.NewMockSchemaGetter(t) + mockSchemaReader := schema.NewMockSchemaReader(t) + mockReplicationFSM := replicationTypes.NewMockReplicationFSMReader(t) + mockNodeSelector := mocks.NewMockNodeSelector("node1", "node2", "node3") + + state := createShardingStateWithShards([]string{"shard1"}) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(state.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, state) + }).Maybe() + mockSchemaReader.EXPECT().ShardReplicas("TestClass", "shard1").Return([]string{"node1", "node2", "node3"}, nil) + mockReplicationFSM.EXPECT().FilterOneShardReplicasWrite("TestClass", "shard1", []string{"node1", "node2", "node3"}). 
+ Return([]string{"node1", "node2"}, []string{"node3"}) + + r := router.NewBuilder( + "TestClass", + false, + mockNodeSelector, + mockSchemaGetter, + mockSchemaReader, + mockReplicationFSM, + ).Build() + + opts := types.RoutingPlanBuildOptions{ + Tenant: "", + Shard: "shard1", + ConsistencyLevel: types.ConsistencyLevelOne, + DirectCandidateNode: "node2", // Should be ordered first + } + + plan, err := r.BuildWriteRoutingPlan(opts) + require.NoError(t, err) + require.Equal(t, "node2", plan.ReplicaSet.Replicas[0].NodeName, "DirectCandidateNode should be first") +} + +func TestMultiTenantRouter_BuildWriteRoutingPlan_NoReplicas(t *testing.T) { + mockSchemaGetter := schema.NewMockSchemaGetter(t) + mockSchemaReader := schema.NewMockSchemaReader(t) + mockReplicationFSM := replicationTypes.NewMockReplicationFSMReader(t) + mockNodeSelector := mocks.NewMockNodeSelector("node1") + + mockSchemaReader.EXPECT().ShardReplicas("TestClass", "alice").Return([]string{"node1"}, nil) + tenantStatus := map[string]string{ + "alice": models.TenantActivityStatusHOT, + } + mockSchemaGetter.EXPECT().OptimisticTenantStatus(mock.Anything, "TestClass", "alice"). + Return(tenantStatus, nil) + mockReplicationFSM.EXPECT().FilterOneShardReplicasWrite("TestClass", "alice", []string{"node1"}). 
+ Return([]string{}, []string{}) // No write replicas + + r := router.NewBuilder( + "TestClass", + true, + mockNodeSelector, + mockSchemaGetter, + mockSchemaReader, + mockReplicationFSM, + ).Build() + + opts := types.RoutingPlanBuildOptions{ + Tenant: "alice", + Shard: "", + ConsistencyLevel: types.ConsistencyLevelOne, + } + + plan, err := r.BuildWriteRoutingPlan(opts) + require.Error(t, err) + require.Contains(t, err.Error(), "no write replica found") + require.Empty(t, plan.ReplicaSet.Replicas) +} + +func TestMultiTenantRouter_BuildWriteRoutingPlan_ConsistencyLevelValidation(t *testing.T) { + mockSchemaGetter := schema.NewMockSchemaGetter(t) + mockReplicationFSM := replicationTypes.NewMockReplicationFSMReader(t) + mockNodeSelector := mocks.NewMockNodeSelector("node1", "node2") + mockSchemaReader := schema.NewMockSchemaReader(t) + + mockSchemaReader.EXPECT().ShardReplicas("TestClass", "alice").Return([]string{"node1", "node2"}, nil) + tenantStatus := map[string]string{ + "alice": models.TenantActivityStatusHOT, + } + mockSchemaGetter.EXPECT().OptimisticTenantStatus(mock.Anything, "TestClass", "alice"). + Return(tenantStatus, nil) + mockReplicationFSM.EXPECT().FilterOneShardReplicasWrite("TestClass", "alice", []string{"node1", "node2"}). 
+ Return([]string{"node1"}, []string{"node2"}) + + r := router.NewBuilder( + "TestClass", + true, + mockNodeSelector, + mockSchemaGetter, + mockSchemaReader, + mockReplicationFSM, + ).Build() + + opts := types.RoutingPlanBuildOptions{ + Tenant: "alice", + Shard: "", + ConsistencyLevel: "INVALID_LEVEL", + } + + plan, err := r.BuildWriteRoutingPlan(opts) + require.NoError(t, err) + require.Equal(t, 1, plan.IntConsistencyLevel) +} + +func TestMultiTenantRouter_BuildWriteRoutingPlan_ReplicaOrdering(t *testing.T) { + mockSchemaGetter := schema.NewMockSchemaGetter(t) + mockReplicationFSM := replicationTypes.NewMockReplicationFSMReader(t) + mockNodeSelector := mocks.NewMockNodeSelector("node1", "node2", "node3") + mockSchemaReader := schema.NewMockSchemaReader(t) + + mockSchemaReader.EXPECT().ShardReplicas("TestClass", "alice").Return([]string{"node1", "node2", "node3"}, nil) + tenantStatus := map[string]string{ + "alice": models.TenantActivityStatusHOT, + } + mockSchemaGetter.EXPECT().OptimisticTenantStatus(mock.Anything, "TestClass", "alice"). + Return(tenantStatus, nil) + mockReplicationFSM.EXPECT().FilterOneShardReplicasWrite("TestClass", "alice", []string{"node1", "node2", "node3"}). 
+ Return([]string{"node1", "node3"}, []string{"node2"}) + + r := router.NewBuilder( + "TestClass", + true, + mockNodeSelector, + mockSchemaGetter, + mockSchemaReader, + mockReplicationFSM, + ).Build() + + opts := types.RoutingPlanBuildOptions{ + Tenant: "alice", + Shard: "", + ConsistencyLevel: types.ConsistencyLevelOne, + DirectCandidateNode: "node3", // Should be ordered first + } + + plan, err := r.BuildWriteRoutingPlan(opts) + require.NoError(t, err) + require.Equal(t, "node3", plan.ReplicaSet.Replicas[0].NodeName, "DirectCandidateNode should be first") +} + +func TestMultiTenantRouter_BuildWriteRoutingPlan_TenantNotFound(t *testing.T) { + mockSchemaGetter := schema.NewMockSchemaGetter(t) + mockReplicationFSM := replicationTypes.NewMockReplicationFSMReader(t) + mockNodeSelector := mocks.NewMockNodeSelector("node1", "node2") + mockSchemaReader := schema.NewMockSchemaReader(t) + + tenantStatus := map[string]string{} + mockSchemaGetter.EXPECT().OptimisticTenantStatus(mock.Anything, "TestClass", "nonexistent"). 
+ Return(tenantStatus, errors.New("tenant not found: \"nonexistent\"")) + + r := router.NewBuilder( + "TestClass", + true, + mockNodeSelector, + mockSchemaGetter, + mockSchemaReader, + mockReplicationFSM, + ).Build() + + opts := types.RoutingPlanBuildOptions{ + Tenant: "nonexistent", + Shard: "", + ConsistencyLevel: types.ConsistencyLevelOne, + } + + plan, err := r.BuildWriteRoutingPlan(opts) + require.Error(t, err) + require.Contains(t, err.Error(), "tenant not found: \"nonexistent\"") + require.Empty(t, plan.ReplicaSet.Replicas) +} + +func TestSingleTenantRouter_BuildReadRoutingPlan_TenantValidation(t *testing.T) { + mockSchemaGetter := schema.NewMockSchemaGetter(t) + mockSchemaReader := schema.NewMockSchemaReader(t) + mockReplicationFSM := replicationTypes.NewMockReplicationFSMReader(t) + mockNodeSelector := mocks.NewMockNodeSelector("node1") + + r := router.NewBuilder( + "TestClass", + false, + mockNodeSelector, + mockSchemaGetter, + mockSchemaReader, + mockReplicationFSM, + ).Build() + + opts := types.RoutingPlanBuildOptions{ + Tenant: "some-tenant", // Single tenant should reject non-empty tenant + Shard: "shard1", + ConsistencyLevel: types.ConsistencyLevelOne, + } + + plan, err := r.BuildReadRoutingPlan(opts) + require.Error(t, err) + require.Contains(t, err.Error(), "class TestClass has multi-tenancy disabled, but request was with tenant") + require.Empty(t, plan.ReplicaSet.Replicas) +} + +func TestMultiTenantRouter_BuildReadRoutingPlan_ConsistencyLevelValidation(t *testing.T) { + mockSchemaGetter := schema.NewMockSchemaGetter(t) + mockReplicationFSM := replicationTypes.NewMockReplicationFSMReader(t) + mockNodeSelector := mocks.NewMockNodeSelector("node1", "node2") + mockSchemaReader := schema.NewMockSchemaReader(t) + + mockSchemaReader.EXPECT().ShardReplicas("TestClass", "alice").Return([]string{"node1", "node2"}, nil) + tenantStatus := map[string]string{ + "alice": models.TenantActivityStatusHOT, + } + 
mockSchemaGetter.EXPECT().OptimisticTenantStatus(mock.Anything, "TestClass", "alice"). + Return(tenantStatus, nil) + mockReplicationFSM.EXPECT().FilterOneShardReplicasRead("TestClass", "alice", []string{"node1", "node2"}). + Return([]string{"node1", "node2"}) + + r := router.NewBuilder( + "TestClass", + true, + mockNodeSelector, + mockSchemaGetter, + mockSchemaReader, + mockReplicationFSM, + ).Build() + + opts := types.RoutingPlanBuildOptions{ + Tenant: "alice", + Shard: "", + ConsistencyLevel: "INVALID_LEVEL", + } + + plan, err := r.BuildReadRoutingPlan(opts) + require.NoError(t, err) + require.Equal(t, 1, plan.IntConsistencyLevel) +} + +func TestMultiTenantRouter_BuildReadRoutingPlan_NoReplicasError(t *testing.T) { + mockSchemaGetter := schema.NewMockSchemaGetter(t) + mockSchemaReader := schema.NewMockSchemaReader(t) + mockReplicationFSM := replicationTypes.NewMockReplicationFSMReader(t) + mockNodeSelector := mocks.NewMockNodeSelector("node1") + + mockSchemaReader.EXPECT().ShardReplicas("TestClass", "alice").Return([]string{}, nil) + tenantStatus := map[string]string{ + "alice": models.TenantActivityStatusHOT, + } + mockSchemaGetter.EXPECT().OptimisticTenantStatus(mock.Anything, "TestClass", "alice"). + Return(tenantStatus, nil) + mockReplicationFSM.EXPECT().FilterOneShardReplicasRead("TestClass", "alice", []string{}). 
+ Return([]string{}) // No read replicas + + r := router.NewBuilder( + "TestClass", + true, + mockNodeSelector, + mockSchemaGetter, + mockSchemaReader, + mockReplicationFSM, + ).Build() + + opts := types.RoutingPlanBuildOptions{ + Tenant: "alice", + Shard: "", + ConsistencyLevel: types.ConsistencyLevelOne, + } + + plan, err := r.BuildReadRoutingPlan(opts) + require.Error(t, err) + require.Contains(t, err.Error(), "no read replica found") + require.Empty(t, plan.ReplicaSet.Replicas) +} + +func TestSingleTenantRouter_BuildReadRoutingPlan_ConsistencyLevelValidation(t *testing.T) { + mockSchemaGetter := schema.NewMockSchemaGetter(t) + mockSchemaReader := schema.NewMockSchemaReader(t) + mockReplicationFSM := replicationTypes.NewMockReplicationFSMReader(t) + mockNodeSelector := mocks.NewMockNodeSelector("node1", "node2") + + state := createShardingStateWithShards([]string{"shard1"}) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(state.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, state) + }).Maybe() + mockSchemaReader.EXPECT().ShardReplicas("TestClass", "shard1").Return([]string{"node1", "node2"}, nil) + mockReplicationFSM.EXPECT().FilterOneShardReplicasRead("TestClass", "shard1", []string{"node1", "node2"}). 
+ Return([]string{"node1", "node2"}) + + r := router.NewBuilder( + "TestClass", + false, + mockNodeSelector, + mockSchemaGetter, + mockSchemaReader, + mockReplicationFSM, + ).Build() + + opts := types.RoutingPlanBuildOptions{ + Tenant: "", + Shard: "shard1", + ConsistencyLevel: "INVALID_LEVEL", + } + + plan, err := r.BuildReadRoutingPlan(opts) + require.NoError(t, err) + require.Equal(t, 1, plan.IntConsistencyLevel) +} + +func TestSingleTenantRouter_BuildReadRoutingPlan_ReplicaOrdering(t *testing.T) { + mockSchemaGetter := schema.NewMockSchemaGetter(t) + mockSchemaReader := schema.NewMockSchemaReader(t) + mockReplicationFSM := replicationTypes.NewMockReplicationFSMReader(t) + mockNodeSelector := mocks.NewMockNodeSelector("node1", "node2", "node3") + + state := createShardingStateWithShards([]string{"shard1"}) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(state.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, state) + }).Maybe() + mockSchemaReader.EXPECT().ShardReplicas("TestClass", "shard1").Return([]string{"node1", "node2", "node3"}, nil) + mockReplicationFSM.EXPECT().FilterOneShardReplicasRead("TestClass", "shard1", []string{"node1", "node2", "node3"}). 
+ Return([]string{"node1", "node2", "node3"}) + + r := router.NewBuilder( + "TestClass", + false, + mockNodeSelector, + mockSchemaGetter, + mockSchemaReader, + mockReplicationFSM, + ).Build() + + opts := types.RoutingPlanBuildOptions{ + Tenant: "", + Shard: "shard1", + ConsistencyLevel: types.ConsistencyLevelOne, + DirectCandidateNode: "node3", + } + + plan, err := r.BuildReadRoutingPlan(opts) + require.NoError(t, err) + require.Equal(t, "node3", plan.ReplicaSet.Replicas[0].NodeName, "DirectCandidateNode should be first") +} + +func TestMultiTenantRouter_BuildReadRoutingPlan_ReplicaOrdering(t *testing.T) { + mockSchemaGetter := schema.NewMockSchemaGetter(t) + mockReplicationFSM := replicationTypes.NewMockReplicationFSMReader(t) + mockNodeSelector := mocks.NewMockNodeSelector("node1", "node2", "node3") + mockSchemaReader := schema.NewMockSchemaReader(t) + + mockSchemaReader.EXPECT().ShardReplicas("TestClass", "alice").Return([]string{"node1", "node2", "node3"}, nil) + tenantStatus := map[string]string{ + "alice": models.TenantActivityStatusHOT, + } + mockSchemaGetter.EXPECT().OptimisticTenantStatus(mock.Anything, "TestClass", "alice"). + Return(tenantStatus, nil) + mockReplicationFSM.EXPECT().FilterOneShardReplicasRead("TestClass", "alice", []string{"node1", "node2", "node3"}). 
+ Return([]string{"node1", "node2", "node3"}) + + r := router.NewBuilder( + "TestClass", + true, + mockNodeSelector, + mockSchemaGetter, + mockSchemaReader, + mockReplicationFSM, + ).Build() + + opts := types.RoutingPlanBuildOptions{ + Tenant: "alice", + Shard: "", + ConsistencyLevel: types.ConsistencyLevelOne, + DirectCandidateNode: "node2", + } + + plan, err := r.BuildReadRoutingPlan(opts) + require.NoError(t, err) + require.Equal(t, "node2", plan.ReplicaSet.Replicas[0].NodeName, "DirectCandidateNode should be first") +} + +func TestMultiTenantRouter_BuildReadRoutingPlan_TenantValidation(t *testing.T) { + mockSchemaGetter := schema.NewMockSchemaGetter(t) + mockReplicationFSM := replicationTypes.NewMockReplicationFSMReader(t) + mockNodeSelector := mocks.NewMockNodeSelector("node1", "node2") + mockSchemaReader := schema.NewMockSchemaReader(t) + + r := router.NewBuilder( + "TestClass", + true, + mockNodeSelector, + mockSchemaGetter, + mockSchemaReader, + mockReplicationFSM, + ).Build() + + opts := types.RoutingPlanBuildOptions{ + Tenant: "", // Empty tenant should fail for multi-tenant + Shard: "", + ConsistencyLevel: types.ConsistencyLevelOne, + } + + plan, err := r.BuildReadRoutingPlan(opts) + require.Error(t, err) + require.Contains(t, err.Error(), "class TestClass has multi-tenancy enabled, but request was without tenant") + require.Empty(t, plan.ReplicaSet.Replicas) +} + +func TestSingleTenantRouter_BuildRoutingPlanOptions(t *testing.T) { + mockSchemaGetter := schema.NewMockSchemaGetter(t) + mockSchemaReader := schema.NewMockSchemaReader(t) + mockReplicationFSM := replicationTypes.NewMockReplicationFSMReader(t) + mockNodeSelector := mocks.NewMockNodeSelector("node1") + + r := router.NewBuilder( + "TestClass", + false, + mockNodeSelector, + mockSchemaGetter, + mockSchemaReader, + mockReplicationFSM, + ).Build() + + opts := r.BuildRoutingPlanOptions("ignored-tenant", "shard1", types.ConsistencyLevelQuorum, "node2") + + expected := types.RoutingPlanBuildOptions{ + 
Shard: "shard1", + Tenant: "", + ConsistencyLevel: types.ConsistencyLevelQuorum, + DirectCandidateNode: "node2", + } + + require.Equal(t, expected, opts) +} + +func TestMultiTenantRouter_BuildRoutingPlanOptions(t *testing.T) { + mockSchemaGetter := schema.NewMockSchemaGetter(t) + mockSchemaReader := schema.NewMockSchemaReader(t) + mockReplicationFSM := replicationTypes.NewMockReplicationFSMReader(t) + mockNodeSelector := mocks.NewMockNodeSelector("node1") + + r := router.NewBuilder( + "TestClass", + true, + mockNodeSelector, + mockSchemaGetter, + mockSchemaReader, + mockReplicationFSM, + ).Build() + + opts := r.BuildRoutingPlanOptions("alice", "shard1", types.ConsistencyLevelAll, "node3") + + expected := types.RoutingPlanBuildOptions{ + Shard: "shard1", + Tenant: "alice", + ConsistencyLevel: types.ConsistencyLevelAll, + DirectCandidateNode: "node3", + } + + require.Equal(t, expected, opts) +} + +func TestSingleTenantRouter_BuildRoutingPlanOptions_EmptyInputs(t *testing.T) { + mockSchemaGetter := schema.NewMockSchemaGetter(t) + mockSchemaReader := schema.NewMockSchemaReader(t) + mockReplicationFSM := replicationTypes.NewMockReplicationFSMReader(t) + mockNodeSelector := mocks.NewMockNodeSelector("node1") + + r := router.NewBuilder( + "TestClass", + false, + mockNodeSelector, + mockSchemaGetter, + mockSchemaReader, + mockReplicationFSM, + ).Build() + + opts := r.BuildRoutingPlanOptions("", "", types.ConsistencyLevelOne, "") + + expected := types.RoutingPlanBuildOptions{ + Shard: "", + Tenant: "", + ConsistencyLevel: types.ConsistencyLevelOne, + DirectCandidateNode: "", + } + + require.Equal(t, expected, opts) +} + +func TestMultiTenantRouter_BuildRoutingPlanOptions_EmptyInputs(t *testing.T) { + mockSchemaGetter := schema.NewMockSchemaGetter(t) + mockSchemaReader := schema.NewMockSchemaReader(t) + mockReplicationFSM := replicationTypes.NewMockReplicationFSMReader(t) + mockNodeSelector := mocks.NewMockNodeSelector("node1") + + r := router.NewBuilder( + "TestClass", + 
true, + mockNodeSelector, + mockSchemaGetter, + mockSchemaReader, + mockReplicationFSM, + ).Build() + + opts := r.BuildRoutingPlanOptions("", "", types.ConsistencyLevelOne, "") + + expected := types.RoutingPlanBuildOptions{ + Shard: "", + Tenant: "", + ConsistencyLevel: types.ConsistencyLevelOne, + DirectCandidateNode: "", + } + + require.Equal(t, expected, opts) +} + +func TestMultiTenantRouter_GetReadWriteReplicasLocation_ShardMismatch(t *testing.T) { + mockSchemaGetter := schema.NewMockSchemaGetter(t) + mockReplicationFSM := replicationTypes.NewMockReplicationFSMReader(t) + mockNodeSelector := mocks.NewMockNodeSelector("node1", "node2") + mockSchemaReader := schema.NewMockSchemaReader(t) + + r := router.NewBuilder( + "TestClass", + true, + mockNodeSelector, + mockSchemaGetter, + mockSchemaReader, + mockReplicationFSM, + ).Build() + + rs, ws, err := r.GetReadWriteReplicasLocation("TestClass", "alice", "bob") + + require.Error(t, err) + require.Contains(t, err.Error(), "invalid tenant shard") + require.Empty(t, rs.Replicas) + require.Empty(t, ws.Replicas) + require.Empty(t, ws.AdditionalReplicas) +} + +func TestSingleTenantRouter_BuildReadRoutingPlan_AllShards(t *testing.T) { + mockSchemaGetter := schema.NewMockSchemaGetter(t) + mockSchemaReader := schema.NewMockSchemaReader(t) + mockReplicationFSM := replicationTypes.NewMockReplicationFSMReader(t) + mockNodeSelector := mocks.NewMockNodeSelector("node1", "node2") + + shards := []string{"shard1", "shard2"} + state := createShardingStateWithShards([]string{"shard1", "shard2"}) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(state.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, state) + }).Maybe() + + for _, shard := range shards { + mockSchemaReader.EXPECT().ShardReplicas("TestClass", 
shard).Return([]string{"node1"}, nil) + mockReplicationFSM.EXPECT().FilterOneShardReplicasRead("TestClass", shard, []string{"node1"}). + Return([]string{"node1"}) + } + + r := router.NewBuilder( + "TestClass", + false, + mockNodeSelector, + mockSchemaGetter, + mockSchemaReader, + mockReplicationFSM, + ).Build() + + opts := types.RoutingPlanBuildOptions{ + Tenant: "", + Shard: "", + ConsistencyLevel: types.ConsistencyLevelOne, + } + + plan, err := r.BuildReadRoutingPlan(opts) + require.NoError(t, err) + require.Len(t, plan.ReplicaSet.Replicas, 2, "should have replicas from all shards") + + shardNames := make(map[string]bool) + for _, replica := range plan.ReplicaSet.Replicas { + shardNames[replica.ShardName] = true + } + require.True(t, shardNames["shard1"], "should have replica from shard1") + require.True(t, shardNames["shard2"], "should have replica from shard2") +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/router/types/consistency_level.go b/platform/dbops/binaries/weaviate-src/cluster/router/types/consistency_level.go new file mode 100644 index 0000000000000000000000000000000000000000..11808e13b86d22943dcfbf60803e183f6af7f0b0 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/router/types/consistency_level.go @@ -0,0 +1,33 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package types + +// ConsistencyLevel is an enum of all possible consistency level +type ConsistencyLevel string + +const ( + ConsistencyLevelOne ConsistencyLevel = "ONE" + ConsistencyLevelQuorum ConsistencyLevel = "QUORUM" + ConsistencyLevelAll ConsistencyLevel = "ALL" +) + +// ToInt returns the minimum number needed to satisfy consistency level l among N +func (l ConsistencyLevel) ToInt(n int) int { + switch l { + case ConsistencyLevelAll: + return n + case ConsistencyLevelQuorum: + return n/2 + 1 + default: + return 1 + } +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/router/types/mock_read_replica_strategy.go b/platform/dbops/binaries/weaviate-src/cluster/router/types/mock_read_replica_strategy.go new file mode 100644 index 0000000000000000000000000000000000000000..0bd59fe8f938f869aaabb5927ede45ea1defcb8b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/router/types/mock_read_replica_strategy.go @@ -0,0 +1,90 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by mockery v2.53.2. DO NOT EDIT. 
+ +package types + +import mock "github.com/stretchr/testify/mock" + +// MockReadReplicaStrategy is an autogenerated mock type for the ReadReplicaStrategy type +type MockReadReplicaStrategy struct { + mock.Mock +} + +type MockReadReplicaStrategy_Expecter struct { + mock *mock.Mock +} + +func (_m *MockReadReplicaStrategy) EXPECT() *MockReadReplicaStrategy_Expecter { + return &MockReadReplicaStrategy_Expecter{mock: &_m.Mock} +} + +// Apply provides a mock function with given fields: replicas, options +func (_m *MockReadReplicaStrategy) Apply(replicas ReadReplicaSet, options RoutingPlanBuildOptions) ReadReplicaSet { + ret := _m.Called(replicas, options) + + if len(ret) == 0 { + panic("no return value specified for Apply") + } + + var r0 ReadReplicaSet + if rf, ok := ret.Get(0).(func(ReadReplicaSet, RoutingPlanBuildOptions) ReadReplicaSet); ok { + r0 = rf(replicas, options) + } else { + r0 = ret.Get(0).(ReadReplicaSet) + } + + return r0 +} + +// MockReadReplicaStrategy_Apply_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Apply' +type MockReadReplicaStrategy_Apply_Call struct { + *mock.Call +} + +// Apply is a helper method to define mock.On call +// - replicas ReadReplicaSet +// - options RoutingPlanBuildOptions +func (_e *MockReadReplicaStrategy_Expecter) Apply(replicas interface{}, options interface{}) *MockReadReplicaStrategy_Apply_Call { + return &MockReadReplicaStrategy_Apply_Call{Call: _e.mock.On("Apply", replicas, options)} +} + +func (_c *MockReadReplicaStrategy_Apply_Call) Run(run func(replicas ReadReplicaSet, options RoutingPlanBuildOptions)) *MockReadReplicaStrategy_Apply_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(ReadReplicaSet), args[1].(RoutingPlanBuildOptions)) + }) + return _c +} + +func (_c *MockReadReplicaStrategy_Apply_Call) Return(_a0 ReadReplicaSet) *MockReadReplicaStrategy_Apply_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockReadReplicaStrategy_Apply_Call) 
RunAndReturn(run func(ReadReplicaSet, RoutingPlanBuildOptions) ReadReplicaSet) *MockReadReplicaStrategy_Apply_Call { + _c.Call.Return(run) + return _c +} + +// NewMockReadReplicaStrategy creates a new instance of MockReadReplicaStrategy. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMockReadReplicaStrategy(t interface { + mock.TestingT + Cleanup(func()) +}) *MockReadReplicaStrategy { + mock := &MockReadReplicaStrategy{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/router/types/mock_router.go b/platform/dbops/binaries/weaviate-src/cluster/router/types/mock_router.go new file mode 100644 index 0000000000000000000000000000000000000000..6b6e31b7b30c35c5152208f7940e24b029133633 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/router/types/mock_router.go @@ -0,0 +1,488 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by mockery v2.53.2. DO NOT EDIT. 
+ +package types + +import mock "github.com/stretchr/testify/mock" + +// MockRouter is an autogenerated mock type for the Router type +type MockRouter struct { + mock.Mock +} + +type MockRouter_Expecter struct { + mock *mock.Mock +} + +func (_m *MockRouter) EXPECT() *MockRouter_Expecter { + return &MockRouter_Expecter{mock: &_m.Mock} +} + +// AllHostnames provides a mock function with no fields +func (_m *MockRouter) AllHostnames() []string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for AllHostnames") + } + + var r0 []string + if rf, ok := ret.Get(0).(func() []string); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]string) + } + } + + return r0 +} + +// MockRouter_AllHostnames_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AllHostnames' +type MockRouter_AllHostnames_Call struct { + *mock.Call +} + +// AllHostnames is a helper method to define mock.On call +func (_e *MockRouter_Expecter) AllHostnames() *MockRouter_AllHostnames_Call { + return &MockRouter_AllHostnames_Call{Call: _e.mock.On("AllHostnames")} +} + +func (_c *MockRouter_AllHostnames_Call) Run(run func()) *MockRouter_AllHostnames_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockRouter_AllHostnames_Call) Return(_a0 []string) *MockRouter_AllHostnames_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockRouter_AllHostnames_Call) RunAndReturn(run func() []string) *MockRouter_AllHostnames_Call { + _c.Call.Return(run) + return _c +} + +// BuildReadRoutingPlan provides a mock function with given fields: params +func (_m *MockRouter) BuildReadRoutingPlan(params RoutingPlanBuildOptions) (ReadRoutingPlan, error) { + ret := _m.Called(params) + + if len(ret) == 0 { + panic("no return value specified for BuildReadRoutingPlan") + } + + var r0 ReadRoutingPlan + var r1 error + if rf, ok := ret.Get(0).(func(RoutingPlanBuildOptions) (ReadRoutingPlan, error)); ok 
{ + return rf(params) + } + if rf, ok := ret.Get(0).(func(RoutingPlanBuildOptions) ReadRoutingPlan); ok { + r0 = rf(params) + } else { + r0 = ret.Get(0).(ReadRoutingPlan) + } + + if rf, ok := ret.Get(1).(func(RoutingPlanBuildOptions) error); ok { + r1 = rf(params) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockRouter_BuildReadRoutingPlan_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BuildReadRoutingPlan' +type MockRouter_BuildReadRoutingPlan_Call struct { + *mock.Call +} + +// BuildReadRoutingPlan is a helper method to define mock.On call +// - params RoutingPlanBuildOptions +func (_e *MockRouter_Expecter) BuildReadRoutingPlan(params interface{}) *MockRouter_BuildReadRoutingPlan_Call { + return &MockRouter_BuildReadRoutingPlan_Call{Call: _e.mock.On("BuildReadRoutingPlan", params)} +} + +func (_c *MockRouter_BuildReadRoutingPlan_Call) Run(run func(params RoutingPlanBuildOptions)) *MockRouter_BuildReadRoutingPlan_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(RoutingPlanBuildOptions)) + }) + return _c +} + +func (_c *MockRouter_BuildReadRoutingPlan_Call) Return(_a0 ReadRoutingPlan, _a1 error) *MockRouter_BuildReadRoutingPlan_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockRouter_BuildReadRoutingPlan_Call) RunAndReturn(run func(RoutingPlanBuildOptions) (ReadRoutingPlan, error)) *MockRouter_BuildReadRoutingPlan_Call { + _c.Call.Return(run) + return _c +} + +// BuildRoutingPlanOptions provides a mock function with given fields: tenant, shard, cl, directCandidate +func (_m *MockRouter) BuildRoutingPlanOptions(tenant string, shard string, cl ConsistencyLevel, directCandidate string) RoutingPlanBuildOptions { + ret := _m.Called(tenant, shard, cl, directCandidate) + + if len(ret) == 0 { + panic("no return value specified for BuildRoutingPlanOptions") + } + + var r0 RoutingPlanBuildOptions + if rf, ok := ret.Get(0).(func(string, string, ConsistencyLevel, string) 
RoutingPlanBuildOptions); ok { + r0 = rf(tenant, shard, cl, directCandidate) + } else { + r0 = ret.Get(0).(RoutingPlanBuildOptions) + } + + return r0 +} + +// MockRouter_BuildRoutingPlanOptions_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BuildRoutingPlanOptions' +type MockRouter_BuildRoutingPlanOptions_Call struct { + *mock.Call +} + +// BuildRoutingPlanOptions is a helper method to define mock.On call +// - tenant string +// - shard string +// - cl ConsistencyLevel +// - directCandidate string +func (_e *MockRouter_Expecter) BuildRoutingPlanOptions(tenant interface{}, shard interface{}, cl interface{}, directCandidate interface{}) *MockRouter_BuildRoutingPlanOptions_Call { + return &MockRouter_BuildRoutingPlanOptions_Call{Call: _e.mock.On("BuildRoutingPlanOptions", tenant, shard, cl, directCandidate)} +} + +func (_c *MockRouter_BuildRoutingPlanOptions_Call) Run(run func(tenant string, shard string, cl ConsistencyLevel, directCandidate string)) *MockRouter_BuildRoutingPlanOptions_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string), args[1].(string), args[2].(ConsistencyLevel), args[3].(string)) + }) + return _c +} + +func (_c *MockRouter_BuildRoutingPlanOptions_Call) Return(_a0 RoutingPlanBuildOptions) *MockRouter_BuildRoutingPlanOptions_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockRouter_BuildRoutingPlanOptions_Call) RunAndReturn(run func(string, string, ConsistencyLevel, string) RoutingPlanBuildOptions) *MockRouter_BuildRoutingPlanOptions_Call { + _c.Call.Return(run) + return _c +} + +// BuildWriteRoutingPlan provides a mock function with given fields: params +func (_m *MockRouter) BuildWriteRoutingPlan(params RoutingPlanBuildOptions) (WriteRoutingPlan, error) { + ret := _m.Called(params) + + if len(ret) == 0 { + panic("no return value specified for BuildWriteRoutingPlan") + } + + var r0 WriteRoutingPlan + var r1 error + if rf, ok := ret.Get(0).(func(RoutingPlanBuildOptions) 
(WriteRoutingPlan, error)); ok { + return rf(params) + } + if rf, ok := ret.Get(0).(func(RoutingPlanBuildOptions) WriteRoutingPlan); ok { + r0 = rf(params) + } else { + r0 = ret.Get(0).(WriteRoutingPlan) + } + + if rf, ok := ret.Get(1).(func(RoutingPlanBuildOptions) error); ok { + r1 = rf(params) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockRouter_BuildWriteRoutingPlan_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BuildWriteRoutingPlan' +type MockRouter_BuildWriteRoutingPlan_Call struct { + *mock.Call +} + +// BuildWriteRoutingPlan is a helper method to define mock.On call +// - params RoutingPlanBuildOptions +func (_e *MockRouter_Expecter) BuildWriteRoutingPlan(params interface{}) *MockRouter_BuildWriteRoutingPlan_Call { + return &MockRouter_BuildWriteRoutingPlan_Call{Call: _e.mock.On("BuildWriteRoutingPlan", params)} +} + +func (_c *MockRouter_BuildWriteRoutingPlan_Call) Run(run func(params RoutingPlanBuildOptions)) *MockRouter_BuildWriteRoutingPlan_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(RoutingPlanBuildOptions)) + }) + return _c +} + +func (_c *MockRouter_BuildWriteRoutingPlan_Call) Return(_a0 WriteRoutingPlan, _a1 error) *MockRouter_BuildWriteRoutingPlan_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockRouter_BuildWriteRoutingPlan_Call) RunAndReturn(run func(RoutingPlanBuildOptions) (WriteRoutingPlan, error)) *MockRouter_BuildWriteRoutingPlan_Call { + _c.Call.Return(run) + return _c +} + +// GetReadReplicasLocation provides a mock function with given fields: collection, tenant, shard +func (_m *MockRouter) GetReadReplicasLocation(collection string, tenant string, shard string) (ReadReplicaSet, error) { + ret := _m.Called(collection, tenant, shard) + + if len(ret) == 0 { + panic("no return value specified for GetReadReplicasLocation") + } + + var r0 ReadReplicaSet + var r1 error + if rf, ok := ret.Get(0).(func(string, string, string) (ReadReplicaSet, 
error)); ok { + return rf(collection, tenant, shard) + } + if rf, ok := ret.Get(0).(func(string, string, string) ReadReplicaSet); ok { + r0 = rf(collection, tenant, shard) + } else { + r0 = ret.Get(0).(ReadReplicaSet) + } + + if rf, ok := ret.Get(1).(func(string, string, string) error); ok { + r1 = rf(collection, tenant, shard) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockRouter_GetReadReplicasLocation_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetReadReplicasLocation' +type MockRouter_GetReadReplicasLocation_Call struct { + *mock.Call +} + +// GetReadReplicasLocation is a helper method to define mock.On call +// - collection string +// - tenant string +// - shard string +func (_e *MockRouter_Expecter) GetReadReplicasLocation(collection interface{}, tenant interface{}, shard interface{}) *MockRouter_GetReadReplicasLocation_Call { + return &MockRouter_GetReadReplicasLocation_Call{Call: _e.mock.On("GetReadReplicasLocation", collection, tenant, shard)} +} + +func (_c *MockRouter_GetReadReplicasLocation_Call) Run(run func(collection string, tenant string, shard string)) *MockRouter_GetReadReplicasLocation_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string), args[1].(string), args[2].(string)) + }) + return _c +} + +func (_c *MockRouter_GetReadReplicasLocation_Call) Return(_a0 ReadReplicaSet, _a1 error) *MockRouter_GetReadReplicasLocation_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockRouter_GetReadReplicasLocation_Call) RunAndReturn(run func(string, string, string) (ReadReplicaSet, error)) *MockRouter_GetReadReplicasLocation_Call { + _c.Call.Return(run) + return _c +} + +// GetReadWriteReplicasLocation provides a mock function with given fields: collection, tenant, shard +func (_m *MockRouter) GetReadWriteReplicasLocation(collection string, tenant string, shard string) (ReadReplicaSet, WriteReplicaSet, error) { + ret := _m.Called(collection, tenant, shard) + + 
if len(ret) == 0 { + panic("no return value specified for GetReadWriteReplicasLocation") + } + + var r0 ReadReplicaSet + var r1 WriteReplicaSet + var r2 error + if rf, ok := ret.Get(0).(func(string, string, string) (ReadReplicaSet, WriteReplicaSet, error)); ok { + return rf(collection, tenant, shard) + } + if rf, ok := ret.Get(0).(func(string, string, string) ReadReplicaSet); ok { + r0 = rf(collection, tenant, shard) + } else { + r0 = ret.Get(0).(ReadReplicaSet) + } + + if rf, ok := ret.Get(1).(func(string, string, string) WriteReplicaSet); ok { + r1 = rf(collection, tenant, shard) + } else { + r1 = ret.Get(1).(WriteReplicaSet) + } + + if rf, ok := ret.Get(2).(func(string, string, string) error); ok { + r2 = rf(collection, tenant, shard) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// MockRouter_GetReadWriteReplicasLocation_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetReadWriteReplicasLocation' +type MockRouter_GetReadWriteReplicasLocation_Call struct { + *mock.Call +} + +// GetReadWriteReplicasLocation is a helper method to define mock.On call +// - collection string +// - tenant string +// - shard string +func (_e *MockRouter_Expecter) GetReadWriteReplicasLocation(collection interface{}, tenant interface{}, shard interface{}) *MockRouter_GetReadWriteReplicasLocation_Call { + return &MockRouter_GetReadWriteReplicasLocation_Call{Call: _e.mock.On("GetReadWriteReplicasLocation", collection, tenant, shard)} +} + +func (_c *MockRouter_GetReadWriteReplicasLocation_Call) Run(run func(collection string, tenant string, shard string)) *MockRouter_GetReadWriteReplicasLocation_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string), args[1].(string), args[2].(string)) + }) + return _c +} + +func (_c *MockRouter_GetReadWriteReplicasLocation_Call) Return(readReplicas ReadReplicaSet, writeReplicas WriteReplicaSet, err error) *MockRouter_GetReadWriteReplicasLocation_Call { + 
_c.Call.Return(readReplicas, writeReplicas, err) + return _c +} + +func (_c *MockRouter_GetReadWriteReplicasLocation_Call) RunAndReturn(run func(string, string, string) (ReadReplicaSet, WriteReplicaSet, error)) *MockRouter_GetReadWriteReplicasLocation_Call { + _c.Call.Return(run) + return _c +} + +// GetWriteReplicasLocation provides a mock function with given fields: collection, tenant, shard +func (_m *MockRouter) GetWriteReplicasLocation(collection string, tenant string, shard string) (WriteReplicaSet, error) { + ret := _m.Called(collection, tenant, shard) + + if len(ret) == 0 { + panic("no return value specified for GetWriteReplicasLocation") + } + + var r0 WriteReplicaSet + var r1 error + if rf, ok := ret.Get(0).(func(string, string, string) (WriteReplicaSet, error)); ok { + return rf(collection, tenant, shard) + } + if rf, ok := ret.Get(0).(func(string, string, string) WriteReplicaSet); ok { + r0 = rf(collection, tenant, shard) + } else { + r0 = ret.Get(0).(WriteReplicaSet) + } + + if rf, ok := ret.Get(1).(func(string, string, string) error); ok { + r1 = rf(collection, tenant, shard) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockRouter_GetWriteReplicasLocation_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetWriteReplicasLocation' +type MockRouter_GetWriteReplicasLocation_Call struct { + *mock.Call +} + +// GetWriteReplicasLocation is a helper method to define mock.On call +// - collection string +// - tenant string +// - shard string +func (_e *MockRouter_Expecter) GetWriteReplicasLocation(collection interface{}, tenant interface{}, shard interface{}) *MockRouter_GetWriteReplicasLocation_Call { + return &MockRouter_GetWriteReplicasLocation_Call{Call: _e.mock.On("GetWriteReplicasLocation", collection, tenant, shard)} +} + +func (_c *MockRouter_GetWriteReplicasLocation_Call) Run(run func(collection string, tenant string, shard string)) *MockRouter_GetWriteReplicasLocation_Call { + 
_c.Call.Run(func(args mock.Arguments) { + run(args[0].(string), args[1].(string), args[2].(string)) + }) + return _c +} + +func (_c *MockRouter_GetWriteReplicasLocation_Call) Return(_a0 WriteReplicaSet, _a1 error) *MockRouter_GetWriteReplicasLocation_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockRouter_GetWriteReplicasLocation_Call) RunAndReturn(run func(string, string, string) (WriteReplicaSet, error)) *MockRouter_GetWriteReplicasLocation_Call { + _c.Call.Return(run) + return _c +} + +// NodeHostname provides a mock function with given fields: nodeName +func (_m *MockRouter) NodeHostname(nodeName string) (string, bool) { + ret := _m.Called(nodeName) + + if len(ret) == 0 { + panic("no return value specified for NodeHostname") + } + + var r0 string + var r1 bool + if rf, ok := ret.Get(0).(func(string) (string, bool)); ok { + return rf(nodeName) + } + if rf, ok := ret.Get(0).(func(string) string); ok { + r0 = rf(nodeName) + } else { + r0 = ret.Get(0).(string) + } + + if rf, ok := ret.Get(1).(func(string) bool); ok { + r1 = rf(nodeName) + } else { + r1 = ret.Get(1).(bool) + } + + return r0, r1 +} + +// MockRouter_NodeHostname_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'NodeHostname' +type MockRouter_NodeHostname_Call struct { + *mock.Call +} + +// NodeHostname is a helper method to define mock.On call +// - nodeName string +func (_e *MockRouter_Expecter) NodeHostname(nodeName interface{}) *MockRouter_NodeHostname_Call { + return &MockRouter_NodeHostname_Call{Call: _e.mock.On("NodeHostname", nodeName)} +} + +func (_c *MockRouter_NodeHostname_Call) Run(run func(nodeName string)) *MockRouter_NodeHostname_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string)) + }) + return _c +} + +func (_c *MockRouter_NodeHostname_Call) Return(_a0 string, _a1 bool) *MockRouter_NodeHostname_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockRouter_NodeHostname_Call) RunAndReturn(run 
func(string) (string, bool)) *MockRouter_NodeHostname_Call { + _c.Call.Return(run) + return _c +} + +// NewMockRouter creates a new instance of MockRouter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMockRouter(t interface { + mock.TestingT + Cleanup(func()) +}) *MockRouter { + mock := &MockRouter{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/router/types/repair_response.go b/platform/dbops/binaries/weaviate-src/cluster/router/types/repair_response.go new file mode 100644 index 0000000000000000000000000000000000000000..5dea275925e70476a108905bf24d47f568335153 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/router/types/repair_response.go @@ -0,0 +1,20 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package types + +type RepairResponse struct { + ID string // object id + Version int64 // sender's current version of the object + UpdateTime int64 // sender's current update time + Err string + Deleted bool +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/router/types/replicaset.go b/platform/dbops/binaries/weaviate-src/cluster/router/types/replicaset.go new file mode 100644 index 0000000000000000000000000000000000000000..802bff11d6fa731e62c153c6d8001fa0bf410b4a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/router/types/replicaset.go @@ -0,0 +1,211 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package types + +import ( + "fmt" + "strings" +) + +// ReadReplicaSet contains *exactly one* replica per shard and is produced by +// ReadReplicaStrategy implementations for read paths. +type ReadReplicaSet struct { + Replicas []Replica +} + +// String returns a human-readable representation of a ReplicaSet, +// showing all Replicas in the set. +func (s ReadReplicaSet) String() string { + var b strings.Builder + b.WriteString("[") + for i, r := range s.Replicas { + if i > 0 { + b.WriteString(", ") + } + b.WriteString(r.String()) + } + b.WriteString("]") + return b.String() +} + +// NodeNames returns a list of node names contained in the ReplicaSet. +func (s ReadReplicaSet) NodeNames() []string { + nodeNames := make([]string, 0, len(s.Replicas)) + for _, replica := range s.Replicas { + nodeNames = append(nodeNames, replica.NodeName) + } + return nodeNames +} + +// HostAddresses returns a list of host addresses for all Replicas in the ReplicaSet. 
+func (s ReadReplicaSet) HostAddresses() []string { + hostAddresses := make([]string, 0, len(s.Replicas)) + for _, replica := range s.Replicas { + hostAddresses = append(hostAddresses, replica.HostAddr) + } + return hostAddresses +} + +// Shards returns a list of unique shard names for all Replicas in the ReplicaSet. +func (s ReadReplicaSet) Shards() []string { + if len(s.Replicas) == 0 { + return []string{} + } + + seen := make(map[string]bool, len(s.Replicas)) + shards := make([]string, 0, len(s.Replicas)) + + for _, replica := range s.Replicas { + if !seen[replica.ShardName] { + seen[replica.ShardName] = true + shards = append(shards, replica.ShardName) + } + } + + return shards +} + +func (s ReadReplicaSet) EmptyReplicas() bool { + return len(s.Replicas) == 0 +} + +type WriteReplicaSet struct { + Replicas []Replica + AdditionalReplicas []Replica +} + +// NodeNames returns a list of node names contained in the ReplicaSet. +func (s WriteReplicaSet) NodeNames() []string { + nodeNames := make([]string, 0, len(s.Replicas)) + for _, replica := range s.Replicas { + nodeNames = append(nodeNames, replica.NodeName) + } + return nodeNames +} + +// HostAddresses returns a list of host addresses for all Replicas in the ReplicaSet. +func (s WriteReplicaSet) HostAddresses() []string { + hostAddresses := make([]string, 0, len(s.Replicas)) + for _, replica := range s.Replicas { + hostAddresses = append(hostAddresses, replica.HostAddr) + } + return hostAddresses +} + +// Shards returns a list of unique shard names for all Replicas in the ReplicaSet. 
+func (s WriteReplicaSet) Shards() []string { + if len(s.Replicas) == 0 { + return []string{} + } + + seen := make(map[string]bool, len(s.Replicas)) + shards := make([]string, 0, len(s.Replicas)) + + for _, replica := range s.Replicas { + if !seen[replica.ShardName] { + seen[replica.ShardName] = true + shards = append(shards, replica.ShardName) + } + } + + return shards +} + +func (s WriteReplicaSet) EmptyAdditionalReplicas() bool { + return len(s.AdditionalReplicas) == 0 +} + +// AdditionalNodeNames returns a list of node names contained in the AdditionalReplicaSet. +func (s WriteReplicaSet) AdditionalNodeNames() []string { + nodeNames := make([]string, 0, len(s.AdditionalReplicas)) + for _, replica := range s.AdditionalReplicas { + nodeNames = append(nodeNames, replica.NodeName) + } + return nodeNames +} + +// AdditionalHostAddresses returns a list of host addresses for all Replicas in the AdditionalReplicaSet. +func (s WriteReplicaSet) AdditionalHostAddresses() []string { + hostAddresses := make([]string, 0, len(s.AdditionalReplicas)) + for _, replica := range s.AdditionalReplicas { + hostAddresses = append(hostAddresses, replica.HostAddr) + } + return hostAddresses +} + +// AdditionalShards returns a list of unique shard names for all Replicas in the AdditionalReplicaSet. +func (s WriteReplicaSet) AdditionalShards() []string { + if len(s.AdditionalReplicas) == 0 { + return []string{} + } + + seen := make(map[string]bool, len(s.AdditionalReplicas)) + shards := make([]string, 0, len(s.AdditionalReplicas)) + + for _, replica := range s.AdditionalReplicas { + if !seen[replica.ShardName] { + seen[replica.ShardName] = true + shards = append(shards, replica.ShardName) + } + } + + return shards +} + +func (s WriteReplicaSet) IsEmpty() bool { + return len(s.Replicas) == 0 +} + +// validateReplicaSetConsistency validates that the consistency level can be satisfied +// by grouping replicas by shard and validating each shard independently. 
+func validateReplicaSetConsistency(replicas []Replica, level ConsistencyLevel) (int, error) { + if len(replicas) == 0 { + return 0, nil + } + + // Group replicas by shard + replicasByShard := make(map[string][]Replica) + for _, replica := range replicas { + replicasByShard[replica.ShardName] = append(replicasByShard[replica.ShardName], replica) + } + + var expectedConsistencyLevel int + var firstShard string + + for shardName, shardReplicas := range replicasByShard { + resolved := level.ToInt(len(shardReplicas)) + if resolved > len(shardReplicas) { + return 0, fmt.Errorf( + "shard %s: impossible to satisfy consistency level (%d) > available replicas (%d)", + shardName, resolved, len(shardReplicas)) + } + + if firstShard == "" { + expectedConsistencyLevel = resolved + firstShard = shardName + } else if resolved != expectedConsistencyLevel { + return 0, fmt.Errorf( + "inconsistent consistency levels: shard %s resolved to %d, shard %s resolved to %d", + firstShard, expectedConsistencyLevel, shardName, resolved) + } + } + + return expectedConsistencyLevel, nil +} + +func (s ReadReplicaSet) ValidateConsistencyLevel(level ConsistencyLevel) (int, error) { + return validateReplicaSetConsistency(s.Replicas, level) +} + +func (s WriteReplicaSet) ValidateConsistencyLevel(level ConsistencyLevel) (int, error) { + return validateReplicaSetConsistency(s.Replicas, level) +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/router/types/replicaset_test.go b/platform/dbops/binaries/weaviate-src/cluster/router/types/replicaset_test.go new file mode 100644 index 0000000000000000000000000000000000000000..d8c95a77c4ca4311b8bede6c4b809fd3a7b59ebc --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/router/types/replicaset_test.go @@ -0,0 +1,289 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate 
B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package types_test + +import ( + "reflect" + "testing" + + "github.com/weaviate/weaviate/cluster/router/types" +) + +func TestReadReplicaSet_Shards(t *testing.T) { + tests := []struct { + name string + replicas []types.Replica + want []string + }{ + { + name: "empty replicas", + replicas: []types.Replica{}, + want: []string{}, + }, + { + name: "single replica", + replicas: []types.Replica{ + {ShardName: "shard_A", NodeName: "node1", HostAddr: "host1"}, + }, + want: []string{"shard_A"}, + }, + { + name: "multiple replicas different shards", + replicas: []types.Replica{ + {ShardName: "shard_A", NodeName: "node1", HostAddr: "host1"}, + {ShardName: "shard_B", NodeName: "node2", HostAddr: "host2"}, + {ShardName: "shard_C", NodeName: "node3", HostAddr: "host3"}, + }, + want: []string{"shard_A", "shard_B", "shard_C"}, + }, + { + name: "multiple replicas same shard - should deduplicate", + replicas: []types.Replica{ + {ShardName: "shard_A", NodeName: "node1", HostAddr: "host1"}, + {ShardName: "shard_A", NodeName: "node2", HostAddr: "host2"}, + {ShardName: "shard_A", NodeName: "node3", HostAddr: "host3"}, + }, + want: []string{"shard_A"}, + }, + { + name: "mixed - multiple shards with duplicates", + replicas: []types.Replica{ + {ShardName: "shard_A", NodeName: "node1", HostAddr: "host1"}, + {ShardName: "shard_B", NodeName: "node2", HostAddr: "host2"}, + {ShardName: "shard_A", NodeName: "node3", HostAddr: "host3"}, // duplicate + {ShardName: "shard_C", NodeName: "node4", HostAddr: "host4"}, + {ShardName: "shard_B", NodeName: "node5", HostAddr: "host5"}, // duplicate + }, + want: []string{"shard_A", "shard_B", "shard_C"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + rs := types.ReadReplicaSet{Replicas: tt.replicas} + got := rs.Shards() + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("ReadReplicaSet.Shards() = %v, want %v", got, tt.want) + } + }) + } +} + +func 
TestWriteReplicaSet_Shards(t *testing.T) { + tests := []struct { + name string + replicas []types.Replica + want []string + }{ + { + name: "empty replicas", + replicas: []types.Replica{}, + want: []string{}, + }, + { + name: "single replica", + replicas: []types.Replica{ + {ShardName: "shard_A", NodeName: "node1", HostAddr: "host1"}, + }, + want: []string{"shard_A"}, + }, + { + name: "multiple replicas same shard - should deduplicate", + replicas: []types.Replica{ + {ShardName: "shard_A", NodeName: "node1", HostAddr: "host1"}, + {ShardName: "shard_A", NodeName: "node2", HostAddr: "host2"}, + }, + want: []string{"shard_A"}, + }, + { + name: "multiple different shards", + replicas: []types.Replica{ + {ShardName: "shard_A", NodeName: "node1", HostAddr: "host1"}, + {ShardName: "shard_B", NodeName: "node2", HostAddr: "host2"}, + }, + want: []string{"shard_A", "shard_B"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ws := types.WriteReplicaSet{Replicas: tt.replicas} + got := ws.Shards() + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("WriteReplicaSet.Shards() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestWriteReplicaSet_AdditionalShards(t *testing.T) { + tests := []struct { + name string + additionalReplicas []types.Replica + want []string + }{ + { + name: "empty additional replicas", + additionalReplicas: []types.Replica{}, + want: []string{}, + }, + { + name: "single additional replica", + additionalReplicas: []types.Replica{ + {ShardName: "shard_A", NodeName: "node1", HostAddr: "host1"}, + }, + want: []string{"shard_A"}, + }, + { + name: "multiple additional replicas same shard - should deduplicate", + additionalReplicas: []types.Replica{ + {ShardName: "shard_A", NodeName: "node1", HostAddr: "host1"}, + {ShardName: "shard_A", NodeName: "node2", HostAddr: "host2"}, + }, + want: []string{"shard_A"}, + }, + { + name: "multiple different additional shards", + additionalReplicas: []types.Replica{ + {ShardName: "shard_A", 
NodeName: "node1", HostAddr: "host1"}, + {ShardName: "shard_B", NodeName: "node2", HostAddr: "host2"}, + }, + want: []string{"shard_A", "shard_B"}, + }, + { + name: "complex additional replica scenario", + additionalReplicas: []types.Replica{ + {ShardName: "shard_A", NodeName: "node1", HostAddr: "host1"}, + {ShardName: "shard_B", NodeName: "node2", HostAddr: "host2"}, + {ShardName: "shard_A", NodeName: "node3", HostAddr: "host3"}, // duplicate + {ShardName: "shard_C", NodeName: "node4", HostAddr: "host4"}, + }, + want: []string{"shard_A", "shard_B", "shard_C"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ws := types.WriteReplicaSet{AdditionalReplicas: tt.additionalReplicas} + got := ws.AdditionalShards() + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("WriteReplicaSet.AdditionalShards() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestReadReplicaSet_OtherMethods(t *testing.T) { + replicas := []types.Replica{ + {ShardName: "shard_A", NodeName: "node1", HostAddr: "host1:8080"}, + {ShardName: "shard_B", NodeName: "node2", HostAddr: "host2:8080"}, + } + rs := types.ReadReplicaSet{Replicas: replicas} + + t.Run("NodeNames", func(t *testing.T) { + want := []string{"node1", "node2"} + got := rs.NodeNames() + if !reflect.DeepEqual(got, want) { + t.Errorf("NodeNames() = %v, want %v", got, want) + } + }) + + t.Run("HostAddresses", func(t *testing.T) { + want := []string{"host1:8080", "host2:8080"} + got := rs.HostAddresses() + if !reflect.DeepEqual(got, want) { + t.Errorf("HostAddresses() = %v, want %v", got, want) + } + }) + + t.Run("EmptyReplicas", func(t *testing.T) { + if rs.EmptyReplicas() { + t.Error("EmptyReplicas() should return false for non-empty replica set") + } + + emptyRS := types.ReadReplicaSet{Replicas: []types.Replica{}} + if !emptyRS.EmptyReplicas() { + t.Error("EmptyReplicas() should return true for empty replica set") + } + }) +} + +func TestWriteReplicaSet_OtherMethods(t *testing.T) { + replicas := 
[]types.Replica{ + {ShardName: "shard_A", NodeName: "node1", HostAddr: "host1:8080"}, + {ShardName: "shard_B", NodeName: "node2", HostAddr: "host2:8080"}, + } + additionalReplicas := []types.Replica{ + {ShardName: "shard_C", NodeName: "node3", HostAddr: "host3:8080"}, + } + ws := types.WriteReplicaSet{ + Replicas: replicas, + AdditionalReplicas: additionalReplicas, + } + + t.Run("NodeNames", func(t *testing.T) { + want := []string{"node1", "node2"} + got := ws.NodeNames() + if !reflect.DeepEqual(got, want) { + t.Errorf("NodeNames() = %v, want %v", got, want) + } + }) + + t.Run("AdditionalNodeNames", func(t *testing.T) { + want := []string{"node3"} + got := ws.AdditionalNodeNames() + if !reflect.DeepEqual(got, want) { + t.Errorf("AdditionalNodeNames() = %v, want %v", got, want) + } + }) + + t.Run("HostAddresses", func(t *testing.T) { + want := []string{"host1:8080", "host2:8080"} + got := ws.HostAddresses() + if !reflect.DeepEqual(got, want) { + t.Errorf("HostAddresses() = %v, want %v", got, want) + } + }) + + t.Run("AdditionalHostAddresses", func(t *testing.T) { + want := []string{"host3:8080"} + got := ws.AdditionalHostAddresses() + if !reflect.DeepEqual(got, want) { + t.Errorf("AdditionalHostAddresses() = %v, want %v", got, want) + } + }) + + t.Run("IsEmpty", func(t *testing.T) { + if ws.IsEmpty() { + t.Error("IsEmpty() should return false for non-empty replica set") + } + + emptyWS := types.WriteReplicaSet{Replicas: []types.Replica{}} + if !emptyWS.IsEmpty() { + t.Error("IsEmpty() should return true for empty replica set") + } + }) + + t.Run("EmptyAdditionalReplicas", func(t *testing.T) { + if ws.EmptyAdditionalReplicas() { + t.Error("EmptyAdditionalReplicas() should return false when additional replicas exist") + } + + wsNoAdditional := types.WriteReplicaSet{ + Replicas: replicas, + AdditionalReplicas: []types.Replica{}, + } + if !wsNoAdditional.EmptyAdditionalReplicas() { + t.Error("EmptyAdditionalReplicas() should return true when no additional replicas") + } 
+ }) +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/router/types/router_intf.go b/platform/dbops/binaries/weaviate-src/cluster/router/types/router_intf.go new file mode 100644 index 0000000000000000000000000000000000000000..8eb090af0d468694c445e8018f48740aaf23413c --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/router/types/router_intf.go @@ -0,0 +1,134 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package types + +import ( + "fmt" +) + +// Router defines the contract for determining routing plans for reads and writes +// within a cluster. It abstracts the logic to identify read/write Replicas, +// construct routing plans, and access cluster host information including hostnames +// and ip addresses. +type Router interface { + // GetReadWriteReplicasLocation returns the read and write Replicas for a given + // collection. + // + // Parameters: + // - collection: the name of the collection to get Replicas for. + // - shard: the shard identifier (matches the tenant name for multi-tenant collections). + // + // Returns: + // - readReplicas: a replica set serving as read Replicas. + // - writeReplicas: a replica set serving as primary write Replicas. + // - error: if an error occurs while retrieving Replicas. + GetReadWriteReplicasLocation(collection string, tenant string, shard string) (readReplicas ReadReplicaSet, writeReplicas WriteReplicaSet, err error) + + // GetWriteReplicasLocation returns the write Replicas for a given collection. + // + // Parameters: + // - collection: the name of the collection to get write Replicas for. + // - shard: the shard identifier (matches the tenant name for multi-tenant collections). 
+ // + // Returns: + // - writeReplicas: a replica set serving as primary write Replicas. + // - error: if an error occurs while retrieving Replicas. + GetWriteReplicasLocation(collection string, tenant string, shard string) (WriteReplicaSet, error) + + // GetReadReplicasLocation returns the read Replicas for a given collection. + // + // Parameters: + // - collection: the name of the collection to get read Replicas for. + // - shard: the shard identifier (matches the tenant name for multi-tenant collections). + // + // Returns: + // - readReplicas: a replica set serving as read Replicas. + // - error: if an error occurs while retrieving Replicas. + GetReadReplicasLocation(collection string, tenant string, shard string) (ReadReplicaSet, error) + + // BuildRoutingPlanOptions constructs routing plan build options with router-specific tenant handling. + // + // This method creates RoutingPlanBuildOptions configured appropriately for the router type: + // - Single-tenant routers: ignore the tenant parameter and always set tenant to empty string + // - Multi-tenant routers: preserve the tenant parameter as provided + // + // This allows callers to use the same code with different router types without needing to know + // the specific router implementation details. + // + // Parameters: + // - tenant: the tenant identifier to target. For single-tenant routers, this parameter is + // ignored and the resulting options will have an empty tenant. For multi-tenant routers, + // this value is preserved in the resulting options. + // - shard: the shard identifier to target. For multi-tenant collections, this should typically + // match the tenant name due to partitioning constraints. For single-tenant collections, + // this can be empty to target all shards or set to a specific shard name. + // - cl: the desired consistency level for operations using these options. + // - directCandidate: the preferred node name to contact first when executing routing plans. 
+ // If empty, the router will use the local node as the preferred candidate. + // + // Returns: + // - RoutingPlanBuildOptions: configured routing plan build options with router-appropriate + // tenant handling applied. + BuildRoutingPlanOptions(tenant, shard string, cl ConsistencyLevel, directCandidate string) RoutingPlanBuildOptions + + // BuildWriteRoutingPlan constructs a routing plan for a write operation based on the provided options. + // + // Parameters: + // - params: the routing plan build options containing tenant, shard, consistency level, + // and direct candidate preferences for constructing the write routing plan. + // + // Returns: + // - WriteRoutingPlan: a routing plan optimized for write operations. + // - error: if an error occurs while building the routing plan. + BuildWriteRoutingPlan(params RoutingPlanBuildOptions) (WriteRoutingPlan, error) + + // BuildReadRoutingPlan constructs a routing plan for a read operation based on the provided options. + // + // Parameters: + // - params: the routing plan build options containing tenant, shard, consistency level, + // and direct candidate preferences for constructing the read routing plan. + // + // Returns: + // - ReadRoutingPlan: a routing plan optimized for read operations. + // - error: if an error occurs while building the routing plan. + BuildReadRoutingPlan(params RoutingPlanBuildOptions) (ReadRoutingPlan, error) + + // NodeHostname returns the hostname for a given node name. + // + // Parameters: + // - nodeName: the name of the node to get the hostname for. + // + // Returns: + // - hostname: the hostname of the node. + // - ok: true if the hostname was found, false if the node name is unknown or unregistered. + NodeHostname(nodeName string) (string, bool) + + // AllHostnames returns all known hostnames in the cluster. + // + // Returns: + // - hostnames: a slice of all known hostnames; always returns a valid slice, possibly empty. 
+ AllHostnames() []string +} + +// Replica represents a single replica in the system, containing enough information +// to route traffic to it: the node name, shard name, and host address. +type Replica struct { + NodeName string + ShardName string + HostAddr string +} + +// String returns a human-readable representation of a single Replica, +// including node name, shard name, and host address. +func (r Replica) String() string { + return fmt.Sprintf("{node: %q, shard: %q, host: %q}", r.NodeName, r.ShardName, r.HostAddr) +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/router/types/router_plan.go b/platform/dbops/binaries/weaviate-src/cluster/router/types/router_plan.go new file mode 100644 index 0000000000000000000000000000000000000000..393d7cca258595407494f77077fa2f0bec304c7b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/router/types/router_plan.go @@ -0,0 +1,195 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package types + +import ( + "fmt" + + "github.com/sirupsen/logrus" +) + +// RoutingPlanBuildOptions contains parameters used to construct a routing plan +// for either read or write operations. +// +// Fields: +// - Shard: The name of the shard to route to. For multi-tenant collections, this must be the tenant name. +// For single-tenant collections, this should be empty to route to all shards, or optionally set to a specific shard +// if targeting all shards when creating routing plans for reading. +// - Tenant: The tenant name targeted by this routing plan. Expected to be empty and ignored for single-tenant collections. +// - ConsistencyLevel: The desired level of consistency for the operation. +// - DirectCandidateNode: Optional. 
The preferred node to use first when building the routing plan. +// If empty, the local node is used as the default candidate. +type RoutingPlanBuildOptions struct { + Shard string + Tenant string + ConsistencyLevel ConsistencyLevel + DirectCandidateNode string +} + +// String returns a human-readable representation of the RoutingPlanBuildOptions. +// Useful for debugging and logging. +func (o RoutingPlanBuildOptions) String() string { + return fmt.Sprintf( + "RoutingPlanBuildOptions{shard: %q, tenant: %q, consistencyLevel: %s, directCandidateNode: %q}", + o.Shard, o.Tenant, o.ConsistencyLevel, o.DirectCandidateNode, + ) +} + +// ReadRoutingPlan represents the plan for routing a read operation. +// +// Fields: +// - Shard: The (optional) shard targeted by this routing plan. If empty, all relevant shards are targeted. +// - Tenant: The tenant name targeted by this routing plan. Expected to be empty and ignored for single-tenant collections. +// - ReplicaSet: The ordered list of Replicas to contact. +// - ConsistencyLevel: The user-specified consistency level. +// - IntConsistencyLevel: The resolved numeric value for the consistency level. +type ReadRoutingPlan struct { + LocalHostname string + Shard string + Tenant string + ReplicaSet ReadReplicaSet + ConsistencyLevel ConsistencyLevel + IntConsistencyLevel int +} + +// String returns a human-readable representation of the ReadRoutingPlan, +// including shard, consistency level, and list of Replicas. +func (p ReadRoutingPlan) String() string { + return fmt.Sprintf( + "ReadRoutingPlan{shard: %q, tenant: %q, consistencyLevel: %s (%d), Replicas: %v}", + p.Shard, p.Tenant, p.ConsistencyLevel, p.IntConsistencyLevel, p.ReplicaSet, + ) +} + +// WriteRoutingPlan represents the plan for routing a write operation. +// +// Fields: +// - Shard: The shard targeted by this routing plan. For writing, this is required as a write operation +// always targets a specific shard. 
Usually, the shard is determined based on the object's UUID. +// - Tenant: The tenant name targeted by this routing plan. Expected to be empty and ignored for single-tenant collections. +// - ReplicaSet: The ordered list of primary write Replicas. +// Write Replicas will normally also include read Replicas. A node that accepts writes is also eligible to +// serve reads. +// - AdditionalReplicaSet: Any secondary or additional Replicas to include in the write operation. +// - ConsistencyLevel: The user-specified consistency level. +// - IntConsistencyLevel: The resolved numeric value for the consistency level. +type WriteRoutingPlan struct { + Shard string + Tenant string + ReplicaSet WriteReplicaSet + ConsistencyLevel ConsistencyLevel + IntConsistencyLevel int +} + +// String returns a human-readable representation of the WriteRoutingPlan, +// including shard, consistency level, write Replicas, and additional Replicas. +func (p WriteRoutingPlan) String() string { + return fmt.Sprintf( + "WriteRoutingPlan{shard: %q, tenant: %q, consistencyLevel: %s (%d), writeReplicas: %v}", + p.Shard, p.Tenant, p.ConsistencyLevel, p.IntConsistencyLevel, p.ReplicaSet, + ) +} + +// LogFields returns a structured representation of the ReadRoutingPlan for logging purposes. +func (p ReadRoutingPlan) LogFields() logrus.Fields { + tenant := p.Tenant + if tenant == "" { + tenant = "no tenant" + } + return logrus.Fields{ + "shard": p.Shard, + "tenant": tenant, + "read_replica_set": p.ReplicaSet, + "consistency_level": p.ConsistencyLevel, + } +} + +// LogFields returns a structured representation of the WriteRoutingPlan for logging purposes. 
+func (p WriteRoutingPlan) LogFields() logrus.Fields { + tenant := p.Tenant + if tenant == "" { + tenant = "no tenant" + } + return logrus.Fields{ + "shard": p.Shard, + "tenant": tenant, + "write_replica_set": p.ReplicaSet, + "consistency_level": p.ConsistencyLevel, + } +} + +// NodeNames returns the hostnames of the Replicas included in the ReadRoutingPlan. +func (p ReadRoutingPlan) NodeNames() []string { + return p.ReplicaSet.NodeNames() +} + +// HostAddresses returns the host addresses of all Replicas in the ReadRoutingPlan. +func (p ReadRoutingPlan) HostAddresses() []string { + return p.ReplicaSet.HostAddresses() +} + +// Shards returns the logical shard names associated with the Replicas +// in the ReadRoutingPlan. +func (p ReadRoutingPlan) Shards() []string { + return p.ReplicaSet.Shards() +} + +// Replicas returns a list of replicas +func (p ReadRoutingPlan) Replicas() []Replica { + return p.ReplicaSet.Replicas +} + +// HostNames returns the hostnames of the primary write Replicas +// in the WriteRoutingPlan. +func (p WriteRoutingPlan) HostNames() []string { + return p.ReplicaSet.HostAddresses() +} + +// HostAddresses returns the host addresses of the primary write Replicas +// in the WriteRoutingPlan. +func (p WriteRoutingPlan) HostAddresses() []string { + return p.ReplicaSet.HostAddresses() +} + +// Shards returns the logical shard names associated with the primary write +// Replicas in the WriteRoutingPlan. +func (p WriteRoutingPlan) Shards() []string { + return p.ReplicaSet.Shards() +} + +// Replicas returns a list of replicas +func (p WriteRoutingPlan) Replicas() []Replica { + return p.ReplicaSet.Replicas +} + +// AdditionalHostNames returns the hostnames of the additional write Replicas, +// which are not part of the primary ReplicaSet, in the WriteRoutingPlan. 
+func (p WriteRoutingPlan) AdditionalHostNames() []string { + return p.ReplicaSet.AdditionalHostAddresses() +} + +// AdditionalHostAddresses returns the host addresses of the additional write +// Replicas, which are not part of the primary ReplicaSet, in the WriteRoutingPlan. +func (p WriteRoutingPlan) AdditionalHostAddresses() []string { + return p.ReplicaSet.AdditionalHostAddresses() +} + +// AdditionalShards returns the shard names associated with the additional write Replicas +// in the WriteRoutingPlan. +func (p WriteRoutingPlan) AdditionalShards() []string { + return p.ReplicaSet.AdditionalShards() +} + +// AdditionalReplicas returns a list of additional replicas +func (p WriteRoutingPlan) AdditionalReplicas() []Replica { + return p.ReplicaSet.AdditionalReplicas +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/rpc/client.go b/platform/dbops/binaries/weaviate-src/cluster/rpc/client.go new file mode 100644 index 0000000000000000000000000000000000000000..b563919a3f8c3cc49cf417362c335fe630751056 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/rpc/client.go @@ -0,0 +1,231 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package rpc + +import ( + "context" + "fmt" + "sync" + + grpc_sentry "github.com/johnbellone/grpc-middleware-sentry" + "github.com/sirupsen/logrus" + cmd "github.com/weaviate/weaviate/cluster/proto/api" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" +) + +const serviceConfig = ` +{ + "methodConfig": [ + { + "name": [ + { + "service": "weaviate.internal.cluster.ClusterService", "method": "Apply" + }, + { + "service": "weaviate.internal.cluster.ClusterService", "method": "Query" + } + ], + "waitForReady": true, + "retryPolicy": { + "MaxAttempts": 5, + "BackoffMultiplier": 2, + "InitialBackoff": "0.5s", + "MaxBackoff": "15s", + "RetryableStatusCodes": [ + "ABORTED", + "RESOURCE_EXHAUSTED", + "INTERNAL", + "UNAVAILABLE" + ] + } + } + ] +}` + +type rpcAddressResolver interface { + // Address returns the RPC address corresponding to the given Raft address. + Address(raftAddress string) (string, error) +} + +// Client is used for communication with remote nodes in a RAFT cluster +// It wraps the gRPC client to our gRPC server that is running on the raft port on each node +type Client struct { + addrResolver rpcAddressResolver + // connLock is used to ensure that we are trying to establish/close the connection to the leader while no request + // are in progress + connLock sync.Mutex + // leaderRaftAddr is the raft address of the current leader. It is updated at the same time as the leaderConn below + // when leader is changing + leaderRaftAddr string + // leaderConn is the gRPC client to the leader node of the RAFT cluster. It is used for queries that must be sent to + // the leader to have strong read consistency + leaderRpcConn *grpc.ClientConn + // rpcMessageMaxSize is the maximum size allows for gRPC call. As we re-instantiate the client when the leader + // change we store that setting to re-use it. 
We set a custom limit to ensure that big queries that would exceed the
+	// default maximum can still get through
+	rpcMessageMaxSize int
+
+	// sentryEnabled will configure the RPC client to set spans and captures traces using sentry SDK
+	sentryEnabled bool
+
+	// logger is the logger to log client warns etc.
+	logger *logrus.Logger
+}
+
+// NewClient returns a Client using the rpcAddressResolver to resolve raft nodes and configured with rpcMessageMaxSize
+func NewClient(r rpcAddressResolver, rpcMessageMaxSize int, sentryEnabled bool, logger *logrus.Logger) *Client {
+	return &Client{addrResolver: r, rpcMessageMaxSize: rpcMessageMaxSize, sentryEnabled: sentryEnabled, logger: logger}
+}
+
+// Join will contact the node at leaderRaftAddr and try to join this node to the cluster led by leaderRaftAddr using req
+// Returns the server response to the join request
+// Returns an error if an RPC connection to leaderRaftAddr can't be established
+// Returns an error if joining the node fails
+func (cl *Client) Join(ctx context.Context, leaderRaftAddr string, req *cmd.JoinPeerRequest) (*cmd.JoinPeerResponse, error) {
+	conn, err := cl.getConn(ctx, leaderRaftAddr)
+	if err != nil {
+		return nil, err
+	}
+
+	return cmd.NewClusterServiceClient(conn).JoinPeer(ctx, req)
+}
+
+// Notify will contact the node at remoteAddr using the configured resolver and notify it of its readiness to join a
+// cluster using req
+// Returns the server response to the notify request
+// Returns an error if remoteAddr is not resolvable
+// Returns an error if remoteAddr after resolve is not dial-able
+// Returns an error if notifying the node fails. 
Note that Notify will not return an error if the node has notified +// itself already or if the remote node is already bootstrapped +// If the remote node is already bootstrapped/running a cluster, nodes should call Join instead +// Once a remote node has reached the sufficient amount of ready nodes (bootstrap_expect) it will initiate a cluster +// bootstrap process +func (cl *Client) Notify(ctx context.Context, remoteAddr string, req *cmd.NotifyPeerRequest) (*cmd.NotifyPeerResponse, error) { + // Explicitly instantiate a connection here and avoid using cl.leaderRpcConn because notify will be called for each + // remote node we have available to build a RAFT cluster. This connection is short lived to this function only + addr, err := cl.addrResolver.Address(remoteAddr) + if err != nil { + return nil, fmt.Errorf("resolve address: %w", err) + } + + conn, err := grpc.NewClient(addr, grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + return nil, fmt.Errorf("dial: %w", err) + } + defer conn.Close() + + return cmd.NewClusterServiceClient(conn).NotifyPeer(ctx, req) +} + +// Remove will contact the node at leaderRaftAddr and remove the client node from the RAFT cluster using req +// Returns the server response to the remove request +// Returns an error if an RPC connection to leaderRaftAddr can't be established +func (cl *Client) Remove(ctx context.Context, leaderRaftAddr string, req *cmd.RemovePeerRequest) (*cmd.RemovePeerResponse, error) { + conn, err := cl.getConn(ctx, leaderRaftAddr) + if err != nil { + return nil, err + } + + return cmd.NewClusterServiceClient(conn).RemovePeer(ctx, req) +} + +// Apply will contact the node at leaderRaftAddr and send req to be applied in the RAFT store +// Returns the server response to the apply request +// Returns an error if an RPC connection to leaderRaftAddr can't be established +// Returns an error if the apply command fails +func (cl *Client) Apply(ctx context.Context, leaderRaftAddr string, req 
*cmd.ApplyRequest) (*cmd.ApplyResponse, error) { + conn, err := cl.getConn(ctx, leaderRaftAddr) + if err != nil { + return nil, err + } + + return cmd.NewClusterServiceClient(conn).Apply(ctx, req) +} + +// Query will contact the node at leaderRaftAddr and send req to read data in the RAFT store +// Returns the server response to the query request +// Returns an error if an RPC connection to leaderRaftAddr can't be established +// Returns an error if the query command fails +func (cl *Client) Query(ctx context.Context, leaderRaftAddr string, req *cmd.QueryRequest) (*cmd.QueryResponse, error) { + conn, err := cl.getConn(ctx, leaderRaftAddr) + if err != nil { + return nil, err + } + + return cmd.NewClusterServiceClient(conn).Query(ctx, req) +} + +// Close the client and allocated resources +func (cl *Client) Close() { + if cl.leaderRpcConn == nil { + return + } + + if err := cl.leaderRpcConn.Close(); err != nil { + cl.logger.WithFields( + logrus.Fields{ + "error": err, + "leader_addr": cl.leaderRaftAddr, + }, + ).Warn("error closing the leader gRPC connection") + } +} + +// getConn either returns the cached connection in the client to the leader or will instantiate a new one towards +// leaderRaftAddr and close the old one +// Returns the gRPC client connection to leaderRaftAddr +// Returns an error if an RPC connection to leaderRaftAddr can't be established +func (cl *Client) getConn(ctx context.Context, leaderRaftAddr string) (*grpc.ClientConn, error) { + cl.connLock.Lock() + defer cl.connLock.Unlock() + + if cl.leaderRpcConn != nil && leaderRaftAddr == cl.leaderRaftAddr { + return cl.leaderRpcConn, nil + } + + if cl.leaderRpcConn != nil { + if err := cl.leaderRpcConn.Close(); err != nil { + cl.logger.WithFields( + logrus.Fields{ + "error": err, + "closing_on_leader_addr": cl.leaderRaftAddr, + "new_leader_addr": leaderRaftAddr, + }, + ).Warn("error closing the leader gRPC connection") + } + } + + addr, err := cl.addrResolver.Address(leaderRaftAddr) + if err != nil { 
+ return nil, fmt.Errorf("resolve address: %w", err) + } + + options := []grpc.DialOption{ + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithDefaultServiceConfig(serviceConfig), + grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(cl.rpcMessageMaxSize)), + } + + if cl.sentryEnabled { + options = append(options, grpc.WithUnaryInterceptor(grpc_sentry.UnaryClientInterceptor())) + } + + cl.leaderRpcConn, err = grpc.NewClient(addr, options...) + if err != nil { + return nil, fmt.Errorf("dial: %w", err) + } + + cl.leaderRaftAddr = leaderRaftAddr + + return cl.leaderRpcConn, nil +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/rpc/client_test.go b/platform/dbops/binaries/weaviate-src/cluster/rpc/client_test.go new file mode 100644 index 0000000000000000000000000000000000000000..105de87945b3e63b17ab85a413a32565bb6723ca --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/rpc/client_test.go @@ -0,0 +1,76 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package rpc + +import ( + "context" + "errors" + "fmt" + "testing" + + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/require" + cmd "github.com/weaviate/weaviate/cluster/proto/api" + "github.com/weaviate/weaviate/usecases/fakes" +) + +var ErrAny = errors.New("any error") + +func TestClient(t *testing.T) { + t.Parallel() + ctx := context.Background() + + t.Run("Verify error on invalid raft address", func(t *testing.T) { + addr := fmt.Sprintf("localhost:%v", 8013) + c := NewClient(fakes.NewFakeRPCAddressResolver(addr, ErrAny), 1024*1024*1024, false, logrus.StandardLogger()) + _, err := c.Join(ctx, addr, &cmd.JoinPeerRequest{Id: "Node1", Address: addr, Voter: false}) + require.ErrorIs(t, err, ErrAny) + require.ErrorContains(t, err, "resolve") + + _, err = c.Notify(ctx, addr, &cmd.NotifyPeerRequest{Id: "Node1", Address: addr}) + require.ErrorIs(t, err, ErrAny) + require.ErrorContains(t, err, "resolve") + + _, err = c.Remove(ctx, addr, &cmd.RemovePeerRequest{Id: "Node1"}) + require.ErrorIs(t, err, ErrAny) + require.ErrorContains(t, err, "resolve") + + _, err = c.Apply(context.TODO(), addr, &cmd.ApplyRequest{Type: cmd.ApplyRequest_TYPE_DELETE_CLASS, Class: "C"}) + require.ErrorIs(t, err, ErrAny) + require.ErrorContains(t, err, "resolve") + + _, err = c.Query(ctx, addr, &cmd.QueryRequest{Type: cmd.QueryRequest_TYPE_GET_CLASSES}) + require.ErrorIs(t, err, ErrAny) + require.ErrorContains(t, err, "resolve") + }) + + t.Run("Verify error on invalid address dial", func(t *testing.T) { + // invalid control character in URL + badAddr := string(byte(0)) + c := NewClient(fakes.NewFakeRPCAddressResolver(badAddr, nil), 1024*1024*1024, false, logrus.StandardLogger()) + + _, err := c.Join(ctx, badAddr, &cmd.JoinPeerRequest{Id: "Node1", Address: "abc", Voter: false}) + require.ErrorContains(t, err, "dial") + + _, err = c.Notify(ctx, badAddr, &cmd.NotifyPeerRequest{Id: "Node1", Address: badAddr}) + require.ErrorContains(t, err, 
"dial") + + _, err = c.Remove(ctx, badAddr, &cmd.RemovePeerRequest{Id: "Node1"}) + require.ErrorContains(t, err, "dial") + + _, err = c.Apply(context.TODO(), badAddr, &cmd.ApplyRequest{Type: cmd.ApplyRequest_TYPE_DELETE_CLASS, Class: "C"}) + require.ErrorContains(t, err, "dial") + + _, err = c.Query(ctx, badAddr, &cmd.QueryRequest{Type: cmd.QueryRequest_TYPE_GET_CLASSES}) + require.ErrorContains(t, err, "dial") + }) +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/rpc/server.go b/platform/dbops/binaries/weaviate-src/cluster/rpc/server.go new file mode 100644 index 0000000000000000000000000000000000000000..76a1b5490c0a5716d119d5e4d7e0f0c8b3fb977b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/rpc/server.go @@ -0,0 +1,203 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package rpc + +import ( + "context" + "errors" + "fmt" + "net" + "strings" + + enterrors "github.com/weaviate/weaviate/entities/errors" + "github.com/weaviate/weaviate/usecases/monitoring" + + grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" + grpc_sentry "github.com/johnbellone/grpc-middleware-sentry" + "github.com/sirupsen/logrus" + cmd "github.com/weaviate/weaviate/cluster/proto/api" + "github.com/weaviate/weaviate/cluster/schema" + "github.com/weaviate/weaviate/cluster/types" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +type raftPeers interface { + Join(id string, addr string, voter bool) error + Notify(id string, addr string) error + Remove(id string) error + Leader() string +} + +type raftFSM interface { + Execute(ctx context.Context, cmd *cmd.ApplyRequest) (uint64, error) + Query(ctx context.Context, req *cmd.QueryRequest) (*cmd.QueryResponse, error) +} + +type Server struct { + raftPeers raftPeers + raftFSM raftFSM + listenAddress string + grpcMessageMaxSize int + log *logrus.Logger + sentryEnabled bool + + grpcServer *grpc.Server + metrics *monitoring.GRPCServerMetrics +} + +// NewServer returns the Server implementing the RPC interface for RAFT peers management and execute/query commands. +// The server must subsequently be started with Open(). +// The server will be configure the gRPC service with grpcMessageMaxSize. +func NewServer( + raftPeers raftPeers, + raftFSM raftFSM, + listenAddress string, + grpcMessageMaxSize int, + sentryEnabled bool, + metrics *monitoring.GRPCServerMetrics, + log *logrus.Logger, +) *Server { + return &Server{ + raftPeers: raftPeers, + raftFSM: raftFSM, + listenAddress: listenAddress, + log: log, + grpcMessageMaxSize: grpcMessageMaxSize, + sentryEnabled: sentryEnabled, + metrics: metrics, + } +} + +// JoinPeer will notify the RAFT cluster that a new peer is joining the cluster. 
+// Returns an error and the current raft leader if joining fails. +func (s *Server) JoinPeer(_ context.Context, req *cmd.JoinPeerRequest) (*cmd.JoinPeerResponse, error) { + err := s.raftPeers.Join(req.Id, req.Address, req.Voter) + if err != nil { + return &cmd.JoinPeerResponse{Leader: s.raftPeers.Leader()}, toRPCError(err) + } + + return &cmd.JoinPeerResponse{}, nil +} + +// RemovePeer will notify the RAFT cluster that a peer is removed from the cluster. +// Returns an error and the current raft leader if removal fails. +func (s *Server) RemovePeer(_ context.Context, req *cmd.RemovePeerRequest) (*cmd.RemovePeerResponse, error) { + err := s.raftPeers.Remove(req.Id) + if err != nil { + return &cmd.RemovePeerResponse{Leader: s.raftPeers.Leader()}, toRPCError(err) + } + return &cmd.RemovePeerResponse{}, nil +} + +// NotifyPeer will notify the RAFT cluster that a peer has notified that it is ready to be joined. +// Returns an error if notifying fails. +func (s *Server) NotifyPeer(_ context.Context, req *cmd.NotifyPeerRequest) (*cmd.NotifyPeerResponse, error) { + return &cmd.NotifyPeerResponse{}, toRPCError(s.raftPeers.Notify(req.Id, req.Address)) +} + +// Apply will update the RAFT FSM representation to apply req. +// Returns the FSM version of that change. +// Returns an error and the current raft leader if applying fails. +func (s *Server) Apply(ctx context.Context, req *cmd.ApplyRequest) (*cmd.ApplyResponse, error) { + v, err := s.raftFSM.Execute(ctx, req) + if err != nil { + return &cmd.ApplyResponse{Leader: s.raftPeers.Leader()}, toRPCError(err) + } + return &cmd.ApplyResponse{Version: v}, nil +} + +// Query will read the RAFT FSM schema representation using req. +// Returns the result of the query. +// Returns an error if querying fails. 
+func (s *Server) Query(ctx context.Context, req *cmd.QueryRequest) (*cmd.QueryResponse, error) {
+	resp, err := s.raftFSM.Query(ctx, req)
+	if err != nil {
+		return &cmd.QueryResponse{}, toRPCError(err)
+	}
+
+	return resp, nil
+}
+
+// Leader returns the current leader of the RAFT cluster.
+func (s *Server) Leader() string {
+	return s.raftPeers.Leader()
+}
+
+// Open starts the server and registers it as the cluster service server.
+// Returns asynchronously once the server has started.
+// Returns an error if the configured listenAddress is invalid.
+// Returns an error if the configured listenAddress is un-usable to listen on.
+func (s *Server) Open() error {
+	s.log.WithField("address", s.listenAddress).Info("starting cloud rpc server ...")
+	if s.listenAddress == "" {
+		return fmt.Errorf("address of rpc server cannot be empty")
+	}
+
+	listener, err := net.Listen("tcp", s.listenAddress)
+	if err != nil {
+		return fmt.Errorf("server tcp net.listen: %w", err)
+	}
+
+	var options []grpc.ServerOption
+	options = append(options, grpc.MaxRecvMsgSize(s.grpcMessageMaxSize))
+	if s.sentryEnabled {
+		options = append(options,
+			grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(
+				grpc_sentry.UnaryServerInterceptor(),
+			)))
+	}
+
+	if s.metrics != nil {
+		options = append(options, monitoring.InstrumentGrpc(s.metrics)...)
+	}
+
+	s.grpcServer = grpc.NewServer(options...)
+	cmd.RegisterClusterServiceServer(s.grpcServer, s)
+	enterrors.GoWrapper(func() {
+		if err := s.grpcServer.Serve(listener); err != nil {
+			s.log.WithError(err).Error("serving incoming requests")
+			panic("error accepting incoming requests")
+		}
+	}, s.log)
+	return nil
+}
+
+// Close closes the server and frees any used resources.
+func (s *Server) Close() {
+	if s.grpcServer != nil {
+		s.grpcServer.Stop()
+	}
+}
+
+// toRPCError returns a gRPC error with the right error code based on the error. 
+func toRPCError(err error) error { + if err == nil { + return nil + } + + var ec codes.Code + switch { + case errors.Is(err, types.ErrNotLeader), errors.Is(err, types.ErrLeaderNotFound): + ec = codes.ResourceExhausted + case errors.Is(err, types.ErrNotOpen): + ec = codes.Unavailable + case errors.Is(err, schema.ErrMTDisabled): + ec = codes.FailedPrecondition + case strings.Contains(err.Error(), types.ErrNotFound.Error()): + ec = codes.NotFound + default: + ec = codes.Internal + } + return status.Error(ec, err.Error()) +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/rpc/server_test.go b/platform/dbops/binaries/weaviate-src/cluster/rpc/server_test.go new file mode 100644 index 0000000000000000000000000000000000000000..71919a75c69e571e77bbce7b99662fd267b478eb --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/rpc/server_test.go @@ -0,0 +1,415 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package rpc + +import ( + "context" + "fmt" + "net" + "testing" + + "github.com/prometheus/client_golang/prometheus" + "github.com/sirupsen/logrus" + logrustest "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + cmd "github.com/weaviate/weaviate/cluster/proto/api" + "github.com/weaviate/weaviate/cluster/types" + "github.com/weaviate/weaviate/cluster/utils" + "github.com/weaviate/weaviate/usecases/fakes" + "github.com/weaviate/weaviate/usecases/monitoring" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +const raftGrpcMessageMaxSize = 1024 * 1024 * 1024 + +func TestServerNewError(t *testing.T) { + sm := monitoring.NewGRPCServerMetrics("rpc_test", prometheus.NewPedanticRegistry()) + + var ( + addr = fmt.Sprintf("localhost:%v", utils.MustGetFreeTCPPort()) + members = &MockMembers{leader: addr} + executor = &MockExecutor{} + logger, _ = logrustest.NewNullLogger() + ) + + t.Run("Empty server address", func(t *testing.T) { + srv := NewServer(members, executor, "", raftGrpcMessageMaxSize, false, sm, logger) + assert.NotNil(t, srv.Open()) + }) + + t.Run("Invalid IP", func(t *testing.T) { + srv := NewServer(members, executor, "abc", raftGrpcMessageMaxSize, false, sm, logger) + netErr := &net.OpError{} + assert.ErrorAs(t, srv.Open(), &netErr) + }) +} + +func TestRaftRelatedRPC(t *testing.T) { + sm := monitoring.NewGRPCServerMetrics("rpc_test", prometheus.NewPedanticRegistry()) + + tests := []struct { + name string + members *MockMembers + executor *MockExecutor + testFunc func(t *testing.T, leaderAddr string, members *MockMembers, executor *MockExecutor) + }{ + { + name: "Join leader not found", + members: &MockMembers{errJoin: types.ErrLeaderNotFound}, + executor: &MockExecutor{}, + testFunc: func(t *testing.T, leaderAddr string, members *MockMembers, executor *MockExecutor) { + // Setup var, client and server + logger, _ := logrustest.NewNullLogger() + ctx := 
context.Background() + server := NewServer(members, executor, leaderAddr, raftGrpcMessageMaxSize, false, sm, logger) + assert.Nil(t, server.Open()) + defer server.Close() + client := NewClient(fakes.NewFakeRPCAddressResolver(leaderAddr, nil), raftGrpcMessageMaxSize, false, logrus.StandardLogger()) + defer client.Close() + + _, err := client.Join(ctx, leaderAddr, &cmd.JoinPeerRequest{Id: "Node1", Address: leaderAddr, Voter: false}) + assert.NotNil(t, err) + st, ok := status.FromError(err) + assert.True(t, ok) + assert.Equal(t, st.Code(), codes.ResourceExhausted) + assert.ErrorContains(t, st.Err(), types.ErrLeaderNotFound.Error()) + }, + }, + { + name: "Join success", + members: &MockMembers{}, + executor: &MockExecutor{}, + testFunc: func(t *testing.T, leaderAddr string, members *MockMembers, executor *MockExecutor) { + // Setup var, client and server + logger, _ := logrustest.NewNullLogger() + ctx := context.Background() + server := NewServer(members, executor, leaderAddr, raftGrpcMessageMaxSize, false, sm, logger) + assert.Nil(t, server.Open()) + defer server.Close() + client := NewClient(fakes.NewFakeRPCAddressResolver(leaderAddr, nil), raftGrpcMessageMaxSize, false, logrus.StandardLogger()) + defer client.Close() + + _, err := client.Join(ctx, leaderAddr, &cmd.JoinPeerRequest{Id: "Node1", Address: leaderAddr, Voter: false}) + assert.Nil(t, err) + }, + }, + { + name: "Notify members error", + members: &MockMembers{errNotify: types.ErrNotOpen}, + executor: &MockExecutor{}, + testFunc: func(t *testing.T, leaderAddr string, members *MockMembers, executor *MockExecutor) { + // Setup var, client and server + logger, _ := logrustest.NewNullLogger() + ctx := context.Background() + server := NewServer(members, executor, leaderAddr, raftGrpcMessageMaxSize, false, sm, logger) + assert.Nil(t, server.Open()) + defer server.Close() + client := NewClient(fakes.NewFakeRPCAddressResolver(leaderAddr, nil), raftGrpcMessageMaxSize, false, logrus.StandardLogger()) + defer 
client.Close() + + _, err := client.Notify(ctx, leaderAddr, &cmd.NotifyPeerRequest{Id: "Node1", Address: leaderAddr}) + assert.NotNil(t, err) + st, ok := status.FromError(err) + assert.True(t, ok) + assert.Equal(t, st.Code(), codes.Unavailable) + assert.ErrorContains(t, st.Err(), types.ErrNotOpen.Error()) + }, + }, + { + name: "Notify success", + members: &MockMembers{}, + executor: &MockExecutor{}, + testFunc: func(t *testing.T, leaderAddr string, members *MockMembers, executor *MockExecutor) { + // Setup var, client and server + ctx := context.Background() + logger, _ := logrustest.NewNullLogger() + server := NewServer(members, executor, leaderAddr, raftGrpcMessageMaxSize, false, sm, logger) + assert.Nil(t, server.Open()) + defer server.Close() + client := NewClient(fakes.NewFakeRPCAddressResolver(leaderAddr, nil), raftGrpcMessageMaxSize, false, logrus.StandardLogger()) + defer client.Close() + + _, err := client.Notify(ctx, leaderAddr, &cmd.NotifyPeerRequest{Id: "Node1", Address: leaderAddr}) + assert.Nil(t, err) + }, + }, + { + name: "Remove members error", + members: &MockMembers{errRemove: types.ErrNotLeader}, + executor: &MockExecutor{}, + testFunc: func(t *testing.T, leaderAddr string, members *MockMembers, executor *MockExecutor) { + // Setup var, client and server + ctx := context.Background() + logger, _ := logrustest.NewNullLogger() + server := NewServer(members, executor, leaderAddr, raftGrpcMessageMaxSize, false, sm, logger) + assert.Nil(t, server.Open()) + defer server.Close() + client := NewClient(fakes.NewFakeRPCAddressResolver(leaderAddr, nil), raftGrpcMessageMaxSize, false, logrus.StandardLogger()) + defer client.Close() + + _, err := client.Remove(ctx, leaderAddr, &cmd.RemovePeerRequest{Id: "node1"}) + assert.NotNil(t, err) + st, ok := status.FromError(err) + assert.True(t, ok) + assert.Equal(t, st.Code(), codes.ResourceExhausted) + assert.ErrorContains(t, st.Err(), types.ErrNotLeader.Error()) + }, + }, + { + name: "Remove success", + members: 
&MockMembers{}, + executor: &MockExecutor{}, + testFunc: func(t *testing.T, leaderAddr string, members *MockMembers, executor *MockExecutor) { + // Setup var, client and server + ctx := context.Background() + logger, _ := logrustest.NewNullLogger() + server := NewServer(members, executor, leaderAddr, raftGrpcMessageMaxSize, false, sm, logger) + assert.Nil(t, server.Open()) + defer server.Close() + client := NewClient(fakes.NewFakeRPCAddressResolver(leaderAddr, nil), raftGrpcMessageMaxSize, false, logrus.StandardLogger()) + defer client.Close() + + _, err := client.Remove(ctx, leaderAddr, &cmd.RemovePeerRequest{Id: "node1"}) + assert.Nil(t, err) + }, + }, + } + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + leaderAddr := fmt.Sprintf("localhost:%v", utils.MustGetFreeTCPPort()) + test.members.leader = leaderAddr + test.testFunc(t, leaderAddr, test.members, test.executor) + }) + } +} + +func TestQueryEndpoint(t *testing.T) { + sm := monitoring.NewGRPCServerMetrics("rpc_test", prometheus.NewPedanticRegistry()) + + tests := []struct { + name string + testFunc func(t *testing.T, leaderAddr string, members *MockMembers, executor *MockExecutor) + }{ + { + name: "Query success", + testFunc: func(t *testing.T, leaderAddr string, members *MockMembers, executor *MockExecutor) { + // Setup var, client and server + ctx := context.Background() + logger, _ := logrustest.NewNullLogger() + server := NewServer(members, executor, leaderAddr, raftGrpcMessageMaxSize, false, sm, logger) + assert.Nil(t, server.Open()) + defer server.Close() + client := NewClient(fakes.NewFakeRPCAddressResolver(leaderAddr, nil), raftGrpcMessageMaxSize, false, logrus.StandardLogger()) + defer client.Close() + + _, err := client.Query(ctx, leaderAddr, &cmd.QueryRequest{Type: cmd.QueryRequest_TYPE_GET_CLASSES}) + assert.Nil(t, err) + }, + }, + { + name: "Query verify retry mechanism on leader not found", + testFunc: func(t *testing.T, leaderAddr string, members 
*MockMembers, executor *MockExecutor) { + // Setup var, client and server + ctx := context.Background() + logger, _ := logrustest.NewNullLogger() + server := NewServer(members, executor, leaderAddr, raftGrpcMessageMaxSize, false, sm, logger) + assert.Nil(t, server.Open()) + defer server.Close() + client := NewClient(fakes.NewFakeRPCAddressResolver(leaderAddr, nil), raftGrpcMessageMaxSize, false, logrus.StandardLogger()) + defer client.Close() + + n := 0 + executor.qf = func(*cmd.QueryRequest) (*cmd.QueryResponse, error) { + n++ + if n < 2 { + return &cmd.QueryResponse{}, types.ErrLeaderNotFound + } + return &cmd.QueryResponse{}, nil + } + + _, err := client.Query(ctx, leaderAddr, &cmd.QueryRequest{Type: cmd.QueryRequest_TYPE_GET_CLASSES}) + assert.Nil(t, err) + assert.Greater(t, n, 1) + }, + }, + { + name: "Query leader not found", + testFunc: func(t *testing.T, leaderAddr string, members *MockMembers, executor *MockExecutor) { + // Setup var, client and server + ctx := context.Background() + logger, _ := logrustest.NewNullLogger() + server := NewServer(members, executor, leaderAddr, raftGrpcMessageMaxSize, false, sm, logger) + assert.Nil(t, server.Open()) + defer server.Close() + client := NewClient(fakes.NewFakeRPCAddressResolver(leaderAddr, nil), raftGrpcMessageMaxSize, false, logrus.StandardLogger()) + defer client.Close() + + executor.qf = func(*cmd.QueryRequest) (*cmd.QueryResponse, error) { + return &cmd.QueryResponse{}, types.ErrLeaderNotFound + } + _, err := client.Query(ctx, leaderAddr, &cmd.QueryRequest{Type: cmd.QueryRequest_TYPE_GET_CLASSES}) + assert.NotNil(t, err) + st, ok := status.FromError(err) + assert.True(t, ok) + assert.Equal(t, st.Code(), codes.ResourceExhausted) + assert.ErrorContains(t, st.Err(), types.ErrLeaderNotFound.Error()) + }, + }, + } + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + leaderAddr := fmt.Sprintf("localhost:%v", utils.MustGetFreeTCPPort()) + members := &MockMembers{ + leader: 
leaderAddr, + } + executor := &MockExecutor{ + qf: func(qr *cmd.QueryRequest) (*cmd.QueryResponse, error) { return nil, nil }, + ef: func() error { return nil }, + } + test.testFunc(t, leaderAddr, members, executor) + }) + } +} + +func TestApply(t *testing.T) { + sm := monitoring.NewGRPCServerMetrics("rpc_test", prometheus.NewPedanticRegistry()) + + tests := []struct { + name string + members *MockMembers + executor *MockExecutor + testFunc func(t *testing.T, leaderAddr string, members *MockMembers, executor *MockExecutor) + }{ + { + name: "Apply error on leader not found", + members: &MockMembers{}, + executor: &MockExecutor{ + ef: func() error { + return types.ErrLeaderNotFound + }, + }, + testFunc: func(t *testing.T, leaderAddr string, members *MockMembers, executor *MockExecutor) { + // Setup var, client and server + logger, _ := logrustest.NewNullLogger() + server := NewServer(members, executor, leaderAddr, raftGrpcMessageMaxSize, false, sm, logger) + assert.Nil(t, server.Open()) + defer server.Close() + client := NewClient(fakes.NewFakeRPCAddressResolver(leaderAddr, nil), raftGrpcMessageMaxSize, false, logrus.StandardLogger()) + defer client.Close() + + _, err := client.Apply(context.TODO(), leaderAddr, &cmd.ApplyRequest{Type: cmd.ApplyRequest_TYPE_DELETE_CLASS, Class: "C"}) + assert.NotNil(t, err) + st, ok := status.FromError(err) + assert.True(t, ok) + assert.Equal(t, st.Code(), codes.ResourceExhausted) + assert.ErrorContains(t, st.Err(), types.ErrLeaderNotFound.Error()) + }, + }, + { + name: "Apply verify retry", + members: &MockMembers{}, + executor: &MockExecutor{}, + testFunc: func(t *testing.T, leaderAddr string, members *MockMembers, executor *MockExecutor) { + // Setup var, client and server + logger, _ := logrustest.NewNullLogger() + server := NewServer(members, executor, leaderAddr, raftGrpcMessageMaxSize, false, sm, logger) + assert.Nil(t, server.Open()) + defer server.Close() + client := NewClient(fakes.NewFakeRPCAddressResolver(leaderAddr, nil), 
raftGrpcMessageMaxSize, false, logrus.StandardLogger()) + defer client.Close() + + n := 0 + executor.ef = func() error { + n++ + if n < 2 { + return types.ErrLeaderNotFound + } + return nil + } + + _, err := client.Apply(context.TODO(), leaderAddr, &cmd.ApplyRequest{Type: cmd.ApplyRequest_TYPE_DELETE_CLASS, Class: "C"}) + assert.Nil(t, err) + assert.Greater(t, n, 1) + }, + }, + { + name: "Apply success", + members: &MockMembers{}, + executor: &MockExecutor{}, + testFunc: func(t *testing.T, leaderAddr string, members *MockMembers, executor *MockExecutor) { + // Setup var, client and server + logger, _ := logrustest.NewNullLogger() + server := NewServer(members, executor, leaderAddr, raftGrpcMessageMaxSize, false, sm, logger) + assert.Nil(t, server.Open()) + defer server.Close() + client := NewClient(fakes.NewFakeRPCAddressResolver(leaderAddr, nil), raftGrpcMessageMaxSize, false, logrus.StandardLogger()) + defer client.Close() + + _, err := client.Apply(context.TODO(), leaderAddr, &cmd.ApplyRequest{Type: cmd.ApplyRequest_TYPE_DELETE_CLASS, Class: "C"}) + assert.Nil(t, err) + }, + }, + } + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + leaderAddr := fmt.Sprintf("localhost:%v", utils.MustGetFreeTCPPort()) + test.members.leader = leaderAddr + test.testFunc(t, leaderAddr, test.members, test.executor) + }) + } +} + +type MockMembers struct { + leader string + errJoin error + errNotify error + errRemove error +} + +func (m *MockMembers) Join(id string, addr string, voter bool) error { + return m.errJoin +} + +func (m *MockMembers) Notify(id string, addr string) error { + return m.errNotify +} + +func (m *MockMembers) Remove(id string) error { + return m.errRemove +} + +func (m *MockMembers) Leader() string { + return m.leader +} + +type MockExecutor struct { + ef func() error + qf func(*cmd.QueryRequest) (*cmd.QueryResponse, error) +} + +func (m *MockExecutor) Execute(_ context.Context, cmd *cmd.ApplyRequest) (uint64, error) { + if 
m.ef != nil { + return 0, m.ef() + } + return 0, nil +} + +func (m *MockExecutor) Query(ctx context.Context, req *cmd.QueryRequest) (*cmd.QueryResponse, error) { + if m.qf != nil { + return m.qf(req) + } + return &cmd.QueryResponse{}, nil +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/schema/manager.go b/platform/dbops/binaries/weaviate-src/cluster/schema/manager.go new file mode 100644 index 0000000000000000000000000000000000000000..70db9591e654e486f21dd5ca46650cf1d15eb35a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/schema/manager.go @@ -0,0 +1,647 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package schema + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "slices" + + "github.com/prometheus/client_golang/prometheus" + "github.com/sirupsen/logrus" + command "github.com/weaviate/weaviate/cluster/proto/api" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/sharding" + gproto "google.golang.org/protobuf/proto" +) + +var ( + ErrBadRequest = errors.New("bad request") + errDB = errors.New("updating db") + ErrSchema = errors.New("updating schema") +) + +type replicationFSM interface { + HasOngoingReplication(collection string, shard string, replica string) bool + DeleteReplicationsByCollection(collection string) error + DeleteReplicationsByTenants(collection string, tenants []string) error + SetUnCancellable(id uint64) error +} + +type SchemaManager struct { + schema *schema + db Indexer + parser Parser + log *logrus.Logger + replicationFSM replicationFSM +} + +func NewSchemaManager(nodeId string, db Indexer, parser Parser, reg prometheus.Registerer, log *logrus.Logger) *SchemaManager { + return &SchemaManager{ + schema: 
NewSchema(nodeId, db, reg), + db: db, + parser: parser, + log: log, + } +} + +func (s *SchemaManager) NewSchemaReader() SchemaReader { + return NewSchemaReader( + s.schema, + // Pass a versioned reader that will ignore all version and always return valid, we want to read the latest + // state and not have to wait on a version + VersionedSchemaReader{ + schema: s.schema, + WaitForUpdate: func(context.Context, uint64) error { return nil }, + }, + ) +} + +func (s *SchemaManager) NewSchemaReaderWithWaitFunc(f func(context.Context, uint64) error) SchemaReader { + return NewSchemaReader( + s.schema, + VersionedSchemaReader{ + schema: s.schema, + WaitForUpdate: f, + }, + ) +} + +func (s *SchemaManager) SetIndexer(idx Indexer) { + s.db = idx + s.schema.shardReader = idx +} + +func (s *SchemaManager) SetReplicationFSM(fsm replicationFSM) { + s.replicationFSM = fsm +} + +func (s *SchemaManager) SchemaSnapshot() ([]byte, error) { + var buf bytes.Buffer + + err := json.NewEncoder(&buf).Encode(s.schema.MetaClasses()) + return buf.Bytes(), err +} + +func (s *SchemaManager) AliasSnapshot() ([]byte, error) { + var buf bytes.Buffer + + err := json.NewEncoder(&buf).Encode(s.schema.aliases) + return buf.Bytes(), err +} + +func (s *SchemaManager) Restore(data []byte, parser Parser) error { + return s.schema.Restore(data, parser) +} + +func (s *SchemaManager) RestoreAliases(data []byte) error { + return s.schema.RestoreAlias(data) +} + +func (s *SchemaManager) RestoreLegacy(data []byte, parser Parser) error { + return s.schema.RestoreLegacy(data, parser) +} + +func (s *SchemaManager) PreApplyFilter(req *command.ApplyRequest) error { + classInfo := s.schema.ClassInfo(req.Class) + + // Discard restoring a class if it already exists + if req.Type == command.ApplyRequest_TYPE_RESTORE_CLASS && classInfo.Exists { + s.log.WithField("class", req.Class).Info("class already restored") + return fmt.Errorf("class name %s already exists", req.Class) + } + + // Discard adding class if the name 
already exists or a similar one exists + if req.Type == command.ApplyRequest_TYPE_ADD_CLASS { + other, isAlias := s.schema.ClassEqual(req.Class) + item := "class" + if isAlias { + item = "alias" + } + + if other == req.Class { + return fmt.Errorf("%s name %s already exists", item, req.Class) + } else if other != "" { + return fmt.Errorf("%w: found similar %s %q", ErrClassExists, item, other) + } + } + + return nil +} + +func (s *SchemaManager) Load(ctx context.Context, nodeID string) error { + if err := s.db.Open(ctx); err != nil { + return err + } + return nil +} + +func (s *SchemaManager) ReloadDBFromSchema() { + classes := s.schema.MetaClasses() + + cs := make([]command.UpdateClassRequest, len(classes)) + i := 0 + for _, v := range classes { + migratePropertiesIfNecessary(&v.Class) + cs[i] = command.UpdateClassRequest{Class: &v.Class, State: &v.Sharding} + i++ + } + s.db.TriggerSchemaUpdateCallbacks() + s.log.Info("reload local db: update schema ...") + s.db.ReloadLocalDB(context.Background(), cs) +} + +func (s *SchemaManager) Close(ctx context.Context) (err error) { + return s.db.Close(ctx) +} + +func (s *SchemaManager) AddClass(cmd *command.ApplyRequest, nodeID string, schemaOnly bool, enableSchemaCallback bool) error { + req := command.AddClassRequest{} + // dupa + if err := json.Unmarshal(cmd.SubCommand, &req); err != nil { + return fmt.Errorf("%w: %w", ErrBadRequest, err) + } + if req.State == nil { + return fmt.Errorf("%w: nil sharding state", ErrBadRequest) + } + if err := s.parser.ParseClass(req.Class); err != nil { + return fmt.Errorf("%w: parsing class: %w", ErrBadRequest, err) + } + req.State.SetLocalName(nodeID) + // We need to make a copy of the sharding state to ensure that the state stored in the internal schema has no + // references to. As we will make modification to it to reflect change in the sharding state (adding/removing + // tenant) we don't want another goroutine holding a pointer to it and finding issues with concurrent read/writes. 
+ shardingStateCopy := req.State.DeepCopy() + return s.apply( + applyOp{ + op: cmd.GetType().String(), + updateSchema: func() error { return s.schema.addClass(req.Class, &shardingStateCopy, cmd.Version) }, + updateStore: func() error { return s.db.AddClass(req) }, + schemaOnly: schemaOnly, + enableSchemaCallback: enableSchemaCallback, + }, + ) +} + +func (s *SchemaManager) RestoreClass(cmd *command.ApplyRequest, nodeID string, schemaOnly bool, enableSchemaCallback bool) error { + req := command.AddClassRequest{} + if err := json.Unmarshal(cmd.SubCommand, &req); err != nil { + return fmt.Errorf("%w: %w", ErrBadRequest, err) + } + if req.State == nil { + return fmt.Errorf("%w: nil sharding state", ErrBadRequest) + } + if err := s.parser.ParseClass(req.Class); err != nil { + return fmt.Errorf("%w: parsing class: %w", ErrBadRequest, err) + } + req.State.SetLocalName(nodeID) + + if err := s.db.RestoreClassDir(cmd.Class); err != nil { + s.log.WithField("class", cmd.Class).WithError(err). + Error("restore class directory from backup") + // continue since we need to add class to the schema anyway + } + + return s.apply( + applyOp{ + op: cmd.GetType().String(), + updateSchema: func() error { return s.schema.addClass(req.Class, req.State, cmd.Version) }, + updateStore: func() error { return s.db.AddClass(req) }, + schemaOnly: schemaOnly, + enableSchemaCallback: enableSchemaCallback, + }, + ) +} + +// ReplaceStatesNodeName it update the node name inside sharding states. +// WARNING: this shall be used in one node cluster environments only. 
+// because it will replace the shard node name if the node name got updated +// only if the replication factor is 1, otherwise it's no-op +func (s *SchemaManager) ReplaceStatesNodeName(new string) { + s.schema.replaceStatesNodeName(new) +} + +// UpdateClass modifies the vectors and inverted indexes associated with a class +// Other class properties are handled by separate functions +func (s *SchemaManager) UpdateClass(cmd *command.ApplyRequest, nodeID string, schemaOnly bool, enableSchemaCallback bool) error { + req := command.UpdateClassRequest{} + if err := json.Unmarshal(cmd.SubCommand, &req); err != nil { + return fmt.Errorf("%w: %w", ErrBadRequest, err) + } + if req.State != nil { + req.State.SetLocalName(nodeID) + } + + update := func(meta *metaClass) error { + // Ensure that if non-default values for properties is stored in raft we fix them before processing an update to + // avoid triggering diff on properties and therefore discarding a legitimate update. + migratePropertiesIfNecessary(&meta.Class) + u, err := s.parser.ParseClassUpdate(&meta.Class, req.Class) + if err != nil { + return fmt.Errorf("%w :parse class update: %w", ErrBadRequest, err) + } + meta.Class.VectorIndexConfig = u.VectorIndexConfig + meta.Class.InvertedIndexConfig = u.InvertedIndexConfig + meta.Class.VectorConfig = u.VectorConfig + meta.Class.ReplicationConfig = u.ReplicationConfig + meta.Class.MultiTenancyConfig = u.MultiTenancyConfig + meta.Class.Description = u.Description + meta.Class.Properties = u.Properties + meta.ClassVersion = cmd.Version + if req.State != nil { + meta.Sharding = *req.State + } + return nil + } + + return s.apply( + applyOp{ + op: cmd.GetType().String(), + updateSchema: func() error { return s.schema.updateClass(req.Class.Class, update) }, + updateStore: func() error { return s.db.UpdateClass(req) }, + schemaOnly: schemaOnly, + enableSchemaCallback: enableSchemaCallback, + }, + ) +} + +func (s *SchemaManager) DeleteClass(cmd *command.ApplyRequest, schemaOnly 
bool, enableSchemaCallback bool) error { + var hasFrozen bool + tenants, err := s.schema.getTenants(cmd.Class, nil) + if err != nil { + hasFrozen = false + } + + for _, t := range tenants { + if t.ActivityStatus == models.TenantActivityStatusFROZEN || + t.ActivityStatus == models.TenantActivityStatusFREEZING { + hasFrozen = true + break + } + } + + return s.apply( + applyOp{ + op: cmd.GetType().String(), + updateSchema: func() error { s.schema.deleteClass(cmd.Class); return nil }, + updateStore: func() error { + if s.replicationFSM == nil { + return fmt.Errorf("replication deleter is not set, this should never happen") + } else if err := s.replicationFSM.DeleteReplicationsByCollection(cmd.Class); err != nil { + // If there is an error deleting the replications then we log it but make sure not to block the deletion of the class from a UX PoV + s.log.WithField("error", err).WithField("class", cmd.Class).Error("could not delete replication operations for deleted class") + } + return s.db.DeleteClass(cmd.Class, hasFrozen) + }, + schemaOnly: schemaOnly, + enableSchemaCallback: enableSchemaCallback, + }, + ) +} + +func (s *SchemaManager) AddProperty(cmd *command.ApplyRequest, schemaOnly bool, enableSchemaCallback bool) error { + req := command.AddPropertyRequest{} + if err := json.Unmarshal(cmd.SubCommand, &req); err != nil { + return fmt.Errorf("%w: %w", ErrBadRequest, err) + } + if len(req.Properties) == 0 { + return fmt.Errorf("%w: empty property", ErrBadRequest) + } + + return s.apply( + applyOp{ + op: cmd.GetType().String(), + updateSchema: func() error { return s.schema.addProperty(cmd.Class, cmd.Version, req.Properties...) 
}, + updateStore: func() error { return s.db.AddProperty(cmd.Class, req) }, + schemaOnly: schemaOnly, + enableSchemaCallback: enableSchemaCallback, + }, + ) +} + +func (s *SchemaManager) UpdateShardStatus(cmd *command.ApplyRequest, schemaOnly bool) error { + req := command.UpdateShardStatusRequest{} + if err := json.Unmarshal(cmd.SubCommand, &req); err != nil { + return fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + return s.apply( + applyOp{ + op: cmd.GetType().String(), + updateSchema: func() error { return nil }, + updateStore: func() error { return s.db.UpdateShardStatus(&req) }, + schemaOnly: schemaOnly, + }, + ) +} + +func (s *SchemaManager) AddReplicaToShard(cmd *command.ApplyRequest, schemaOnly bool) error { + req := command.AddReplicaToShard{} + if err := json.Unmarshal(cmd.SubCommand, &req); err != nil { + return fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + return s.apply( + applyOp{ + op: cmd.GetType().String(), + updateSchema: func() error { return s.schema.addReplicaToShard(cmd.Class, cmd.Version, req.Shard, req.TargetNode) }, + updateStore: func() error { + if req.TargetNode == s.schema.nodeID { + return s.db.AddReplicaToShard(req.Class, req.Shard, req.TargetNode) + } + return nil + }, + schemaOnly: schemaOnly, + }, + ) +} + +func (s *SchemaManager) DeleteReplicaFromShard(cmd *command.ApplyRequest, schemaOnly bool) error { + req := command.DeleteReplicaFromShard{} + if err := json.Unmarshal(cmd.SubCommand, &req); err != nil { + return fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + return s.apply( + applyOp{ + op: cmd.GetType().String(), + updateSchema: func() error { + return s.schema.deleteReplicaFromShard(cmd.Class, cmd.Version, req.Shard, req.TargetNode) + }, + updateStore: func() error { + if req.TargetNode == s.schema.nodeID { + return s.db.DeleteReplicaFromShard(req.Class, req.Shard, req.TargetNode) + } + return nil + }, + schemaOnly: schemaOnly, + }, + ) +} + +func (s *SchemaManager) AddTenants(cmd *command.ApplyRequest, schemaOnly 
bool) error { + req := &command.AddTenantsRequest{} + if err := gproto.Unmarshal(cmd.SubCommand, req); err != nil { + return fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + return s.apply( + applyOp{ + op: cmd.GetType().String(), + updateSchema: func() error { return s.schema.addTenants(cmd.Class, cmd.Version, req) }, + updateStore: func() error { return s.db.AddTenants(cmd.Class, req) }, + schemaOnly: schemaOnly, + }, + ) +} + +func (s *SchemaManager) UpdateTenants(cmd *command.ApplyRequest, schemaOnly bool) error { + req := &command.UpdateTenantsRequest{} + if err := gproto.Unmarshal(cmd.SubCommand, req); err != nil { + return fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + return s.apply( + applyOp{ + op: cmd.GetType().String(), + // updateSchema func will update the request's tenants and therefore we use it as a filter that is then sent + // to the updateStore function. This allows us to effectively use the schema update to narrow down work for + // the DB update. + updateSchema: func() error { return s.schema.updateTenants(cmd.Class, cmd.Version, req, s.replicationFSM) }, + updateStore: func() error { return s.db.UpdateTenants(cmd.Class, req) }, + schemaOnly: schemaOnly, + }, + ) +} + +func (s *SchemaManager) DeleteTenants(cmd *command.ApplyRequest, schemaOnly bool) error { + req := &command.DeleteTenantsRequest{} + if err := gproto.Unmarshal(cmd.SubCommand, req); err != nil { + return fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + tenants, err := s.schema.getTenants(cmd.Class, req.Tenants) + if err != nil { + // error are handled by the updateSchema, so they are ignored here. + // Instead, we log the error to detect tenant status before deleting + // them from the schema. this allows the database layer to decide whether + // to send the delete request to the cloud provider. 
+ s.log.WithFields(logrus.Fields{ + "class": cmd.Class, + "tenants": req.Tenants, + "error": err.Error(), + }).Error("error getting tenants") + } + + return s.apply( + applyOp{ + op: cmd.GetType().String(), + updateSchema: func() error { return s.schema.deleteTenants(cmd.Class, cmd.Version, req) }, + updateStore: func() error { + if s.replicationFSM == nil { + return fmt.Errorf("replication deleter is not set, this should never happen") + } else if err := s.replicationFSM.DeleteReplicationsByTenants(cmd.Class, req.Tenants); err != nil { + // If there is an error deleting the replications then we log it but make sure not to block the deletion of the class from a UX PoV + s.log.WithField("error", err).WithField("class", cmd.Class).WithField("tenants", tenants).Error("could not delete replication operations for deleted tenants") + } + return s.db.DeleteTenants(cmd.Class, tenants) + }, + schemaOnly: schemaOnly, + }, + ) +} + +func (s *SchemaManager) UpdateTenantsProcess(cmd *command.ApplyRequest, schemaOnly bool) error { + req := &command.TenantProcessRequest{} + if err := gproto.Unmarshal(cmd.SubCommand, req); err != nil { + return fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + return s.apply( + applyOp{ + op: cmd.GetType().String(), + updateSchema: func() error { return s.schema.updateTenantsProcess(cmd.Class, cmd.Version, req) }, + updateStore: func() error { return s.db.UpdateTenantsProcess(cmd.Class, req) }, + schemaOnly: schemaOnly, + }, + ) +} + +func (s *SchemaManager) SyncShard(cmd *command.ApplyRequest, schemaOnly bool) error { + req := command.SyncShardRequest{} + if err := json.Unmarshal(cmd.SubCommand, &req); err != nil { + return fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + if req.NodeId != s.schema.nodeID { + return nil + } + + return s.apply( + applyOp{ + op: cmd.GetType().String(), + updateSchema: func() error { return nil }, + updateStore: func() error { + return s.schema.Read(req.Collection, func(class *models.Class, state *sharding.State) 
error { + physical, ok := state.Physical[req.Shard] + // shard does not exist in the sharding state + if !ok { + // TODO: can we guarantee that the shard is not in use? + // If so we should call s.db.DropShard(cmd.Class, req.Shard) here instead + // For now, to be safe and avoid data loss, we just shut it down + s.db.ShutdownShard(cmd.Class, req.Shard) + // return early + return nil + } + // if shard doesn't belong to this node + if !slices.Contains(physical.BelongsToNodes, req.NodeId) { + // shut it down + s.db.ShutdownShard(cmd.Class, req.Shard) + // return early + return nil + } + // collection is single-tenant, shard is present, replica belongs to node + if !state.PartitioningEnabled { + // load it + s.db.LoadShard(cmd.Class, req.Shard) + // return early + return nil + } + // collection is multi-tenant, shard is present, replica belongs to node + switch physical.ActivityStatus() { + // tenant is active + case models.TenantActivityStatusACTIVE: + // load it + s.db.LoadShard(cmd.Class, req.Shard) + // tenant is inactive + case models.TenantActivityStatusINACTIVE: + // shut it down + s.db.ShutdownShard(cmd.Class, req.Shard) + // tenant is in some other state + default: + // do nothing + + } + return nil + }) + }, + schemaOnly: schemaOnly, + }, + ) +} + +func (s *SchemaManager) ReplicationAddReplicaToShard(cmd *command.ApplyRequest, schemaOnly bool) error { + req := command.ReplicationAddReplicaToShard{} + if err := json.Unmarshal(cmd.SubCommand, &req); err != nil { + return fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + return s.apply( + applyOp{ + op: cmd.GetType().String(), + updateSchema: func() error { + err := s.replicationFSM.SetUnCancellable(req.OpId) + if err != nil { + return fmt.Errorf("set un-cancellable: %w", err) + } + return s.schema.addReplicaToShard(cmd.Class, cmd.Version, req.Shard, req.TargetNode) + }, + updateStore: func() error { + if req.TargetNode == s.schema.nodeID { + return s.db.AddReplicaToShard(req.Class, req.Shard, req.TargetNode) + } 
+ return nil + }, + schemaOnly: schemaOnly, + }, + ) +} + +type applyOp struct { + op string + updateSchema func() error + updateStore func() error + schemaOnly bool + enableSchemaCallback bool +} + +func (op applyOp) validate() error { + if op.op == "" { + return fmt.Errorf("op is not specified") + } + if op.updateSchema == nil { + return fmt.Errorf("updateSchema func is nil") + } + if op.updateStore == nil { + return fmt.Errorf("updateStore func is nil") + } + return nil +} + +// apply does apply commands from RAFT to schema 1st and then db +func (s *SchemaManager) apply(op applyOp) error { + if err := op.validate(); err != nil { + return fmt.Errorf("could not validate raft apply op: %w", err) + } + + // schema applied 1st to make sure any validation happen before applying it to db + if err := op.updateSchema(); err != nil { + return fmt.Errorf("%w: %s: %w", ErrSchema, op.op, err) + } + + if op.enableSchemaCallback && s.db != nil { + // TriggerSchemaUpdateCallbacks is concurrent and at + // this point of time schema shall be up to date. + s.db.TriggerSchemaUpdateCallbacks() + } + + if !op.schemaOnly { + if err := op.updateStore(); err != nil { + return fmt.Errorf("%w: %s: %w", errDB, op.op, err) + } + } + + return nil +} + +// migratePropertiesIfNecessary migrate properties and set default values for them. +// This is useful when adding new properties to ensure that their default value is properly set. +// Current migrated properties: +// IndexRangeFilters was introduced with 1.26, so objects which were created +// on an older version, will have this value set to nil when the instance is +// upgraded. If we come across a property with nil IndexRangeFilters, it +// needs to be set as false, to avoid false positive class differences on +// comparison during class updates. 
+func migratePropertiesIfNecessary(class *models.Class) { + for _, prop := range class.Properties { + if prop.IndexRangeFilters == nil { + prop.IndexRangeFilters = func() *bool { f := false; return &f }() + } + + // Ensure we also migrate nested properties + for _, nprop := range prop.NestedProperties { + migrateNestedPropertiesIfNecessary(nprop) + } + } +} + +func migrateNestedPropertiesIfNecessary(nprop *models.NestedProperty) { + // migrate this nested property + nprop.IndexRangeFilters = func() *bool { f := false; return &f }() + // Recurse on all nested properties this one has + for _, recurseNestedProperty := range nprop.NestedProperties { + migrateNestedPropertiesIfNecessary(recurseNestedProperty) + } +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/schema/manager_alias.go b/platform/dbops/binaries/weaviate-src/cluster/schema/manager_alias.go new file mode 100644 index 0000000000000000000000000000000000000000..21711e00bf5ab01c48a2991bd5494b3d16854db6 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/schema/manager_alias.go @@ -0,0 +1,105 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package schema + +import ( + "encoding/json" + "fmt" + + command "github.com/weaviate/weaviate/cluster/proto/api" + gproto "google.golang.org/protobuf/proto" +) + +func (s *SchemaManager) CreateAlias(cmd *command.ApplyRequest) error { + req := &command.CreateAliasRequest{} + if err := gproto.Unmarshal(cmd.SubCommand, req); err != nil { + return fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + return s.apply( + applyOp{ + op: cmd.GetType().String(), + updateSchema: func() error { return s.schema.createAlias(req.Collection, req.Alias) }, + updateStore: func() error { return nil /* nothing do to here */ }, + enableSchemaCallback: true, + }, + ) +} + +func (s *SchemaManager) ReplaceAlias(cmd *command.ApplyRequest) error { + req := &command.ReplaceAliasRequest{} + if err := gproto.Unmarshal(cmd.SubCommand, req); err != nil { + return fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + return s.apply( + applyOp{ + op: cmd.GetType().String(), + updateSchema: func() error { return s.schema.replaceAlias(req.Collection, req.Alias) }, + updateStore: func() error { return nil /* nothing do to here */ }, + enableSchemaCallback: true, + }, + ) +} + +func (s *SchemaManager) DeleteAlias(cmd *command.ApplyRequest) error { + req := &command.DeleteAliasRequest{} + if err := gproto.Unmarshal(cmd.SubCommand, req); err != nil { + return fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + return s.apply( + applyOp{ + op: cmd.GetType().String(), + updateSchema: func() error { return s.schema.deleteAlias(req.Alias) }, + updateStore: func() error { return nil /* nothing do to here */ }, + enableSchemaCallback: true, + }, + ) +} + +func (s *SchemaManager) ResolveAlias(req *command.QueryRequest) ([]byte, error) { + subCommand := command.QueryResolveAliasRequest{} + if err := json.Unmarshal(req.SubCommand, &subCommand); err != nil { + return []byte{}, fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + rootClass := s.schema.ResolveAlias(subCommand.Alias) + if 
rootClass == "" { + return nil, fmt.Errorf("resolve alias: %s, %w", subCommand.Alias, ErrAliasNotFound) + } + + response := command.QueryResolveAliasResponse{ + Class: rootClass, + } + payload, err := json.Marshal(&response) + if err != nil { + return []byte{}, fmt.Errorf("could not marshal resolve alias response: %w", err) + } + return payload, nil +} + +func (s *SchemaManager) GetAliases(req *command.QueryRequest) ([]byte, error) { + subCommand := command.QueryGetAliasesRequest{} + if err := json.Unmarshal(req.SubCommand, &subCommand); err != nil { + return []byte{}, fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + response := command.QueryGetAliasesResponse{ + Aliases: s.schema.getAliases(subCommand.Alias, subCommand.Class), + } + payload, err := json.Marshal(&response) + if err != nil { + return []byte{}, fmt.Errorf("could not marshal get aliases response: %w", err) + } + return payload, nil +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/schema/manager_query.go b/platform/dbops/binaries/weaviate-src/cluster/schema/manager_query.go new file mode 100644 index 0000000000000000000000000000000000000000..a7dcc15e51dcb9a958fede950bc59c32a2bf75a6 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/schema/manager_query.go @@ -0,0 +1,171 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package schema + +import ( + "encoding/json" + "fmt" + + cmd "github.com/weaviate/weaviate/cluster/proto/api" +) + +func (sm *SchemaManager) QueryReadOnlyClasses(req *cmd.QueryRequest) ([]byte, error) { + // Validate that the subcommand is the correct type + subCommand := cmd.QueryReadOnlyClassesRequest{} + if err := json.Unmarshal(req.SubCommand, &subCommand); err != nil { + return []byte{}, fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + // Read the meta class to get both the class and sharding information + vclasses := sm.schema.ReadOnlyClasses(subCommand.Classes...) + if len(vclasses) == 0 { + return []byte{}, nil + } + + // Build the response, marshal and return + response := cmd.QueryReadOnlyClassResponse{ + Classes: vclasses, + } + payload, err := json.Marshal(&response) + if err != nil { + return []byte{}, fmt.Errorf("could not marshal query response: %w", err) + } + return payload, nil +} + +func (sm *SchemaManager) QuerySchema() ([]byte, error) { + // Build the response, marshal and return + response := cmd.QuerySchemaResponse{Schema: sm.schema.ReadOnlySchema()} + payload, err := json.Marshal(&response) + if err != nil { + return []byte{}, fmt.Errorf("could not marshal query response: %w", err) + } + return payload, nil +} + +func (sm *SchemaManager) QueryCollectionsCount() ([]byte, error) { + // Build the response, marshal and return + response := cmd.QueryCollectionsCountResponse{Count: sm.schema.CollectionsCount()} + payload, err := json.Marshal(&response) + if err != nil { + return []byte{}, fmt.Errorf("could not marshal query response: %w", err) + } + return payload, nil +} + +func (sm *SchemaManager) QueryTenants(req *cmd.QueryRequest) ([]byte, error) { + // Validate that the subcommand is the correct type + subCommand := cmd.QueryTenantsRequest{} + if err := json.Unmarshal(req.SubCommand, &subCommand); err != nil { + return []byte{}, fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + // Read the tenants + 
tenants, err := sm.schema.getTenants(subCommand.Class, subCommand.Tenants) + if err != nil { + return []byte{}, fmt.Errorf("could not get tenants: %w", err) + } + + // Build the response, marshal and return + response := cmd.QueryTenantsResponse{Tenants: tenants} + payload, err := json.Marshal(&response) + if err != nil { + return []byte{}, fmt.Errorf("could not marshal query response: %w", err) + } + return payload, nil +} + +func (sm *SchemaManager) QueryShardOwner(req *cmd.QueryRequest) ([]byte, error) { + // Validate that the subcommand is the correct type + subCommand := cmd.QueryShardOwnerRequest{} + if err := json.Unmarshal(req.SubCommand, &subCommand); err != nil { + return []byte{}, fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + // Read the meta class to get both the class and sharding information + owner, version, err := sm.schema.ShardOwner(subCommand.Class, subCommand.Shard) + if err != nil { + return []byte{}, err + } + + // Build the response, marshal and return + response := cmd.QueryShardOwnerResponse{ShardVersion: version, Owner: owner} + payload, err := json.Marshal(&response) + if err != nil { + return []byte{}, fmt.Errorf("could not marshal query response: %w", err) + } + return payload, nil +} + +func (sm *SchemaManager) QueryTenantsShards(req *cmd.QueryRequest) ([]byte, error) { + // Validate that the subcommand is the correct type + subCommand := cmd.QueryTenantsShardsRequest{} + if err := json.Unmarshal(req.SubCommand, &subCommand); err != nil { + return []byte{}, fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + // Read the meta class to get both the class and sharding information + tenants, version := sm.schema.TenantsShards(subCommand.Class, subCommand.Tenants...) 
+ // Build the response, marshal and return + response := cmd.QueryTenantsShardsResponse{TenantsActivityStatus: tenants, SchemaVersion: version} + payload, err := json.Marshal(&response) + if err != nil { + return []byte{}, fmt.Errorf("could not marshal query response: %w", err) + } + return payload, nil +} + +func (sm *SchemaManager) QueryShardingState(req *cmd.QueryRequest) ([]byte, error) { + // Validate that the subcommand is the correct type + subCommand := cmd.QueryShardingStateRequest{} + if err := json.Unmarshal(req.SubCommand, &subCommand); err != nil { + return []byte{}, fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + state, version := sm.schema.CopyShardingState(subCommand.Class) + // Build the response, marshal and return + response := cmd.QueryShardingStateResponse{State: state, Version: version} + payload, err := json.Marshal(&response) + if err != nil { + return []byte{}, fmt.Errorf("could not marshal query response: %w", err) + } + return payload, nil +} + +// QueryClassVersions returns the versions of the requested classes +func (sm *SchemaManager) QueryClassVersions(req *cmd.QueryRequest) ([]byte, error) { + // Validate that the subcommand is the correct type + subCommand := cmd.QueryClassVersionsRequest{} + if err := json.Unmarshal(req.SubCommand, &subCommand); err != nil { + return []byte{}, fmt.Errorf("%w: %w", ErrBadRequest, err) + } + + // Read the meta class to get the class version + vclasses := sm.schema.ReadOnlyClasses(subCommand.Classes...) 
+ if len(vclasses) == 0 { + return []byte{}, nil + } + + // Build the response, marshal and return + classVersions := make(map[string]uint64, len(vclasses)) + for _, vclass := range vclasses { + classVersions[vclass.Class.Class] = vclass.Version + } + response := cmd.QueryClassVersionsResponse{ + Classes: classVersions, + } + payload, err := json.Marshal(&response) + if err != nil { + return []byte{}, fmt.Errorf("could not marshal query response: %w", err) + } + return payload, nil +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/schema/manager_test.go b/platform/dbops/binaries/weaviate-src/cluster/schema/manager_test.go new file mode 100644 index 0000000000000000000000000000000000000000..d2c3c2270803e2c24587ff4782f9c0fe008bff96 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/schema/manager_test.go @@ -0,0 +1,321 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package schema + +import ( + "context" + "encoding/json" + "fmt" + "testing" + + "github.com/prometheus/client_golang/prometheus" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + + "github.com/stretchr/testify/require" + cmd "github.com/weaviate/weaviate/cluster/proto/api" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/fakes" + + "github.com/weaviate/weaviate/usecases/sharding" +) + +func TestResolveAlais(t *testing.T) { + parser := fakes.NewMockParser() + parser.On("ParseClass", mock.Anything).Return(nil) + sm := NewSchemaManager("test-node", nil, parser, prometheus.NewPedanticRegistry(), logrus.New()) + areq := cmd.QueryResolveAliasRequest{ + Alias: "AliasNotExist", + } + + subCommand, err := json.Marshal(&areq) + require.NoError(t, err) + + req := &cmd.QueryRequest{ + Type: cmd.QueryRequest_TYPE_RESOLVE_ALIAS, + SubCommand: subCommand, + } + res, err := sm.ResolveAlias(req) + // Make sure ResolveAlias api returns ErrAliasNotFound in the error chain + // This is used to decide the final http status code on the http handlers + require.ErrorIs(t, err, ErrAliasNotFound) + require.Nil(t, res) +} + +func TestVersionedSchemaReaderShardReplicas(t *testing.T) { + var ( + ctx = context.Background() + sc = NewSchema(t.Name(), nil, prometheus.NewPedanticRegistry()) + vsc = VersionedSchemaReader{ + schema: sc, + WaitForUpdate: func(ctx context.Context, version uint64) error { return nil }, + } + ) + // class not found + _, _, err := sc.ShardReplicas("C", "S") + assert.ErrorIs(t, err, ErrClassNotFound) + + // shard not found + ss := &sharding.State{Physical: make(map[string]sharding.Physical)} + + sc.addClass(&models.Class{Class: "C"}, ss, 1) + + _, err = vsc.ShardReplicas(ctx, "C", "S", 1) + assert.ErrorIs(t, err, ErrShardNotFound) + + // two replicas found + nodes := []string{"A", "B"} + ss.Physical["S"] = 
sharding.Physical{BelongsToNodes: nodes} + res, err := vsc.ShardReplicas(ctx, "C", "S", 1) + assert.Nil(t, err) + assert.Equal(t, nodes, res) +} + +func TestVersionedSchemaReaderClass(t *testing.T) { + var ( + ctx = context.Background() + retErr error + f = func(ctx context.Context, version uint64) error { return retErr } + nodes = []string{"N1", "N2"} + s = NewSchema(t.Name(), &MockShardReader{}, prometheus.NewPedanticRegistry()) + sc = VersionedSchemaReader{s, f} + ) + + // class not found + cls, err := sc.ReadOnlyClass(ctx, "C", 1) + assert.Nil(t, cls) + assert.Nil(t, err) + mt, err := sc.MultiTenancy(ctx, "C", 1) + assert.Equal(t, mt, models.MultiTenancyConfig{}) + assert.Nil(t, err) + + info, err := sc.ClassInfo(ctx, "C", 1) + assert.Equal(t, ClassInfo{}, info) + assert.Nil(t, err) + + _, err = sc.ShardReplicas(ctx, "C", "S", 1) + assert.ErrorIs(t, err, ErrClassNotFound) + _, err = sc.ShardOwner(ctx, "C", "S", 1) + assert.ErrorIs(t, err, ErrClassNotFound) + err = sc.Read(ctx, "C", 1, func(c *models.Class, s *sharding.State) error { return nil }) + assert.ErrorIs(t, err, ErrClassNotFound) + + // Add Simple class + cls1 := &models.Class{Class: "C"} + ss1 := &sharding.State{Physical: map[string]sharding.Physical{ + "S1": {Status: "A"}, + "S2": {Status: "A", BelongsToNodes: nodes}, + }} + + assert.Nil(t, sc.schema.addClass(cls1, ss1, 1)) + info, err = sc.ClassInfo(ctx, "C", 1) + assert.Equal(t, ClassInfo{ + ReplicationFactor: 1, + ClassVersion: 1, + ShardVersion: 1, Exists: true, Tenants: len(ss1.Physical), + }, info) + assert.Nil(t, err) + + cls, err = sc.ReadOnlyClass(ctx, "C", 1) + assert.Equal(t, cls, cls1) + assert.Nil(t, err) + mt, err = sc.MultiTenancy(ctx, "D", 1) + assert.Equal(t, models.MultiTenancyConfig{}, mt) + assert.Nil(t, err) + + // Shards + _, err = sc.ShardOwner(ctx, "C", "S1", 1) + assert.ErrorContains(t, err, "node not found") + _, err = sc.ShardOwner(ctx, "C", "Sx", 1) + assert.ErrorIs(t, err, ErrShardNotFound) + shards, _, err := 
sc.TenantsShards(ctx, 1, "C", "S2") + assert.Empty(t, shards) + assert.Nil(t, err) + shard, err := sc.ShardFromUUID(ctx, "Cx", nil, 1) + assert.Empty(t, shard) + assert.Nil(t, err) + + // Add MT Class + cls2 := &models.Class{Class: "D", MultiTenancyConfig: &models.MultiTenancyConfig{Enabled: true}} + ss2 := &sharding.State{ + PartitioningEnabled: true, + Physical: map[string]sharding.Physical{"S1": {Status: "A", BelongsToNodes: nodes}}, + } + sc.schema.addClass(cls2, ss2, 1) + cls, err = sc.ReadOnlyClass(ctx, "D", 1) + assert.Equal(t, cls, cls2, 1) + assert.Nil(t, err) + + mt, err = sc.MultiTenancy(ctx, "D", 1) + assert.Equal(t, models.MultiTenancyConfig{Enabled: true}, mt) + assert.Nil(t, err) + + // ShardOwner + owner, err := sc.ShardOwner(ctx, "D", "S1", 1) + assert.Nil(t, err) + assert.Contains(t, nodes, owner) + + // TenantShard + shards, _, err = sc.TenantsShards(ctx, 1, "D", "S1") + assert.Equal(t, shards, map[string]string{"S1": "A"}) + assert.Equal(t, shards["S1"], "A") + assert.Nil(t, err) + + shards, _, err = sc.TenantsShards(ctx, 1, "D", "Sx") + assert.Empty(t, shards) + assert.Nil(t, err) + + reader := func(c *models.Class, s *sharding.State) error { return nil } + assert.Nil(t, sc.Read(ctx, "C", 1, reader)) + retErr = fmt.Errorf("waiting error") + assert.ErrorIs(t, sc.Read(ctx, "C", 1, reader), retErr) + retErr = nil +} + +func TestSchemaReaderShardReplicas(t *testing.T) { + sc := NewSchema(t.Name(), nil, prometheus.NewPedanticRegistry()) + rsc := SchemaReader{sc, VersionedSchemaReader{}} + // class not found + _, _, err := sc.ShardReplicas("C", "S") + assert.ErrorIs(t, err, ErrClassNotFound) + + // shard not found + ss := &sharding.State{Physical: make(map[string]sharding.Physical)} + + sc.addClass(&models.Class{Class: "C"}, ss, 1) + + _, err = rsc.ShardReplicas("C", "S") + assert.ErrorIs(t, err, ErrShardNotFound) + + // two replicas found + nodes := []string{"A", "B"} + ss.Physical["S"] = sharding.Physical{BelongsToNodes: nodes} + res, err := 
rsc.ShardReplicas("C", "S") + assert.Nil(t, err) + assert.Equal(t, nodes, res) +} + +func TestSchemaReaderClass(t *testing.T) { + var ( + nodes = []string{"N1", "N2"} + s = NewSchema(t.Name(), &MockShardReader{}, prometheus.NewPedanticRegistry()) + sc = SchemaReader{s, VersionedSchemaReader{}} + ) + + // class not found + assert.Nil(t, sc.ReadOnlyClass("C")) + cl := sc.ReadOnlyVersionedClass("C") + assert.Nil(t, cl.Class) + assert.Equal(t, sc.ReadOnlySchema(), models.Schema{Classes: make([]*models.Class, 0)}) + assert.Equal(t, sc.MultiTenancy("C"), models.MultiTenancyConfig{}) + + _, err := sc.ShardReplicas("C", "S") + assert.ErrorIs(t, err, ErrClassNotFound) + _, err = sc.ShardOwner("C", "S") + assert.ErrorIs(t, err, ErrClassNotFound) + err = sc.Read("C", func(c *models.Class, s *sharding.State) error { return nil }) + assert.ErrorIs(t, err, ErrClassNotFound) + + // Add Simple class + cls1 := &models.Class{Class: "C"} + ss1 := &sharding.State{Physical: map[string]sharding.Physical{ + "S1": {Status: "A"}, + "S2": {Status: "A", BelongsToNodes: nodes}, + }} + + sc.schema.addClass(cls1, ss1, 1) + assert.Equal(t, sc.ReadOnlyClass("C"), cls1) + versionedClass := sc.ReadOnlyVersionedClass("C") + assert.Equal(t, versionedClass.Class, cls1) + assert.Equal(t, sc.MultiTenancy("D"), models.MultiTenancyConfig{}) + assert.Nil(t, sc.Read("C", func(c *models.Class, s *sharding.State) error { return nil })) + + // Shards + _, err = sc.ShardOwner("C", "S1") + assert.ErrorContains(t, err, "node not found") + _, err = sc.ShardOwner("C", "Sx") + assert.ErrorIs(t, err, ErrShardNotFound) + shard, _ := sc.TenantsShards("C", "S2") + assert.Empty(t, shard) + assert.Empty(t, sc.ShardFromUUID("Cx", nil)) + + _, err = sc.GetShardsStatus("C", "") + assert.Nil(t, err) + + // Add MT Class + cls2 := &models.Class{Class: "D", MultiTenancyConfig: &models.MultiTenancyConfig{Enabled: true}} + ss2 := &sharding.State{ + PartitioningEnabled: true, + Physical: map[string]sharding.Physical{"S1": {Status: 
"A", BelongsToNodes: nodes}}, + } + sc.schema.addClass(cls2, ss2, 1) + assert.Equal(t, sc.ReadOnlyClass("D"), cls2) + versionedClass = sc.ReadOnlyVersionedClass("D") + assert.Equal(t, versionedClass.Class, cls2) + assert.Equal(t, sc.MultiTenancy("D"), models.MultiTenancyConfig{Enabled: true}) + + assert.ElementsMatch(t, sc.ReadOnlySchema().Classes, []*models.Class{cls1, cls2}) + + // ShardOwner + owner, err := sc.ShardOwner("D", "S1") + assert.Nil(t, err) + assert.Contains(t, nodes, owner) + + // TenantShard + shards, _ := sc.TenantsShards("D", "S1") + assert.Equal(t, shards["S1"], "A") + shards, _ = sc.TenantsShards("D", "Sx") + assert.Empty(t, shards) +} + +// TestPropertiesMigration ensures that our migration function sets proper default values +// The test verifies that we migrate top level properties and then at least one layer deep nested properties +func TestPropertiesMigration(t *testing.T) { + class := &models.Class{ + Class: "C", + Properties: []*models.Property{ + { + NestedProperties: []*models.NestedProperty{ + { + NestedProperties: []*models.NestedProperty{ + {}, + }, + }, + }, + }, + }, + } + + // Set the values to nil, which would be the case if we're upgrading a cluster with "old" classes in it + class.Properties[0].IndexRangeFilters = nil + class.Properties[0].NestedProperties[0].IndexRangeFilters = nil + class.Properties[0].NestedProperties[0].NestedProperties[0].IndexRangeFilters = nil + migratePropertiesIfNecessary(class) + + // Check + require.NotNil(t, class.Properties[0].IndexRangeFilters) + require.False(t, *(class.Properties[0].IndexRangeFilters)) + require.NotNil(t, class.Properties[0].NestedProperties[0].IndexRangeFilters) + require.False(t, *(class.Properties[0].NestedProperties[0].IndexRangeFilters)) + require.NotNil(t, class.Properties[0].NestedProperties[0].NestedProperties[0].IndexRangeFilters) + require.False(t, *(class.Properties[0].NestedProperties[0].NestedProperties[0].IndexRangeFilters)) +} + +type MockShardReader struct { + 
lst models.ShardStatusList + err error +} + +func (m *MockShardReader) GetShardsStatus(class, tenant string) (models.ShardStatusList, error) { + return m.lst, m.err +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/schema/meta_class.go b/platform/dbops/binaries/weaviate-src/cluster/schema/meta_class.go new file mode 100644 index 0000000000000000000000000000000000000000..86f5587ba49377f91639fd5b5b7f3bbb36713808 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/schema/meta_class.go @@ -0,0 +1,641 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package schema + +import ( + "fmt" + "math/rand" + "slices" + "strings" + "sync" + + command "github.com/weaviate/weaviate/cluster/proto/api" + "github.com/weaviate/weaviate/cluster/types" + "github.com/weaviate/weaviate/entities/models" + entSchema "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/usecases/sharding" +) + +type ( + NodeShardProcess map[string]*command.TenantsProcess + metaClass struct { + sync.RWMutex + Class models.Class + ClassVersion uint64 + Sharding sharding.State + ShardVersion uint64 + // ShardProcesses map[tenantName-action(FREEZING/UNFREEZING)]map[nodeID]TenantsProcess + ShardProcesses map[string]NodeShardProcess + } +) + +func (m *metaClass) ClassInfo() ClassInfo { + if m == nil { + return ClassInfo{} + } + + m.RLock() + defer m.RUnlock() + + ci := ClassInfo{ + ReplicationFactor: 1, + Exists: true, + Properties: len(m.Class.Properties), + MultiTenancy: models.MultiTenancyConfig{}, + Tenants: len(m.Sharding.Physical), + ClassVersion: m.ClassVersion, + ShardVersion: m.ShardVersion, + } + + if m.Class.MultiTenancyConfig != nil { + ci.MultiTenancy = *m.Class.MultiTenancyConfig + } + if m.Class.ReplicationConfig 
!= nil && m.Class.ReplicationConfig.Factor > 1 { + ci.ReplicationFactor = int(m.Class.ReplicationConfig.Factor) + } + return ci +} + +func (m *metaClass) version() uint64 { + if m == nil { + return 0 + } + return max(m.ClassVersion, m.ShardVersion) +} + +func (m *metaClass) MultiTenancyConfig() (mc models.MultiTenancyConfig, v uint64) { + if m == nil { + return + } + m.RLock() + defer m.RUnlock() + if m.Class.MultiTenancyConfig == nil { + return + } + + return *m.Class.MultiTenancyConfig, m.version() +} + +// CloneClass returns a shallow copy of m +func (m *metaClass) CloneClass() *models.Class { + m.RLock() + defer m.RUnlock() + cp := m.Class + return &cp +} + +// ShardOwner returns the node owner of the specified shard +// will randomize the owner if there is more than one node +func (m *metaClass) ShardOwner(shard string) (string, uint64, error) { + m.RLock() + defer m.RUnlock() + x, ok := m.Sharding.Physical[shard] + + if !ok { + return "", 0, ErrShardNotFound + } + if len(x.BelongsToNodes) < 1 || x.BelongsToNodes[0] == "" { + return "", 0, fmt.Errorf("owner node not found") + } + + // we randomize the owner if there is more than one node + // - avoid hotspots + // - tolerate down nodes + // - distribute load + return x.BelongsToNodes[rand.Intn(len(x.BelongsToNodes))], m.version(), nil +} + +// ShardFromUUID returns shard name of the provided uuid +func (m *metaClass) ShardFromUUID(uuid []byte) (string, uint64) { + m.RLock() + defer m.RUnlock() + return m.Sharding.PhysicalShard(uuid), m.version() +} + +// ShardReplicas returns the replica nodes of a shard +func (m *metaClass) ShardReplicas(shard string) ([]string, uint64, error) { + m.RLock() + defer m.RUnlock() + x, ok := m.Sharding.Physical[shard] + if !ok { + return nil, 0, ErrShardNotFound + } + return slices.Clone(x.BelongsToNodes), m.version(), nil +} + +// TenantsShards returns shard name for the provided tenant and its activity status +func (m *metaClass) TenantsShards(class string, tenants ...string) 
(map[string]string, uint64) { + m.RLock() + defer m.RUnlock() + + v := m.version() + if !m.Sharding.PartitioningEnabled { + return nil, v + } + + res := make(map[string]string, len(tenants)) + for _, t := range tenants { + if physical, ok := m.Sharding.Physical[t]; ok { + res[t] = physical.ActivityStatus() + } + } + return res, v +} + +func (m *metaClass) AddProperty(v uint64, props ...*models.Property) error { + m.Lock() + defer m.Unlock() + + // update all at once to prevent race condition with concurrent readers + mergedProps := MergeProps(m.Class.Properties, props) + m.Class.Properties = mergedProps + m.ClassVersion = v + return nil +} + +func (m *metaClass) AddReplicaToShard(v uint64, shard string, replica string) error { + m.Lock() + defer m.Unlock() + + err := m.Sharding.AddReplicaToShard(shard, replica) + if err != nil { + return err + } + m.ClassVersion = v + return nil +} + +func (m *metaClass) DeleteReplicaFromShard(v uint64, shard string, replica string) error { + m.Lock() + defer m.Unlock() + + err := m.Sharding.DeleteReplicaFromShard(shard, replica) + if err != nil { + return err + } + m.ClassVersion = v + return nil +} + +// MergeProps makes sure duplicates are not created by ignoring new props +// with the same names as old props. 
+// If property of nested type is present in both new and old slices, +// final property is created by merging new property into copy of old one +func MergeProps(old, new []*models.Property) []*models.Property { + mergedProps := make([]*models.Property, len(old), len(old)+len(new)) + copy(mergedProps, old) + + // create memory to avoid duplication + mem := make(map[string]int, len(old)) + for idx := range old { + mem[strings.ToLower(old[idx].Name)] = idx + } + + // pick ones not present in old slice or merge nested properties + // if already present + for idx := range new { + if oldIdx, exists := mem[strings.ToLower(new[idx].Name)]; !exists { + mergedProps = append(mergedProps, new[idx]) + } else { + mergedProps[oldIdx].IndexRangeFilters = new[idx].IndexRangeFilters + + nestedProperties, merged := entSchema.MergeRecursivelyNestedProperties( + mergedProps[oldIdx].NestedProperties, + new[idx].NestedProperties) + if merged { + propCopy := *mergedProps[oldIdx] + propCopy.NestedProperties = nestedProperties + mergedProps[oldIdx] = &propCopy + } + } + } + + return mergedProps +} + +func (m *metaClass) AddTenants(nodeID string, req *command.AddTenantsRequest, replFactor int64, v uint64) (map[string]int, error) { + req.Tenants = removeNilTenants(req.Tenants) + m.Lock() + defer m.Unlock() + + // TODO-RAFT: Optimize here and avoid iteration twice on the req.Tenants array + names := make([]string, len(req.Tenants)) + for i, tenant := range req.Tenants { + names[i] = tenant.Name + } + // First determine the partition based on the node *present at the time of the log entry being created* + partitions, err := m.Sharding.GetPartitions(req.ClusterNodes, names, replFactor) + if err != nil { + return nil, fmt.Errorf("get partitions: %w", err) + } + + // sc tracks number of shards in this collection to be added by status. 
+ sc := make(map[string]int) + + // Iterate over requested tenants and assign them, if found, a partition + for i, t := range req.Tenants { + if _, ok := m.Sharding.Physical[t.Name]; ok { + req.Tenants[i] = nil // already exists + continue + } + // TODO-RAFT: Check in which cases can the partition not have assigned one to a tenant + part, ok := partitions[t.Name] + if !ok { + // TODO-RAFT: Do we want to silently continue here or raise an error ? + continue + } + p := sharding.Physical{Name: t.Name, Status: t.Status, BelongsToNodes: part} + if m.Sharding.Physical == nil { + m.Sharding.Physical = make(map[string]sharding.Physical, 128) + } + m.Sharding.Physical[t.Name] = p + // TODO-RAFT: Check here why we set =nil if it is "owned by another node" + if !slices.Contains(part, nodeID) { + req.Tenants[i] = nil // is owned by another node + } + sc[p.Status]++ + } + m.ShardVersion = v + req.Tenants = removeNilTenants(req.Tenants) + return sc, nil +} + +// DeleteTenants try to delete the tenants from given request and returns +// total number of deleted tenants. 
+func (m *metaClass) DeleteTenants(req *command.DeleteTenantsRequest, v uint64) (map[string]int, error) { + m.Lock() + defer m.Unlock() + + count := make(map[string]int) + + for _, name := range req.Tenants { + shardingState := m.Sharding + status, ok, err := shardingState.DeletePartition(name) + if err != nil { + return nil, fmt.Errorf("error while migrating sharding state: %w", err) + } + if ok { + count[status]++ + } + } + m.ShardVersion = v + return count, nil +} + +func (m *metaClass) UpdateTenantsProcess(nodeID string, req *command.TenantProcessRequest, v uint64) (map[string]int, error) { + m.Lock() + defer m.Unlock() + + // sc tracks number of tenants updated by "status" + sc := make(map[string]int) + + for idx := range req.TenantsProcesses { + name := req.TenantsProcesses[idx].Tenant.Name + + shard, ok := m.Sharding.Physical[name] + if !ok { + return nil, fmt.Errorf("shard %s not found", name) + } + oldStatus := shard.Status + + if req.Action == command.TenantProcessRequest_ACTION_UNFREEZING { + // on unfreezing get the requested status from the shard process + if status := m.findRequestedStatus(nodeID, name, req.Action); status != "" { + req.TenantsProcesses[idx].Tenant.Status = status + } + } + + // NOTE: Have to get the `newStatus` only after `findRequestedStatus`, else req.Tenant.Status can be empty. 
+ newStatus := req.TenantsProcesses[idx].Tenant.Status + + process := m.shardProcess(name, req.Action) + process[req.Node] = req.TenantsProcesses[idx] + + if m.allShardProcessExecuted(name, req.Action) { + m.applyShardProcess(name, req.Action, req.TenantsProcesses[idx], &shard) + } else { + // ignore applying in case of aborts (upload action only) + if !m.updateShardProcess(name, req.Action, req.TenantsProcesses[idx], &shard) { + req.TenantsProcesses[idx] = nil + continue + } + } + + m.ShardVersion = v + m.Sharding.Physical[shard.Name] = shard + + sc[oldStatus]-- + sc[newStatus]++ + + if !slices.Contains(shard.BelongsToNodes, nodeID) { + req.TenantsProcesses[idx] = nil + continue + } + } + return sc, nil +} + +func (m *metaClass) UpdateTenants(nodeID string, req *command.UpdateTenantsRequest, replicationFSM replicationFSM, v uint64) (map[string]int, error) { + m.Lock() + defer m.Unlock() + + // sc tracks number of tenants updated by "status" + sc := make(map[string]int) + + // For each requested tenant update we'll check if we the schema is missing that shard. If we have any missing shard + // we'll return an error but any other successful shard will be updated. 
+ // If we're not adding a new shard we'll then check if the activity status needs to be changed + // If the activity status is changed we will deep copy the tenant and update the status + missingShards := []string{} + writeIndex := 0 + + for i, requestTenant := range req.Tenants { + oldTenant, ok := m.Sharding.Physical[requestTenant.Name] + oldStatus := oldTenant.Status + // If we can't find the shard add it to missing shards to error later + if !ok { + missingShards = append(missingShards, requestTenant.Name) + continue + } + + // validate status + switch oldTenant.ActivityStatus() { + case req.Tenants[i].Status: + continue + case types.TenantActivityStatusFREEZING: + // ignore multiple freezing + if requestTenant.Status == models.TenantActivityStatusFROZEN { + continue + } + case types.TenantActivityStatusUNFREEZING: + // ignore multiple unfreezing + var statusInProgress string + processes, exists := m.ShardProcesses[shardProcessID(req.Tenants[i].Name, command.TenantProcessRequest_ACTION_UNFREEZING)] + if exists { + for _, process := range processes { + statusInProgress = process.Tenant.Status + break + } + } + if requestTenant.Status == statusInProgress { + continue + } + } + + if requestTenant.Status == models.TenantActivityStatusCOLD && replicationFSM.HasOngoingReplication(m.Class.Class, requestTenant.Name, nodeID) { + continue + } + + existedSharedFrozen := oldTenant.ActivityStatus() == models.TenantActivityStatusFROZEN || oldTenant.ActivityStatus() == models.TenantActivityStatusFREEZING + requestedToFrozen := requestTenant.Status == models.TenantActivityStatusFROZEN + + switch { + case existedSharedFrozen && !requestedToFrozen: + if err := m.unfreeze(nodeID, i, req, &oldTenant); err != nil { + return sc, err + } + if req.Tenants[i] != nil { + requestTenant.Status = req.Tenants[i].Status + } + + case requestedToFrozen && !existedSharedFrozen: + m.freeze(i, req, oldTenant) + default: + // do nothing + } + + newTenant := oldTenant.DeepCopy() + newTenant.Status 
= requestTenant.Status + + // Update the schema tenant representation with the deep copy (necessary as the initial is a shallow copy from + // the map read + m.Sharding.Physical[oldTenant.Name] = newTenant + + // At this point we know, we are going to change the status of a tenant from old-state to new-state. + sc[oldStatus]-- + sc[newTenant.ActivityStatus()]++ + + // If the shard is not stored on that node skip updating the request tenant as there will be nothing to load on + // the DB side + if !slices.Contains(oldTenant.BelongsToNodes, nodeID) { + continue + } + + // Save the "valid" tenant on writeIndex and increment. This allows us to filter in place in req.Tenants the + // tenants that actually have a change to process + req.Tenants[writeIndex] = requestTenant + writeIndex++ + } + + // Remove the ignore tenants from the request to act as filter on the subsequent DB update + req.Tenants = req.Tenants[:writeIndex] + + // Check for any missing shard to return an error + var err error + if len(missingShards) > 0 { + err = fmt.Errorf("%w: %v", ErrShardNotFound, missingShards) + } + // Update the version of the shard to the current version + m.ShardVersion = v + + return sc, err +} + +// LockGuard provides convenient mechanism for owning mutex by function which mutates the state. 
+func (m *metaClass) LockGuard(mutator func(*metaClass) error) error {
+	m.Lock()
+	defer m.Unlock()
+	return mutator(m)
+}
+
+// RLockGuard provides a convenient mechanism for running a read-only function while holding the read lock.
+func (m *metaClass) RLockGuard(reader func(*models.Class, *sharding.State) error) error {
+	m.RLock()
+	defer m.RUnlock()
+	return reader(&m.Class, &m.Sharding)
+}
+
+// shardProcessID builds the ShardProcesses map key ("<tenant>-<action>") for a tenant/action pair.
+func shardProcessID(name string, action command.TenantProcessRequest_Action) string {
+	return fmt.Sprintf("%s-%s", name, action)
+}
+
+// findRequestedStatus returns the status recorded for nodeID in the matching shard process, or "" when absent.
+func (m *metaClass) findRequestedStatus(nodeID, name string, action command.TenantProcessRequest_Action) string {
+	processes, pExists := m.ShardProcesses[shardProcessID(name, action)]
+	if _, tExists := processes[nodeID]; pExists && tExists { // lookup on a nil map is safe when the process is absent
+		return processes[nodeID].Tenant.Status
+	}
+	return ""
+}
+
+// allShardProcessExecuted reports whether every node's process for this tenant/action has moved past OP_START.
+func (m *metaClass) allShardProcessExecuted(name string, action command.TenantProcessRequest_Action) bool {
+	name = shardProcessID(name, action)
+	expectedCount := len(m.ShardProcesses[name])
+	i := 0
+	for _, p := range m.ShardProcesses[name] {
+		if p.Op > command.TenantsProcess_OP_START { // DONE or ABORT
+			i++
+		}
+	}
+	return i != 0 && i == expectedCount // false when no process is registered at all
+}
+
+// updateShardProcess folds recorded node results into copy/req; returns true only when a FREEZING process was aborted.
+func (m *metaClass) updateShardProcess(name string, action command.TenantProcessRequest_Action, req *command.TenantsProcess, copy *sharding.Physical) bool {
+	processes := m.ShardProcesses[shardProcessID(name, action)]
+	switch action {
+	case command.TenantProcessRequest_ACTION_UNFREEZING:
+		for _, sp := range processes {
+			if sp.Op == command.TenantsProcess_OP_DONE { // adopt the first DONE node's status
+				copy.Status = sp.Tenant.Status
+				req.Tenant.Status = sp.Tenant.Status
+				break
+			}
+		}
+		return false
+	case command.TenantProcessRequest_ACTION_FREEZING:
+		for _, sp := range processes {
+			if sp.Op == command.TenantsProcess_OP_ABORT { // roll back and surface the aborting node's status
+				copy.Status = req.Tenant.Status
+				req.Tenant.Status = sp.Tenant.Status
+				return true
+			}
+		}
+	default:
+		return false
+	}
+
+	return false
+}
+
+// applyShardProcess applies the aggregated outcome of all node processes to the shard copy/request, then drops the process entry.
+func (m *metaClass) applyShardProcess(name string, action command.TenantProcessRequest_Action, req *command.TenantsProcess, copy *sharding.Physical) {
+	processes := m.ShardProcesses[shardProcessID(name, action)]
+	switch action {
+	case command.TenantProcessRequest_ACTION_UNFREEZING:
+		for _, sp := range processes {
+			if sp.Op == command.TenantsProcess_OP_DONE { // adopt the first DONE node's status
+				copy.Status = sp.Tenant.Status
+				req.Tenant.Status = sp.Tenant.Status
+				break
+			}
+		}
+	case command.TenantProcessRequest_ACTION_FREEZING:
+		count := 0
+		onAbortStatus := copy.Status
+		for _, sp := range processes {
+			if sp.Op == command.TenantsProcess_OP_DONE {
+				count++
+			} else {
+				count--
+				onAbortStatus = sp.Tenant.Status // remember the last non-DONE node's status
+			}
+		}
+
+		if count == len(processes) { // every node reported DONE
+			copy.Status = req.Tenant.Status
+		} else {
+			copy.Status = onAbortStatus
+			req.Tenant.Status = onAbortStatus
+		}
+	default:
+		// do nothing
+		return
+	}
+	delete(m.ShardProcesses, shardProcessID(name, action)) // the process is finished either way
+}
+
+// shardProcess returns the per-node process map for this tenant/action, lazily initializing ShardProcesses; the caller stores it back.
+func (m *metaClass) shardProcess(name string, action command.TenantProcessRequest_Action) map[string]*command.TenantsProcess {
+	if len(m.ShardProcesses) == 0 {
+		m.ShardProcesses = make(map[string]NodeShardProcess)
+	}
+
+	process, ok := m.ShardProcesses[shardProcessID(name, action)]
+	if !ok {
+		process = make(map[string]*command.TenantsProcess)
+	}
+	return process
+}
+
+// freeze creates process requests and records them in memory to compare later when the
+// TenantProcessRequest comes.
+// it updates the tenant status to FREEZING in RAFT schema
+func (m *metaClass) freeze(i int, req *command.UpdateTenantsRequest, shard sharding.Physical) {
+	process := m.shardProcess(req.Tenants[i].Name, command.TenantProcessRequest_ACTION_FREEZING)
+
+	for _, node := range shard.BelongsToNodes {
+		process[node] = &command.TenantsProcess{
+			Op: command.TenantsProcess_OP_START,
+			Tenant: &command.Tenant{
+				Name:   req.Tenants[i].Name,
+				Status: req.Tenants[i].Status,
+			},
+		}
+	}
+	// FREEZING is the transitional status to be processed in the db layer
+	req.Tenants[i].Status = types.TenantActivityStatusFREEZING
+	m.ShardProcesses[shardProcessID(req.Tenants[i].Name, command.TenantProcessRequest_ACTION_FREEZING)] = process // store back: shardProcess does not persist new maps
+}
+
+// unfreeze creates process requests and records them in memory to compare later when the
+// TenantProcessRequest comes.
+// it keeps the requested state ACTIVE/INACTIVE in memory.
+// it updates the tenant status to UNFREEZING in RAFT schema.
+// NOTE: can make some of the requests nil.
+func (m *metaClass) unfreeze(nodeID string, i int, req *command.UpdateTenantsRequest, p *sharding.Physical) error {
+	name := req.Tenants[i].Name
+	process := m.shardProcess(name, command.TenantProcessRequest_ACTION_UNFREEZING)
+
+	partitions, err := m.Sharding.GetPartitions(req.ClusterNodes, []string{name}, m.Class.ReplicationConfig.Factor)
+	if err != nil {
+		req.Tenants[i] = nil // drop the request; nothing for the DB layer to do
+		return fmt.Errorf("get partitions: %w", err)
+	}
+
+	newNodes, ok := partitions[name]
+	if !ok {
+		req.Tenants[i] = nil
+		return fmt.Errorf("can not assign new nodes to shard %s, it didn't exist in the new partitions", name)
+	}
+
+	oldNodes := p.BelongsToNodes
+	p.Status = types.TenantActivityStatusUNFREEZING
+	p.BelongsToNodes = newNodes
+
+	newToOld := map[string]string{} // pairs each new owner with an old node after sorting both sides
+	slices.Sort(newNodes)
+	slices.Sort(oldNodes)
+
+	for idx, node := range newNodes {
+		if idx >= len(oldNodes) {
+			// ignore surplus new nodes when the replication factor increased;
+			// rely on the replication client to replicate the data
+			// after it's downloaded
+			continue
+		}
+		newToOld[node] = oldNodes[idx]
+		process[node] = &command.TenantsProcess{
+			Op: command.TenantsProcess_OP_START,
+			Tenant: &command.Tenant{
+				Name:   name,
+				Status: req.Tenants[i].Status, // requested status HOT, COLD
+			},
+		}
+	}
+
+	if _, exists := newToOld[nodeID]; !exists {
+		// it does not belong to the new partitions
+		req.Tenants[i] = nil
+		return nil
+	}
+	m.ShardProcesses[shardProcessID(req.Tenants[i].Name, command.TenantProcessRequest_ACTION_UNFREEZING)] = process
+	req.Tenants[i].Name = fmt.Sprintf("%s#%s", req.Tenants[i].Name, newToOld[nodeID]) // encode the source node into the shard name for the DB layer
+	req.Tenants[i].Status = p.Status
+	return nil
+}
diff --git a/platform/dbops/binaries/weaviate-src/cluster/schema/mock_parser.go b/platform/dbops/binaries/weaviate-src/cluster/schema/mock_parser.go
new file mode 100644
index 0000000000000000000000000000000000000000..b8ce41e318f6cb93f559f4745937db0941ea5002
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/cluster/schema/mock_parser.go
@@ -0,0 +1,151 @@
+//                           _       _
+// __      _____  __ ___   ___  __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+//  \ V  V /  __/ (_| |\ V /| | (_| | ||  __/
+//   \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+//  Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+//  CONTACT: hello@weaviate.io
+//
+
+// Code generated by mockery v2.53.2. DO NOT EDIT.
+ +package schema + +import ( + mock "github.com/stretchr/testify/mock" + models "github.com/weaviate/weaviate/entities/models" +) + +// MockParser is an autogenerated mock type for the Parser type +type MockParser struct { + mock.Mock +} + +type MockParser_Expecter struct { + mock *mock.Mock +} + +func (_m *MockParser) EXPECT() *MockParser_Expecter { + return &MockParser_Expecter{mock: &_m.Mock} +} + +// ParseClass provides a mock function with given fields: class +func (_m *MockParser) ParseClass(class *models.Class) error { + ret := _m.Called(class) + + if len(ret) == 0 { + panic("no return value specified for ParseClass") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*models.Class) error); ok { + r0 = rf(class) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockParser_ParseClass_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ParseClass' +type MockParser_ParseClass_Call struct { + *mock.Call +} + +// ParseClass is a helper method to define mock.On call +// - class *models.Class +func (_e *MockParser_Expecter) ParseClass(class interface{}) *MockParser_ParseClass_Call { + return &MockParser_ParseClass_Call{Call: _e.mock.On("ParseClass", class)} +} + +func (_c *MockParser_ParseClass_Call) Run(run func(class *models.Class)) *MockParser_ParseClass_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(*models.Class)) + }) + return _c +} + +func (_c *MockParser_ParseClass_Call) Return(_a0 error) *MockParser_ParseClass_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockParser_ParseClass_Call) RunAndReturn(run func(*models.Class) error) *MockParser_ParseClass_Call { + _c.Call.Return(run) + return _c +} + +// ParseClassUpdate provides a mock function with given fields: class, update +func (_m *MockParser) ParseClassUpdate(class *models.Class, update *models.Class) (*models.Class, error) { + ret := _m.Called(class, update) + + if len(ret) == 0 { + panic("no return value specified for 
ParseClassUpdate") + } + + var r0 *models.Class + var r1 error + if rf, ok := ret.Get(0).(func(*models.Class, *models.Class) (*models.Class, error)); ok { + return rf(class, update) + } + if rf, ok := ret.Get(0).(func(*models.Class, *models.Class) *models.Class); ok { + r0 = rf(class, update) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*models.Class) + } + } + + if rf, ok := ret.Get(1).(func(*models.Class, *models.Class) error); ok { + r1 = rf(class, update) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockParser_ParseClassUpdate_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ParseClassUpdate' +type MockParser_ParseClassUpdate_Call struct { + *mock.Call +} + +// ParseClassUpdate is a helper method to define mock.On call +// - class *models.Class +// - update *models.Class +func (_e *MockParser_Expecter) ParseClassUpdate(class interface{}, update interface{}) *MockParser_ParseClassUpdate_Call { + return &MockParser_ParseClassUpdate_Call{Call: _e.mock.On("ParseClassUpdate", class, update)} +} + +func (_c *MockParser_ParseClassUpdate_Call) Run(run func(class *models.Class, update *models.Class)) *MockParser_ParseClassUpdate_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(*models.Class), args[1].(*models.Class)) + }) + return _c +} + +func (_c *MockParser_ParseClassUpdate_Call) Return(_a0 *models.Class, _a1 error) *MockParser_ParseClassUpdate_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockParser_ParseClassUpdate_Call) RunAndReturn(run func(*models.Class, *models.Class) (*models.Class, error)) *MockParser_ParseClassUpdate_Call { + _c.Call.Return(run) + return _c +} + +// NewMockParser creates a new instance of MockParser. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewMockParser(t interface { + mock.TestingT + Cleanup(func()) +}) *MockParser { + mock := &MockParser{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/schema/mock_replication_fsm.go b/platform/dbops/binaries/weaviate-src/cluster/schema/mock_replication_fsm.go new file mode 100644 index 0000000000000000000000000000000000000000..e4bc24beb815102d34c02d72d42c09033b640959 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/schema/mock_replication_fsm.go @@ -0,0 +1,230 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by mockery v2.53.2. DO NOT EDIT. + +package schema + +import mock "github.com/stretchr/testify/mock" + +// MockreplicationFSM is an autogenerated mock type for the replicationFSM type +type MockreplicationFSM struct { + mock.Mock +} + +type MockreplicationFSM_Expecter struct { + mock *mock.Mock +} + +func (_m *MockreplicationFSM) EXPECT() *MockreplicationFSM_Expecter { + return &MockreplicationFSM_Expecter{mock: &_m.Mock} +} + +// DeleteReplicationsByCollection provides a mock function with given fields: collection +func (_m *MockreplicationFSM) DeleteReplicationsByCollection(collection string) error { + ret := _m.Called(collection) + + if len(ret) == 0 { + panic("no return value specified for DeleteReplicationsByCollection") + } + + var r0 error + if rf, ok := ret.Get(0).(func(string) error); ok { + r0 = rf(collection) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockreplicationFSM_DeleteReplicationsByCollection_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteReplicationsByCollection' +type 
MockreplicationFSM_DeleteReplicationsByCollection_Call struct { + *mock.Call +} + +// DeleteReplicationsByCollection is a helper method to define mock.On call +// - collection string +func (_e *MockreplicationFSM_Expecter) DeleteReplicationsByCollection(collection interface{}) *MockreplicationFSM_DeleteReplicationsByCollection_Call { + return &MockreplicationFSM_DeleteReplicationsByCollection_Call{Call: _e.mock.On("DeleteReplicationsByCollection", collection)} +} + +func (_c *MockreplicationFSM_DeleteReplicationsByCollection_Call) Run(run func(collection string)) *MockreplicationFSM_DeleteReplicationsByCollection_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string)) + }) + return _c +} + +func (_c *MockreplicationFSM_DeleteReplicationsByCollection_Call) Return(_a0 error) *MockreplicationFSM_DeleteReplicationsByCollection_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockreplicationFSM_DeleteReplicationsByCollection_Call) RunAndReturn(run func(string) error) *MockreplicationFSM_DeleteReplicationsByCollection_Call { + _c.Call.Return(run) + return _c +} + +// DeleteReplicationsByTenants provides a mock function with given fields: collection, tenants +func (_m *MockreplicationFSM) DeleteReplicationsByTenants(collection string, tenants []string) error { + ret := _m.Called(collection, tenants) + + if len(ret) == 0 { + panic("no return value specified for DeleteReplicationsByTenants") + } + + var r0 error + if rf, ok := ret.Get(0).(func(string, []string) error); ok { + r0 = rf(collection, tenants) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockreplicationFSM_DeleteReplicationsByTenants_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteReplicationsByTenants' +type MockreplicationFSM_DeleteReplicationsByTenants_Call struct { + *mock.Call +} + +// DeleteReplicationsByTenants is a helper method to define mock.On call +// - collection string +// - tenants []string +func (_e 
*MockreplicationFSM_Expecter) DeleteReplicationsByTenants(collection interface{}, tenants interface{}) *MockreplicationFSM_DeleteReplicationsByTenants_Call { + return &MockreplicationFSM_DeleteReplicationsByTenants_Call{Call: _e.mock.On("DeleteReplicationsByTenants", collection, tenants)} +} + +func (_c *MockreplicationFSM_DeleteReplicationsByTenants_Call) Run(run func(collection string, tenants []string)) *MockreplicationFSM_DeleteReplicationsByTenants_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string), args[1].([]string)) + }) + return _c +} + +func (_c *MockreplicationFSM_DeleteReplicationsByTenants_Call) Return(_a0 error) *MockreplicationFSM_DeleteReplicationsByTenants_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockreplicationFSM_DeleteReplicationsByTenants_Call) RunAndReturn(run func(string, []string) error) *MockreplicationFSM_DeleteReplicationsByTenants_Call { + _c.Call.Return(run) + return _c +} + +// HasOngoingReplication provides a mock function with given fields: collection, shard, replica +func (_m *MockreplicationFSM) HasOngoingReplication(collection string, shard string, replica string) bool { + ret := _m.Called(collection, shard, replica) + + if len(ret) == 0 { + panic("no return value specified for HasOngoingReplication") + } + + var r0 bool + if rf, ok := ret.Get(0).(func(string, string, string) bool); ok { + r0 = rf(collection, shard, replica) + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// MockreplicationFSM_HasOngoingReplication_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'HasOngoingReplication' +type MockreplicationFSM_HasOngoingReplication_Call struct { + *mock.Call +} + +// HasOngoingReplication is a helper method to define mock.On call +// - collection string +// - shard string +// - replica string +func (_e *MockreplicationFSM_Expecter) HasOngoingReplication(collection interface{}, shard interface{}, replica interface{}) 
*MockreplicationFSM_HasOngoingReplication_Call { + return &MockreplicationFSM_HasOngoingReplication_Call{Call: _e.mock.On("HasOngoingReplication", collection, shard, replica)} +} + +func (_c *MockreplicationFSM_HasOngoingReplication_Call) Run(run func(collection string, shard string, replica string)) *MockreplicationFSM_HasOngoingReplication_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string), args[1].(string), args[2].(string)) + }) + return _c +} + +func (_c *MockreplicationFSM_HasOngoingReplication_Call) Return(_a0 bool) *MockreplicationFSM_HasOngoingReplication_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockreplicationFSM_HasOngoingReplication_Call) RunAndReturn(run func(string, string, string) bool) *MockreplicationFSM_HasOngoingReplication_Call { + _c.Call.Return(run) + return _c +} + +// SetUnCancellable provides a mock function with given fields: id +func (_m *MockreplicationFSM) SetUnCancellable(id uint64) error { + ret := _m.Called(id) + + if len(ret) == 0 { + panic("no return value specified for SetUnCancellable") + } + + var r0 error + if rf, ok := ret.Get(0).(func(uint64) error); ok { + r0 = rf(id) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockreplicationFSM_SetUnCancellable_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetUnCancellable' +type MockreplicationFSM_SetUnCancellable_Call struct { + *mock.Call +} + +// SetUnCancellable is a helper method to define mock.On call +// - id uint64 +func (_e *MockreplicationFSM_Expecter) SetUnCancellable(id interface{}) *MockreplicationFSM_SetUnCancellable_Call { + return &MockreplicationFSM_SetUnCancellable_Call{Call: _e.mock.On("SetUnCancellable", id)} +} + +func (_c *MockreplicationFSM_SetUnCancellable_Call) Run(run func(id uint64)) *MockreplicationFSM_SetUnCancellable_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(uint64)) + }) + return _c +} + +func (_c *MockreplicationFSM_SetUnCancellable_Call) 
Return(_a0 error) *MockreplicationFSM_SetUnCancellable_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockreplicationFSM_SetUnCancellable_Call) RunAndReturn(run func(uint64) error) *MockreplicationFSM_SetUnCancellable_Call { + _c.Call.Return(run) + return _c +} + +// NewMockreplicationFSM creates a new instance of MockreplicationFSM. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMockreplicationFSM(t interface { + mock.TestingT + Cleanup(func()) +}) *MockreplicationFSM { + mock := &MockreplicationFSM{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/schema/reader.go b/platform/dbops/binaries/weaviate-src/cluster/schema/reader.go new file mode 100644 index 0000000000000000000000000000000000000000..c49e98ac02e6307d6e049fe300781f89dee6ae54 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/schema/reader.go @@ -0,0 +1,218 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package schema + +import ( + "context" + + "github.com/cenkalti/backoff/v4" + "github.com/prometheus/client_golang/prometheus" + "github.com/weaviate/weaviate/cluster/types" + "github.com/weaviate/weaviate/cluster/utils" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/versioned" + "github.com/weaviate/weaviate/usecases/monitoring" + "github.com/weaviate/weaviate/usecases/sharding" +) + +// SchemaReader is used for retrying schema queries. It is a thin wrapper around +// the original schema, separating retry logic from the actual operation. 
+// Retry may be needed due to eventual consistency issues where
+// updates might take some time to arrive at the follower.
+type SchemaReader struct {
+	schema                *schema
+	versionedSchemaReader VersionedSchemaReader
+}
+
+// NewSchemaReader wraps sc with retry logic and the given versioned reader.
+func NewSchemaReader(sc *schema, vsr VersionedSchemaReader) SchemaReader {
+	return SchemaReader{
+		schema:                sc,
+		versionedSchemaReader: vsr,
+	}
+}
+
+// NewSchemaReaderWithoutVersion wraps sc with a no-op version waiter, so versioned reads never block.
+func NewSchemaReaderWithoutVersion(sc *schema) SchemaReader {
+	return SchemaReader{
+		schema: sc,
+		versionedSchemaReader: VersionedSchemaReader{
+			schema:        sc,
+			WaitForUpdate: func(context.Context, uint64) error { return nil },
+		},
+	}
+}
+
+// States returns the per-class states; the local read is timed for metrics.
+func (rs SchemaReader) States() map[string]types.ClassState {
+	t := prometheus.NewTimer(monitoring.GetMetrics().SchemaReadsLocal.WithLabelValues("States"))
+	defer t.ObserveDuration()
+
+	return rs.schema.States()
+}
+
+// ClassInfo returns class metadata without a version guarantee (version 0 = local read).
+func (rs SchemaReader) ClassInfo(class string) (ci ClassInfo) {
+	t := prometheus.NewTimer(monitoring.GetMetrics().SchemaReadsLocal.WithLabelValues("ClassInfo"))
+	defer t.ObserveDuration()
+
+	res, _ := rs.ClassInfoWithVersion(context.TODO(), class, 0)
+	return res
+}
+
+// ClassEqual returns the name of an existing class with a similar name, and "" otherwise
+// strings.EqualFold is used to compare classes
+func (rs SchemaReader) ClassEqual(name string) string {
+	x, _ := rs.schema.ClassEqual(name)
+	return x
+}
+
+// MultiTenancy returns the class's multi-tenancy config without a version guarantee.
+func (rs SchemaReader) MultiTenancy(class string) models.MultiTenancyConfig {
+	t := prometheus.NewTimer(monitoring.GetMetrics().SchemaReadsLocal.WithLabelValues("MultiTenancy"))
+	defer t.ObserveDuration()
+	res, _ := rs.MultiTenancyWithVersion(context.TODO(), class, 0)
+	return res
+}
+
+// Read performs a read operation `reader` on the specified class and sharding state
+func (rs SchemaReader) Read(class string, reader func(*models.Class, *sharding.State) error) error {
+	t := prometheus.NewTimer(monitoring.GetMetrics().SchemaReadsLocal.WithLabelValues("Read"))
+	defer t.ObserveDuration()
+
+	return rs.retry(func(s *schema) error {
+		return s.Read(class, reader)
+	})
+}
+
+// Shards lists all physical shard names of the class.
+func (rs SchemaReader) Shards(class string) ([]string, error) {
+	var shards []string
+	err := rs.Read(class, func(class *models.Class, state *sharding.State) error {
+		shards = state.AllPhysicalShards()
+		return nil
+	})
+
+	return shards, err
+}
+
+// LocalShards lists the physical shards of the class that live on this node.
+func (rs SchemaReader) LocalShards(class string) ([]string, error) {
+	var shards []string
+	err := rs.Read(class, func(class *models.Class, state *sharding.State) error {
+		shards = state.AllLocalPhysicalShards()
+		return nil
+	})
+	return shards, err
+}
+
+// ReadOnlyClass returns a shallow copy of a class.
+// The copy is read-only and should not be modified.
+func (rs SchemaReader) ReadOnlyClass(class string) (cls *models.Class) {
+	t := prometheus.NewTimer(monitoring.GetMetrics().SchemaReadsLocal.WithLabelValues("ReadOnlyClass"))
+	defer t.ObserveDuration()
+
+	res, _ := rs.ReadOnlyClassWithVersion(context.TODO(), class, 0)
+	return res
+}
+
+// GetAliasesForClass returns all aliases that point at the given class.
+func (rs SchemaReader) GetAliasesForClass(class string) []*models.Alias {
+	return rs.schema.GetAliasesForClass(class)
+}
+
+// ReadOnlyVersionedClass returns a shallow copy of a class along with its version.
+// The copy is read-only and should not be modified.
+func (rs SchemaReader) ReadOnlyVersionedClass(className string) versioned.Class {
+	class, version := rs.schema.ReadOnlyClass(className)
+	return versioned.Class{
+		Class:   class,
+		Version: version,
+	}
+}
+
+// metaClass resolves the class's metaClass with retries; meta stays nil when it never appears.
+func (rs SchemaReader) metaClass(class string) (meta *metaClass) {
+	rs.retry(func(s *schema) error { // retry error deliberately ignored: callers handle a nil result
+		if meta = s.metaClass(class); meta == nil {
+			return ErrClassNotFound
+		}
+		return nil
+	})
+	return
+}
+
+// ReadOnlySchema returns a read only schema
+// Changing the schema outside this package might lead to undefined behavior.
+//
+// it creates a shallow copy of existing classes
+//
+// This function assumes that class attributes are being overwritten.
+// The properties attribute is the only one that might vary in size;
+// therefore, we perform a shallow copy of the existing properties.
+// This implementation assumes that individual properties are overwritten rather than partially updated
+func (rs SchemaReader) ReadOnlySchema() models.Schema {
+	t := prometheus.NewTimer(monitoring.GetMetrics().SchemaReadsLocal.WithLabelValues("ReadOnlySchema"))
+	defer t.ObserveDuration()
+	return rs.schema.ReadOnlySchema()
+}
+
+// ResolveAlias maps an alias to its class name.
+func (rs SchemaReader) ResolveAlias(alias string) string {
+	t := prometheus.NewTimer(monitoring.GetMetrics().SchemaReadsLocal.WithLabelValues("ResolveAlias"))
+	defer t.ObserveDuration()
+	return rs.schema.ResolveAlias(alias)
+}
+
+// Aliases returns the full alias-to-class mapping.
+func (rs SchemaReader) Aliases() map[string]string {
+	return rs.schema.getAliases("", "")
+}
+
+// ShardOwner returns the node owner of the specified shard
+func (rs SchemaReader) ShardOwner(class, shard string) (owner string, err error) {
+	t := prometheus.NewTimer(monitoring.GetMetrics().SchemaReadsLocal.WithLabelValues("ShardOwner"))
+	defer t.ObserveDuration()
+
+	res, err := rs.ShardOwnerWithVersion(context.TODO(), class, shard, 0)
+	return res, err
+}
+
+// ShardFromUUID returns shard name of the provided uuid
+func (rs SchemaReader) ShardFromUUID(class string, uuid []byte) (shard string) {
+	t := prometheus.NewTimer(monitoring.GetMetrics().SchemaReadsLocal.WithLabelValues("ShardFromUUID"))
+	defer t.ObserveDuration()
+
+	res, _ := rs.ShardFromUUIDWithVersion(context.TODO(), class, uuid, 0)
+	return res
+}
+
+// ShardReplicas returns the replica nodes of a shard
+func (rs SchemaReader) ShardReplicas(class, shard string) (nodes []string, err error) {
+	t := prometheus.NewTimer(monitoring.GetMetrics().SchemaReadsLocal.WithLabelValues("ShardReplicas"))
+	defer t.ObserveDuration()
+
+	res, err := rs.ShardReplicasWithVersion(context.TODO(), class, shard, 0)
+	return res, err
+}
+
+// TenantsShards returns shard name for the provided tenant and its activity status
+func (rs SchemaReader) TenantsShards(class string, tenants ...string) (map[string]string, error) {
+	t := prometheus.NewTimer(monitoring.GetMetrics().SchemaReadsLocal.WithLabelValues("TenantsShards"))
+	defer t.ObserveDuration()
+
+	return rs.TenantsShardsWithVersion(context.TODO(), 0, class, tenants...)
+}
+
+// GetShardsStatus returns the status list for the class's shards.
+func (rs SchemaReader) GetShardsStatus(class, tenant string) (models.ShardStatusList, error) {
+	t := prometheus.NewTimer(monitoring.GetMetrics().SchemaReadsLocal.WithLabelValues("GetShardsStatus"))
+	defer t.ObserveDuration()
+
+	return rs.schema.GetShardsStatus(class, tenant)
+}
+
+// Len returns the number of classes in the schema.
+func (rs SchemaReader) Len() int { return rs.schema.len() }
+
+// retry runs f against the schema with backoff, absorbing transient eventual-consistency misses.
+func (rs SchemaReader) retry(f func(*schema) error) error {
+	return backoff.Retry(func() error {
+		return f(rs.schema)
+	}, utils.NewBackoff())
+}
diff --git a/platform/dbops/binaries/weaviate-src/cluster/schema/reader_with_version.go b/platform/dbops/binaries/weaviate-src/cluster/schema/reader_with_version.go
new file mode 100644
index 0000000000000000000000000000000000000000..4677f2a7cfe98dcff7753c924998fdf2d3c0f34f
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/cluster/schema/reader_with_version.go
@@ -0,0 +1,116 @@
+//                           _       _
+// __      _____  __ ___   ___  __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+//  \ V  V /  __/ (_| |\ V /| | (_| | ||  __/
+//   \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+//  Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+//  CONTACT: hello@weaviate.io
+//
+
+package schema
+
+import (
+	"context"
+
+	"github.com/weaviate/weaviate/entities/models"
+)
+
+// WaitForUpdate blocks until the local schema has caught up to version; version 0 means no wait.
+func (rs SchemaReader) WaitForUpdate(ctx context.Context, version uint64) error {
+	if version > 0 {
+		return rs.versionedSchemaReader.WaitForUpdate(ctx, version)
+	}
+	return nil
+}
+
+// ClassInfoWithVersion reads class info at least as fresh as version; version 0 reads local state with retries.
+func (rs SchemaReader) ClassInfoWithVersion(ctx context.Context, class string, version uint64) (ci ClassInfo, err error) {
+	if version > 0 {
+		return rs.versionedSchemaReader.ClassInfo(ctx, class, version)
+	}
+	rs.retry(func(s *schema) error { // retry error deliberately ignored: a zero ClassInfo is returned
+		ci = s.ClassInfo(class)
+		if !ci.Exists {
+			return ErrClassNotFound
+		}
+		return nil
+	})
+	return ci, nil
+}
+
+// MultiTenancyWithVersion reads the multi-tenancy config at least as fresh as version (0 = local read).
+func (rs SchemaReader) MultiTenancyWithVersion(ctx context.Context, class string, version uint64) (models.MultiTenancyConfig, error) {
+	if version > 0 {
+		return rs.versionedSchemaReader.MultiTenancy(ctx, class, version)
+	}
+	mc, _ := rs.metaClass(class).MultiTenancyConfig()
+	return mc, nil
+}
+
+// ReadOnlyClass returns a shallow copy of a class.
+// The copy is read-only and should not be modified.
+func (rs SchemaReader) ReadOnlyClassWithVersion(ctx context.Context, class string, version uint64) (cls *models.Class, err error) {
+	if version > 0 {
+		return rs.versionedSchemaReader.ReadOnlyClass(ctx, class, version)
+	}
+	rs.retry(func(s *schema) error { // retry error deliberately ignored: cls stays nil on timeout
+		if cls, _ = s.ReadOnlyClass(class); cls == nil {
+			return ErrClassNotFound
+		}
+		return nil
+	})
+	return cls, nil
+}
+
+// ShardOwner returns the node owner of the specified shard
+func (rs SchemaReader) ShardOwnerWithVersion(ctx context.Context, class, shard string, version uint64) (owner string, err error) {
+	if version > 0 {
+		return rs.versionedSchemaReader.ShardOwner(ctx, class, shard, version)
+	}
+	err = rs.retry(func(s *schema) error {
+		owner, _, err = s.ShardOwner(class, shard)
+		return err
+	})
+	return
+}
+
+// ShardFromUUID returns shard name of the provided uuid
+func (rs SchemaReader) ShardFromUUIDWithVersion(ctx context.Context, class string, uuid []byte, version uint64) (shard string, err error) {
+	if version > 0 {
+		return rs.versionedSchemaReader.ShardFromUUID(ctx, class, uuid, version)
+	}
+	rs.retry(func(s *schema) error { // retry error deliberately ignored: "" is returned when never found
+		if shard, _ = s.ShardFromUUID(class, uuid); shard == "" {
+			return ErrClassNotFound
+		}
+		return nil
+	})
+	return
+}
+
+// ShardReplicas returns the replica nodes of a shard
+func (rs SchemaReader) ShardReplicasWithVersion(ctx context.Context, class, shard string, version uint64) (nodes []string, err error) {
+	if version > 0 {
+		return rs.versionedSchemaReader.ShardReplicas(ctx, class, shard, version)
+	}
+	rs.retry(func(s *schema) error {
+		nodes, _, err = s.ShardReplicas(class, shard)
+		return err
+	})
+	return
+}
+
+// TenantsShardsWithVersion returns shard name for the provided tenant and its activity status
+func (rs SchemaReader) TenantsShardsWithVersion(ctx context.Context, version uint64, class string, tenants ...string) (tenantShards map[string]string, err error) {
+	if version > 0 {
+		status, _, err := rs.versionedSchemaReader.TenantsShards(ctx, version, class, tenants...)
+		return status, err
+	}
+	rs.retry(func(s *schema) error { // retry error deliberately ignored: a nil map is returned
+		if tenantShards, _ = s.TenantsShards(class, tenants...); len(tenantShards) == 0 {
+			return ErrShardNotFound
+		}
+		return nil
+	})
+
+	return
+}
diff --git a/platform/dbops/binaries/weaviate-src/cluster/schema/schema.go b/platform/dbops/binaries/weaviate-src/cluster/schema/schema.go
new file mode 100644
index 0000000000000000000000000000000000000000..422f8901557b15eb8c9a2b7e2d3a224588d7e93d
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/cluster/schema/schema.go
@@ -0,0 +1,768 @@
+//                           _       _
+// __      _____  __ ___   ___  __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+//  \ V  V /  __/ (_| |\ V /| | (_| | ||  __/
+//   \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+//  Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+//  CONTACT: hello@weaviate.io
+//
+
+package schema
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"maps"
+	"strings"
+	"sync"
+
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/promauto"
+	command "github.com/weaviate/weaviate/cluster/proto/api"
+	"github.com/weaviate/weaviate/cluster/types"
+	"github.com/weaviate/weaviate/entities/models"
+	entSchema "github.com/weaviate/weaviate/entities/schema"
+	"github.com/weaviate/weaviate/entities/versioned"
+	"github.com/weaviate/weaviate/usecases/sharding"
+)
+
+var (
+	ErrClassExists   = errors.New("class already exists")
+	ErrClassNotFound = errors.New("class not found")
+	ErrShardNotFound = errors.New("shard not found")
+	ErrAliasExists   = errors.New("alias already exists")
+	ErrAliasNotFound = errors.New("alias not found")
+	ErrMTDisabled    = errors.New("multi-tenancy is not enabled")
+)
+
+// ClassInfo is a snapshot of a class's bookkeeping data.
+type ClassInfo struct {
+	Exists            bool
+	MultiTenancy      models.MultiTenancyConfig
+	ReplicationFactor int
+	Tenants           int
+	Properties        int
+	ClassVersion      uint64
+	ShardVersion      uint64
+}
+
+// Version returns the newer of the class and shard versions.
+func (ci *ClassInfo) Version() uint64 {
+	return max(ci.ClassVersion,
ci.ShardVersion)
+}
+
+// schema is the node-local, in-memory view of the replicated schema.
+type schema struct {
+	nodeID      string
+	shardReader shardReader
+
+	// mu protects the `classes`
+	mu      sync.RWMutex
+	classes map[string]*metaClass
+	aliases map[string]string // key: canonical form all in TitleCase.
+
+	// metrics
+	// collectionsCount represents the number of collections on this specific node.
+	collectionsCount prometheus.Gauge
+
+	// shardsCount represents the number of shards (of all collections) on this specific node.
+	shardsCount *prometheus.GaugeVec
+}
+
+// NewSchema builds an empty schema and registers its metrics with reg.
+func NewSchema(nodeID string, shardReader shardReader, reg prometheus.Registerer) *schema {
+	// this also registers the prometheus metrics with given `reg` in addition to just creating it.
+	r := promauto.With(reg)
+
+	s := &schema{
+		nodeID:      nodeID,
+		classes:     make(map[string]*metaClass, 128),
+		aliases:     make(map[string]string, 128),
+		shardReader: shardReader,
+		collectionsCount: r.NewGauge(prometheus.GaugeOpts{
+			Namespace:   "weaviate",
+			Name:        "schema_collections",
+			Help:        "Number of collections per node",
+			ConstLabels: prometheus.Labels{"nodeID": nodeID},
+		}),
+		shardsCount: r.NewGaugeVec(prometheus.GaugeOpts{
+			Namespace:   "weaviate",
+			Name:        "schema_shards",
+			Help:        "Number of shards per node with corresponding status",
+			ConstLabels: prometheus.Labels{"nodeID": nodeID},
+		}, []string{"status"}), // status: HOT, WARM, COLD, FROZEN
+	}
+
+	return s
+}
+
+// ClassInfo returns a snapshot of the class's bookkeeping data (zero value when the class is unknown).
+func (s *schema) ClassInfo(class string) ClassInfo {
+	s.mu.RLock()
+	defer s.mu.RUnlock()
+
+	cl, ok := s.classes[class]
+	if !ok {
+		return ClassInfo{}
+	}
+	return cl.ClassInfo()
+}
+
+// ClassEqual returns the name of an existing class with a similar name, and "" otherwise
+// strings.EqualFold is used to compare classes
+// the additional bool is true when the match is an alias
+func (s *schema) ClassEqual(name string) (string, bool) {
+	s.mu.RLock()
+	defer s.mu.RUnlock()
+	return s.unsafeClassEqual(name)
+}
+
+// unsafeClassEqual implements ClassEqual; the caller must hold s.mu.
+func (s *schema) unsafeClassEqual(name string) (string, bool) {
+	for alias := range s.aliases { // aliases are checked before class names
+		if strings.EqualFold(alias, name) {
+			return alias, true
+		}
+	}
+	for k := range s.classes {
+		if strings.EqualFold(k, name) {
+			return k, false
+		}
+	}
+	return "", false
+}
+
+// MultiTenancy returns the class's multi-tenancy config (zero value when the class is unknown).
+func (s *schema) MultiTenancy(class string) models.MultiTenancyConfig {
+	mtc, _ := s.metaClass(class).MultiTenancyConfig()
+	return mtc
+}
+
+// Read performs a read operation `reader` on the specified class and sharding state
+func (s *schema) Read(class string, reader func(*models.Class, *sharding.State) error) error {
+	meta := s.metaClass(class)
+	if meta == nil {
+		return ErrClassNotFound
+	}
+	return meta.RLockGuard(reader)
+}
+
+// metaClass returns the stored metaClass (resolving aliases) or nil.
+func (s *schema) metaClass(class string) *metaClass {
+	s.mu.RLock()
+	defer s.mu.RUnlock()
+	return s.unsafeResolveClass(class)
+}
+
+// ReadOnlyClass returns a shallow copy of a class.
+// The copy is read-only and should not be modified.
+func (s *schema) ReadOnlyClass(class string) (*models.Class, uint64) {
+	s.mu.RLock()
+	defer s.mu.RUnlock()
+	return s.unsafeReadOnlyClass(class)
+}
+
+// unsafeReadOnlyClass implements ReadOnlyClass; the caller must hold s.mu.
+func (s *schema) unsafeReadOnlyClass(class string) (*models.Class, uint64) {
+	meta := s.unsafeResolveClass(class)
+	if meta == nil {
+		return nil, 0
+	}
+	return meta.CloneClass(), meta.ClassVersion
+}
+
+// ReadOnlyClasses returns shallow copies of the requested classes, keyed by name.
+// The copies are read-only and should not be modified.
+func (s *schema) ReadOnlyClasses(classes ...string) map[string]versioned.Class {
+	if len(classes) == 0 {
+		return nil
+	}
+
+	vclasses := make(map[string]versioned.Class, len(classes))
+	s.mu.RLock()
+	defer s.mu.RUnlock()
+
+	for _, class := range classes {
+		meta := s.unsafeResolveClass(class)
+		if meta == nil {
+			continue // unknown classes are silently omitted from the result
+		}
+		vclasses[class] = versioned.Class{Class: meta.CloneClass(), Version: meta.ClassVersion}
+	}
+
+	return vclasses
+}
+
+// ReadOnlySchema returns a read only schema
+// Changing the schema outside this package might lead to undefined behavior.
+// +// it creates a shallow copy of existing classes +// +// This function assumes that class attributes are being overwritten. +// The properties attribute is the only one that might vary in size; +// therefore, we perform a shallow copy of the existing properties. +// This implementation assumes that individual properties are overwritten rather than partially updated +func (s *schema) ReadOnlySchema() models.Schema { + cp := models.Schema{} + s.mu.RLock() + defer s.mu.RUnlock() + + cp.Classes = make([]*models.Class, len(s.classes)) + i := 0 + for _, meta := range s.classes { + cp.Classes[i] = meta.CloneClass() + i++ + } + + return cp +} + +func (s *schema) CollectionsCount() int { + s.mu.RLock() + defer s.mu.RUnlock() + + return len(s.classes) +} + +// ShardOwner returns the node owner of the specified shard +func (s *schema) ShardOwner(class, shard string) (string, uint64, error) { + meta := s.metaClass(class) + if meta == nil { + return "", 0, ErrClassNotFound + } + + return meta.ShardOwner(shard) +} + +// ShardFromUUID returns shard name of the provided uuid +func (s *schema) ShardFromUUID(class string, uuid []byte) (string, uint64) { + meta := s.metaClass(class) + if meta == nil { + return "", 0 + } + return meta.ShardFromUUID(uuid) +} + +// ShardReplicas returns the replica nodes of a shard +func (s *schema) ShardReplicas(class, shard string) ([]string, uint64, error) { + meta := s.metaClass(class) + if meta == nil { + return nil, 0, ErrClassNotFound + } + return meta.ShardReplicas(shard) +} + +// TenantsShards returns shard name for the provided tenant and its activity status +func (s *schema) TenantsShards(class string, tenants ...string) (map[string]string, uint64) { + s.mu.RLock() + defer s.mu.RUnlock() + + meta := s.unsafeResolveClass(class) + if meta == nil { + return nil, 0 + } + + return meta.TenantsShards(class, tenants...) 
+} + +func (s *schema) CopyShardingState(class string) (*sharding.State, uint64) { + meta := s.metaClass(class) + if meta == nil { + return nil, 0 + } + shardingState := meta.Sharding.DeepCopy() + + return &shardingState, meta.version() +} + +func (s *schema) GetShardsStatus(class, tenant string) (models.ShardStatusList, error) { + return s.shardReader.GetShardsStatus(class, tenant) +} + +type shardReader interface { + GetShardsStatus(class, tenant string) (models.ShardStatusList, error) +} + +func (s *schema) len() int { + s.mu.RLock() + defer s.mu.RUnlock() + + return len(s.classes) +} + +func (s *schema) multiTenancyEnabled(class string) (bool, *metaClass, ClassInfo, error) { + s.mu.RLock() + defer s.mu.RUnlock() + + meta := s.unsafeResolveClass(class) + if meta == nil { + return false, nil, ClassInfo{}, ErrClassNotFound + } + info := s.unsafeResolveClass(class).ClassInfo() + if !info.MultiTenancy.Enabled { + return false, nil, ClassInfo{}, fmt.Errorf("%w for class %q", ErrMTDisabled, class) + } + return true, meta, info, nil +} + +func (s *schema) addClass(cls *models.Class, ss *sharding.State, v uint64) error { + s.mu.Lock() + defer s.mu.Unlock() + + _, exists := s.classes[cls.Class] + if exists { + return ErrClassExists + } + + s.classes[cls.Class] = &metaClass{ + Class: *cls, Sharding: *ss, ClassVersion: v, ShardVersion: v, + } + + s.collectionsCount.Inc() + + for _, shard := range ss.Physical { + s.shardsCount.WithLabelValues(shard.Status).Inc() + } + + return nil +} + +// updateClass modifies existing class based on the givin update function +func (s *schema) updateClass(name string, f func(*metaClass) error) error { + s.mu.Lock() + defer s.mu.Unlock() + + meta := s.unsafeResolveClass(name) + if meta == nil { + return ErrClassNotFound + } + return meta.LockGuard(f) +} + +func (s *schema) deleteClass(name string) bool { + s.mu.Lock() + defer s.mu.Unlock() + + // since `delete(map, key)` is no-op if `key` doesn't exist, check before deleting + // so that we 
can increment the `collectionsCount` correctly. + class, ok := s.classes[name] + if !ok { + return false + } + + // sc tracks number of shards in this collection to be deleted by status. + sc := make(map[string]int) + + // need to decrement shards count on this class. + for _, shard := range class.Sharding.Physical { + sc[shard.Status]++ + } + + delete(s.classes, name) + + s.collectionsCount.Dec() + for status, count := range sc { + s.shardsCount.WithLabelValues(status).Sub(float64(count)) + } + + return true +} + +// replaceClasses replaces the existing `schema.Classes` with given `classes` +// mainly used in cases like restoring the whole schema from backup or something. +func (s *schema) replaceClasses(classes map[string]*metaClass) { + s.mu.Lock() + defer s.mu.Unlock() + + s.collectionsCount.Sub(float64(len(s.classes))) + for _, ss := range s.classes { + for _, shard := range ss.Sharding.Physical { + s.shardsCount.WithLabelValues(shard.Status).Dec() + } + } + + s.classes = classes + + s.collectionsCount.Add(float64(len(s.classes))) + + for _, ss := range s.classes { + for _, shard := range ss.Sharding.Physical { + s.shardsCount.WithLabelValues(shard.Status).Inc() + } + } +} + +// replaceStatesNodeName it update the node name inside sharding states. +// WARNING: this shall be used in one node cluster environments only. 
+// because it will replace the shard node name if the node name got updated +// only if the replication factor is 1, otherwise it's no-op +func (s *schema) replaceStatesNodeName(new string) { + s.mu.Lock() + defer s.mu.Unlock() + + for _, meta := range s.classes { + meta.LockGuard(func(mc *metaClass) error { + if meta.Class.ReplicationConfig.Factor > 1 { + return nil + } + + for idx := range meta.Sharding.Physical { + cp := meta.Sharding.Physical[idx].DeepCopy() + cp.BelongsToNodes = []string{new} + meta.Sharding.Physical[idx] = cp + } + return nil + }) + } +} + +func (s *schema) addProperty(class string, v uint64, props ...*models.Property) error { + s.mu.Lock() + defer s.mu.Unlock() + + meta := s.unsafeResolveClass(class) + if meta == nil { + return ErrClassNotFound + } + return meta.AddProperty(v, props...) +} + +func (s *schema) addReplicaToShard(class string, v uint64, shard string, replica string) error { + s.mu.Lock() + defer s.mu.Unlock() + meta := s.unsafeResolveClass(class) + if meta == nil { + return ErrClassNotFound + } + return meta.AddReplicaToShard(v, shard, replica) +} + +func (s *schema) deleteReplicaFromShard(class string, v uint64, shard string, replica string) error { + s.mu.Lock() + defer s.mu.Unlock() + meta := s.unsafeResolveClass(class) + if meta == nil { + return ErrClassNotFound + } + return meta.DeleteReplicaFromShard(v, shard, replica) +} + +func (s *schema) addTenants(class string, v uint64, req *command.AddTenantsRequest) error { + req.Tenants = removeNilTenants(req.Tenants) + + ok, meta, info, err := s.multiTenancyEnabled(class) + if !ok { + return err + } + + sc, err := meta.AddTenants(s.nodeID, req, int64(info.ReplicationFactor), v) + if err != nil { + return err + } + for status, count := range sc { + s.shardsCount.WithLabelValues(status).Add(float64(count)) + } + + return nil +} + +func (s *schema) deleteTenants(class string, v uint64, req *command.DeleteTenantsRequest) error { + ok, meta, _, err := s.multiTenancyEnabled(class) + 
if !ok { + return err + } + sc, err := meta.DeleteTenants(req, v) + if err != nil { + return err + } + + for status, count := range sc { + s.shardsCount.WithLabelValues(status).Sub(float64(count)) + } + + return nil +} + +func (s *schema) updateTenants(class string, v uint64, req *command.UpdateTenantsRequest, replicationFSM replicationFSM) error { + ok, meta, _, err := s.multiTenancyEnabled(class) + if !ok { + return err + } + sc, err := meta.UpdateTenants(s.nodeID, req, replicationFSM, v) + // partial update possible + for status, count := range sc { + // count can be positive or negative. + s.shardsCount.WithLabelValues(status).Add(float64(count)) + } + + return err +} + +func (s *schema) updateTenantsProcess(class string, v uint64, req *command.TenantProcessRequest) error { + ok, meta, _, err := s.multiTenancyEnabled(class) + if !ok { + return err + } + + sc, err := meta.UpdateTenantsProcess(s.nodeID, req, v) + // partial update possible + for status, count := range sc { + // count can be positive or negative. 
+ s.shardsCount.WithLabelValues(status).Add(float64(count)) + } + + return err +} + +func (s *schema) getTenants(class string, tenants []string) ([]*models.Tenant, error) { + ok, meta, _, err := s.multiTenancyEnabled(class) + if !ok { + return nil, err + } + + // Read tenants using the meta lock guard + var res []*models.Tenant + f := func(_ *models.Class, ss *sharding.State) error { + if len(tenants) == 0 { + res = make([]*models.Tenant, len(ss.Physical)) + i := 0 + for tenantName, physical := range ss.Physical { + // Ensure we copy the belongs to nodes array to avoid it being modified + cpy := make([]string, len(physical.BelongsToNodes)) + copy(cpy, physical.BelongsToNodes) + + res[i] = &models.Tenant{ + Name: tenantName, + ActivityStatus: entSchema.ActivityStatus(physical.Status), + } + + // Increment our result iterator + i++ + } + } else { + res = make([]*models.Tenant, 0, len(tenants)) + for _, tenantName := range tenants { + if physical, ok := ss.Physical[tenantName]; ok { + // Ensure we copy the belongs to nodes array to avoid it being modified + cpy := make([]string, len(physical.BelongsToNodes)) + copy(cpy, physical.BelongsToNodes) + res = append(res, &models.Tenant{ + Name: tenantName, + ActivityStatus: entSchema.ActivityStatus(physical.Status), + }) + } + } + } + return nil + } + return res, meta.RLockGuard(f) +} + +func (s *schema) States() map[string]types.ClassState { + s.mu.RLock() + defer s.mu.RUnlock() + + cs := make(map[string]types.ClassState, len(s.classes)) + for _, c := range s.classes { + cs[c.Class.Class] = types.ClassState{ + Class: c.Class, + Shards: c.Sharding, + } + } + + return cs +} + +// MetaClasses is thread-safe and returns a deep copy of the meta classes and sharding states +func (s *schema) MetaClasses() map[string]*metaClass { + s.mu.RLock() + defer s.mu.RUnlock() + + classesCopy := make(map[string]*metaClass, len(s.classes)) + for k, v := range s.classes { + v.RLock() + classesCopy[k] = &metaClass{ + Class: v.Class, + 
ClassVersion: v.ClassVersion, + Sharding: v.Sharding.DeepCopy(), + ShardVersion: v.ShardVersion, + } + v.RUnlock() + } + + return classesCopy +} + +func (s *schema) Restore(data []byte, parser Parser) error { + var classes map[string]*metaClass + if err := json.Unmarshal(data, &classes); err != nil { + return fmt.Errorf("restore snapshot: decode json: %w", err) + } + + if classes == nil { + classes = make(map[string]*metaClass) + } + + return s.restore(classes, parser) +} + +func (s *schema) RestoreLegacy(data []byte, parser Parser) error { + snap := snapshot{} + if err := json.Unmarshal(data, &snap); err != nil { + return fmt.Errorf("restore snapshot: decode json: %w", err) + } + + if snap.Classes == nil { + snap.Classes = make(map[string]*metaClass) + } + + return s.restore(snap.Classes, parser) +} + +func (s *schema) restore(classes map[string]*metaClass, parser Parser) error { + for _, cls := range classes { + if err := parser.ParseClass(&cls.Class); err != nil { // should not fail + return fmt.Errorf("parsing class %q: %w", cls.Class.Class, err) // schema might be corrupted + } + cls.Sharding.SetLocalName(s.nodeID) + } + s.replaceClasses(classes) + return nil +} + +func (s *schema) RestoreAlias(data []byte) error { + s.mu.Lock() + defer s.mu.Unlock() + + s.aliases = make(map[string]string) + if err := json.Unmarshal(data, &s.aliases); err != nil { + return fmt.Errorf("restore alias: parse json: %w", err) + } + return nil +} + +func (s *schema) createAlias(class, alias string) error { + alias = s.canonicalAlias(alias) + + s.mu.Lock() + defer s.mu.Unlock() + + if s.unsafeAliasExists(alias) { + return fmt.Errorf("create alias: %s, %w", alias, ErrAliasExists) + } + if cls, _ := s.unsafeReadOnlyClass(class); cls == nil { + return fmt.Errorf("create alias: %s, %w, %s", alias, ErrClassNotFound, class) + } + // trying to check if any class exists with passed 'alias' name + other, isAlias := s.unsafeClassEqual(alias) + item := "class" + if isAlias { + item = "alias" + 
} + + if other == alias { + return fmt.Errorf("create alias: %s %s already exists", item, alias) + } + s.aliases[alias] = class + return nil +} + +func (s *schema) replaceAlias(newClass, alias string) error { + s.mu.Lock() + defer s.mu.Unlock() + + if s.classes[newClass] == nil { + return fmt.Errorf("replace alias: class %s does not exist", newClass) + } + if !s.unsafeAliasExists(alias) { + return fmt.Errorf("replace alias: alias %s does not exist", alias) + } + s.aliases[alias] = newClass + return nil +} + +// unsafeAliasExists is not concurrency-safe! Lock s.aliases before calling +func (s *schema) unsafeAliasExists(alias string) bool { + for v := range s.aliases { + if strings.EqualFold(v, alias) { + return true + } + } + return false +} + +func (s *schema) canonicalAlias(alias string) string { + if len(alias) < 1 { + return alias + } + + if len(alias) == 1 { + return strings.ToUpper(alias) + } + + return strings.ToUpper(string(alias[0])) + alias[1:] +} + +func (s *schema) GetAliasesForClass(class string) []*models.Alias { + s.mu.RLock() + defer s.mu.RUnlock() + + res := make([]*models.Alias, 0) + if class == "" { + return res + } + for alias, className := range s.aliases { + if className == class { + res = append(res, &models.Alias{ + Alias: alias, + Class: className, + }) + } + } + return res +} + +func (s *schema) getAliases(alias, class string) map[string]string { + s.mu.RLock() + defer s.mu.RUnlock() + if alias != "" { + if className, ok := s.aliases[alias]; ok { + return map[string]string{alias: className} + } + } + if class != "" { + aliases := make(map[string]string) + for aliasName, className := range s.aliases { + if className == class { + aliases[aliasName] = className + } + } + return aliases + } + + // asked for all aliases. + if alias == "" && class == "" { + return maps.Clone(s.aliases) + } + // if asked for spefic class or alias return nil, meaning not found. 
+ return nil +} + +func (s *schema) ResolveAlias(alias string) string { + alias = s.canonicalAlias(alias) + s.mu.RLock() + defer s.mu.RUnlock() + return s.unsafeResolveAlias(alias) +} + +func (s *schema) unsafeResolveAlias(alias string) string { + return s.aliases[alias] +} + +func (s *schema) deleteAlias(alias string) error { + alias = s.canonicalAlias(alias) + + s.mu.Lock() + defer s.mu.Unlock() + delete(s.aliases, alias) + // purposefully idempotent + return nil +} + +func (s *schema) unsafeResolveClass(class string) *metaClass { + return s.classes[class] +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/schema/schema_test.go b/platform/dbops/binaries/weaviate-src/cluster/schema/schema_test.go new file mode 100644 index 0000000000000000000000000000000000000000..e9f7226034f7203b0c40ddb8b22b6133e6466156 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/schema/schema_test.go @@ -0,0 +1,558 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package schema + +import ( + "encoding/json" + "fmt" + "testing" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/cluster/proto/api" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/sharding" +) + +func TestCollectionNameConflictWithAlias(t *testing.T) { + var ( + sc = NewSchema(t.Name(), nil, prometheus.NewPedanticRegistry()) + ss = &sharding.State{Physical: make(map[string]sharding.Physical)} + ) + + require.Nil(t, sc.addClass(&models.Class{Class: "CoolCar"}, ss, 1)) + + err := sc.createAlias("CoolCar", "MyCar") + require.NoError(t, err) + + // checking to see if class exists should consider the existing alias as well + got, isAlias := sc.ClassEqual("MyCar") + assert.NotEmpty(t, got) + assert.True(t, isAlias) +} + +func Test_schemaCollectionMetrics(t *testing.T) { + r := prometheus.NewPedanticRegistry() + + s := NewSchema("testNode", nil, r) + ss := &sharding.State{} + + c1 := &models.Class{ + Class: "collection1", + MultiTenancyConfig: &models.MultiTenancyConfig{ + Enabled: true, + }, + ReplicationConfig: &models.ReplicationConfig{ + Factor: 1, + }, + } + c2 := &models.Class{ + Class: "collection2", + MultiTenancyConfig: &models.MultiTenancyConfig{ + Enabled: true, + }, + ReplicationConfig: &models.ReplicationConfig{ + Factor: 1, + }, + } + + // Collection metrics + assert.Equal(t, float64(0), testutil.ToFloat64(s.collectionsCount)) + require.NoError(t, s.addClass(c1, ss, 0)) // adding c1 collection + assert.Equal(t, float64(1), testutil.ToFloat64(s.collectionsCount)) + + require.NoError(t, s.addClass(c2, ss, 0)) // adding c2 collection + assert.Equal(t, float64(2), testutil.ToFloat64(s.collectionsCount)) + + // delete c2 + s.deleteClass("collection2") + assert.Equal(t, 
float64(1), testutil.ToFloat64(s.collectionsCount)) + + // delete c1 + s.deleteClass("collection1") + assert.Equal(t, float64(0), testutil.ToFloat64(s.collectionsCount)) +} + +func Test_schemaShardMetrics(t *testing.T) { + r := prometheus.NewPedanticRegistry() + + s := NewSchema("testNode", nil, r) + ss := &sharding.State{} + + c1 := &models.Class{ + Class: "collection1", + MultiTenancyConfig: &models.MultiTenancyConfig{ + Enabled: true, + }, + ReplicationConfig: &models.ReplicationConfig{ + Factor: 1, + }, + } + c2 := &models.Class{ + Class: "collection2", + MultiTenancyConfig: &models.MultiTenancyConfig{ + Enabled: true, + }, + ReplicationConfig: &models.ReplicationConfig{ + Factor: 1, + }, + } + + require.NoError(t, s.addClass(c1, ss, 0)) // adding c1 collection + require.NoError(t, s.addClass(c2, ss, 0)) // adding c2 collection + + // Shard metrics + // no shards now. + assert.Equal(t, float64(0), testutil.ToFloat64(s.shardsCount.WithLabelValues(""))) + + // add shard to c1 collection + err := s.addTenants(c1.Class, 0, &api.AddTenantsRequest{ + ClusterNodes: []string{"testNode"}, + Tenants: []*api.Tenant{ + { + Name: "tenant1", + Status: "HOT", + }, + nil, // nil tenant shouldn't be counted in the metrics + }, + }) + require.NoError(t, err) + assert.Equal(t, float64(1), testutil.ToFloat64(s.shardsCount.WithLabelValues("HOT"))) + + // add shard to c2 collection + err = s.addTenants(c2.Class, 0, &api.AddTenantsRequest{ + ClusterNodes: []string{"testNode"}, + Tenants: []*api.Tenant{ + { + Name: "tenant2", + Status: "FROZEN", + }, + nil, // nil tenant shouldn't be counted in the metrics + }, + }) + require.NoError(t, err) + assert.Equal(t, float64(1), testutil.ToFloat64(s.shardsCount.WithLabelValues("FROZEN"))) + + // delete "existing" tenant + err = s.deleteTenants(c1.Class, 0, &api.DeleteTenantsRequest{ + Tenants: []string{"tenant1"}, + }) + require.NoError(t, err) + assert.Equal(t, float64(0), testutil.ToFloat64(s.shardsCount.WithLabelValues("HOT"))) + 
assert.Equal(t, float64(1), testutil.ToFloat64(s.shardsCount.WithLabelValues("FROZEN"))) + + // delete "non-existing" tenant + err = s.deleteTenants(c1.Class, 0, &api.DeleteTenantsRequest{ + Tenants: []string{"tenant1"}, + }) + require.NoError(t, err) + assert.Equal(t, float64(0), testutil.ToFloat64(s.shardsCount.WithLabelValues("HOT"))) + assert.Equal(t, float64(1), testutil.ToFloat64(s.shardsCount.WithLabelValues("FROZEN"))) + + // update tenant status + fsm := NewMockreplicationFSM(t) + fsm.On("HasOngoingReplication", mock.Anything, mock.Anything, mock.Anything).Return(false).Maybe() + err = s.updateTenants(c2.Class, 0, &api.UpdateTenantsRequest{ + Tenants: []*api.Tenant{{Name: "tenant2", Status: "HOT"}}, // FROZEN -> HOT + ClusterNodes: []string{"testNode"}, + }, fsm) + require.NoError(t, err) + assert.Equal(t, float64(1), testutil.ToFloat64(s.shardsCount.WithLabelValues("UNFREEZING"))) + assert.Equal(t, float64(0), testutil.ToFloat64(s.shardsCount.WithLabelValues("FROZEN"))) + + // update tenant status + err = s.updateTenantsProcess(c2.Class, 0, &api.TenantProcessRequest{ + Node: "testNode", + Action: api.TenantProcessRequest_ACTION_UNFREEZING, + TenantsProcesses: []*api.TenantsProcess{ + { + Tenant: &api.Tenant{Name: "tenant2", Status: "HOT"}, + Op: api.TenantsProcess_OP_DONE, + }, + }, + }) + require.NoError(t, err) + assert.Equal(t, float64(1), testutil.ToFloat64(s.shardsCount.WithLabelValues("HOT"))) + assert.Equal(t, float64(0), testutil.ToFloat64(s.shardsCount.WithLabelValues("UNFREEZING"))) + + // Deleting collection with non-zero shards should decrement the shards count as well. 
+ assert.Equal(t, float64(1), testutil.ToFloat64(s.shardsCount.WithLabelValues("HOT"))) + require.True(t, s.deleteClass(c2.Class)) + assert.Equal(t, float64(0), testutil.ToFloat64(s.shardsCount.WithLabelValues("HOT"))) + + // Adding class with non empty shard should increase the shard count + ss = &sharding.State{ + Physical: make(map[string]sharding.Physical), + } + ss.Physical["random"] = sharding.Physical{ + Name: "random", + Status: "", + } + assert.Equal(t, float64(0), testutil.ToFloat64(s.shardsCount.WithLabelValues(""))) + require.NoError(t, s.addClass(c2, ss, 0)) + assert.Equal(t, float64(1), testutil.ToFloat64(s.shardsCount.WithLabelValues(""))) +} + +func Test_schemaDeepCopy(t *testing.T) { + r := prometheus.NewPedanticRegistry() + s := NewSchema("testNode", nil, r) + + class := &models.Class{ + Class: "test", + MultiTenancyConfig: &models.MultiTenancyConfig{ + Enabled: true, + }, + } + shardState := &sharding.State{ + Physical: map[string]sharding.Physical{ + "shard1": { + Name: "shard1", + Status: "HOT", + BelongsToNodes: []string{"node1"}, + }, + }, + } + + require.NoError(t, s.addClass(class, shardState, 1)) + + t.Run("MetaClasses deep copy", func(t *testing.T) { + copied := s.MetaClasses() + + original := s.classes["test"] + copiedClass := copied["test"] + + copiedClass.Class.Class = "modified" + physical := copiedClass.Sharding.Physical["shard1"] + physical.Status = "COLD" + copiedClass.Sharding.Physical["shard1"] = physical + + assert.Equal(t, "test", original.Class.Class) + assert.Equal(t, "HOT", original.Sharding.Physical["shard1"].Status) + + assert.Equal(t, original.ClassVersion, copiedClass.ClassVersion) + assert.Equal(t, original.ShardVersion, copiedClass.ShardVersion) + }) + + t.Run("Concurrent access", func(t *testing.T) { + done := make(chan bool) + go func() { + for i := 0; i < 100; i++ { + s.MetaClasses() + s.States() + } + done <- true + }() + + for i := 0; i < 100; i++ { + s.addClass(&models.Class{Class: fmt.Sprintf("concurrent%d", 
i)}, shardState, uint64(i)) + } + <-done + }) +} + +func TestSchemaRestoreLegacyWithEmptyClasses(t *testing.T) { + // Test the scenario where snapshot contains "classes":{} which should unmarshal to empty map + t.Run("empty classes object", func(t *testing.T) { + s := NewSchema("test-node", &MockShardReader{}, nil) + + // Create snapshot JSON with empty classes object + snapData := `{"node_id":"test-node","snapshot_id":"test-snapshot","classes":{}}` + + // Test RestoreLegacy + mockParser := NewMockParser(t) + + err := s.RestoreLegacy([]byte(snapData), mockParser) + require.NoError(t, err) + + // Verify that s.classes is an empty map, not nil + assert.NotNil(t, s.classes) + assert.Equal(t, 0, len(s.classes)) + }) +} + +func TestSchemaRestoreLegacyWithNilClasses(t *testing.T) { + // Test the scenario where snapshot JSON unmarshaling results in nil Classes + t.Run("nil classes after unmarshal", func(t *testing.T) { + s := NewSchema("test-node", &MockShardReader{}, nil) + + // Create a snapshot struct with nil Classes to simulate unmarshal failure + snap := snapshot{ + NodeID: "test-node", + SnapshotID: "test-snapshot", + Classes: nil, // This simulates the problematic case + } + + // Marshal it back to JSON + snapData, err := json.Marshal(snap) + require.NoError(t, err) + + // Test RestoreLegacy + mockParser := NewMockParser(t) + err = s.RestoreLegacy(snapData, mockParser) + require.NoError(t, err) + + // Verify that s.classes is initialized, not nil + assert.NotNil(t, s.classes) + assert.Equal(t, 0, len(s.classes)) + }) +} + +func TestSchemaAddClassAfterRestoreWithEmptyClasses(t *testing.T) { + // Test the scenario where addClass is called after restoring empty classes + t.Run("add class after empty restore", func(t *testing.T) { + s := NewSchema("test-node", &MockShardReader{}, nil) + + // First restore with empty classes + snapData := `{"node_id":"test-node","snapshot_id":"test-snapshot","classes":{}}` + mockParser := NewMockParser(t) + err := 
s.RestoreLegacy([]byte(snapData), mockParser) + require.NoError(t, err) + + // Verify s.classes is not nil + assert.NotNil(t, s.classes) + + // Now try to add a class - this should not panic + cls := &models.Class{Class: "TestClass"} + ss := &sharding.State{Physical: map[string]sharding.Physical{}} + + err = s.addClass(cls, ss, 1) + require.NoError(t, err) + + // Verify the class was added + assert.Equal(t, 1, len(s.classes)) + assert.NotNil(t, s.classes["TestClass"]) + }) +} + +func TestSchemaAddClassAfterRestoreWithNilClasses(t *testing.T) { + // Test the scenario where addClass is called after restoring with nil classes + t.Run("add class after nil restore", func(t *testing.T) { + s := NewSchema("test-node", &MockShardReader{}, nil) + + // First restore with nil classes (simulating unmarshal failure) + snap := snapshot{ + NodeID: "test-node", + SnapshotID: "test-snapshot", + Classes: nil, + } + snapData, err := json.Marshal(snap) + require.NoError(t, err) + + mockParser := NewMockParser(t) + err = s.RestoreLegacy(snapData, mockParser) + require.NoError(t, err) + + // Verify s.classes is not nil + assert.NotNil(t, s.classes) + + // Now try to add a class - this should not panic + cls := &models.Class{Class: "TestClass"} + ss := &sharding.State{Physical: map[string]sharding.Physical{}} + + err = s.addClass(cls, ss, 1) + require.NoError(t, err) + + // Verify the class was added + assert.Equal(t, 1, len(s.classes)) + assert.NotNil(t, s.classes["TestClass"]) + }) +} + +func TestCreateAlias(t *testing.T) { + var ( + sc = NewSchema(t.Name(), nil, prometheus.NewPedanticRegistry()) + ss = &sharding.State{Physical: make(map[string]sharding.Physical)} + ) + + require.Nil(t, sc.addClass(&models.Class{Class: "C"}, ss, 1)) + require.Nil(t, sc.addClass(&models.Class{Class: "AnotherClass"}, ss, 1)) + + t.Run("successfully create alias", func(t *testing.T) { + err := sc.createAlias("C", "A1") + require.Nil(t, err) + }) + + t.Run("fail on conflicting creation", func(t *testing.T) 
{ + err := sc.createAlias("C", "A1") + require.EqualError(t, err, "create alias: A1, alias already exists") + }) + + t.Run("fail on non-existing class", func(t *testing.T) { + err := sc.createAlias("D", "newAlias") + require.EqualError(t, err, "create alias: NewAlias, class not found, D") + }) + + t.Run("fail on non-existing alias", func(t *testing.T) { + err := sc.createAlias("D", "A1") + require.EqualError(t, err, "create alias: A1, alias already exists") + }) + t.Run("fail on creating alias with existing class name", func(t *testing.T) { + // We have two collection. "C" and "AnotherClass" + // 1. We try to create alias with name "AnotherClass" to class "C". + // 2. Should fail saying class with "AnotherClass" already exists. + err := sc.createAlias("C", "AnotherClass") + require.EqualError(t, err, "create alias: class AnotherClass already exists") + }) +} + +func TestSchemaAliasCasing(t *testing.T) { + // Alias name should be case-insensitive similar to collection. + // Meaning, MyCar, MYCar, myCar all same. + + var ( + sc = NewSchema(t.Name(), nil, prometheus.NewPedanticRegistry()) + ss = &sharding.State{Physical: make(map[string]sharding.Physical)} + ) + + require.Nil(t, sc.addClass(&models.Class{Class: "CoolCar"}, ss, 1)) + err := sc.createAlias("CoolCar", "MyCar") + require.Nil(t, err) + + // Try creating it with different cases. 
+ err = sc.createAlias("CoolCar", "MYCar") + require.Error(t, err) + assert.Contains(t, err.Error(), "already exists") + + err = sc.createAlias("CoolCar", "mYCar") + require.Error(t, err) + assert.Contains(t, err.Error(), "already exists") + + err = sc.createAlias("CoolCar", "mycar") + require.Error(t, err) + assert.Contains(t, err.Error(), "already exists") + + err = sc.createAlias("CoolCar", "MYCAR") + require.Error(t, err) + assert.Contains(t, err.Error(), "already exists") +} + +func TestReplaceAlias(t *testing.T) { + var ( + sc = NewSchema(t.Name(), nil, prometheus.NewPedanticRegistry()) + ss = &sharding.State{Physical: make(map[string]sharding.Physical)} + ) + + require.Nil(t, sc.addClass(&models.Class{Class: "C1"}, ss, 1)) + require.Nil(t, sc.addClass(&models.Class{Class: "C2"}, ss, 1)) + require.Nil(t, sc.createAlias("C1", "A1")) + + t.Run("successfully replace alias", func(t *testing.T) { + err := sc.replaceAlias("C2", "A1") + require.Nil(t, err) + }) + + t.Run("fail on non-existing alias", func(t *testing.T) { + err := sc.replaceAlias("C1", "A2") + require.EqualError(t, err, "replace alias: alias A2 does not exist") + }) + + t.Run("fail on non-existing class", func(t *testing.T) { + err := sc.replaceAlias("D", "A1") + require.EqualError(t, err, "replace alias: class D does not exist") + }) +} + +func TestDeleteAlias(t *testing.T) { + var ( + sc = NewSchema(t.Name(), nil, prometheus.NewPedanticRegistry()) + ss = &sharding.State{Physical: make(map[string]sharding.Physical)} + ) + + require.Nil(t, sc.addClass(&models.Class{Class: "C"}, ss, 1)) + require.Nil(t, sc.createAlias("C", "A1")) + + t.Run("successfully delete alias", func(t *testing.T) { + err := sc.deleteAlias("A1") + require.Nil(t, err) + }) + + t.Run("idempotent deletion with non-existent alias", func(t *testing.T) { + err := sc.deleteAlias("A2") + require.Nil(t, err) + }) +} + +func TestResolveAlias(t *testing.T) { + var ( + sc = NewSchema(t.Name(), nil, prometheus.NewPedanticRegistry()) + ss = 
&sharding.State{Physical: make(map[string]sharding.Physical)} + ) + + require.Nil(t, sc.addClass(&models.Class{Class: "C1"}, ss, 1)) + require.Nil(t, sc.createAlias("C1", "A1")) + + t.Run("successfully resolve alias", func(t *testing.T) { + alias := sc.ResolveAlias("A1") + assert.Equal(t, alias, "C1") + }) + + t.Run("empty response for non-existent alias", func(t *testing.T) { + alias := sc.ResolveAlias("A2") + assert.Empty(t, alias) + }) +} + +func TestGetAlias(t *testing.T) { + var ( + sc = NewSchema(t.Name(), nil, prometheus.NewPedanticRegistry()) + ss = &sharding.State{Physical: make(map[string]sharding.Physical)} + ) + + require.Nil(t, sc.addClass(&models.Class{Class: "C1"}, ss, 1)) + require.Nil(t, sc.addClass(&models.Class{Class: "C2"}, ss, 1)) + require.Nil(t, sc.addClass(&models.Class{Class: "C3"}, ss, 1)) + require.Nil(t, sc.createAlias("C1", "A1")) + require.Nil(t, sc.createAlias("C2", "A2")) + require.Nil(t, sc.createAlias("C2", "A3")) + + t.Run("get aliases", func(t *testing.T) { + aliases := sc.getAliases("", "") + expected := map[string]string{ + "A1": "C1", + "A2": "C2", + "A3": "C2", + } + assert.EqualValues(t, expected, aliases) + }) + + t.Run("get aliases for alias A1", func(t *testing.T) { + aliases := sc.getAliases("A1", "") + expected := map[string]string{ + "A1": "C1", + } + assert.EqualValues(t, expected, aliases) + }) + + t.Run("get aliases for class C2", func(t *testing.T) { + aliases := sc.getAliases("", "C2") + expected := map[string]string{ + "A2": "C2", + "A3": "C2", + } + assert.EqualValues(t, expected, aliases) + }) + + t.Run("get updated aliases", func(t *testing.T) { + require.Nil(t, sc.replaceAlias("C3", "A2")) + + aliases := sc.getAliases("", "") + expected := map[string]string{ + "A1": "C1", + "A2": "C3", + "A3": "C2", + } + assert.EqualValues(t, expected, aliases) + }) +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/schema/schema_thread_safety_test.go 
b/platform/dbops/binaries/weaviate-src/cluster/schema/schema_thread_safety_test.go new file mode 100644 index 0000000000000000000000000000000000000000..1df186522559e44bfe0d24302febbcffdaaf08aa --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/schema/schema_thread_safety_test.go @@ -0,0 +1,759 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package schema + +import ( + "errors" + "fmt" + "sync" + "testing" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + command "github.com/weaviate/weaviate/cluster/proto/api" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/sharding" +) + +func TestConcurrentSchemaAccess(t *testing.T) { + tests := []struct { + name string + test func(*testing.T, *schema) + }{ + { + name: "concurrent read and write access to classes", + test: testConcurrentReadWrite, + }, + { + name: "concurrent read-only operations", + test: testConcurrentReadOnly, + }, + { + name: "concurrent class modifications", + test: testConcurrentClassModifications, + }, + { + name: "concurrent schema operations", + test: testConcurrentSchemaOperations, + }, + { + name: "concurrent shard operations", + test: testConcurrentShardOperations, + }, + { + name: "concurrent tenant operations", + test: testConcurrentTenantOperations, + }, + { + name: "concurrent meta operations", + test: testConcurrentMetaOperations, + }, + { + name: "concurrent class info operations", + test: testConcurrentClassInfoOperations, + }, + { + name: "concurrent read lock operations", + test: testConcurrentReadLockOperations, + }, + { + name: "concurrent tenant management 
operations", + test: testConcurrentTenantManagementOperations, + }, + { + name: "concurrent sharding state operations", + test: testConcurrentShardingStateOperations, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + s := NewSchema("testNode", &mockShardReader{}, prometheus.NewPedanticRegistry()) + tt.test(t, s) + }) + } +} + +func testConcurrentReadWrite(t *testing.T, s *schema) { + const numGoroutines = 10 + const iterations = 100 + + var wg sync.WaitGroup + wg.Add(numGoroutines * 2) // readers + writers + + // Start readers + for i := 0; i < numGoroutines; i++ { + go func() { + defer wg.Done() + for j := 0; j < iterations; j++ { + schema := s.ReadOnlySchema() + _ = schema // Note: we just use the schema to prevent optimization + time.Sleep(time.Microsecond) // Simulate some work + } + }() + } + + // Start writers + for i := 0; i < numGoroutines; i++ { + go func(id int) { + defer wg.Done() + for j := 0; j < iterations; j++ { + className := fmt.Sprintf("Class%d_%d", id, j) + class := &models.Class{ + Class: className, + Properties: []*models.Property{ + {Name: "prop1", DataType: []string{"string"}}, + }, + } + err := s.addClass(class, &sharding.State{}, uint64(j)) + if err != nil && !errors.Is(err, ErrClassExists) { + t.Errorf("unexpected error adding class: %v", err) + } + time.Sleep(time.Microsecond) // Simulate some work + } + }(i) + } + + wg.Wait() +} + +func testConcurrentReadOnly(t *testing.T, s *schema) { + // Setup some initial data + initialClasses := []string{"Class1", "Class2", "Class3"} + for _, className := range initialClasses { + class := &models.Class{ + Class: className, + Properties: []*models.Property{ + {Name: "prop1", DataType: []string{"string"}}, + }, + } + require.NoError(t, s.addClass(class, &sharding.State{}, 1)) + } + + const numGoroutines = 10 + const iterations = 100 + + var wg sync.WaitGroup + wg.Add(numGoroutines * 3) + + // Test concurrent ReadOnlySchema + for i := 0; i < numGoroutines; i++ { + go 
func() { + defer wg.Done() + for j := 0; j < iterations; j++ { + schema := s.ReadOnlySchema() + assert.NotEmpty(t, schema.Classes) + time.Sleep(time.Microsecond) + } + }() + } + + // Test concurrent ReadOnlyClass + for i := 0; i < numGoroutines; i++ { + go func() { + defer wg.Done() + for j := 0; j < iterations; j++ { + class, version := s.ReadOnlyClass("Class1") + if class != nil { + assert.Equal(t, "Class1", class.Class) + assert.Greater(t, version, uint64(0)) + } + time.Sleep(time.Microsecond) + } + }() + } + + // Test concurrent ReadOnlyClasses + for i := 0; i < numGoroutines; i++ { + go func() { + defer wg.Done() + for j := 0; j < iterations; j++ { + classes := s.ReadOnlyClasses(initialClasses...) + assert.NotEmpty(t, classes) + time.Sleep(time.Microsecond) + } + }() + } + + wg.Wait() +} + +func testConcurrentClassModifications(t *testing.T, s *schema) { + class := &models.Class{ + Class: "TestClass", + Properties: []*models.Property{ + {Name: "prop1", DataType: []string{"string"}}, + }, + } + require.NoError(t, s.addClass(class, &sharding.State{}, 1)) + + const numGoroutines = 10 + const iterations = 100 + + var wg sync.WaitGroup + wg.Add(numGoroutines * 2) + + // Test concurrent property additions + for i := 0; i < numGoroutines; i++ { + go func(id int) { + defer wg.Done() + for j := 0; j < iterations; j++ { + prop := &models.Property{ + Name: fmt.Sprintf("prop_%d_%d", id, j), + DataType: []string{"string"}, + } + _ = s.addProperty("TestClass", uint64(j), prop) + time.Sleep(time.Microsecond) + } + }(i) + } + + // Test concurrent reads while modifying + for i := 0; i < numGoroutines; i++ { + go func() { + defer wg.Done() + for j := 0; j < iterations; j++ { + class, _ := s.ReadOnlyClass("TestClass") + if class != nil { + assert.Equal(t, "TestClass", class.Class) + } + time.Sleep(time.Microsecond) + } + }() + } + + wg.Wait() +} + +func testConcurrentSchemaOperations(t *testing.T, s *schema) { + const numGoroutines = 10 + const iterations = 100 + + var wg 
sync.WaitGroup + wg.Add(numGoroutines * 4) + + // Test concurrent class additions and deletions + for i := 0; i < numGoroutines; i++ { + go func(id int) { + defer wg.Done() + for j := 0; j < iterations; j++ { + className := fmt.Sprintf("Class%d_%d", id, j) + class := &models.Class{ + Class: className, + Properties: []*models.Property{ + {Name: "prop1", DataType: []string{"string"}}, + }, + } + _ = s.addClass(class, &sharding.State{}, uint64(j)) + time.Sleep(time.Microsecond) + } + }(i) + } + + // Test concurrent deletions + for i := 0; i < numGoroutines; i++ { + go func(id int) { + defer wg.Done() + for j := 0; j < iterations; j++ { + className := fmt.Sprintf("Class%d_%d", id, j) + s.deleteClass(className) + time.Sleep(time.Microsecond) + } + }(i) + } + + // Test concurrent class equality checks + for i := 0; i < numGoroutines; i++ { + go func(id int) { + defer wg.Done() + for j := 0; j < iterations; j++ { + className := fmt.Sprintf("Class%d_%d", id, j) + _, _ = s.ClassEqual(className) + time.Sleep(time.Microsecond) + } + }(i) + } + + // Test concurrent length checks + for i := 0; i < numGoroutines; i++ { + go func() { + defer wg.Done() + for j := 0; j < iterations; j++ { + _ = s.len() + time.Sleep(time.Microsecond) + } + }() + } + + wg.Wait() +} + +func testConcurrentShardOperations(t *testing.T, s *schema) { + // Setup initial class with shards + class := &models.Class{ + Class: "TestClass", + Properties: []*models.Property{ + {Name: "prop1", DataType: []string{"string"}}, + }, + ReplicationConfig: &models.ReplicationConfig{ + Factor: 2, + }, + } + shardState := &sharding.State{ + Physical: map[string]sharding.Physical{ + "shard1": { + Name: "shard1", + BelongsToNodes: []string{"node1", "node2"}, + Status: "HOT", + }, + "shard2": { + Name: "shard2", + BelongsToNodes: []string{"node2", "node3"}, + Status: "HOT", + }, + }, + // Add virtual shards mapping + Virtual: []sharding.Virtual{ + { + Name: "00000000-0000-0000-0000-000000000000", + AssignedToPhysical: 
"shard1", + Upper: 1000, + OwnsPercentage: 50.0, + }, + { + Name: "00000000-0000-0000-0000-000000000001", + AssignedToPhysical: "shard2", + Upper: 2000, + OwnsPercentage: 50.0, + }, + }, + } + require.NoError(t, s.addClass(class, shardState, 1)) + + const numGoroutines = 10 + const iterations = 100 + + var wg sync.WaitGroup + wg.Add(numGoroutines * 3) + + // Test concurrent ShardOwner calls + for i := 0; i < numGoroutines; i++ { + go func() { + defer wg.Done() + for j := 0; j < iterations; j++ { + owner, _, _ := s.ShardOwner("TestClass", "shard1") + if owner != "" { + assert.Contains(t, []string{"node1", "node2"}, owner) + } + time.Sleep(time.Microsecond) + } + }() + } + + // Test concurrent ShardReplicas calls + for i := 0; i < numGoroutines; i++ { + go func() { + defer wg.Done() + for j := 0; j < iterations; j++ { + replicas, _, _ := s.ShardReplicas("TestClass", "shard1") + if len(replicas) > 0 { + assert.Subset(t, []string{"node1", "node2"}, replicas) + } + time.Sleep(time.Microsecond) + } + }() + } + + // Test concurrent ShardFromUUID calls with valid UUID + testUUID := []byte("00000000-0000-0000-0000-000000000000") + for i := 0; i < numGoroutines; i++ { + go func() { + defer wg.Done() + for j := 0; j < iterations; j++ { + shard, _ := s.ShardFromUUID("TestClass", testUUID) + if shard != "" { + assert.Equal(t, "shard1", shard) + } + time.Sleep(time.Microsecond) + } + }() + } + + wg.Wait() +} + +func testConcurrentTenantOperations(t *testing.T, s *schema) { + // Setup initial class with multi-tenancy + class := &models.Class{ + Class: "TestClass", + Properties: []*models.Property{ + {Name: "prop1", DataType: []string{"string"}}, + }, + MultiTenancyConfig: &models.MultiTenancyConfig{ + Enabled: true, + }, + } + shardState := &sharding.State{ + Physical: map[string]sharding.Physical{ + "tenant1": { + Name: "tenant1", + Status: "HOT", + }, + "tenant2": { + Name: "tenant2", + Status: "HOT", + }, + }, + } + require.NoError(t, s.addClass(class, shardState, 1)) + + 
const numGoroutines = 10 + const iterations = 100 + + var wg sync.WaitGroup + wg.Add(numGoroutines * 2) + + // Test concurrent MultiTenancy calls + for i := 0; i < numGoroutines; i++ { + go func() { + defer wg.Done() + for j := 0; j < iterations; j++ { + config := s.MultiTenancy("TestClass") + assert.True(t, config.Enabled) + time.Sleep(time.Microsecond) + } + }() + } + + // Test concurrent TenantsShards calls + for i := 0; i < numGoroutines; i++ { + go func() { + defer wg.Done() + for j := 0; j < iterations; j++ { + shards, _ := s.TenantsShards("TestClass", "tenant1", "tenant2") + if len(shards) > 0 { + assert.Contains(t, shards, "HOT") + } + time.Sleep(time.Microsecond) + } + }() + } + + wg.Wait() +} + +func testConcurrentMetaOperations(t *testing.T, s *schema) { + // Setup initial data + setupClasses := []string{"Class1", "Class2", "Class3"} + for _, className := range setupClasses { + class := &models.Class{ + Class: className, + Properties: []*models.Property{ + {Name: "prop1", DataType: []string{"string"}}, + }, + } + require.NoError(t, s.addClass(class, &sharding.State{}, 1)) + } + + const numGoroutines = 10 + const iterations = 100 + + var wg sync.WaitGroup + wg.Add(numGoroutines * 2) + + // Test concurrent MetaClasses access + for i := 0; i < numGoroutines; i++ { + go func() { + defer wg.Done() + for j := 0; j < iterations; j++ { + classes := s.MetaClasses() + assert.NotEmpty(t, classes) + // Verify we can safely access the data + for _, meta := range classes { + assert.NotEmpty(t, meta.Class.Class) + } + time.Sleep(time.Microsecond) + } + }() + } + + // Test concurrent modifications while accessing meta + for i := 0; i < numGoroutines; i++ { + go func(id int) { + defer wg.Done() + for j := 0; j < iterations; j++ { + className := fmt.Sprintf("MetaClass%d_%d", id, j) + class := &models.Class{ + Class: className, + Properties: []*models.Property{ + {Name: "prop1", DataType: []string{"string"}}, + }, + } + _ = s.addClass(class, &sharding.State{}, uint64(j)) + 
time.Sleep(time.Microsecond) + } + }(i) + } + + wg.Wait() +} + +func testConcurrentClassInfoOperations(t *testing.T, s *schema) { + // Setup initial class + class := &models.Class{ + Class: "TestClass", + Properties: []*models.Property{ + {Name: "prop1", DataType: []string{"string"}}, + }, + MultiTenancyConfig: &models.MultiTenancyConfig{ + Enabled: true, + }, + ReplicationConfig: &models.ReplicationConfig{ + Factor: 2, + }, + } + require.NoError(t, s.addClass(class, &sharding.State{}, 1)) + + const numGoroutines = 10 + const iterations = 100 + + var wg sync.WaitGroup + wg.Add(numGoroutines * 2) + + // Test concurrent ClassInfo calls + for i := 0; i < numGoroutines; i++ { + go func() { + defer wg.Done() + for j := 0; j < iterations; j++ { + info := s.ClassInfo("TestClass") + if info.Exists { + assert.True(t, info.MultiTenancy.Enabled) + assert.Equal(t, 2, info.ReplicationFactor) + } + time.Sleep(time.Microsecond) + } + }() + } + + // Test concurrent ClassEqual calls + for i := 0; i < numGoroutines; i++ { + go func() { + defer wg.Done() + for j := 0; j < iterations; j++ { + name, _ := s.ClassEqual("testclass") // Testing case-insensitive match + if name != "" { + assert.Equal(t, "TestClass", name) + } + time.Sleep(time.Microsecond) + } + }() + } + + wg.Wait() +} + +func testConcurrentReadLockOperations(t *testing.T, s *schema) { + // Setup initial class + class := &models.Class{ + Class: "TestClass", + Properties: []*models.Property{ + {Name: "prop1", DataType: []string{"string"}}, + }, + } + require.NoError(t, s.addClass(class, &sharding.State{}, 1)) + + const numGoroutines = 10 + const iterations = 100 + + var wg sync.WaitGroup + wg.Add(numGoroutines * 2) + + // Test concurrent Read operations + for i := 0; i < numGoroutines; i++ { + go func() { + defer wg.Done() + for j := 0; j < iterations; j++ { + err := s.Read("TestClass", func(cls *models.Class, state *sharding.State) error { + assert.Equal(t, "TestClass", cls.Class) + assert.NotNil(t, state) + return nil + 
}) + assert.NoError(t, err) + time.Sleep(time.Microsecond) + } + }() + } + + // Test concurrent updateClass operations + for i := 0; i < numGoroutines; i++ { + go func(id int) { + defer wg.Done() + for j := 0; j < iterations; j++ { + err := s.updateClass("TestClass", func(mc *metaClass) error { + mc.ClassVersion = uint64(j) + return nil + }) + assert.NoError(t, err) + time.Sleep(time.Microsecond) + } + }(i) + } + + wg.Wait() +} + +func testConcurrentTenantManagementOperations(t *testing.T, s *schema) { + // Setup initial class with multi-tenancy + class := &models.Class{ + Class: "TestClass", + Properties: []*models.Property{ + {Name: "prop1", DataType: []string{"string"}}, + }, + MultiTenancyConfig: &models.MultiTenancyConfig{ + Enabled: true, + }, + } + shardState := &sharding.State{ + Physical: map[string]sharding.Physical{ + "tenant1": { + Name: "tenant1", + Status: "READY", + }, + }, + } + require.NoError(t, s.addClass(class, shardState, 1)) + + const numGoroutines = 10 + const iterations = 10 + + var wg sync.WaitGroup + wg.Add(numGoroutines * 4) + + // Test concurrent getTenants operations + for i := 0; i < numGoroutines; i++ { + go func() { + defer wg.Done() + for j := 0; j < iterations; j++ { + tenants, err := s.getTenants("TestClass", []string{"tenant1"}) + if err == nil { + assert.NotEmpty(t, tenants) + } + time.Sleep(time.Microsecond) + } + }() + } + + // Test concurrent addTenants operations + for i := 0; i < numGoroutines; i++ { + go func(id int) { + defer wg.Done() + for j := 0; j < iterations; j++ { + req := &command.AddTenantsRequest{ + ClusterNodes: []string{"node1"}, + Tenants: []*command.Tenant{ + {Name: fmt.Sprintf("new_tenant_%d_%d", id, j)}, + }, + } + _ = s.addTenants("TestClass", uint64(j), req) + time.Sleep(time.Microsecond) + } + }(i) + } + + // Test concurrent updateTenants operations + for i := 0; i < numGoroutines; i++ { + go func(id int) { + defer wg.Done() + for j := 0; j < iterations; j++ { + req := &command.UpdateTenantsRequest{ + 
Tenants: []*command.Tenant{ + {Name: "tenant1"}, + }, + } + fsm := NewMockreplicationFSM(t) + fsm.On("HasOngoingReplication", mock.Anything, mock.Anything, mock.Anything).Return(false).Maybe() + _ = s.updateTenants("TestClass", uint64(j), req, fsm) + time.Sleep(time.Microsecond) + } + }(i) + } + + // Test concurrent deleteTenants operations + for i := 0; i < numGoroutines; i++ { + go func(id int) { + defer wg.Done() + for j := 0; j < iterations; j++ { + req := &command.DeleteTenantsRequest{ + Tenants: []string{fmt.Sprintf("new_tenant_%d_%d", id, j)}, + } + _ = s.deleteTenants("TestClass", uint64(j), req) + time.Sleep(time.Microsecond) + } + }(i) + } + + wg.Wait() +} + +func testConcurrentShardingStateOperations(t *testing.T, s *schema) { + // Setup initial class + class := &models.Class{ + Class: "TestClass", + Properties: []*models.Property{ + {Name: "prop1", DataType: []string{"string"}}, + }, + } + shardState := &sharding.State{ + Physical: map[string]sharding.Physical{ + "shard1": { + Name: "shard1", + Status: "HOT", + }, + }, + } + require.NoError(t, s.addClass(class, shardState, 1)) + + const numGoroutines = 10 + const iterations = 100 + + var wg sync.WaitGroup + wg.Add(numGoroutines) // For GetShardsStatus operations + + // Test concurrent GetShardsStatus operations + for i := 0; i < numGoroutines; i++ { + go func() { + defer wg.Done() + for j := 0; j < iterations; j++ { + status, _ := s.GetShardsStatus("TestClass", "") + if status != nil { + assert.NotEmpty(t, status) + } + time.Sleep(time.Microsecond) + } + }() + } + + wg.Wait() +} + +// Additional mock for shard reader +type mockShardReader struct{} + +func (m *mockShardReader) GetShardsStatus(class, tenant string) (models.ShardStatusList, error) { + return models.ShardStatusList{ + {Status: "HOT", Name: "shard1"}, + }, nil +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/schema/snapshot.go b/platform/dbops/binaries/weaviate-src/cluster/schema/snapshot.go new file mode 100644 index 
0000000000000000000000000000000000000000..0f7f3c1a1b18923af722ce70f923e6edd2e8d9a5 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/schema/snapshot.go @@ -0,0 +1,53 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package schema + +import ( + "encoding/json" + "fmt" + "io" + + "github.com/hashicorp/raft" + + "github.com/weaviate/weaviate/cluster/types" +) + +// snapshot is the old format, we keep it for backwards compatibility +type snapshot struct { + NodeID string `json:"node_id"` + SnapshotID string `json:"snapshot_id"` + Classes map[string]*metaClass `json:"classes"` +} + +// LegacySnapshot returns a ready-to-use in-memory Raft snapshot based on the provided legacy schema +func LegacySnapshot(nodeID string, m map[string]types.ClassState) (*raft.SnapshotMeta, io.ReadCloser, error) { + store := raft.NewInmemSnapshotStore() + sink, err := store.Create(raft.SnapshotVersionMax, 0, 0, raft.Configuration{}, 0, nil) + if err != nil { + return nil, nil, err + } + defer sink.Close() + snap := snapshot{ + NodeID: nodeID, + SnapshotID: sink.ID(), + Classes: make(map[string]*metaClass, len(m)), + } + for k, v := range m { + // TODO support classTenantDataEvents here? 
+ snap.Classes[k] = &metaClass{Class: v.Class, Sharding: v.Shards} + } + + if err := json.NewEncoder(sink).Encode(&snap); err != nil { + return nil, nil, fmt.Errorf("encode: %w", err) + } + return store.Open(sink.ID()) +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/schema/types.go b/platform/dbops/binaries/weaviate-src/cluster/schema/types.go new file mode 100644 index 0000000000000000000000000000000000000000..33daa67eb4ac6ade70dd9e0a28215dbfc22f5500 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/schema/types.go @@ -0,0 +1,67 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package schema + +import ( + "context" + + "github.com/weaviate/weaviate/cluster/proto/api" + "github.com/weaviate/weaviate/cluster/types" + "github.com/weaviate/weaviate/entities/models" +) + +type ( + // LoadLegacySchema returns the legacy schema + LoadLegacySchema func() (map[string]types.ClassState, error) + // SaveLegacySchema saves the RAFT schema representation to the legacy storage + SaveLegacySchema func(map[string]types.ClassState) error +) + +// Indexer interface updates both the collection and its indices in the filesystem. +// This is distinct from updating metadata, which is handled through a different interface. 
+type Indexer interface { + AddClass(api.AddClassRequest) error + UpdateClass(api.UpdateClassRequest) error + DeleteClass(className string, hasFrozen bool) error + AddProperty(class string, req api.AddPropertyRequest) error + AddTenants(class string, req *api.AddTenantsRequest) error + UpdateTenants(class string, req *api.UpdateTenantsRequest) error + DeleteTenants(class string, tenants []*models.Tenant) error + UpdateTenantsProcess(class string, req *api.TenantProcessRequest) error + UpdateShardStatus(*api.UpdateShardStatusRequest) error + AddReplicaToShard(class, shard, targetNode string) error + DeleteReplicaFromShard(class, shard, targetNode string) error + LoadShard(class, shard string) // is a no-op + ShutdownShard(class, shard string) // is a no-op + DropShard(class, shard string) // is a no-op + GetShardsStatus(class, tenant string) (models.ShardStatusList, error) + UpdateIndex(api.UpdateClassRequest) error + + TriggerSchemaUpdateCallbacks() + + // ReloadLocalDB reloads the local database using the latest schema. + ReloadLocalDB(ctx context.Context, all []api.UpdateClassRequest) error + + // RestoreClassDir restores classes on the filesystem directly from the temporary class backup stored on disk. + RestoreClassDir(class string) error + Open(context.Context) error + Close(context.Context) error +} + +// Parser parses concrete class fields after deserialization +type Parser interface { + // ParseClassUpdate parses a class after unmarshaling by setting concrete types for the fields + ParseClass(class *models.Class) error + + // ParseClass parses new updates by providing the current class data. 
+ ParseClassUpdate(class, update *models.Class) (*models.Class, error) +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/schema/utils.go b/platform/dbops/binaries/weaviate-src/cluster/schema/utils.go new file mode 100644 index 0000000000000000000000000000000000000000..a8b1d5f3a67a9596d70f67ae1660733ce3edbda2 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/schema/utils.go @@ -0,0 +1,27 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package schema + +import ( + cmd "github.com/weaviate/weaviate/cluster/proto/api" +) + +func removeNilTenants(tenants []*cmd.Tenant) []*cmd.Tenant { + n := 0 + for i := range tenants { + if tenants[i] != nil && tenants[i].Name != "" { + tenants[n] = tenants[i] + n++ + } + } + return tenants[:n] +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/schema/versioned_reader.go b/platform/dbops/binaries/weaviate-src/cluster/schema/versioned_reader.go new file mode 100644 index 0000000000000000000000000000000000000000..214b922710bcae323bab7d945e21becc2c7748a7 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/schema/versioned_reader.go @@ -0,0 +1,142 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package schema + +import ( + "context" + + "github.com/prometheus/client_golang/prometheus" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/monitoring" + "github.com/weaviate/weaviate/usecases/sharding" +) + +// VersionedSchemaReader is utilized to query the schema based on a specific update version. Serving as a thin wrapper around +// the original schema, it segregates waiting logic from the actual operation. +// It waits until it finds an update at least as up-to-date as the specified version. +// Note that updates may take some time to propagate to the follower, hence this process might take time. +type VersionedSchemaReader struct { // TODO TEST + schema *schema + WaitForUpdate func(ctx context.Context, version uint64) error +} + +func (s VersionedSchemaReader) ClassInfo(ctx context.Context, + class string, + v uint64, +) (ClassInfo, error) { + t := prometheus.NewTimer(monitoring.GetMetrics().SchemaWaitForVersion.WithLabelValues("ClassInfo")) + defer t.ObserveDuration() + + err := s.WaitForUpdate(ctx, v) + return s.schema.ClassInfo(class), err +} + +func (s VersionedSchemaReader) MultiTenancy(ctx context.Context, + class string, + v uint64, +) (models.MultiTenancyConfig, error) { + t := prometheus.NewTimer(monitoring.GetMetrics().SchemaWaitForVersion.WithLabelValues("MultiTenancy")) + defer t.ObserveDuration() + + if info := s.schema.ClassInfo(class); info.Exists { + return info.MultiTenancy, nil + } + err := s.WaitForUpdate(ctx, v) + return s.schema.MultiTenancy(class), err +} + +// Read performs a read operation `reader` on the specified class and sharding state +func (s VersionedSchemaReader) Read(ctx context.Context, + class string, v uint64, + reader func(*models.Class, *sharding.State) error, +) error { + t := prometheus.NewTimer(monitoring.GetMetrics().SchemaWaitForVersion.WithLabelValues("Read")) + defer t.ObserveDuration() + + if err := s.WaitForUpdate(ctx, v); err != 
nil { + return err + } + + return s.schema.Read(class, reader) +} + +// ReadOnlyClass returns a shallow copy of a class. +// The copy is read-only and should not be modified. +func (s VersionedSchemaReader) ReadOnlyClass(ctx context.Context, + class string, + v uint64, +) (*models.Class, error) { + t := prometheus.NewTimer(monitoring.GetMetrics().SchemaWaitForVersion.WithLabelValues("ReadOnlyClass")) + defer t.ObserveDuration() + + err := s.WaitForUpdate(ctx, v) + cls, _ := s.schema.ReadOnlyClass(class) + return cls, err +} + +// ShardOwner returns the node owner of the specified shard +func (s VersionedSchemaReader) ShardOwner(ctx context.Context, + class, shard string, + v uint64, +) (string, error) { + t := prometheus.NewTimer(monitoring.GetMetrics().SchemaWaitForVersion.WithLabelValues("ShardOwner")) + defer t.ObserveDuration() + + err := s.WaitForUpdate(ctx, v) + owner, _, sErr := s.schema.ShardOwner(class, shard) + if sErr != nil && err == nil { + err = sErr + } + return owner, err +} + +// ShardFromUUID returns shard name of the provided uuid +func (s VersionedSchemaReader) ShardFromUUID(ctx context.Context, + class string, uuid []byte, v uint64, +) (string, error) { + t := prometheus.NewTimer(monitoring.GetMetrics().SchemaWaitForVersion.WithLabelValues("ShardFromUUID")) + defer t.ObserveDuration() + + err := s.WaitForUpdate(ctx, v) + shard, _ := s.schema.ShardFromUUID(class, uuid) + return shard, err +} + +// ShardReplicas returns the replica nodes of a shard +func (s VersionedSchemaReader) ShardReplicas( + ctx context.Context, class, shard string, + v uint64, +) ([]string, error) { + t := prometheus.NewTimer(monitoring.GetMetrics().SchemaWaitForVersion.WithLabelValues("ShardReplicas")) + defer t.ObserveDuration() + + err := s.WaitForUpdate(ctx, v) + nodes, _, sErr := s.schema.ShardReplicas(class, shard) + if sErr != nil && err == nil { + err = sErr + } + return nodes, err +} + +// TenantShard returns shard name for the provided tenant and its activity 
status +func (s VersionedSchemaReader) TenantsShards(ctx context.Context, + v uint64, class string, tenants ...string, +) (map[string]string, uint64, error) { + t := prometheus.NewTimer(monitoring.GetMetrics().SchemaWaitForVersion.WithLabelValues("TenantsShards")) + defer t.ObserveDuration() + + err := s.WaitForUpdate(ctx, v) + status, version := s.schema.TenantsShards(class, tenants...) + return status, version, err +} + +func (s VersionedSchemaReader) Len() int { return s.schema.len() } diff --git a/platform/dbops/binaries/weaviate-src/cluster/types/errs.go b/platform/dbops/binaries/weaviate-src/cluster/types/errs.go new file mode 100644 index 0000000000000000000000000000000000000000..7dc6ec9a9eea058a9cc67dbc13de9cf6db961551 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/types/errs.go @@ -0,0 +1,26 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package types + +import "errors" + +var ( + // ErrNotLeader is returned when an operation can't be completed on a + // follower or candidate node. + ErrNotLeader = errors.New("node is not the leader") + ErrLeaderNotFound = errors.New("leader not found") + ErrNotOpen = errors.New("store not open") + ErrUnknownCommand = errors.New("unknown command") + // ErrDeadlineExceeded represents an error returned when the deadline for waiting for a specific update is exceeded. 
+ ErrDeadlineExceeded = errors.New("deadline exceeded for waiting for update") + ErrNotFound = errors.New("not found") +) diff --git a/platform/dbops/binaries/weaviate-src/cluster/types/types.go b/platform/dbops/binaries/weaviate-src/cluster/types/types.go new file mode 100644 index 0000000000000000000000000000000000000000..b8526af3c19982c3181d948bc735646d95b37673 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/types/types.go @@ -0,0 +1,40 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package types + +import ( + "net" + "time" + + "github.com/hashicorp/raft" + "github.com/sirupsen/logrus" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/sharding" +) + +// ClassState represent a class and it's associated sharding state +type ClassState struct { + Class models.Class + Shards sharding.State +} + +// RaftResolver is passed to raft to resolver node ids to their real ip:port so that tranport can be established. 
+type RaftResolver interface { + ServerAddr(id raft.ServerID) (raft.ServerAddress, error) + NewTCPTransport(bindAddr string, advertise net.Addr, maxPool int, timeout time.Duration, logger *logrus.Logger) (*raft.NetworkTransport, error) + NotResolvedNodes() map[raft.ServerID]struct{} +} + +const ( + TenantActivityStatusFREEZING = "FREEZING" + TenantActivityStatusUNFREEZING = "UNFREEZING" +) diff --git a/platform/dbops/binaries/weaviate-src/cluster/usage/mock_service.go b/platform/dbops/binaries/weaviate-src/cluster/usage/mock_service.go new file mode 100644 index 0000000000000000000000000000000000000000..fac3c9a556b0f2dda8ec50bdf213fd3cf7f7ee93 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/usage/mock_service.go @@ -0,0 +1,141 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by mockery v2.53.2. DO NOT EDIT. 
+ +package usage + +import ( + context "context" + time "time" + + mock "github.com/stretchr/testify/mock" + + types "github.com/weaviate/weaviate/cluster/usage/types" +) + +// MockService is an autogenerated mock type for the Service type +type MockService struct { + mock.Mock +} + +type MockService_Expecter struct { + mock *mock.Mock +} + +func (_m *MockService) EXPECT() *MockService_Expecter { + return &MockService_Expecter{mock: &_m.Mock} +} + +// SetJitterInterval provides a mock function with given fields: interval +func (_m *MockService) SetJitterInterval(interval time.Duration) { + _m.Called(interval) +} + +// MockService_SetJitterInterval_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetJitterInterval' +type MockService_SetJitterInterval_Call struct { + *mock.Call +} + +// SetJitterInterval is a helper method to define mock.On call +// - interval time.Duration +func (_e *MockService_Expecter) SetJitterInterval(interval interface{}) *MockService_SetJitterInterval_Call { + return &MockService_SetJitterInterval_Call{Call: _e.mock.On("SetJitterInterval", interval)} +} + +func (_c *MockService_SetJitterInterval_Call) Run(run func(interval time.Duration)) *MockService_SetJitterInterval_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(time.Duration)) + }) + return _c +} + +func (_c *MockService_SetJitterInterval_Call) Return() *MockService_SetJitterInterval_Call { + _c.Call.Return() + return _c +} + +func (_c *MockService_SetJitterInterval_Call) RunAndReturn(run func(time.Duration)) *MockService_SetJitterInterval_Call { + _c.Run(run) + return _c +} + +// Usage provides a mock function with given fields: ctx +func (_m *MockService) Usage(ctx context.Context) (*types.Report, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for Usage") + } + + var r0 *types.Report + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (*types.Report, error)); ok { + return 
rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) *types.Report); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Report) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockService_Usage_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Usage' +type MockService_Usage_Call struct { + *mock.Call +} + +// Usage is a helper method to define mock.On call +// - ctx context.Context +func (_e *MockService_Expecter) Usage(ctx interface{}) *MockService_Usage_Call { + return &MockService_Usage_Call{Call: _e.mock.On("Usage", ctx)} +} + +func (_c *MockService_Usage_Call) Run(run func(ctx context.Context)) *MockService_Usage_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *MockService_Usage_Call) Return(_a0 *types.Report, _a1 error) *MockService_Usage_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockService_Usage_Call) RunAndReturn(run func(context.Context) (*types.Report, error)) *MockService_Usage_Call { + _c.Call.Return(run) + return _c +} + +// NewMockService creates a new instance of MockService. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewMockService(t interface { + mock.TestingT + Cleanup(func()) +}) *MockService { + mock := &MockService{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/usage/service.go b/platform/dbops/binaries/weaviate-src/cluster/usage/service.go new file mode 100644 index 0000000000000000000000000000000000000000..4fecd1c3642440644aab51ee357424f666bd5b81 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/usage/service.go @@ -0,0 +1,333 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package usage + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/weaviate/weaviate/usecases/sharding" + + "github.com/sirupsen/logrus" + + "github.com/weaviate/weaviate/adapters/repos/db" + "github.com/weaviate/weaviate/adapters/repos/db/vector/common" + "github.com/weaviate/weaviate/adapters/repos/db/vector/dynamic" + "github.com/weaviate/weaviate/cluster/usage/types" + backupent "github.com/weaviate/weaviate/entities/backup" + "github.com/weaviate/weaviate/entities/models" + entschema "github.com/weaviate/weaviate/entities/schema" + schemaConfig "github.com/weaviate/weaviate/entities/schema/config" + "github.com/weaviate/weaviate/entities/storagestate" + enthnsw "github.com/weaviate/weaviate/entities/vectorindex/hnsw" + "github.com/weaviate/weaviate/usecases/backup" + "github.com/weaviate/weaviate/usecases/schema" +) + +type Service interface { + Usage(ctx context.Context) (*types.Report, error) + SetJitterInterval(interval time.Duration) +} + +type service struct { + schemaReader schema.SchemaReader + db db.IndexGetter + backups backup.BackupBackendProvider + nodeName string + logger logrus.FieldLogger + 
jitterInterval time.Duration +} + +func NewService(schemaReader schema.SchemaReader, db db.IndexGetter, backups backup.BackupBackendProvider, nodeName string, logger logrus.FieldLogger) Service { + return &service{ + schemaReader: schemaReader, + db: db, + backups: backups, + nodeName: nodeName, + logger: logger, + jitterInterval: 0, // Default to no jitter + } +} + +// SetJitterInterval sets the jitter interval for shard processing +func (s *service) SetJitterInterval(interval time.Duration) { + s.jitterInterval = interval + s.logger.WithFields(logrus.Fields{"jitter_interval": interval.String()}).Info("shard jitter interval updated") +} + +// addJitter adds a small random delay if jitter interval is set +func (s *service) addJitter() { + if s.jitterInterval <= 0 { + return // No jitter if interval is 0 or negative + } + jitter := time.Duration(time.Now().UnixNano() % int64(s.jitterInterval)) + time.Sleep(jitter) +} + +// Usage service collects usage metrics for the node and shall return error in case of any error +// to avoid reporting partial data +func (m *service) Usage(ctx context.Context) (*types.Report, error) { + collections := m.schemaReader.ReadOnlySchema().Classes + usage := &types.Report{ + Node: m.nodeName, + Collections: make([]*types.CollectionUsage, 0, len(collections)), + Backups: make([]*types.BackupUsage, 0), + } + + for _, collection := range collections { + type shardInfo struct { + name string + activityStatus string + } + + var uniqueShardCount int + var localShards []shardInfo + var localShardNames map[string]bool + + err := m.schemaReader.Read(collection.Class, func(_ *models.Class, state *sharding.State) error { + if state == nil { + // this could happen in case the between getting the schema and getting the shard state the collection got deleted + // in the meantime, usually in automated tests or scripts + return nil + } + + uniqueShardCount = len(state.Physical) + localShards = make([]shardInfo, 0, len(state.Physical)) + localShardNames 
= make(map[string]bool) + + for shardName, physical := range state.Physical { + isLocal := state.IsLocalShard(shardName) + if isLocal { + localShardNames[shardName] = true + localShards = append(localShards, shardInfo{ + name: shardName, + activityStatus: physical.ActivityStatus(), + }) + } + } + + return nil + }) + if err != nil { + return nil, fmt.Errorf("failed to read sharding state for collection %s: %w", collection.Class, err) + } + + collectionUsage := &types.CollectionUsage{ + Name: collection.Class, + ReplicationFactor: int(collection.ReplicationConfig.Factor), + UniqueShardCount: uniqueShardCount, + } + + // Get shard usage + index := m.db.GetIndexLike(entschema.ClassName(collection.Class)) + if index == nil { + continue + } + // First, collect cold tenants from sharding state + for _, shard := range localShards { + // Only process COLD tenants here + if shard.activityStatus == models.TenantActivityStatusCOLD { + // Add jitter between cold tenant processing (except for the first one) + if len(collectionUsage.Shards) > 0 { + m.addJitter() + } + + shardUsage, err := calculateUnloadedShardUsage(ctx, index, shard.name, collection.VectorConfig) + if err != nil { + return nil, err + } + + collectionUsage.Shards = append(collectionUsage.Shards, shardUsage) + } + } + + if index == nil { + // index could be deleted in the meantime + m.logger.WithFields(logrus.Fields{"class": collection.Class}).Debug("index not found, could have been deleted in the meantime") + continue + } + + // Then, collect hot tenants from loaded shards + if err := index.ForEachShard(func(shardName string, shard db.ShardLike) error { + // skip non-local shards using extracted local shard names + if !localShardNames[shardName] { // ✅ Use extracted local shard map + return nil + } + + // Add jitter between hot shard processing (except for the first one) + if len(collectionUsage.Shards) > 0 { + m.addJitter() + } + + // Check shard status without forcing load + if shard.GetStatus() == 
storagestate.StatusLoading { + shardUsage, err := calculateUnloadedShardUsage(ctx, index, shardName, collection.VectorConfig) + if err != nil { + return err + } + collectionUsage.Shards = append(collectionUsage.Shards, shardUsage) + return nil + } + + objectStorageSize, err := shard.ObjectStorageSize(ctx) + if err != nil { + return err + } + objectCount, err := shard.ObjectCountAsync(ctx) + if err != nil { + return err + } + + vectorStorageSize, err := shard.VectorStorageSize(ctx) + if err != nil { + return err + } + + shardUsage := &types.ShardUsage{ + Name: shardName, + Status: strings.ToLower(models.TenantActivityStatusACTIVE), + ObjectsCount: objectCount, + ObjectsStorageBytes: uint64(objectStorageSize), + VectorStorageBytes: uint64(vectorStorageSize), + } + + // Get vector usage for each named vector + if err = shard.ForEachVectorIndex(func(targetVector string, vectorIndex db.VectorIndex) error { + category := db.DimensionCategoryStandard // Default category + indexType := "" + var bits int16 + + // Check if this is a named vector configuration + if vectorConfig, exists := collection.VectorConfig[targetVector]; exists { + // Use the named vector's configuration + if vectorIndexConfig, ok := vectorConfig.VectorIndexConfig.(schemaConfig.VectorIndexConfig); ok { + category, _ = db.GetDimensionCategory(vectorIndexConfig) + indexType = vectorIndexConfig.IndexType() + bits = enthnsw.GetRQBits(vectorIndexConfig) + } + } else if vectorIndexConfig, ok := collection.VectorIndexConfig.(schemaConfig.VectorIndexConfig); ok { + // Fall back to legacy single vector configuration + category, _ = db.GetDimensionCategory(vectorIndexConfig) + indexType = vectorIndexConfig.IndexType() + bits = enthnsw.GetRQBits(vectorIndexConfig) + } + + dimensionality, err := shard.DimensionsUsage(ctx, targetVector) + if err != nil { + return err + } + + // For dynamic indexes, get the actual underlying index type + if dynamicIndex, ok := vectorIndex.(dynamic.Index); ok { + indexType = 
dynamicIndex.UnderlyingIndex().String() + } + + vectorUsage := &types.VectorUsage{ + Name: targetVector, + Compression: category.String(), + VectorIndexType: indexType, + IsDynamic: common.IsDynamic(common.IndexType(indexType)), + VectorCompressionRatio: vectorIndex.CompressionStats().CompressionRatio(dimensionality.Dimensions), + Bits: bits, + } + + // Only add dimensionalities if there's valid data + if dimensionality.Count > 0 || dimensionality.Dimensions > 0 { + vectorUsage.Dimensionalities = append(vectorUsage.Dimensionalities, &types.Dimensionality{ + Dimensions: dimensionality.Dimensions, + Count: dimensionality.Count, + }) + } + + shardUsage.NamedVectors = append(shardUsage.NamedVectors, vectorUsage) + return nil + }); err != nil { + return err + } + + collectionUsage.Shards = append(collectionUsage.Shards, shardUsage) + return nil + }); err != nil { + return nil, err + } + + usage.Collections = append(usage.Collections, collectionUsage) + } + + // Get backup usage from all enabled backup backends (unchanged) + for _, backend := range m.backups.EnabledBackupBackends() { + backups, err := backend.AllBackups(ctx) + if err != nil { + m.logger.WithError(err).WithFields(logrus.Fields{"backend": backend}).Error("failed to get backups from backend") + return nil, err + } + + for _, backup := range backups { + if backup.Status != backupent.Success { + continue + } + usage.Backups = append(usage.Backups, &types.BackupUsage{ + ID: backup.ID, + CompletionTime: backup.CompletedAt.Format(time.RFC3339), + SizeInGib: float64(backup.PreCompressionSizeBytes) / (1024 * 1024 * 1024), // Convert bytes to GiB + Type: string(backup.Status), + Collections: backup.Classes(), + }) + } + } + return usage, nil +} + +func calculateUnloadedShardUsage(ctx context.Context, index db.IndexLike, tenantName string, vectorConfigs map[string]models.VectorConfig) (*types.ShardUsage, error) { + // Cold tenant: calculate from disk without loading + objectUsage, err := 
index.CalculateUnloadedObjectsMetrics(ctx, tenantName) + if err != nil { + return nil, err + } + + vectorStorageSize, err := index.CalculateUnloadedVectorsMetrics(ctx, tenantName) + if err != nil { + return nil, err + } + + shardUsage := &types.ShardUsage{ + Name: tenantName, + ObjectsCount: objectUsage.Count, + Status: strings.ToLower(models.TenantActivityStatusINACTIVE), + ObjectsStorageBytes: uint64(objectUsage.StorageBytes), + VectorStorageBytes: uint64(vectorStorageSize), + } + + // Get named vector data for cold shards from schema configuration + for targetVector, vectorConfig := range vectorConfigs { + // For cold shards, we can't get actual dimensionality from disk without loading + // So we'll use a placeholder or estimate based on the schema + vectorUsage := &types.VectorUsage{ + Name: targetVector, + Compression: db.DimensionCategoryStandard.String(), + VectorCompressionRatio: 1.0, // Default ratio for cold shards + } + + if vectorIndexConfig, ok := vectorConfig.VectorIndexConfig.(schemaConfig.VectorIndexConfig); ok { + category, _ := db.GetDimensionCategory(vectorIndexConfig) + vectorUsage.Compression = category.String() + vectorUsage.VectorIndexType = vectorIndexConfig.IndexType() + vectorUsage.Bits = enthnsw.GetRQBits(vectorIndexConfig) + vectorUsage.IsDynamic = common.IsDynamic(common.IndexType(vectorUsage.VectorIndexType)) + } + + shardUsage.NamedVectors = append(shardUsage.NamedVectors, vectorUsage) + } + return shardUsage, err +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/usage/service_test.go b/platform/dbops/binaries/weaviate-src/cluster/usage/service_test.go new file mode 100644 index 0000000000000000000000000000000000000000..c673689ea35f108543382f33bbb3bea8b2632522 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/usage/service_test.go @@ -0,0 +1,1651 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ 
|_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package usage + +import ( + "context" + "sort" + "strings" + "testing" + "time" + + "github.com/weaviate/weaviate/entities/vectorindex/hnsw" + schemaUC "github.com/weaviate/weaviate/usecases/schema" + + logrus "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/weaviate/weaviate/adapters/repos/db" + "github.com/weaviate/weaviate/adapters/repos/db/vector/compressionhelpers" + "github.com/weaviate/weaviate/adapters/repos/db/vector/dynamic" + types "github.com/weaviate/weaviate/cluster/usage/types" + "github.com/weaviate/weaviate/entities/backup" + "github.com/weaviate/weaviate/entities/models" + modulecapabilities "github.com/weaviate/weaviate/entities/modulecapabilities" + entschema "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/storagestate" + backupusecase "github.com/weaviate/weaviate/usecases/backup" + "github.com/weaviate/weaviate/usecases/sharding" +) + +func TestService_Usage_SingleTenant(t *testing.T) { + ctx := context.Background() + + nodeName := "test-node" + className := "TestClass" + replication := 1 + uniqueShards := 1 + shardName := "abcd" + objectCount := 1000 + storageSize := int64(5000000) + vectorName := "abcd" + vectorType := "hnsw" + compression := "standard" + compressionRatio := 0.75 + dimensionality := 1536 + dimensionCount := 1000 + + shardingState := &sharding.State{ + Physical: map[string]sharding.Physical{ + "": { + Name: shardName, + BelongsToNodes: []string{nodeName}, + Status: models.TenantActivityStatusHOT, + }, + }, + } + shardingState.SetLocalName(nodeName) + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: []*models.Class{ + { + Class: className, + VectorIndexConfig: 
&hnsw.UserConfig{}, + ReplicationConfig: &models.ReplicationConfig{ + Factor: int64(replication), + }, + }, + }}).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardingState) + }).Maybe() + + mockDB := db.NewMockIndexGetter(t) + mockIndex := db.NewMockIndexLike(t) + mockDB.EXPECT().GetIndexLike(entschema.ClassName(className)).Return(mockIndex) + + mockShard := db.NewMockShardLike(t) + mockShard.EXPECT().GetStatus().Return(storagestate.StatusReady) + mockShard.EXPECT().ObjectCountAsync(ctx).Return(int64(objectCount), nil) + mockShard.EXPECT().ObjectStorageSize(ctx).Return(storageSize, nil) + mockShard.EXPECT().VectorStorageSize(ctx).Return(int64(0), nil) + mockShard.EXPECT().DimensionsUsage(ctx, vectorName).Return(types.Dimensionality{ + Dimensions: dimensionality, + Count: dimensionCount, + }, nil) + + mockVectorIndex := db.NewMockVectorIndex(t) + mockCompressionStats := compressionhelpers.NewMockCompressionStats(t) + mockCompressionStats.EXPECT().CompressionRatio(dimensionality).Return(compressionRatio) + mockVectorIndex.EXPECT().CompressionStats().Return(mockCompressionStats) + + mockIndex.On("ForEachShard", mock.AnythingOfType("func(string, db.ShardLike) error")).Return(nil).Run(func(args mock.Arguments) { + f := args.Get(0).(func(string, db.ShardLike) error) + f("", mockShard) + }) + + mockShard.On("ForEachVectorIndex", mock.AnythingOfType("func(string, db.VectorIndex) error")).Return(nil).Run(func(args mock.Arguments) { + f := args.Get(0).(func(string, db.VectorIndex) error) + f(vectorName, mockVectorIndex) + }) + + mockBackupProvider := backupusecase.NewMockBackupBackendProvider(t) + mockBackupProvider.EXPECT().EnabledBackupBackends().Return([]modulecapabilities.BackupBackend{}) + + logger, _ := logrus.NewNullLogger() + service := NewService(mockSchemaReader, mockDB, 
mockBackupProvider, nodeName, logger) + + result, err := service.Usage(ctx) + + require.NoError(t, err) + require.NotNil(t, result) + assert.Equal(t, nodeName, result.Node) + assert.Len(t, result.Collections, 1) + + collection := result.Collections[0] + assert.Equal(t, className, collection.Name) + assert.Equal(t, replication, collection.ReplicationFactor) + assert.Equal(t, uniqueShards, collection.UniqueShardCount) + assert.Len(t, collection.Shards, 1) + + shard := collection.Shards[0] + assert.Equal(t, "", shard.Name) + assert.Equal(t, int64(objectCount), shard.ObjectsCount) + assert.Equal(t, uint64(storageSize), shard.ObjectsStorageBytes) + assert.Len(t, shard.NamedVectors, 1) + + vector := shard.NamedVectors[0] + assert.Equal(t, vectorName, vector.Name) + assert.Equal(t, vectorType, vector.VectorIndexType) + assert.Equal(t, compression, vector.Compression) + assert.Equal(t, compressionRatio, vector.VectorCompressionRatio) + assert.Len(t, vector.Dimensionalities, 1) + + dim := vector.Dimensionalities[0] + assert.Equal(t, dimensionality, dim.Dimensions) + assert.Equal(t, dimensionCount, dim.Count) + + mockSchemaReader.AssertExpectations(t) + mockDB.AssertExpectations(t) + mockIndex.AssertExpectations(t) + mockShard.AssertExpectations(t) + mockVectorIndex.AssertExpectations(t) + mockCompressionStats.AssertExpectations(t) + mockBackupProvider.AssertExpectations(t) +} + +func TestService_Usage_MultiTenant_HotAndCold(t *testing.T) { + ctx := context.Background() + + nodeName := "test-node" + className := "MultiTenantClass" + replication := 3 + uniqueShards := 2 + hotTenant := "tenant1" + coldTenant := "tenant2" + hotObjectCount := 1500 + coldObjectCount := 500 + hotStorageSize := int64(7500000) + coldStorageSize := int64(2500000) + vectorName := "abcd" + vectorType := "hnsw" + compression := "standard" + compressionRatio := 0.8 + dimensionality := 1536 + dimensionCount := 1500 + + shardingState := &sharding.State{ + PartitioningEnabled: true, + Physical: 
map[string]sharding.Physical{ + hotTenant: { + Name: hotTenant, + BelongsToNodes: []string{nodeName}, + Status: models.TenantActivityStatusHOT, + }, + coldTenant: { + Name: coldTenant, + BelongsToNodes: []string{nodeName}, + Status: models.TenantActivityStatusCOLD, + }, + }, + } + shardingState.SetLocalName(nodeName) + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: []*models.Class{ + { + Class: className, + VectorIndexConfig: &hnsw.UserConfig{}, + ReplicationConfig: &models.ReplicationConfig{ + Factor: int64(replication), + }, + }, + }}).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardingState) + }).Maybe() + + mockDB := db.NewMockIndexGetter(t) + mockIndex := db.NewMockIndexLike(t) + mockDB.EXPECT().GetIndexLike(entschema.ClassName(className)).Return(mockIndex) + + mockShard := db.NewMockShardLike(t) + mockShard.EXPECT().GetStatus().Return(storagestate.StatusReady) + mockShard.EXPECT().ObjectCountAsync(ctx).Return(int64(hotObjectCount), nil) + mockShard.EXPECT().ObjectStorageSize(ctx).Return(hotStorageSize, nil) + mockShard.EXPECT().VectorStorageSize(ctx).Return(int64(0), nil) + mockShard.EXPECT().DimensionsUsage(ctx, vectorName).Return(types.Dimensionality{ + Dimensions: dimensionality, + Count: dimensionCount, + }, nil) + + mockVectorIndex := db.NewMockVectorIndex(t) + mockCompressionStats := compressionhelpers.NewMockCompressionStats(t) + mockCompressionStats.EXPECT().CompressionRatio(dimensionality).Return(compressionRatio) + mockVectorIndex.EXPECT().CompressionStats().Return(mockCompressionStats) + + mockIndex.On("ForEachShard", mock.AnythingOfType("func(string, db.ShardLike) error")).Return(nil).Run(func(args mock.Arguments) { + f := args.Get(0).(func(string, db.ShardLike) error) + 
f(hotTenant, mockShard) + }) + mockIndex.EXPECT().CalculateUnloadedObjectsMetrics(ctx, coldTenant).Return(types.ObjectUsage{ + Count: int64(coldObjectCount), + StorageBytes: coldStorageSize, + }, nil) + mockIndex.EXPECT().CalculateUnloadedVectorsMetrics(ctx, coldTenant).Return(int64(0), nil) + + mockShard.On("ForEachVectorIndex", mock.AnythingOfType("func(string, db.VectorIndex) error")).Return(nil).Run(func(args mock.Arguments) { + f := args.Get(0).(func(string, db.VectorIndex) error) + f(vectorName, mockVectorIndex) + }) + + mockBackupProvider := backupusecase.NewMockBackupBackendProvider(t) + mockBackupProvider.EXPECT().EnabledBackupBackends().Return([]modulecapabilities.BackupBackend{}) + + logger, _ := logrus.NewNullLogger() + service := NewService(mockSchemaReader, mockDB, mockBackupProvider, nodeName, logger) + + result, err := service.Usage(ctx) + + require.NoError(t, err) + require.NotNil(t, result) + assert.Equal(t, nodeName, result.Node) + assert.Len(t, result.Collections, 1) + + collection := result.Collections[0] + assert.Equal(t, className, collection.Name) + assert.Equal(t, replication, collection.ReplicationFactor) + assert.Equal(t, uniqueShards, collection.UniqueShardCount) + assert.Len(t, collection.Shards, 2) + + var hotShard, coldShard *types.ShardUsage + for _, shard := range collection.Shards { + switch shard.Name { + case hotTenant: + hotShard = shard + case coldTenant: + coldShard = shard + } + } + + require.NotNil(t, hotShard) + assert.Equal(t, int64(hotObjectCount), hotShard.ObjectsCount) + assert.Equal(t, uint64(hotStorageSize), hotShard.ObjectsStorageBytes) + assert.Equal(t, strings.ToLower(models.TenantActivityStatusACTIVE), hotShard.Status) + assert.Len(t, hotShard.NamedVectors, 1) + + require.NotNil(t, coldShard) + assert.Equal(t, int64(coldObjectCount), coldShard.ObjectsCount) + assert.Equal(t, uint64(coldStorageSize), coldShard.ObjectsStorageBytes) + assert.Equal(t, strings.ToLower(models.TenantActivityStatusINACTIVE), 
coldShard.Status) + assert.Len(t, coldShard.NamedVectors, 0) + + vector := hotShard.NamedVectors[0] + assert.Equal(t, vectorName, vector.Name) + assert.Equal(t, vectorType, vector.VectorIndexType) + assert.Equal(t, compression, vector.Compression) + assert.Equal(t, compressionRatio, vector.VectorCompressionRatio) + assert.Len(t, vector.Dimensionalities, 1) + dim := vector.Dimensionalities[0] + assert.Equal(t, dimensionality, dim.Dimensions) + assert.Equal(t, dimensionCount, dim.Count) + + mockSchemaReader.AssertExpectations(t) + mockDB.AssertExpectations(t) + mockIndex.AssertExpectations(t) + mockShard.AssertExpectations(t) + mockVectorIndex.AssertExpectations(t) + mockCompressionStats.AssertExpectations(t) + mockBackupProvider.AssertExpectations(t) +} + +func TestService_Usage_WithBackups(t *testing.T) { + ctx := context.Background() + + nodeName := "test-node" + backupID := "backup-1" + backupStatus := backup.Success + completionTime := time.Date(2025, 1, 15, 10, 30, 0, 0, time.UTC) + preCompressionSizeBytes := int64(1073741824) // 1 GiB + sizeInGib := 1.0 + backupType := "SUCCESS" + class1 := "Class1" + class2 := "Class2" + class3 := "Class3" + + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: []*models.Class{}}).Maybe() + mockDB := db.NewMockIndexGetter(t) + + mockBackupBackend := modulecapabilities.NewMockBackupBackend(t) + backups := []*backup.DistributedBackupDescriptor{ + { + ID: backupID, + Status: backupStatus, + CompletedAt: completionTime, + PreCompressionSizeBytes: preCompressionSizeBytes, + Nodes: map[string]*backup.NodeDescriptor{ + "node1": {Classes: []string{class1, class2}}, + }, + }, + { + ID: "backup-2", + Status: backup.Failed, + CompletedAt: completionTime, + PreCompressionSizeBytes: 2147483648, + Nodes: map[string]*backup.NodeDescriptor{ + "node1": {Classes: []string{class3}}, + }, + }, + } + mockBackupBackend.EXPECT().AllBackups(ctx).Return(backups, nil) + + 
mockBackupProvider := backupusecase.NewMockBackupBackendProvider(t) + mockBackupProvider.EXPECT().EnabledBackupBackends().Return([]modulecapabilities.BackupBackend{mockBackupBackend}) + + logger, _ := logrus.NewNullLogger() + service := NewService(mockSchemaReader, mockDB, mockBackupProvider, nodeName, logger) + + result, err := service.Usage(ctx) + + require.NoError(t, err) + require.NotNil(t, result) + assert.Equal(t, nodeName, result.Node) + assert.Len(t, result.Collections, 0) + assert.Len(t, result.Backups, 1) + + backup := result.Backups[0] + assert.Equal(t, backupID, backup.ID) + assert.Equal(t, completionTime.Format(time.RFC3339), backup.CompletionTime) + assert.Equal(t, sizeInGib, backup.SizeInGib) + assert.Equal(t, backupType, backup.Type) + + collections := backup.Collections + sort.Strings(collections) + expectedCollections := []string{class1, class2} + sort.Strings(expectedCollections) + assert.Equal(t, expectedCollections, collections) + + mockSchemaReader.AssertExpectations(t) + mockDB.AssertExpectations(t) + mockBackupProvider.AssertExpectations(t) + mockBackupBackend.AssertExpectations(t) +} + +func TestService_Usage_WithNamedVectors(t *testing.T) { + ctx := context.Background() + + nodeName := "test-node" + className := "NamedVectorClass" + replication := 1 + shardName := "" + objectCount := 2000 + storageSize := int64(10000000) + vectorName := "abcd" + textVectorName := "text" + imageVectorName := "image" + vectorType := "hnsw" + compression := "standard" + defaultCompressionRatio := 0.7 + textCompressionRatio := 0.7 + imageCompressionRatio := 0.8 + dimensionality := 1536 + textDimensionality := 768 + imageDimensionality := 1024 + dimensionCount := 2000 + + shardingState := &sharding.State{ + Physical: map[string]sharding.Physical{ + shardName: { + Name: "", + BelongsToNodes: []string{nodeName}, + Status: models.TenantActivityStatusHOT, + }, + }, + } + shardingState.SetLocalName(nodeName) + + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + 
mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: []*models.Class{ + { + Class: className, + VectorIndexConfig: &hnsw.UserConfig{}, + ReplicationConfig: &models.ReplicationConfig{ + Factor: int64(replication), + }, + }, + }}).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardingState) + }).Maybe() + + mockDB := db.NewMockIndexGetter(t) + mockIndex := db.NewMockIndexLike(t) + mockDB.EXPECT().GetIndexLike(entschema.ClassName(className)).Return(mockIndex) + + mockShard := db.NewMockShardLike(t) + mockShard.EXPECT().GetStatus().Return(storagestate.StatusReady) + mockShard.EXPECT().ObjectCountAsync(ctx).Return(int64(objectCount), nil) + mockShard.EXPECT().ObjectStorageSize(ctx).Return(storageSize, nil) + mockShard.EXPECT().VectorStorageSize(ctx).Return(int64(0), nil) + mockShard.EXPECT().DimensionsUsage(ctx, vectorName).Return(types.Dimensionality{ + Dimensions: dimensionality, + Count: dimensionCount, + }, nil) + mockShard.EXPECT().DimensionsUsage(ctx, textVectorName).Return(types.Dimensionality{ + Dimensions: textDimensionality, + Count: dimensionCount, + }, nil) + mockShard.EXPECT().DimensionsUsage(ctx, imageVectorName).Return(types.Dimensionality{ + Dimensions: imageDimensionality, + Count: dimensionCount, + }, nil) + + mockDefaultVectorIndex := db.NewMockVectorIndex(t) + mockTextVectorIndex := db.NewMockVectorIndex(t) + mockImageVectorIndex := db.NewMockVectorIndex(t) + + mockDefaultCompressionStats := compressionhelpers.NewMockCompressionStats(t) + mockDefaultCompressionStats.EXPECT().CompressionRatio(dimensionality).Return(defaultCompressionRatio) + mockDefaultVectorIndex.EXPECT().CompressionStats().Return(mockDefaultCompressionStats) + + mockTextCompressionStats := compressionhelpers.NewMockCompressionStats(t) + 
mockTextCompressionStats.EXPECT().CompressionRatio(textDimensionality).Return(textCompressionRatio) + mockTextVectorIndex.EXPECT().CompressionStats().Return(mockTextCompressionStats) + + mockImageCompressionStats := compressionhelpers.NewMockCompressionStats(t) + mockImageCompressionStats.EXPECT().CompressionRatio(imageDimensionality).Return(imageCompressionRatio) + mockImageVectorIndex.EXPECT().CompressionStats().Return(mockImageCompressionStats) + + mockIndex.On("ForEachShard", mock.AnythingOfType("func(string, db.ShardLike) error")).Return(nil).Run(func(args mock.Arguments) { + f := args.Get(0).(func(string, db.ShardLike) error) + f(shardName, mockShard) + }) + + mockShard.On("ForEachVectorIndex", mock.AnythingOfType("func(string, db.VectorIndex) error")).Return(nil).Run(func(args mock.Arguments) { + f := args.Get(0).(func(string, db.VectorIndex) error) + f(vectorName, mockDefaultVectorIndex) + f(textVectorName, mockTextVectorIndex) + f(imageVectorName, mockImageVectorIndex) + }) + + mockBackupProvider := backupusecase.NewMockBackupBackendProvider(t) + mockBackupProvider.EXPECT().EnabledBackupBackends().Return([]modulecapabilities.BackupBackend{}) + + logger, _ := logrus.NewNullLogger() + service := NewService(mockSchemaReader, mockDB, mockBackupProvider, nodeName, logger) + + result, err := service.Usage(ctx) + + require.NoError(t, err) + require.NotNil(t, result) + assert.Equal(t, nodeName, result.Node) + assert.Len(t, result.Collections, 1) + + collection := result.Collections[0] + assert.Equal(t, className, collection.Name) + assert.Len(t, collection.Shards, 1) + + shard := collection.Shards[0] + assert.Equal(t, shardName, shard.Name) + assert.Equal(t, int64(objectCount), shard.ObjectsCount) + assert.Equal(t, uint64(storageSize), shard.ObjectsStorageBytes) + assert.Len(t, shard.NamedVectors, 3) + + defaultVector := shard.NamedVectors[0] + assert.Equal(t, vectorName, defaultVector.Name) + assert.Equal(t, vectorType, defaultVector.VectorIndexType) + 
assert.Equal(t, compression, defaultVector.Compression) + assert.Equal(t, defaultCompressionRatio, defaultVector.VectorCompressionRatio) + assert.Len(t, defaultVector.Dimensionalities, 1) + + textVector := shard.NamedVectors[1] + assert.Equal(t, textVectorName, textVector.Name) + assert.Equal(t, vectorType, textVector.VectorIndexType) + assert.Equal(t, compression, textVector.Compression) + assert.Equal(t, textCompressionRatio, textVector.VectorCompressionRatio) + assert.Len(t, textVector.Dimensionalities, 1) + + imageVector := shard.NamedVectors[2] + assert.Equal(t, imageVectorName, imageVector.Name) + assert.Equal(t, vectorType, imageVector.VectorIndexType) + assert.Equal(t, compression, imageVector.Compression) + assert.Equal(t, imageCompressionRatio, imageVector.VectorCompressionRatio) + assert.Len(t, imageVector.Dimensionalities, 1) + + mockSchemaReader.AssertExpectations(t) + mockDB.AssertExpectations(t) + mockIndex.AssertExpectations(t) + mockShard.AssertExpectations(t) + mockDefaultVectorIndex.AssertExpectations(t) + mockDefaultCompressionStats.AssertExpectations(t) + mockTextVectorIndex.AssertExpectations(t) + mockTextCompressionStats.AssertExpectations(t) + mockImageVectorIndex.AssertExpectations(t) + mockImageCompressionStats.AssertExpectations(t) + mockBackupProvider.AssertExpectations(t) +} + +func TestService_Usage_EmptyCollections(t *testing.T) { + ctx := context.Background() + + nodeName := "test-node" + + mockDB := db.NewMockIndexGetter(t) + + mockBackupProvider := backupusecase.NewMockBackupBackendProvider(t) + mockBackupProvider.EXPECT().EnabledBackupBackends().Return([]modulecapabilities.BackupBackend{}) + + logger, _ := logrus.NewNullLogger() + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: []*models.Class{}}).Maybe() + service := NewService(mockSchemaReader, mockDB, mockBackupProvider, nodeName, logger) + + result, err := service.Usage(ctx) + + require.NoError(t, 
err) + require.NotNil(t, result) + assert.Equal(t, nodeName, result.Node) + assert.Len(t, result.Collections, 0) + assert.Len(t, result.Backups, 0) + + mockSchemaReader.AssertExpectations(t) + mockDB.AssertExpectations(t) + mockBackupProvider.AssertExpectations(t) +} + +func TestService_Usage_BackupError(t *testing.T) { + ctx := context.Background() + + nodeName := "test-node" + + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: []*models.Class{}}).Maybe() + mockDB := db.NewMockIndexGetter(t) + + mockBackupBackend := modulecapabilities.NewMockBackupBackend(t) + mockBackupBackend.EXPECT().AllBackups(ctx).Return(nil, assert.AnError) + + mockBackupProvider := backupusecase.NewMockBackupBackendProvider(t) + mockBackupProvider.EXPECT().EnabledBackupBackends().Return([]modulecapabilities.BackupBackend{mockBackupBackend}) + + logger, _ := logrus.NewNullLogger() + service := NewService(mockSchemaReader, mockDB, mockBackupProvider, nodeName, logger) + + _, err := service.Usage(ctx) + + require.Error(t, err) + require.ErrorIs(t, err, assert.AnError) + + mockSchemaReader.AssertExpectations(t) + mockDB.AssertExpectations(t) + mockBackupProvider.AssertExpectations(t) + mockBackupBackend.AssertExpectations(t) +} + +func TestService_Usage_VectorIndexError(t *testing.T) { + ctx := context.Background() + + nodeName := "test-node" + className := "ErrorClass" + replication := 1 + shardName := "" + objectCount := 1000 + storageSize := int64(5000000) + vectorName := "abcd" + vectorType := "hnsw" + compression := "standard" + compressionRatio := 1.0 + dimensionality := 1536 + dimensionCount := 1000 + + shardingState := &sharding.State{ + Physical: map[string]sharding.Physical{ + shardName: { + Name: "", + BelongsToNodes: []string{nodeName}, + Status: models.TenantActivityStatusHOT, + }, + }, + } + shardingState.SetLocalName(nodeName) + + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + 
mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: []*models.Class{ + { + Class: className, + VectorIndexConfig: &hnsw.UserConfig{}, + ReplicationConfig: &models.ReplicationConfig{ + Factor: int64(replication), + }, + }, + }}).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardingState) + }).Maybe() + + mockDB := db.NewMockIndexGetter(t) + mockIndex := db.NewMockIndexLike(t) + mockDB.EXPECT().GetIndexLike(entschema.ClassName(className)).Return(mockIndex) + + mockShard := db.NewMockShardLike(t) + mockShard.EXPECT().GetStatus().Return(storagestate.StatusReady) + mockShard.EXPECT().ObjectCountAsync(ctx).Return(int64(objectCount), nil) + mockShard.EXPECT().ObjectStorageSize(ctx).Return(storageSize, nil) + mockShard.EXPECT().VectorStorageSize(ctx).Return(int64(0), nil) + mockShard.EXPECT().DimensionsUsage(ctx, vectorName).Return(types.Dimensionality{ + Dimensions: dimensionality, + Count: dimensionCount, + }, nil) + + mockVectorIndex := db.NewMockVectorIndex(t) + mockVectorIndex.EXPECT().CompressionStats().Return(compressionhelpers.UncompressedStats{}) + + mockIndex.On("ForEachShard", mock.AnythingOfType("func(string, db.ShardLike) error")).Return(nil).Run(func(args mock.Arguments) { + f := args.Get(0).(func(string, db.ShardLike) error) + f(shardName, mockShard) + }) + + mockShard.On("ForEachVectorIndex", mock.AnythingOfType("func(string, db.VectorIndex) error")).Return(nil).Run(func(args mock.Arguments) { + f := args.Get(0).(func(string, db.VectorIndex) error) + f(vectorName, mockVectorIndex) + }) + + mockBackupProvider := backupusecase.NewMockBackupBackendProvider(t) + mockBackupProvider.EXPECT().EnabledBackupBackends().Return([]modulecapabilities.BackupBackend{}) + + logger, _ := logrus.NewNullLogger() + service := NewService(mockSchemaReader, mockDB, 
mockBackupProvider, nodeName, logger) + + result, err := service.Usage(ctx) + + require.NoError(t, err) + require.NotNil(t, result) + assert.Equal(t, nodeName, result.Node) + assert.Len(t, result.Collections, 1) + + collection := result.Collections[0] + assert.Len(t, collection.Shards, 1) + + shard := collection.Shards[0] + assert.Len(t, shard.NamedVectors, 1) + + vector := shard.NamedVectors[0] + assert.Equal(t, vectorName, vector.Name) + assert.Equal(t, vectorType, vector.VectorIndexType) + assert.Equal(t, compression, vector.Compression) + assert.Equal(t, compressionRatio, vector.VectorCompressionRatio) + assert.Len(t, vector.Dimensionalities, 1) + + mockSchemaReader.AssertExpectations(t) + mockDB.AssertExpectations(t) + mockIndex.AssertExpectations(t) + mockShard.AssertExpectations(t) + mockVectorIndex.AssertExpectations(t) + mockBackupProvider.AssertExpectations(t) +} + +func TestService_Usage_NilVectorIndexConfig(t *testing.T) { + ctx := context.Background() + + nodeName := "test-node" + className := "NilConfigClass" + shardName := "" + objectCount := 1000 + storageSize := int64(5000000) + vectorName := "abcd" + vectorType := "" + compression := "standard" + compressionRatio := 0.75 + dimensionality := 1536 + dimensionCount := 1000 + + shardingState := &sharding.State{ + Physical: map[string]sharding.Physical{ + shardName: { + Name: "", + BelongsToNodes: []string{nodeName}, + Status: models.TenantActivityStatusHOT, + }, + }, + } + shardingState.SetLocalName(nodeName) + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: []*models.Class{ + { + Class: className, + VectorIndexConfig: nil, + ReplicationConfig: &models.ReplicationConfig{}, + }, + }}).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, 
shardingState) + }).Maybe() + + mockDB := db.NewMockIndexGetter(t) + mockIndex := db.NewMockIndexLike(t) + mockDB.EXPECT().GetIndexLike(entschema.ClassName(className)).Return(mockIndex) + + mockShard := db.NewMockShardLike(t) + mockShard.EXPECT().GetStatus().Return(storagestate.StatusReady) + mockShard.EXPECT().ObjectCountAsync(ctx).Return(int64(objectCount), nil) + mockShard.EXPECT().ObjectStorageSize(ctx).Return(storageSize, nil) + mockShard.EXPECT().VectorStorageSize(ctx).Return(int64(0), nil) + mockShard.EXPECT().DimensionsUsage(ctx, vectorName).Return(types.Dimensionality{ + Dimensions: dimensionality, + Count: dimensionCount, + }, nil) + + mockVectorIndex := db.NewMockVectorIndex(t) + mockCompressionStats := compressionhelpers.NewMockCompressionStats(t) + mockCompressionStats.EXPECT().CompressionRatio(dimensionality).Return(compressionRatio) + mockVectorIndex.EXPECT().CompressionStats().Return(mockCompressionStats) + + mockIndex.On("ForEachShard", mock.AnythingOfType("func(string, db.ShardLike) error")).Return(nil).Run(func(args mock.Arguments) { + f := args.Get(0).(func(string, db.ShardLike) error) + f(shardName, mockShard) + }) + + mockShard.On("ForEachVectorIndex", mock.AnythingOfType("func(string, db.VectorIndex) error")).Return(nil).Run(func(args mock.Arguments) { + f := args.Get(0).(func(string, db.VectorIndex) error) + f(vectorName, mockVectorIndex) + }) + + mockBackupProvider := backupusecase.NewMockBackupBackendProvider(t) + mockBackupProvider.EXPECT().EnabledBackupBackends().Return([]modulecapabilities.BackupBackend{}) + + logger, _ := logrus.NewNullLogger() + service := NewService(mockSchemaReader, mockDB, mockBackupProvider, nodeName, logger) + + result, err := service.Usage(ctx) + + require.NoError(t, err) + require.NotNil(t, result) + assert.Equal(t, nodeName, result.Node) + assert.Len(t, result.Collections, 1) + + collection := result.Collections[0] + assert.Equal(t, className, collection.Name) + assert.Len(t, collection.Shards, 1) + + shard := 
collection.Shards[0] + assert.Equal(t, shardName, shard.Name) + assert.Equal(t, int64(objectCount), shard.ObjectsCount) + assert.Equal(t, uint64(storageSize), shard.ObjectsStorageBytes) + assert.Len(t, shard.NamedVectors, 1) + + vector := shard.NamedVectors[0] + assert.Equal(t, vectorName, vector.Name) + assert.Equal(t, vectorType, vector.VectorIndexType) + assert.Equal(t, compression, vector.Compression) + assert.Equal(t, compressionRatio, vector.VectorCompressionRatio) + assert.Len(t, vector.Dimensionalities, 1) + dim := vector.Dimensionalities[0] + assert.Equal(t, dimensionality, dim.Dimensions) + assert.Equal(t, dimensionCount, dim.Count) + + mockSchemaReader.AssertExpectations(t) + mockDB.AssertExpectations(t) + mockIndex.AssertExpectations(t) + mockShard.AssertExpectations(t) + mockVectorIndex.AssertExpectations(t) + mockCompressionStats.AssertExpectations(t) + mockBackupProvider.AssertExpectations(t) +} + +func TestService_Usage_VectorStorageSize(t *testing.T) { + ctx := context.Background() + + nodeName := "test-node" + className := "VectorStorageClass" + replication := 3 + uniqueShards := 2 + hotTenant := "hot-tenant" + coldTenant := "cold-tenant" + + // Hot tenant metrics + hotObjectCount := 2000 + hotStorageSize := int64(10000000) + hotVectorStorageSize := int64(8000000) // 8MB for hot tenant vectors + + // Cold tenant metrics + coldObjectCount := 1000 + coldStorageSize := int64(5000000) + coldVectorStorageSize := int64(4000000) // 4MB for cold tenant vectors + + vectorName := "default" + vectorType := "hnsw" + compression := "standard" + compressionRatio := 0.75 + dimensionality := 1536 + dimensionCount := 2000 + + shardingState := &sharding.State{ + PartitioningEnabled: true, + Physical: map[string]sharding.Physical{ + hotTenant: { + Name: hotTenant, + BelongsToNodes: []string{nodeName}, + Status: models.TenantActivityStatusHOT, + }, + coldTenant: { + Name: coldTenant, + BelongsToNodes: []string{nodeName}, + Status: models.TenantActivityStatusCOLD, + 
}, + }, + } + shardingState.SetLocalName(nodeName) + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: []*models.Class{ + { + Class: className, + VectorIndexConfig: &hnsw.UserConfig{}, + ReplicationConfig: &models.ReplicationConfig{ + Factor: int64(replication), + }, + }, + }}).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardingState) + }).Maybe() + + mockDB := db.NewMockIndexGetter(t) + mockIndex := db.NewMockIndexLike(t) + mockDB.EXPECT().GetIndexLike(entschema.ClassName(className)).Return(mockIndex) + + // Mock hot tenant shard + mockHotShard := db.NewMockShardLike(t) + mockHotShard.EXPECT().GetStatus().Return(storagestate.StatusReady) + mockHotShard.EXPECT().ObjectCountAsync(ctx).Return(int64(hotObjectCount), nil) + mockHotShard.EXPECT().ObjectStorageSize(ctx).Return(hotStorageSize, nil) + mockHotShard.EXPECT().VectorStorageSize(ctx).Return(hotVectorStorageSize, nil) // Test actual vector storage size + mockHotShard.EXPECT().DimensionsUsage(ctx, vectorName).Return(types.Dimensionality{ + Dimensions: dimensionality, + Count: dimensionCount, + }, nil) + + mockVectorIndex := db.NewMockVectorIndex(t) + mockCompressionStats := compressionhelpers.NewMockCompressionStats(t) + mockCompressionStats.EXPECT().CompressionRatio(dimensionality).Return(compressionRatio) + mockVectorIndex.EXPECT().CompressionStats().Return(mockCompressionStats) + + // Mock cold tenant calculations + mockIndex.EXPECT().CalculateUnloadedObjectsMetrics(ctx, coldTenant).Return(types.ObjectUsage{ + Count: int64(coldObjectCount), + StorageBytes: coldStorageSize, + }, nil) + mockIndex.EXPECT().CalculateUnloadedVectorsMetrics(ctx, coldTenant).Return(coldVectorStorageSize, nil) // Test cold tenant vector storage + + 
mockIndex.On("ForEachShard", mock.AnythingOfType("func(string, db.ShardLike) error")).Return(nil).Run(func(args mock.Arguments) { + f := args.Get(0).(func(string, db.ShardLike) error) + f(hotTenant, mockHotShard) + }) + + mockHotShard.On("ForEachVectorIndex", mock.AnythingOfType("func(string, db.VectorIndex) error")).Return(nil).Run(func(args mock.Arguments) { + f := args.Get(0).(func(string, db.VectorIndex) error) + f(vectorName, mockVectorIndex) + }) + + mockBackupProvider := backupusecase.NewMockBackupBackendProvider(t) + mockBackupProvider.EXPECT().EnabledBackupBackends().Return([]modulecapabilities.BackupBackend{}) + + logger, _ := logrus.NewNullLogger() + service := NewService(mockSchemaReader, mockDB, mockBackupProvider, nodeName, logger) + + result, err := service.Usage(ctx) + + require.NoError(t, err) + require.NotNil(t, result) + assert.Equal(t, nodeName, result.Node) + assert.Len(t, result.Collections, 1) + + collection := result.Collections[0] + assert.Equal(t, className, collection.Name) + assert.Equal(t, replication, collection.ReplicationFactor) + assert.Equal(t, uniqueShards, collection.UniqueShardCount) + assert.Len(t, collection.Shards, 2) + + // Find hot and cold shards + var hotShard, coldShard *types.ShardUsage + for _, shard := range collection.Shards { + switch shard.Name { + case hotTenant: + hotShard = shard + case coldTenant: + coldShard = shard + } + } + + // Verify hot tenant vector storage + require.NotNil(t, hotShard) + assert.Equal(t, int64(hotObjectCount), hotShard.ObjectsCount) + assert.Equal(t, uint64(hotStorageSize), hotShard.ObjectsStorageBytes) + assert.Equal(t, strings.ToLower(models.TenantActivityStatusACTIVE), hotShard.Status) + assert.Equal(t, uint64(hotVectorStorageSize), hotShard.VectorStorageBytes) // Verify hot tenant vector storage + assert.Len(t, hotShard.NamedVectors, 1) + + // Verify cold tenant vector storage + require.NotNil(t, coldShard) + assert.Equal(t, int64(coldObjectCount), coldShard.ObjectsCount) + 
assert.Equal(t, uint64(coldStorageSize), coldShard.ObjectsStorageBytes) + assert.Equal(t, strings.ToLower(models.TenantActivityStatusINACTIVE), coldShard.Status) + assert.Equal(t, uint64(coldVectorStorageSize), coldShard.VectorStorageBytes) // Verify cold tenant vector storage + assert.Len(t, coldShard.NamedVectors, 0) // Cold tenants don't have named vectors + + // Verify vector details for hot tenant + vector := hotShard.NamedVectors[0] + assert.Equal(t, vectorName, vector.Name) + assert.Equal(t, vectorType, vector.VectorIndexType) + assert.Equal(t, compression, vector.Compression) + assert.Equal(t, compressionRatio, vector.VectorCompressionRatio) + assert.Len(t, vector.Dimensionalities, 1) + dim := vector.Dimensionalities[0] + assert.Equal(t, dimensionality, dim.Dimensions) + assert.Equal(t, dimensionCount, dim.Count) + + mockSchemaReader.AssertExpectations(t) + mockDB.AssertExpectations(t) + mockIndex.AssertExpectations(t) + mockHotShard.AssertExpectations(t) + mockVectorIndex.AssertExpectations(t) + mockCompressionStats.AssertExpectations(t) + mockBackupProvider.AssertExpectations(t) +} + +func TestService_DynamicIndexDetection(t *testing.T) { + tests := []struct { + name string + createMockIndex func(t *testing.T) db.VectorIndex + createMockDynamicIndex func(t *testing.T) dynamic.Index + expectedIndexType string + expectedUnderlyingType string + isDynamic bool + }{ + { + name: "dynamic index with flat underlying", + createMockDynamicIndex: func(t *testing.T) dynamic.Index { + mock := dynamic.NewMockIndex(t) + mock.EXPECT().UnderlyingIndex().Return("flat") + return mock + }, + expectedIndexType: "flat", + expectedUnderlyingType: "flat", + isDynamic: true, + }, + { + name: "dynamic index with hnsw underlying", + createMockDynamicIndex: func(t *testing.T) dynamic.Index { + mock := dynamic.NewMockIndex(t) + mock.EXPECT().UnderlyingIndex().Return("hnsw") + return mock + }, + expectedIndexType: "hnsw", + expectedUnderlyingType: "hnsw", + isDynamic: true, + }, + { + 
name: "dynamic index with dynamic underlying", + createMockDynamicIndex: func(t *testing.T) dynamic.Index { + mock := dynamic.NewMockIndex(t) + mock.EXPECT().UnderlyingIndex().Return("dynamic") + return mock + }, + expectedIndexType: "dynamic", + expectedUnderlyingType: "dynamic", + isDynamic: true, + }, + { + name: "regular hnsw index", + createMockIndex: func(t *testing.T) db.VectorIndex { + mock := db.NewMockVectorIndex(t) + return mock + }, + expectedIndexType: "hnsw", + expectedUnderlyingType: "hnsw", + isDynamic: false, + }, + { + name: "regular flat index", + createMockIndex: func(t *testing.T) db.VectorIndex { + mock := db.NewMockVectorIndex(t) + return mock + }, + expectedIndexType: "flat", + expectedUnderlyingType: "flat", + isDynamic: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create the mock indexes + var mockVectorIndex db.VectorIndex + var mockDynamicIndex dynamic.Index + + if tt.createMockIndex != nil { + mockVectorIndex = tt.createMockIndex(t) + } + if tt.createMockDynamicIndex != nil { + mockDynamicIndex = tt.createMockDynamicIndex(t) + } + + // Simulate the exact logic from the service + indexType := "" + + // For dynamic indexes, get the actual underlying index type + if mockDynamicIndex != nil { + // This is a dynamic index + indexType = mockDynamicIndex.UnderlyingIndex().String() + } else if mockVectorIndex != nil { + // This is a regular index + indexType = tt.expectedIndexType + } + + // Check if it's dynamic - dynamic indexes are always dynamic + isDynamic := mockDynamicIndex != nil + + // Assertions + assert.Equal(t, tt.expectedIndexType, indexType, "Index type should match expected") + assert.Equal(t, tt.isDynamic, isDynamic, "IsDynamic flag should match expected") + + // For dynamic indexes, verify the underlying type + if mockDynamicIndex != nil { + assert.Equal(t, tt.expectedUnderlyingType, indexType, "Dynamic index should report underlying type") + } + }) + } +} + +func 
TestService_JitterFunctionality(t *testing.T) { + logger, _ := logrus.NewNullLogger() + + t.Run("Usage_WithJitter", func(t *testing.T) { + ctx := context.Background() + nodeName := "test-node" + className := "JitterTestClass" + + // Simple sharding state with two shards + shardingState := &sharding.State{ + Physical: map[string]sharding.Physical{ + "shard1": { + Name: "shard1", + BelongsToNodes: []string{nodeName}, + Status: models.TenantActivityStatusHOT, + }, + "shard2": { + Name: "shard2", + BelongsToNodes: []string{nodeName}, + Status: models.TenantActivityStatusHOT, + }, + }, + } + shardingState.SetLocalName(nodeName) + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: []*models.Class{ + { + Class: className, + VectorIndexConfig: &hnsw.UserConfig{}, + ReplicationConfig: &models.ReplicationConfig{}, + }, + }}).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardingState) + }).Maybe() + + // Minimal DB mock + mockDB := db.NewMockIndexGetter(t) + mockIndex := db.NewMockIndexLike(t) + mockDB.EXPECT().GetIndexLike(entschema.ClassName(className)).Return(mockIndex) + + // Simple shard mock + mockShard := db.NewMockShardLike(t) + mockShard.EXPECT().GetStatus().Return(storagestate.StatusReady) + mockShard.EXPECT().ObjectCountAsync(ctx).Return(int64(100), nil).Times(2) + mockShard.EXPECT().ObjectStorageSize(ctx).Return(int64(1000), nil).Times(2) + mockShard.EXPECT().VectorStorageSize(ctx).Return(int64(0), nil).Times(2) + mockShard.EXPECT().DimensionsUsage(ctx, "default").Return(types.Dimensionality{ + Dimensions: 1536, + Count: 100, + }, nil).Times(2) + + // Simple vector index mock + mockVectorIndex := db.NewMockVectorIndex(t) + mockCompressionStats := compressionhelpers.NewMockCompressionStats(t) + 
mockCompressionStats.EXPECT().CompressionRatio(1536).Return(0.75).Times(2) + mockVectorIndex.EXPECT().CompressionStats().Return(mockCompressionStats).Times(2) + + // Mock the shard iteration + mockIndex.On("ForEachShard", mock.Anything).Return(nil).Run(func(args mock.Arguments) { + f := args.Get(0).(func(string, db.ShardLike) error) + f("shard1", mockShard) + f("shard2", mockShard) + }) + + mockShard.On("ForEachVectorIndex", mock.Anything).Return(nil).Run(func(args mock.Arguments) { + f := args.Get(0).(func(string, db.VectorIndex) error) + f("default", mockVectorIndex) + }) + + // Minimal backup mock + mockBackupProvider := backupusecase.NewMockBackupBackendProvider(t) + mockBackupProvider.EXPECT().EnabledBackupBackends().Return([]modulecapabilities.BackupBackend{}) + + service := NewService(mockSchemaReader, mockDB, mockBackupProvider, nodeName, logger) + service.SetJitterInterval(10 * time.Millisecond) + + result, err := service.Usage(ctx) + + require.NoError(t, err) + require.NotNil(t, result) + + // Only assert jitter-related behavior: both shards should be processed + assert.Len(t, result.Collections, 1) + collection := result.Collections[0] + assert.Len(t, collection.Shards, 2, "Should process both shards with jitter") + + mockSchemaReader.AssertExpectations(t) + mockDB.AssertExpectations(t) + mockIndex.AssertExpectations(t) + mockShard.AssertExpectations(t) + mockVectorIndex.AssertExpectations(t) + mockCompressionStats.AssertExpectations(t) + mockBackupProvider.AssertExpectations(t) + }) + + t.Run("Usage_WithZeroJitter", func(t *testing.T) { + ctx := context.Background() + nodeName := "test-node" + className := "ZeroJitterTestClass" + + // Simple sharding state with one shard + shardingState := &sharding.State{ + Physical: map[string]sharding.Physical{ + "shard1": { + Name: "shard1", + BelongsToNodes: []string{nodeName}, + Status: models.TenantActivityStatusHOT, + }, + }, + } + shardingState.SetLocalName(nodeName) + mockSchemaReader := 
schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: []*models.Class{ + { + Class: className, + VectorIndexConfig: &hnsw.UserConfig{}, + ReplicationConfig: &models.ReplicationConfig{}, + }, + }}).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardingState) + }).Maybe() + + // Minimal DB mock + mockDB := db.NewMockIndexGetter(t) + mockIndex := db.NewMockIndexLike(t) + mockDB.EXPECT().GetIndexLike(entschema.ClassName(className)).Return(mockIndex) + + // Simple shard mock + mockShard := db.NewMockShardLike(t) + mockShard.EXPECT().GetStatus().Return(storagestate.StatusReady) + mockShard.EXPECT().ObjectCountAsync(ctx).Return(int64(100), nil) + mockShard.EXPECT().ObjectStorageSize(ctx).Return(int64(1000), nil) + mockShard.EXPECT().VectorStorageSize(ctx).Return(int64(0), nil) + mockShard.EXPECT().DimensionsUsage(ctx, "default").Return(types.Dimensionality{ + Dimensions: 1536, + Count: 100, + }, nil) + + // Simple vector index mock + mockVectorIndex := db.NewMockVectorIndex(t) + mockCompressionStats := compressionhelpers.NewMockCompressionStats(t) + mockCompressionStats.EXPECT().CompressionRatio(1536).Return(0.75) + mockVectorIndex.EXPECT().CompressionStats().Return(mockCompressionStats) + + // Mock the shard iteration + mockIndex.On("ForEachShard", mock.Anything).Return(nil).Run(func(args mock.Arguments) { + f := args.Get(0).(func(string, db.ShardLike) error) + f("shard1", mockShard) + }) + + mockShard.On("ForEachVectorIndex", mock.Anything).Return(nil).Run(func(args mock.Arguments) { + f := args.Get(0).(func(string, db.VectorIndex) error) + f("default", mockVectorIndex) + }) + + // Minimal backup mock + mockBackupProvider := backupusecase.NewMockBackupBackendProvider(t) + 
mockBackupProvider.EXPECT().EnabledBackupBackends().Return([]modulecapabilities.BackupBackend{}) + + service := NewService(mockSchemaReader, mockDB, mockBackupProvider, nodeName, logger) + service.SetJitterInterval(0) + + result, err := service.Usage(ctx) + + require.NoError(t, err) + require.NotNil(t, result) + + // Only assert jitter-related behavior: single shard should be processed + assert.Len(t, result.Collections, 1) + collection := result.Collections[0] + assert.Len(t, collection.Shards, 1, "Should process single shard without jitter") + + mockSchemaReader.AssertExpectations(t) + mockDB.AssertExpectations(t) + mockIndex.AssertExpectations(t) + mockShard.AssertExpectations(t) + mockVectorIndex.AssertExpectations(t) + mockCompressionStats.AssertExpectations(t) + mockBackupProvider.AssertExpectations(t) + }) +} + +func TestService_Usage_HotTenantWithLoadingStatus(t *testing.T) { + ctx := context.Background() + + nodeName := "test-node" + className := "LoadingHotTenantClass" + replication := 1 + shardName := "hot-tenant" + objectCount := 1000 + storageSize := int64(5000000) + vectorName := "abcd" + vectorType := "hnsw" + compression := "standard" + + shardingState := &sharding.State{ + Physical: map[string]sharding.Physical{ + shardName: { + Name: shardName, + BelongsToNodes: []string{nodeName}, + Status: models.TenantActivityStatusHOT, + }, + }, + } + shardingState.SetLocalName(nodeName) + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: []*models.Class{ + { + Class: className, + VectorIndexConfig: &hnsw.UserConfig{}, + ReplicationConfig: &models.ReplicationConfig{Factor: int64(replication)}, + VectorConfig: map[string]models.VectorConfig{ + vectorName: { + VectorIndexType: vectorType, + VectorIndexConfig: &hnsw.UserConfig{}, + }, + }, + }, + }}).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, 
*sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardingState) + }).Maybe() + + mockDB := db.NewMockIndexGetter(t) + mockIndex := db.NewMockIndexLike(t) + mockDB.EXPECT().GetIndexLike(entschema.ClassName(className)).Return(mockIndex) + + // Mock shard that returns StatusLoading + mockShard := db.NewMockShardLike(t) + mockShard.EXPECT().GetStatus().Return(storagestate.StatusLoading) + + // Mock the unloaded shard usage calculation + mockIndex.EXPECT().CalculateUnloadedObjectsMetrics(ctx, shardName).Return(types.ObjectUsage{ + Count: int64(objectCount), + StorageBytes: storageSize, + }, nil) + mockIndex.EXPECT().CalculateUnloadedVectorsMetrics(ctx, shardName).Return(int64(0), nil) + + mockIndex.On("ForEachShard", mock.AnythingOfType("func(string, db.ShardLike) error")).Return(nil).Run(func(args mock.Arguments) { + f := args.Get(0).(func(string, db.ShardLike) error) + f(shardName, mockShard) + }) + + mockBackupProvider := backupusecase.NewMockBackupBackendProvider(t) + mockBackupProvider.EXPECT().EnabledBackupBackends().Return([]modulecapabilities.BackupBackend{}) + + logger, _ := logrus.NewNullLogger() + service := NewService(mockSchemaReader, mockDB, mockBackupProvider, nodeName, logger) + + result, err := service.Usage(ctx) + + require.NoError(t, err) + require.NotNil(t, result) + assert.Equal(t, nodeName, result.Node) + assert.Len(t, result.Collections, 1) + + collection := result.Collections[0] + assert.Equal(t, className, collection.Name) + assert.Len(t, collection.Shards, 1) + + shard := collection.Shards[0] + assert.Equal(t, shardName, shard.Name) + assert.Equal(t, int64(objectCount), shard.ObjectsCount) + assert.Equal(t, uint64(storageSize), shard.ObjectsStorageBytes) + // Verify that the shard is treated as inactive (cold) even though it's a hot tenant + assert.Equal(t, strings.ToLower(models.TenantActivityStatusINACTIVE), shard.Status) + assert.Len(t, shard.NamedVectors, 1) + + vector := 
shard.NamedVectors[0] + assert.Equal(t, vectorName, vector.Name) + assert.Equal(t, vectorType, vector.VectorIndexType) + assert.Equal(t, compression, vector.Compression) + assert.Equal(t, 1.0, vector.VectorCompressionRatio) // Default ratio for cold shards + + mockSchemaReader.AssertExpectations(t) + mockDB.AssertExpectations(t) + mockIndex.AssertExpectations(t) + mockShard.AssertExpectations(t) + mockBackupProvider.AssertExpectations(t) +} + +func TestService_Usage_RQCompressionWithBits(t *testing.T) { + ctx := context.Background() + + nodeName := "test-node" + className := "RQTestClass" + replication := 1 + shardName := "rq-shard" + objectCount := 1000 + storageSize := int64(5000000) + vectorName := "default" + vectorType := "hnsw" + compression := "rq" + compressionRatio := 0.125 + dimensionality := 1536 + dimensionCount := 1000 + + // Test both bits=1 and bits=8 + testCases := []struct { + name string + bits int16 + expected int16 + }{ + { + name: "RQ with bits=1", + bits: 1, + expected: 1, + }, + { + name: "RQ with bits=8", + bits: 8, + expected: 8, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + sch := models.Schema{ + Classes: []*models.Class{ + { + Class: className, + VectorIndexConfig: hnsw.UserConfig{ + RQ: hnsw.RQConfig{ + Enabled: true, + Bits: tc.bits, + }, + }, + ReplicationConfig: &models.ReplicationConfig{ + Factor: int64(replication), + }, + }, + }, + } + + shardingState := &sharding.State{ + Physical: map[string]sharding.Physical{ + shardName: { + Name: shardName, + BelongsToNodes: []string{nodeName}, + Status: models.TenantActivityStatusHOT, + }, + }, + } + shardingState.SetLocalName(nodeName) + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardingState) + }).Maybe() + 
mockSchemaReader.EXPECT().ReadOnlySchema().Return(sch).Maybe() + + mockDB := db.NewMockIndexGetter(t) + mockIndex := db.NewMockIndexLike(t) + mockDB.EXPECT().GetIndexLike(entschema.ClassName(className)).Return(mockIndex) + + mockShard := db.NewMockShardLike(t) + mockShard.EXPECT().GetStatus().Return(storagestate.StatusReady) + mockShard.EXPECT().ObjectCountAsync(ctx).Return(int64(objectCount), nil) + mockShard.EXPECT().ObjectStorageSize(ctx).Return(storageSize, nil) + mockShard.EXPECT().VectorStorageSize(ctx).Return(int64(0), nil) + mockShard.EXPECT().DimensionsUsage(ctx, vectorName).Return(types.Dimensionality{ + Dimensions: dimensionality, + Count: dimensionCount, + }, nil) + + mockVectorIndex := db.NewMockVectorIndex(t) + mockCompressionStats := compressionhelpers.NewMockCompressionStats(t) + mockCompressionStats.EXPECT().CompressionRatio(dimensionality).Return(compressionRatio) + mockVectorIndex.EXPECT().CompressionStats().Return(mockCompressionStats) + + mockShard.On("ForEachVectorIndex", mock.AnythingOfType("func(string, db.VectorIndex) error")).Return(nil).Run(func(args mock.Arguments) { + f := args.Get(0).(func(string, db.VectorIndex) error) + f(vectorName, mockVectorIndex) + }) + + mockIndex.On("ForEachShard", mock.AnythingOfType("func(string, db.ShardLike) error")).Return(nil).Run(func(args mock.Arguments) { + f := args.Get(0).(func(string, db.ShardLike) error) + f(shardName, mockShard) + }) + + mockBackupProvider := backupusecase.NewMockBackupBackendProvider(t) + mockBackupProvider.EXPECT().EnabledBackupBackends().Return([]modulecapabilities.BackupBackend{}) + + logger, _ := logrus.NewNullLogger() + service := NewService(mockSchemaReader, mockDB, mockBackupProvider, nodeName, logger) + + result, err := service.Usage(ctx) + + require.NoError(t, err) + require.NotNil(t, result) + assert.Equal(t, nodeName, result.Node) + assert.Len(t, result.Collections, 1) + + collection := result.Collections[0] + assert.Equal(t, className, collection.Name) + assert.Len(t, 
collection.Shards, 1) + + shard := collection.Shards[0] + assert.Equal(t, shardName, shard.Name) + assert.Equal(t, int64(objectCount), shard.ObjectsCount) + assert.Equal(t, uint64(storageSize), shard.ObjectsStorageBytes) + assert.Len(t, shard.NamedVectors, 1) + + vector := shard.NamedVectors[0] + assert.Equal(t, vectorName, vector.Name) + assert.Equal(t, vectorType, vector.VectorIndexType) + assert.Equal(t, compression, vector.Compression) + assert.Equal(t, compressionRatio, vector.VectorCompressionRatio) + assert.Equal(t, tc.expected, vector.Bits, "Bits should match the RQ configuration") + + mockSchemaReader.AssertExpectations(t) + mockDB.AssertExpectations(t) + mockIndex.AssertExpectations(t) + mockShard.AssertExpectations(t) + mockVectorIndex.AssertExpectations(t) + mockCompressionStats.AssertExpectations(t) + mockBackupProvider.AssertExpectations(t) + }) + } +} + +func TestService_Usage_NamedVectorsWithConfig(t *testing.T) { + ctx := context.Background() + + nodeName := "test-node" + className := "NamedVectorConfigClass" + replication := 1 + shardName := "" + objectCount := 2000 + storageSize := int64(10000000) + + // Named vector configurations + textVectorName := "text" + imageVectorName := "image" + + // Expected values from named vector configs + textVectorType := "hnsw" + textCompression := "pq" + + imageVectorType := "hnsw" + imageCompression := "standard" + sch := models.Schema{ + Classes: []*models.Class{ + { + Class: className, + // No legacy VectorIndexConfig - only named vectors + ReplicationConfig: &models.ReplicationConfig{Factor: int64(replication)}, + VectorConfig: map[string]models.VectorConfig{ + textVectorName: { + VectorIndexType: textVectorType, + VectorIndexConfig: func() hnsw.UserConfig { + config := hnsw.UserConfig{} + config.SetDefaults() + config.PQ.Enabled = true + return config + }(), + }, + imageVectorName: { + VectorIndexType: imageVectorType, + VectorIndexConfig: func() hnsw.UserConfig { + config := hnsw.UserConfig{} + 
config.SetDefaults() + // PQ is disabled by default, so this should result in standard compression + return config + }(), + }, + }, + }, + }, + } + + shardingState := &sharding.State{ + Physical: map[string]sharding.Physical{ + shardName: { + Name: "", + BelongsToNodes: []string{nodeName}, + Status: models.TenantActivityStatusHOT, + }, + }, + } + shardingState.SetLocalName(nodeName) + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardingState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardingState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(sch).Maybe() + + mockDB := db.NewMockIndexGetter(t) + mockIndex := db.NewMockIndexLike(t) + mockDB.EXPECT().GetIndexLike(entschema.ClassName(className)).Return(mockIndex) + + mockShard := db.NewMockShardLike(t) + mockShard.EXPECT().GetStatus().Return(storagestate.StatusReady) + mockShard.EXPECT().ObjectCountAsync(ctx).Return(int64(objectCount), nil) + mockShard.EXPECT().ObjectStorageSize(ctx).Return(storageSize, nil) + mockShard.EXPECT().VectorStorageSize(ctx).Return(int64(0), nil) + + // Mock dimensions usage for both named vectors + mockShard.EXPECT().DimensionsUsage(ctx, textVectorName).Return(types.Dimensionality{}, nil) + mockShard.EXPECT().DimensionsUsage(ctx, imageVectorName).Return(types.Dimensionality{}, nil) + + // Mock vector indexes for both named vectors + mockTextVectorIndex := db.NewMockVectorIndex(t) + mockImageVectorIndex := db.NewMockVectorIndex(t) + + mockTextCompressionStats := compressionhelpers.NewMockCompressionStats(t) + mockTextCompressionStats.EXPECT().CompressionRatio(mock.Anything).Return(1) + mockTextVectorIndex.EXPECT().CompressionStats().Return(mockTextCompressionStats) + + 
mockImageCompressionStats := compressionhelpers.NewMockCompressionStats(t) + mockImageCompressionStats.EXPECT().CompressionRatio(mock.Anything).Return(1) + mockImageVectorIndex.EXPECT().CompressionStats().Return(mockImageCompressionStats) + + mockIndex.On("ForEachShard", mock.AnythingOfType("func(string, db.ShardLike) error")).Return(nil).Run(func(args mock.Arguments) { + f := args.Get(0).(func(string, db.ShardLike) error) + f(shardName, mockShard) + }) + + mockShard.On("ForEachVectorIndex", mock.AnythingOfType("func(string, db.VectorIndex) error")).Return(nil).Run(func(args mock.Arguments) { + f := args.Get(0).(func(string, db.VectorIndex) error) + f(textVectorName, mockTextVectorIndex) + f(imageVectorName, mockImageVectorIndex) + }) + + mockBackupProvider := backupusecase.NewMockBackupBackendProvider(t) + mockBackupProvider.EXPECT().EnabledBackupBackends().Return([]modulecapabilities.BackupBackend{}) + + logger, _ := logrus.NewNullLogger() + service := NewService(mockSchemaReader, mockDB, mockBackupProvider, nodeName, logger) + + result, err := service.Usage(ctx) + + require.NoError(t, err) + require.NotNil(t, result) + assert.Equal(t, nodeName, result.Node) + assert.Len(t, result.Collections, 1) + + collection := result.Collections[0] + assert.Equal(t, className, collection.Name) + assert.Len(t, collection.Shards, 1) + + shard := collection.Shards[0] + assert.Equal(t, shardName, shard.Name) + assert.Equal(t, int64(objectCount), shard.ObjectsCount) + assert.Equal(t, uint64(storageSize), shard.ObjectsStorageBytes) + assert.Len(t, shard.NamedVectors, 2) + + // Find and verify text vector + var textVector, imageVector *types.VectorUsage + for _, vector := range shard.NamedVectors { + switch vector.Name { + case textVectorName: + textVector = vector + case imageVectorName: + imageVector = vector + } + } + + // Verify text vector configuration + require.NotNil(t, textVector) + assert.Equal(t, textVectorName, textVector.Name) + assert.Equal(t, textVectorType, 
textVector.VectorIndexType) + t.Logf("Text vector compression: expected=%s, actual=%s", textCompression, textVector.Compression) + + // Verify image vector configuration + require.NotNil(t, imageVector) + assert.Equal(t, imageVectorName, imageVector.Name) + assert.Equal(t, imageVectorType, imageVector.VectorIndexType) + assert.Equal(t, imageCompression, imageVector.Compression) + + mockSchemaReader.AssertExpectations(t) + mockDB.AssertExpectations(t) + mockIndex.AssertExpectations(t) + mockShard.AssertExpectations(t) + mockTextVectorIndex.AssertExpectations(t) + mockTextCompressionStats.AssertExpectations(t) + mockImageVectorIndex.AssertExpectations(t) + mockImageCompressionStats.AssertExpectations(t) + mockBackupProvider.AssertExpectations(t) +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/usage/types/types.go b/platform/dbops/binaries/weaviate-src/cluster/usage/types/types.go new file mode 100644 index 0000000000000000000000000000000000000000..966e542ccc4affbdd07047b14e91f22396182123 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/usage/types/types.go @@ -0,0 +1,128 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package types + +import "github.com/weaviate/weaviate/entities/models" + +// Report represents the usage metrics report from the metrics endpoint +type Report struct { + // The version of usage policy, date based versioning + // e.g. 
2025-06-01 + Version string `json:"version,omitempty"` + + // The name of the node + Node string `json:"node,omitempty"` + + // List of collections and their metrics + Collections []*CollectionUsage `json:"collections,omitempty"` + + // List of backups and their metrics + Backups []*BackupUsage `json:"backups,omitempty"` + + // CollectingTime The time of the collection of the metric + CollectingTime string `json:"-"` + + // The local node's view of the schema + Schema *models.Schema `json:"schema,omitempty"` +} + +// CollectionUsage represents metrics for a single collection +type CollectionUsage struct { + // The name of the collection + Name string `json:"name,omitempty"` + + // The replication factor of the collection + ReplicationFactor int `json:"replication_factor,omitempty"` + + // The number of unique shards in the collection + UniqueShardCount int `json:"unique_shard_count,omitempty"` + + // List of shards and their metrics + Shards []*ShardUsage `json:"shards,omitempty"` +} + +// ShardUsage represents metrics for a single shard +type ShardUsage struct { + // The name of the shard + Name string `json:"name,omitempty"` + + // The status of the shard (ACTIVE, INACTIVE) + Status string `json:"status,omitempty"` + + // The number of objects in the shard + ObjectsCount int64 `json:"objects_count,omitempty"` + + // The storage size in bytes + ObjectsStorageBytes uint64 `json:"objects_storage_bytes,omitempty"` + + // The actual memory storage bytes used by vectors + VectorStorageBytes uint64 `json:"vector_storage_bytes,omitempty"` + + // List of named vectors and their metrics + NamedVectors []*VectorUsage `json:"named_vectors,omitempty"` +} + +// VectorUsage represents metrics for a single vector index +type VectorUsage struct { + // The name of the vector + Name string `json:"name,omitempty"` + + // The type of vector index (for dynamic indexes, this shows the underlying type: flat/hnsw) + VectorIndexType string `json:"vector_index_type,omitempty"` + + // 
Indicates if this index originated from a dynamic index configuration + IsDynamic bool `json:"is_dynamic,omitempty"` + + // The compression type used + Compression string `json:"compression,omitempty"` + + // The compression ratio achieved + VectorCompressionRatio float64 `json:"vector_compression_ratio,omitempty"` + + // The bits parameter for RQ compression (only set when Compression="rq") + Bits int16 `json:"bits,omitempty"` + + // List of dimensionalities and their metrics + Dimensionalities []*Dimensionality `json:"dimensionalities,omitempty"` +} + +// Dimensionality represents metrics for a specific dimensionality +type Dimensionality struct { + // The dimensionality of the vectors + Dimensions int `json:"dimensionality,omitempty"` + + // The number of objects with this dimensionality + Count int `json:"count,omitempty"` +} + +// BackupUsage represents metrics for a single backup +type BackupUsage struct { + // The ID of the backup + ID string `json:"id,omitempty"` + + // The completion time of the backup + CompletionTime string `json:"completion_time,omitempty"` + + // The size of the backup in GiB + SizeInGib float64 `json:"size_in_gib,omitempty"` + + // The type of backup + Type string `json:"type,omitempty"` + + // The list of collections included in the backup + Collections []string `json:"collections,omitempty"` +} + +type ObjectUsage struct { + Count int64 + StorageBytes int64 +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/usage/types/types_test.go b/platform/dbops/binaries/weaviate-src/cluster/usage/types/types_test.go new file mode 100644 index 0000000000000000000000000000000000000000..bf90599962bcb93f87916690c553ea2494f16af4 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/usage/types/types_test.go @@ -0,0 +1,362 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 
2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package types + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestReport_OmitsEmptyFields(t *testing.T) { + tests := []struct { + name string + report Report + expected string + }{ + { + name: "completely empty report", + report: Report{}, + expected: "{}", + }, + { + name: "report with only node name", + report: Report{ + Node: "test-node", + }, + expected: `{"node":"test-node"}`, + }, + { + name: "report with empty collections slice", + report: Report{ + Node: "test-node", + Collections: []*CollectionUsage{}, + }, + expected: `{"node":"test-node"}`, + }, + { + name: "report with empty backups slice", + report: Report{ + Node: "test-node", + Backups: []*BackupUsage{}, + }, + expected: `{"node":"test-node"}`, + }, + { + name: "complete report", + report: Report{ + Version: "2025-01-01", + Node: "test-node", + Collections: []*CollectionUsage{ + {Name: "test-collection"}, + }, + Backups: []*BackupUsage{ + {ID: "test-backup"}, + }, + }, + expected: `{"version":"2025-01-01","node":"test-node","collections":[{"name":"test-collection"}],"backups":[{"id":"test-backup"}]}`, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + data, err := json.Marshal(tt.report) + require.NoError(t, err) + assert.Equal(t, tt.expected, string(data)) + }) + } +} + +func TestCollectionUsage_OmitsEmptyFields(t *testing.T) { + tests := []struct { + name string + usage CollectionUsage + expected string + }{ + { + name: "completely empty collection usage", + usage: CollectionUsage{}, + expected: "{}", + }, + { + name: "collection usage with only name", + usage: CollectionUsage{ + Name: "test-collection", + }, + expected: `{"name":"test-collection"}`, + }, + { + name: "collection usage with empty shards slice", + usage: CollectionUsage{ + Name: "test-collection", + Shards: []*ShardUsage{}, + }, + expected: 
`{"name":"test-collection"}`, + }, + { + name: "complete collection usage", + usage: CollectionUsage{ + Name: "test-collection", + ReplicationFactor: 3, + UniqueShardCount: 2, + Shards: []*ShardUsage{ + {Name: "test-shard"}, + }, + }, + expected: `{"name":"test-collection","replication_factor":3,"unique_shard_count":2,"shards":[{"name":"test-shard"}]}`, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + data, err := json.Marshal(tt.usage) + require.NoError(t, err) + assert.Equal(t, tt.expected, string(data)) + }) + } +} + +func TestShardUsage_OmitsEmptyFields(t *testing.T) { + tests := []struct { + name string + usage ShardUsage + expected string + }{ + { + name: "completely empty shard usage", + usage: ShardUsage{}, + expected: "{}", + }, + { + name: "shard usage with only name", + usage: ShardUsage{ + Name: "test-shard", + }, + expected: `{"name":"test-shard"}`, + }, + { + name: "shard usage with empty named vectors slice", + usage: ShardUsage{ + Name: "test-shard", + NamedVectors: []*VectorUsage{}, + }, + expected: `{"name":"test-shard"}`, + }, + { + name: "complete shard usage", + usage: ShardUsage{ + Name: "test-shard", + Status: "active", + ObjectsCount: 1000, + ObjectsStorageBytes: 1024, + VectorStorageBytes: 2048, + NamedVectors: []*VectorUsage{ + {Name: "default"}, + }, + }, + expected: `{"name":"test-shard","status":"active","objects_count":1000,"objects_storage_bytes":1024,"vector_storage_bytes":2048,"named_vectors":[{"name":"default"}]}`, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + data, err := json.Marshal(tt.usage) + require.NoError(t, err) + assert.Equal(t, tt.expected, string(data)) + }) + } +} + +func TestVectorUsage_OmitsEmptyFields(t *testing.T) { + tests := []struct { + name string + usage VectorUsage + expected string + }{ + { + name: "completely empty vector usage", + usage: VectorUsage{}, + expected: "{}", + }, + { + name: "vector usage with only name", + usage: VectorUsage{ + 
Name: "default", + }, + expected: `{"name":"default"}`, + }, + { + name: "vector usage with empty dimensionalities slice", + usage: VectorUsage{ + Name: "default", + Dimensionalities: []*Dimensionality{}, + }, + expected: `{"name":"default"}`, + }, + { + name: "complete vector usage", + usage: VectorUsage{ + Name: "default", + VectorIndexType: "hnsw", + IsDynamic: false, + Compression: "standard", + VectorCompressionRatio: 0.75, + Dimensionalities: []*Dimensionality{ + {Dimensions: 1536, Count: 1000}, + }, + }, + expected: `{"name":"default","vector_index_type":"hnsw","compression":"standard","vector_compression_ratio":0.75,"dimensionalities":[{"dimensionality":1536,"count":1000}]}`, + }, + { + name: "vector usage with is_dynamic true", + usage: VectorUsage{ + Name: "default", + VectorIndexType: "hnsw", + IsDynamic: true, + Compression: "standard", + VectorCompressionRatio: 0.75, + Dimensionalities: []*Dimensionality{ + {Dimensions: 1536, Count: 1000}, + }, + }, + expected: `{"name":"default","vector_index_type":"hnsw","is_dynamic":true,"compression":"standard","vector_compression_ratio":0.75,"dimensionalities":[{"dimensionality":1536,"count":1000}]}`, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + data, err := json.Marshal(tt.usage) + require.NoError(t, err) + assert.Equal(t, tt.expected, string(data)) + }) + } +} + +func TestDimensionality_OmitsEmptyFields(t *testing.T) { + tests := []struct { + name string + dim Dimensionality + expected string + }{ + { + name: "completely empty dimensionality", + dim: Dimensionality{}, + expected: "{}", + }, + { + name: "dimensionality with only dimensions", + dim: Dimensionality{ + Dimensions: 1536, + }, + expected: `{"dimensionality":1536}`, + }, + { + name: "dimensionality with only count", + dim: Dimensionality{ + Count: 1000, + }, + expected: `{"count":1000}`, + }, + { + name: "complete dimensionality", + dim: Dimensionality{ + Dimensions: 1536, + Count: 1000, + }, + expected: 
`{"dimensionality":1536,"count":1000}`, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + data, err := json.Marshal(tt.dim) + require.NoError(t, err) + assert.Equal(t, tt.expected, string(data)) + }) + } +} + +func TestBackupUsage_OmitsEmptyFields(t *testing.T) { + tests := []struct { + name string + usage BackupUsage + expected string + }{ + { + name: "completely empty backup usage", + usage: BackupUsage{}, + expected: "{}", + }, + { + name: "backup usage with only ID", + usage: BackupUsage{ + ID: "test-backup", + }, + expected: `{"id":"test-backup"}`, + }, + { + name: "backup usage with empty collections slice", + usage: BackupUsage{ + ID: "test-backup", + Collections: []string{}, + }, + expected: `{"id":"test-backup"}`, + }, + { + name: "complete backup usage", + usage: BackupUsage{ + ID: "test-backup", + CompletionTime: "2025-01-01T00:00:00Z", + SizeInGib: 1.5, + Type: "success", + Collections: []string{"collection1", "collection2"}, + }, + expected: `{"id":"test-backup","completion_time":"2025-01-01T00:00:00Z","size_in_gib":1.5,"type":"success","collections":["collection1","collection2"]}`, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + data, err := json.Marshal(tt.usage) + require.NoError(t, err) + assert.Equal(t, tt.expected, string(data)) + }) + } +} + +func TestZeroValues_AreOmitted(t *testing.T) { + // Test that zero values are properly omitted + report := Report{ + Node: "", // empty string + Collections: nil, // nil slice + Backups: []*BackupUsage{}, // empty slice + } + + data, err := json.Marshal(report) + require.NoError(t, err) + assert.Equal(t, "{}", string(data)) +} + +func TestNilSlices_AreOmitted(t *testing.T) { + // Test that nil slices are omitted + report := Report{ + Node: "test-node", + Collections: nil, + Backups: nil, + } + + data, err := json.Marshal(report) + require.NoError(t, err) + assert.Equal(t, `{"node":"test-node"}`, string(data)) +} diff --git 
a/platform/dbops/binaries/weaviate-src/cluster/utils/retry.go b/platform/dbops/binaries/weaviate-src/cluster/utils/retry.go new file mode 100644 index 0000000000000000000000000000000000000000..baab77ba629f53669c6b5caeccebc70ee44e1ee2 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/utils/retry.go @@ -0,0 +1,39 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package utils + +import ( + "time" + + "github.com/cenkalti/backoff/v4" +) + +// NewBackoff returns a Backoff that can be used to retry an operation +// We have this function to ensure that we can use the same backoff settings in multiple places in weaviate. +func NewBackoff() backoff.BackOff { + return ConstantBackoff(3, 50*time.Millisecond) +} + +// ConstantBackoff is a backoff configuration used to handle getters +// retry for eventual consistency handling +func ConstantBackoff(maxrtry int, interval time.Duration) backoff.BackOff { + return backoff.WithMaxRetries(backoff.NewConstantBackOff(interval), uint64(maxrtry)) +} + +// After MaxElapsedTime the backoff.BackOff returns Stop. +// It never stops if MaxElapsedTime == 0. 
+func NewExponentialBackoff(initialInverval time.Duration, maxElapsedTime time.Duration) backoff.BackOff { + eb := backoff.NewExponentialBackOff() + eb.InitialInterval = initialInverval + eb.MaxElapsedTime = maxElapsedTime + return eb +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/utils/utils.go b/platform/dbops/binaries/weaviate-src/cluster/utils/utils.go new file mode 100644 index 0000000000000000000000000000000000000000..9ef24e93c540805e8c874992a895dbbead7650d0 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/utils/utils.go @@ -0,0 +1,27 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package utils + +import "net" + +func MustGetFreeTCPPort() (port int) { + lAddr, err := net.ResolveTCPAddr("tcp", "localhost:0") + if err != nil { + panic(err) + } + l, err := net.ListenTCP("tcp", lAddr) + if err != nil { + panic(err) + } + defer l.Close() + return l.Addr().(*net.TCPAddr).Port +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/utils/utils_test.go b/platform/dbops/binaries/weaviate-src/cluster/utils/utils_test.go new file mode 100644 index 0000000000000000000000000000000000000000..47b6a8c26f7c9f40612f34cf02ef2b590e29f17b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/utils/utils_test.go @@ -0,0 +1,23 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package utils + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestFreeTcpPort(t *testing.T) { + port := MustGetFreeTCPPort() + assert.Greater(t, port, 0) +} diff --git a/platform/dbops/binaries/weaviate-src/cmd/weaviate-server/main.go b/platform/dbops/binaries/weaviate-src/cmd/weaviate-server/main.go new file mode 100644 index 0000000000000000000000000000000000000000..ee4e8a53dfeb7c429cf50c1f0f45e9d8c8f96d6c --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cmd/weaviate-server/main.go @@ -0,0 +1,68 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package main + +import ( + "log" + "os" + + "github.com/go-openapi/loads" + flags "github.com/jessevdk/go-flags" + + "github.com/weaviate/weaviate/adapters/handlers/rest" + "github.com/weaviate/weaviate/adapters/handlers/rest/operations" +) + +// This file was generated by the swagger tool. +// Make sure not to overwrite this file after you generated it because all your edits would be lost! + +func main() { + + swaggerSpec, err := loads.Embedded(rest.SwaggerJSON, rest.FlatSwaggerJSON) + if err != nil { + log.Fatalln(err) + } + + api := operations.NewWeaviateAPI(swaggerSpec) + server := rest.NewServer(api) + defer server.Shutdown() + + parser := flags.NewParser(server, flags.Default) + parser.ShortDescription = "Weaviate" + parser.LongDescription = "# Introduction\n Weaviate is an open source, AI-native vector database that helps developers create intuitive and reliable AI-powered applications. \n ### Base Path \nThe base path for the Weaviate server is structured as `[YOUR-WEAVIATE-HOST]:[PORT]/v1`. 
As an example, if you wish to access the `schema` endpoint on a local instance, you would navigate to `http://localhost:8080/v1/schema`. Ensure you replace `[YOUR-WEAVIATE-HOST]` and `[PORT]` with your actual server host and port number respectively. \n ### Questions? \nIf you have any comments or questions, please feel free to reach out to us at the community forum [https://forum.weaviate.io/](https://forum.weaviate.io/). \n### Issues? \nIf you find a bug or want to file a feature request, please open an issue on our GitHub repository for [Weaviate](https://github.com/weaviate/weaviate). \n### Want more documentation? \nFor a quickstart, code examples, concepts and more, please visit our [documentation page](https://weaviate.io/developers/weaviate)." + server.ConfigureFlags() + for _, optsGroup := range api.CommandLineOptionsGroups { + _, err := parser.AddGroup(optsGroup.ShortDescription, optsGroup.LongDescription, optsGroup.Options) + if err != nil { + log.Fatalln(err) + } + } + + if _, err := parser.Parse(); err != nil { + code := 1 + if fe, ok := err.(*flags.Error); ok { + if fe.Type == flags.ErrHelp { + code = 0 + } + } + os.Exit(code) + } + + server.ConfigureAPI() + + if err := server.Serve(); err != nil { + log.Fatalln(err) + } + +} diff --git a/platform/dbops/binaries/weaviate-src/entities/additional/classification.go b/platform/dbops/binaries/weaviate-src/entities/additional/classification.go new file mode 100644 index 0000000000000000000000000000000000000000..1f3c26e102336947d661b9b5d91858da92ac9532 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/additional/classification.go @@ -0,0 +1,61 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package additional + +import "github.com/go-openapi/strfmt" + +type Classification struct { + BasedOn []string `json:"basedOn"` + ClassifiedFields []string `json:"classifiedFields"` + Completed strfmt.DateTime `json:"completed,omitempty"` + ID strfmt.UUID `json:"id,omitempty"` + Scope []string `json:"scope"` +} + +type Properties struct { + Classification bool `json:"classification"` + RefMeta bool `json:"refMeta"` + Vector bool `json:"vector"` + Vectors []string `json:"vectors"` + Certainty bool `json:"certainty"` + ID bool `json:"id"` + CreationTimeUnix bool `json:"creationTimeUnix"` + LastUpdateTimeUnix bool `json:"lastUpdateTimeUnix"` + ModuleParams map[string]interface{} `json:"moduleParams"` + Distance bool `json:"distance"` + Score bool `json:"score"` + ExplainScore bool `json:"explainScore"` + IsConsistent bool `json:"isConsistent"` + Group bool `json:"group"` + + // The User is not interested in returning props, we can skip any costly + // operation that isn't required. + NoProps bool `json:"noProps"` + + // ReferenceQuery is used to indicate that a search + // is being conducted on behalf of a referenced + // property. for example: this is relevant when a + // where filter operand is passed in with a path to + // a referenced class, rather than a path to one of + // its own props. + // + // The reason we need this indication is that + // without it, the sub-Search which is + // conducted to extract the reference propValuePair + // is conducted with the pagination set to whatever + // the QueryMaximumResults. if this value is set low + // relative to the number of objects being searched, + // weaviate will be unable to find enough results to + // make any comparisons, and erroneously returns + // empty, or with fewer results than expected. 
+ ReferenceQuery bool `json:"-"` +} diff --git a/platform/dbops/binaries/weaviate-src/entities/additional/distance.go b/platform/dbops/binaries/weaviate-src/entities/additional/distance.go new file mode 100644 index 0000000000000000000000000000000000000000..3c6abfe940277ac49a1225a96110ef1dd518d99a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/additional/distance.go @@ -0,0 +1,30 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package additional + +func CertaintyToDistPtr(maybeCertainty *float64) (distPtr *float64) { + if maybeCertainty != nil { + dist := (1 - *maybeCertainty) * 2 + distPtr = &dist + } + return +} + +func CertaintyToDist(certainty float64) (dist float64) { + dist = (1 - certainty) * 2 + return +} + +func DistToCertainty(dist float64) (certainty float64) { + certainty = 1 - (dist / 2) + return +} diff --git a/platform/dbops/binaries/weaviate-src/entities/additional/group.go b/platform/dbops/binaries/weaviate-src/entities/additional/group.go new file mode 100644 index 0000000000000000000000000000000000000000..7f07d9c593edd3e54f13353462600bb0221ccfc2 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/additional/group.go @@ -0,0 +1,38 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package additional + +import ( + "github.com/go-openapi/strfmt" + "github.com/weaviate/weaviate/entities/models" +) + +type Group struct { + ID int `json:"id"` + GroupedBy *GroupedBy `json:"groupedBy"` + MinDistance float32 `json:"minDistance"` + MaxDistance float32 `json:"maxDistance"` + Count int `json:"count"` + Hits []map[string]interface{} `json:"hits"` +} + +type GroupedBy struct { + Value string `json:"value"` + Path []string `json:"path"` +} + +type GroupHitAdditional struct { + ID strfmt.UUID `json:"id"` + Vector []float32 `json:"vector"` + Vectors models.Vectors `json:"vectors"` + Distance float32 `json:"distance"` +} diff --git a/platform/dbops/binaries/weaviate-src/entities/additional/replication.go b/platform/dbops/binaries/weaviate-src/entities/additional/replication.go new file mode 100644 index 0000000000000000000000000000000000000000..150a4bb81091d6ef424ae074ef14b184397ebfbb --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/additional/replication.go @@ -0,0 +1,53 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package additional + +// ReplicationProperties are replication-related handles and configurations which +// allow replication context to pass through different layers of +// abstraction, usually initiated via client requests +type ReplicationProperties struct { + // ConsistencyLevel indicates how many nodes should + // respond to a request before it is considered + // successful. 
Can be "ONE", "QUORUM", or "ALL" + // + // This is only relevant for a replicated + // class + ConsistencyLevel string + + // NodeName is the node which is expected to + // fulfill the request + NodeName string +} + +type AsyncReplicationTargetNodeOverride struct { + CollectionID string + ShardID string + SourceNode string + TargetNode string + UpperTimeBound int64 + NoDeletionResolution bool +} + +type AsyncReplicationTargetNodeOverrides []AsyncReplicationTargetNodeOverride + +func (left *AsyncReplicationTargetNodeOverride) Equal(right *AsyncReplicationTargetNodeOverride) bool { + return left.SourceNode == right.SourceNode && left.TargetNode == right.TargetNode && left.CollectionID == right.CollectionID && left.ShardID == right.ShardID +} + +func (overrides AsyncReplicationTargetNodeOverrides) NoDeletionResolution(targetNode string) bool { + for _, override := range overrides { + if override.TargetNode == targetNode { + return override.NoDeletionResolution + } + } + return false +} diff --git a/platform/dbops/binaries/weaviate-src/entities/aggregation/params.go b/platform/dbops/binaries/weaviate-src/entities/aggregation/params.go new file mode 100644 index 0000000000000000000000000000000000000000..62335aa64acf6c695553950a6eb548f998a8fc24 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/aggregation/params.go @@ -0,0 +1,178 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package aggregation + +import ( + "encoding/json" + "fmt" + + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/searchparams" +) + +type Params struct { + Filters *filters.LocalFilter `json:"filters"` + ClassName schema.ClassName `json:"className"` + Properties []ParamProperty `json:"properties"` + GroupBy *filters.Path `json:"groupBy"` + IncludeMetaCount bool `json:"includeMetaCount"` + Limit *int `json:"limit"` + ObjectLimit *int `json:"objectLimit"` + SearchVector models.Vector `json:"searchVector"` + TargetVector string `json:"targetVector"` + Certainty float64 `json:"certainty"` + Tenant string `json:"tenant"` + ModuleParams map[string]interface{} `json:"moduleParams"` + NearVector *searchparams.NearVector `json:"nearVector"` + NearObject *searchparams.NearObject `json:"nearObject"` + Hybrid *searchparams.HybridSearch `json:"hybrid"` +} + +func (p *Params) UnmarshalJSON(data []byte) error { + type alias Params + aux := &struct { + SearchVector json.RawMessage `json:"searchVector"` + *alias + }{ + alias: (*alias)(p), + } + + if err := json.Unmarshal(data, aux); err != nil { + return err + } + + // SearchVector is nil + if aux.SearchVector == nil { + return nil + } + + // Try unmarshaling as []float32 + var vector []float32 + if err := json.Unmarshal(aux.SearchVector, &vector); err == nil { + if len(vector) > 0 { + p.SearchVector = vector + } + return nil + } + + // Try unmarshaling as [][]float32 + var multiVector [][]float32 + if err := json.Unmarshal(aux.SearchVector, &multiVector); err == nil { + if len(multiVector) > 0 { + p.SearchVector = multiVector + } + return nil + } + return fmt.Errorf("searchVector: cannot unmarshal into either []float32 or [][]float32: %v", aux.SearchVector) +} + +type ParamProperty struct { + Name schema.PropertyName `json:"name"` + Aggregators 
[]Aggregator `json:"aggregators"` +} + +type Aggregator struct { + Type string `json:"type"` + Limit *int `json:"limit"` // used on TopOccurrence Agg +} + +func (a Aggregator) String() string { + return a.Type +} + +// Aggregators used in every prop +var ( + CountAggregator = Aggregator{Type: "count"} + TypeAggregator = Aggregator{Type: "type"} +) + +// Aggregators used in numerical props +var ( + SumAggregator = Aggregator{Type: "sum"} + MeanAggregator = Aggregator{Type: "mean"} + ModeAggregator = Aggregator{Type: "mode"} + MedianAggregator = Aggregator{Type: "median"} + MaximumAggregator = Aggregator{Type: "maximum"} + MinimumAggregator = Aggregator{Type: "minimum"} +) + +// Aggregators used in boolean props +var ( + TotalTrueAggregator = Aggregator{Type: "totalTrue"} + PercentageTrueAggregator = Aggregator{Type: "percentageTrue"} + TotalFalseAggregator = Aggregator{Type: "totalFalse"} + PercentageFalseAggregator = Aggregator{Type: "percentageFalse"} +) + +const TopOccurrencesType = "topOccurrences" + +// NewTopOccurrencesAggregator creates a TopOccurrencesAggregator, we cannot +// use a singleton for this as the desired limit can be different each time +func NewTopOccurrencesAggregator(limit *int) Aggregator { + return Aggregator{Type: TopOccurrencesType, Limit: limit} +} + +// Aggregators used in ref props +var ( + PointingToAggregator = Aggregator{Type: "pointingTo"} +) + +func ParseAggregatorProp(name string) (Aggregator, error) { + switch name { + // common + case CountAggregator.String(): + return CountAggregator, nil + case TypeAggregator.String(): + return TypeAggregator, nil + + // numerical + case MeanAggregator.String(): + return MeanAggregator, nil + case MedianAggregator.String(): + return MedianAggregator, nil + case ModeAggregator.String(): + return ModeAggregator, nil + case MaximumAggregator.String(): + return MaximumAggregator, nil + case MinimumAggregator.String(): + return MinimumAggregator, nil + case SumAggregator.String(): + return 
SumAggregator, nil + + // boolean + case TotalTrueAggregator.String(): + return TotalTrueAggregator, nil + case TotalFalseAggregator.String(): + return TotalFalseAggregator, nil + case PercentageTrueAggregator.String(): + return PercentageTrueAggregator, nil + case PercentageFalseAggregator.String(): + return PercentageFalseAggregator, nil + + // string/text + case TopOccurrencesType: + return NewTopOccurrencesAggregator(ptInt(5)), nil // default to limit 5, can be overwritten + + // ref + case PointingToAggregator.String(): + return PointingToAggregator, nil + + default: + return Aggregator{}, fmt.Errorf("unrecognized aggregator prop '%s'", name) + } +} + +func ptInt(in int) *int { + return &in +} diff --git a/platform/dbops/binaries/weaviate-src/entities/aggregation/params_test.go b/platform/dbops/binaries/weaviate-src/entities/aggregation/params_test.go new file mode 100644 index 0000000000000000000000000000000000000000..aecdee7a941a693c2d12cbd9d035356cb1c29022 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/aggregation/params_test.go @@ -0,0 +1,62 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package aggregation + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_Params_Unmarshal(t *testing.T) { + tests := []struct { + name string + payload string + isMultiVector bool + }{ + { + name: "regular vector", + payload: `{ + "targetVector": "vector1", + "searchVector": [1.0, 2.0] + }`, + isMultiVector: false, + }, + { + name: "multi vector", + payload: `{ + "targetVector": "vector1", + "searchVector": [[1.0, 2.0], [2.0]] + }`, + isMultiVector: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var par Params + err := json.Unmarshal([]byte(tt.payload), &par) + require.NoError(t, err) + require.NotNil(t, par.SearchVector) + if tt.isMultiVector { + vector, ok := par.SearchVector.([][]float32) + assert.True(t, ok) + assert.True(t, len(vector) > 0) + } else { + vector, ok := par.SearchVector.([]float32) + assert.True(t, ok) + assert.True(t, len(vector) > 0) + } + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/entities/aggregation/result.go b/platform/dbops/binaries/weaviate-src/entities/aggregation/result.go new file mode 100644 index 0000000000000000000000000000000000000000..0c263e93fce361bb7dac02966c6ce7b4710250e5 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/aggregation/result.go @@ -0,0 +1,69 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package aggregation + +type Result struct { + Groups []Group `json:"groups"` +} + +type Group struct { + Properties map[string]Property `json:"properties"` + GroupedBy *GroupedBy `json:"groupedBy"` // optional to support ungrouped aggregations (formerly meta) + Count int `json:"count"` +} + +type Property struct { + Type PropertyType `json:"type"` + NumericalAggregations map[string]interface{} `json:"numericalAggregations"` + TextAggregation Text `json:"textAggregation"` + BooleanAggregation Boolean `json:"booleanAggregation"` + SchemaType string `json:"schemaType"` + ReferenceAggregation Reference `json:"referenceAggregation"` + DateAggregations map[string]interface{} `json:"dateAggregation"` +} + +type Text struct { + Items []TextOccurrence `json:"items"` + Count int `json:"count"` +} + +type PropertyType string + +const ( + PropertyTypeNumerical PropertyType = "numerical" + PropertyTypeBoolean PropertyType = "boolean" + PropertyTypeText PropertyType = "text" + PropertyTypeDate PropertyType = "date" + PropertyTypeReference PropertyType = "cref" +) + +type GroupedBy struct { + Value interface{} `json:"value"` + Path []string `json:"path"` +} + +type TextOccurrence struct { + Value string `json:"value"` + Occurs int `json:"occurs"` +} + +type Boolean struct { + Count int `json:"count"` + TotalTrue int `json:"totalTrue"` + TotalFalse int `json:"totalFalse"` + PercentageTrue float64 `json:"percentageTrue"` + PercentageFalse float64 `json:"percentageFalse"` +} + +type Reference struct { + PointingTo []string `json:"pointingTo"` +} diff --git a/platform/dbops/binaries/weaviate-src/entities/autocut/autocut.go b/platform/dbops/binaries/weaviate-src/entities/autocut/autocut.go new file mode 100644 index 0000000000000000000000000000000000000000..457980dd858e530b0b620e26bb95833d2311ee29 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/autocut/autocut.go @@ -0,0 +1,51 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ 
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package autocut + +func Autocut(yValues []float32, cutOff int) int { + if len(yValues) <= 1 { + return len(yValues) + } + + diff := make([]float32, len(yValues)) + step := 1. / (float32(len(yValues)) - 1.) + + for i := range yValues { + xValue := 0. + float32(i)*step + yValueNorm := (yValues[i] - yValues[0]) / (yValues[len(yValues)-1] - yValues[0]) + diff[i] = yValueNorm - xValue + } + + extremaCount := 0 + for i := range diff { + if i == 0 { + continue // we want the index _before_ the extrema + } + + if i == len(diff)-1 && len(diff) > 1 { // for last element there is no "next" point + if diff[i] > diff[i-1] && diff[i] > diff[i-2] { + extremaCount += 1 + if extremaCount >= cutOff { + return i + } + } + } else { + if diff[i] > diff[i-1] && diff[i] > diff[i+1] { + extremaCount += 1 + if extremaCount >= cutOff { + return i + } + } + } + } + return len(yValues) +} diff --git a/platform/dbops/binaries/weaviate-src/entities/autocut/autocut_test.go b/platform/dbops/binaries/weaviate-src/entities/autocut/autocut_test.go new file mode 100644 index 0000000000000000000000000000000000000000..6fc416ece50ec8bc2196fbf5f722094dee863cbd --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/autocut/autocut_test.go @@ -0,0 +1,46 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package autocut + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestAutoCut(t *testing.T) { + cases := []struct { + values []float32 + cutOff int + expectedResults int + }{ + {values: []float32{}, cutOff: 1, expectedResults: 0}, + {values: []float32{2}, cutOff: 1, expectedResults: 1}, + {values: []float32{2, 1.95, 1.9, 0.2, 0.1, 0.1, -1}, cutOff: 1, expectedResults: 3}, + {values: []float32{2, 1.95, 1.9, 0.2, 0.1, 0.1, -2}, cutOff: 2, expectedResults: 6}, + {values: []float32{5, 1, 1, 1, 1, 0, 0}, cutOff: 1, expectedResults: 1}, + {values: []float32{5, 1, 1, 1, 1, 0, 0}, cutOff: 2, expectedResults: 5}, + {values: []float32{0.298, 0.260, 0.169, 0.108, 0.108, 0.104, 0.093}, cutOff: 1, expectedResults: 3}, + {values: []float32{0.5, 0.32, 0.31, 0.30, 0.29, 0.15}, cutOff: 1, expectedResults: 1}, + {values: []float32{0.5, 0.32, 0.31, 0.30, 0.29, 0.15, 0.15, 0.15}, cutOff: 2, expectedResults: 5}, + {values: []float32{1.0, 0.98, 0.95, 0.9, 0.88, 0.87, 0.80, 0.79}, cutOff: 1, expectedResults: 3}, + {values: []float32{1.0, 0.98, 0.95, 0.9, 0.88, 0.87, 0.80, 0.79}, cutOff: 2, expectedResults: 6}, + {values: []float32{1.0, 0.98, 0.95, 0.9, 0.88, 0.87, 0.80, 0.79}, cutOff: 3, expectedResults: 8}, // all values + {values: []float32{0.586835, 0.5450372, 0.34137487, 0.30482167, 0.2753393}, cutOff: 1, expectedResults: 2}, + {values: []float32{0.36663342, 0.33818772, 0.045160502, 0.045160501}, cutOff: 1, expectedResults: 2}, + } + for _, tt := range cases { + t.Run("", func(t *testing.T) { + assert.Equal(t, tt.expectedResults, Autocut(tt.values, tt.cutOff)) + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/entities/backup/descriptor.go b/platform/dbops/binaries/weaviate-src/entities/backup/descriptor.go new file mode 100644 index 0000000000000000000000000000000000000000..3fa260c1dcd9a96322d6aad7db525ef7a403652c --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/backup/descriptor.go @@ 
-0,0 +1,417 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package backup + +import ( + "fmt" + "time" +) + +// NodeDescriptor contains data related to one participant in DBRO +type NodeDescriptor struct { + Classes []string `json:"classes"` + Status Status `json:"status"` + Error string `json:"error"` + PreCompressionSizeBytes int64 `json:"preCompressionSizeBytes"` // Size of this node's backup in bytes before compression +} + +// DistributedBackupDescriptor contains everything needed to completely restore a distributed backup +type DistributedBackupDescriptor struct { + StartedAt time.Time `json:"startedAt"` + CompletedAt time.Time `json:"completedAt"` + ID string `json:"id"` // User created backup id + Nodes map[string]*NodeDescriptor `json:"nodes"` + NodeMapping map[string]string `json:"node_mapping"` + Status Status `json:"status"` // + Version string `json:"version"` // + ServerVersion string `json:"serverVersion"` + Leader string `json:"leader"` + Error string `json:"error"` + PreCompressionSizeBytes int64 `json:"preCompressionSizeBytes"` // Size of this node's backup in bytes before compression +} + +// Len returns how many nodes exist in d +func (d *DistributedBackupDescriptor) Len() int { + return len(d.Nodes) +} + +// Count number of classes +func (d *DistributedBackupDescriptor) Count() int { + count := 0 + for _, desc := range d.Nodes { + count += len(desc.Classes) + } + return count +} + +// RemoveEmpty removes any nodes with an empty class list +func (d *DistributedBackupDescriptor) RemoveEmpty() *DistributedBackupDescriptor { + for node, desc := range d.Nodes { + if len(desc.Classes) == 0 { + delete(d.Nodes, node) + } + } + return d +} + +// Classes returns all classes contained in d +func (d
*DistributedBackupDescriptor) Classes() []string { + set := make(map[string]struct{}, 32) + for _, desc := range d.Nodes { + for _, cls := range desc.Classes { + set[cls] = struct{}{} + } + } + lst := make([]string, len(set)) + i := 0 + for cls := range set { + lst[i] = cls + i++ + } + return lst +} + +// Filter classes based on predicate +func (d *DistributedBackupDescriptor) Filter(pred func(s string) bool) { + for _, desc := range d.Nodes { + cs := make([]string, 0, len(desc.Classes)) + for _, cls := range desc.Classes { + if pred(cls) { + cs = append(cs, cls) + } + } + if len(cs) != len(desc.Classes) { + desc.Classes = cs + } + } +} + +// Include only these classes and remove everything else +func (d *DistributedBackupDescriptor) Include(classes []string) { + if len(classes) == 0 { + return + } + set := make(map[string]struct{}, len(classes)) + for _, cls := range classes { + set[cls] = struct{}{} + } + pred := func(s string) bool { + _, ok := set[s] + return ok + } + d.Filter(pred) +} + +// Exclude removes classes from d +func (d *DistributedBackupDescriptor) Exclude(classes []string) { + if len(classes) == 0 { + return + } + set := make(map[string]struct{}, len(classes)) + for _, cls := range classes { + set[cls] = struct{}{} + } + pred := func(s string) bool { + _, ok := set[s] + return !ok + } + d.Filter(pred) +} + +// ToMappedNodeName will return nodeName after applying d.NodeMapping translation on it. +// If nodeName is not contained in d.nodeMapping, returns nodeName unmodified +func (d *DistributedBackupDescriptor) ToMappedNodeName(nodeName string) string { + if newNodeName, ok := d.NodeMapping[nodeName]; ok { + return newNodeName + } + return nodeName +} + +// ToOriginalNodeName will return nodeName after trying to find an original node name from d.NodeMapping values. 
+// If nodeName is not contained in d.nodeMapping values, returns nodeName unmodified +func (d *DistributedBackupDescriptor) ToOriginalNodeName(nodeName string) string { + for oldNodeName, newNodeName := range d.NodeMapping { + if newNodeName == nodeName { + return oldNodeName + } + } + return nodeName +} + +// ApplyNodeMapping applies d.NodeMapping translation to d.Nodes. If a node in d.Nodes is not translated by d.NodeMapping, it will remain +// unchanged. +func (d *DistributedBackupDescriptor) ApplyNodeMapping() { + if len(d.NodeMapping) == 0 { + return + } + + for k, v := range d.NodeMapping { + if nodeDescriptor, ok := d.Nodes[k]; ok { + d.Nodes[v] = nodeDescriptor + delete(d.Nodes, k) + } + } +} + +// AllExist checks if all classes exist in d. +// It returns either "" or the first class which it could not find +func (d *DistributedBackupDescriptor) AllExist(classes []string) string { + if len(classes) == 0 { + return "" + } + set := make(map[string]struct{}, len(classes)) + for _, cls := range classes { + set[cls] = struct{}{} + } + for _, dest := range d.Nodes { + for _, cls := range dest.Classes { + delete(set, cls) + if len(set) == 0 { + return "" + } + } + } + first := "" + for k := range set { + first = k + break + } + return first +} + +func (d *DistributedBackupDescriptor) Validate() error { + if d.StartedAt.IsZero() || d.ID == "" || + d.Version == "" || d.ServerVersion == "" || d.Error != "" { + return fmt.Errorf("attribute mismatch: [id versions time error]") + } + if len(d.Nodes) == 0 { + return fmt.Errorf("empty list of node descriptors") + } + return nil +} + +// resetStatus sets status and sub-statuses to Started +// It also empties error and sub-errors +func (d *DistributedBackupDescriptor) ResetStatus() *DistributedBackupDescriptor { + d.Status = Started + d.Error = "" + d.StartedAt = time.Now() + d.CompletedAt = time.Time{} + for _, node := range d.Nodes { + node.Status = Started + node.Error = "" + } + return d +} + +// ShardDescriptor 
contains everything needed to completely restore a partition of a specific class +type ShardDescriptor struct { + Name string `json:"name"` + Node string `json:"node"` + Files []string `json:"files,omitempty"` + + DocIDCounterPath string `json:"docIdCounterPath,omitempty"` + DocIDCounter []byte `json:"docIdCounter,omitempty"` + PropLengthTrackerPath string `json:"propLengthTrackerPath,omitempty"` + PropLengthTracker []byte `json:"propLengthTracker,omitempty"` + ShardVersionPath string `json:"shardVersionPath,omitempty"` + Version []byte `json:"version,omitempty"` + Chunk int32 `json:"chunk"` +} + +// ClearTemporary clears fields that are no longer needed once compression is done. +// These fields are not required in versions > 1 because they are stored in the tarball. +func (s *ShardDescriptor) ClearTemporary() { + s.ShardVersionPath = "" + s.Version = nil + + s.DocIDCounterPath = "" + s.DocIDCounter = nil + + s.PropLengthTrackerPath = "" + s.PropLengthTracker = nil +} + +// ClassDescriptor contains everything needed to completely restore a class +type ClassDescriptor struct { + Name string `json:"name"` // DB class name, also selected by user + Shards []*ShardDescriptor `json:"shards"` + ShardingState []byte `json:"shardingState"` + Schema []byte `json:"schema"` + Aliases []byte `json:"aliases"` + + // AliasesIncluded makes the old backup backward compatible when + // old backups are restored by newer ClassDescriptor that supports + // aliases + AliasesIncluded bool `json:"aliasesIncluded"` + Chunks map[int32][]string `json:"chunks,omitempty"` + Error error `json:"-"` + PreCompressionSizeBytes int64 `json:"preCompressionSizeBytes"` // Size of this class's backup in bytes before compression +} + +// BackupDescriptor contains everything needed to completely restore a list of classes +type BackupDescriptor struct { + StartedAt time.Time `json:"startedAt"` + CompletedAt time.Time `json:"completedAt"` + ID string `json:"id"` // User created backup id + Classes 
[]ClassDescriptor `json:"classes"` + RbacBackups []byte `json:"rbacBackups"` + UserBackups []byte `json:"userBackups"` + Status string `json:"status"` // "STARTED|TRANSFERRING|TRANSFERRED|SUCCESS|FAILED|CANCELED" + Version string `json:"version"` // + ServerVersion string `json:"serverVersion"` + Error string `json:"error"` + PreCompressionSizeBytes int64 `json:"preCompressionSizeBytes"` // Size of this node's backup in bytes before compression +} + +// List all existing classes in d +func (d *BackupDescriptor) List() []string { + lst := make([]string, len(d.Classes)) + for i, cls := range d.Classes { + lst[i] = cls.Name + } + return lst +} + +// AllExist checks if all classes exist in d. +// It returns either "" or the first class which it could not find +func (d *BackupDescriptor) AllExist(classes []string) string { + if len(classes) == 0 { + return "" + } + set := make(map[string]struct{}, len(classes)) + for _, cls := range classes { + set[cls] = struct{}{} + } + for _, dest := range d.Classes { + delete(set, dest.Name) + } + first := "" + for k := range set { + first = k + break + } + return first +} + +// Include only these classes and remove everything else +func (d *BackupDescriptor) Include(classes []string) { + if len(classes) == 0 { + return + } + set := make(map[string]struct{}, len(classes)) + for _, cls := range classes { + set[cls] = struct{}{} + } + pred := func(s string) bool { + _, ok := set[s] + return ok + } + d.Filter(pred) +} + +// Exclude removes classes from d +func (d *BackupDescriptor) Exclude(classes []string) { + if len(classes) == 0 { + return + } + set := make(map[string]struct{}, len(classes)) + for _, cls := range classes { + set[cls] = struct{}{} + } + pred := func(s string) bool { + _, ok := set[s] + return !ok + } + d.Filter(pred) +} + +// Filter classes based on predicate +func (d *BackupDescriptor) Filter(pred func(s string) bool) { + cs := make([]ClassDescriptor, 0, len(d.Classes)) + for _, dest := range d.Classes { + if 
pred(dest.Name) { + cs = append(cs, dest) + } + } + d.Classes = cs +} + +// ValidateV1 validates d +func (d *BackupDescriptor) validateV1() error { + for _, c := range d.Classes { + if c.Name == "" || len(c.Schema) == 0 || len(c.ShardingState) == 0 { + return fmt.Errorf("invalid class %q: [name schema sharding]", c.Name) + } + for _, s := range c.Shards { + n := len(s.Files) + if s.Name == "" || s.Node == "" || s.DocIDCounterPath == "" || + s.ShardVersionPath == "" || s.PropLengthTrackerPath == "" || + (n > 0 && (len(s.DocIDCounter) == 0 || + len(s.PropLengthTracker) == 0 || + len(s.Version) == 0)) { + return fmt.Errorf("invalid shard %q.%q", c.Name, s.Name) + } + for i, fpath := range s.Files { + if fpath == "" { + return fmt.Errorf("invalid shard %q.%q: file number %d", c.Name, s.Name, i) + } + } + } + } + return nil +} + +func (d *BackupDescriptor) Validate(newSchema bool) error { + if d.StartedAt.IsZero() || d.ID == "" || + d.Version == "" || d.ServerVersion == "" || d.Error != "" { + return fmt.Errorf("attribute mismatch: [id versions time error]") + } + if !newSchema { + return d.validateV1() + } + for _, c := range d.Classes { + if c.Name == "" || len(c.Schema) == 0 || len(c.ShardingState) == 0 { + return fmt.Errorf("class=%q: invalid attributes [name schema sharding]", c.Name) + } + for _, s := range c.Shards { + if s.Name == "" || s.Node == "" { + return fmt.Errorf("class=%q: invalid shard %q node=%q", c.Name, s.Name, s.Node) + } + } + } + return nil +} + +// ToDistributed is used just for backward compatibility with the old version. 
+func (d *BackupDescriptor) ToDistributed() *DistributedBackupDescriptor { + node, cs := "", d.List() + for _, xs := range d.Classes { + for _, s := range xs.Shards { + node = s.Node + } + } + result := &DistributedBackupDescriptor{ + StartedAt: d.StartedAt, + CompletedAt: d.CompletedAt, + ID: d.ID, + Status: Status(d.Status), + Version: d.Version, + ServerVersion: d.ServerVersion, + Error: d.Error, + PreCompressionSizeBytes: d.PreCompressionSizeBytes, // Copy pre-compression size + } + if node != "" && len(cs) > 0 { + result.Nodes = map[string]*NodeDescriptor{node: {Classes: cs}} + } + return result +} diff --git a/platform/dbops/binaries/weaviate-src/entities/backup/descriptor_test.go b/platform/dbops/binaries/weaviate-src/entities/backup/descriptor_test.go new file mode 100644 index 0000000000000000000000000000000000000000..f8a37d18f78e440360241c7bc58d40c49e9cfb5a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/backup/descriptor_test.go @@ -0,0 +1,475 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package backup + +import ( + "sort" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestExcludeClasses(t *testing.T) { + tests := []struct { + in BackupDescriptor + xs []string + out []string + }{ + {in: BackupDescriptor{}, xs: []string{}, out: []string{}}, + {in: BackupDescriptor{Classes: []ClassDescriptor{{Name: "a"}}}, xs: []string{}, out: []string{"a"}}, + {in: BackupDescriptor{Classes: []ClassDescriptor{{Name: "a"}}}, xs: []string{"a"}, out: []string{}}, + {in: BackupDescriptor{Classes: []ClassDescriptor{{Name: "1"}, {Name: "2"}, {Name: "3"}, {Name: "4"}}}, xs: []string{"2", "3"}, out: []string{"1", "4"}}, + {in: BackupDescriptor{Classes: []ClassDescriptor{{Name: "1"}, {Name: "2"}, {Name: "3"}}}, xs: []string{"1", "3"}, out: []string{"2"}}, + + // {in: []BackupDescriptor{"1", "2", "3", "4"}, xs: []string{"2", "3"}, out: []string{"1", "4"}}, + // {in: []BackupDescriptor{"1", "2", "3"}, xs: []string{"1", "3"}, out: []string{"2"}}, + } + for _, tc := range tests { + tc.in.Exclude(tc.xs) + lst := tc.in.List() + assert.Equal(t, tc.out, lst) + } +} + +func TestIncludeClasses(t *testing.T) { + tests := []struct { + in BackupDescriptor + xs []string + out []string + }{ + {in: BackupDescriptor{}, xs: []string{}, out: []string{}}, + {in: BackupDescriptor{Classes: []ClassDescriptor{{Name: "a"}}}, xs: []string{}, out: []string{"a"}}, + {in: BackupDescriptor{Classes: []ClassDescriptor{{Name: "a"}}}, xs: []string{"a"}, out: []string{"a"}}, + {in: BackupDescriptor{Classes: []ClassDescriptor{{Name: "1"}, {Name: "2"}, {Name: "3"}, {Name: "4"}}}, xs: []string{"2", "3"}, out: []string{"2", "3"}}, + {in: BackupDescriptor{Classes: []ClassDescriptor{{Name: "1"}, {Name: "2"}, {Name: "3"}}}, xs: []string{"1", "3"}, out: []string{"1", "3"}}, + } + for _, tc := range tests { + tc.in.Include(tc.xs) + lst := tc.in.List() + assert.Equal(t, tc.out, lst) + } +} + +func TestAllExist(t *testing.T) { + x := 
BackupDescriptor{Classes: []ClassDescriptor{{Name: "a"}}} + if y := x.AllExist(nil); y != "" { + t.Errorf("x.AllExists(nil) got=%v want=%v", y, "") + } + if y := x.AllExist([]string{"a"}); y != "" { + t.Errorf("x.AllExists(['a']) got=%v want=%v", y, "") + } + if y := x.AllExist([]string{"b"}); y != "b" { + t.Errorf("x.AllExists(['a']) got=%v want=%v", y, "b") + } +} + +func TestValidateBackup(t *testing.T) { + timept := time.Now().UTC() + bytes := []byte("hello") + tests := []struct { + desc BackupDescriptor + successV1 bool + successV2 bool + }{ + // first level check + {desc: BackupDescriptor{}}, + {desc: BackupDescriptor{ID: "1"}}, + {desc: BackupDescriptor{ID: "1", Version: "1"}}, + {desc: BackupDescriptor{ID: "1", Version: "1", ServerVersion: "1"}}, + { + desc: BackupDescriptor{ID: "1", Version: "1", ServerVersion: "1", StartedAt: timept}, + successV1: true, successV2: true, + }, + {desc: BackupDescriptor{ID: "1", Version: "1", ServerVersion: "1", StartedAt: timept, Error: "err"}}, + {desc: BackupDescriptor{ + ID: "1", Version: "1", ServerVersion: "1", StartedAt: timept, + Classes: []ClassDescriptor{{}}, + }}, + {desc: BackupDescriptor{ + ID: "1", Version: "1", ServerVersion: "1", StartedAt: timept, + Classes: []ClassDescriptor{{Name: "n"}}, + }}, + {desc: BackupDescriptor{ + ID: "1", Version: "1", ServerVersion: "1", StartedAt: timept, + Classes: []ClassDescriptor{{Name: "n", Schema: bytes}}, + }}, + {desc: BackupDescriptor{ + ID: "1", Version: "1", ServerVersion: "1", StartedAt: timept, + Classes: []ClassDescriptor{{Name: "n", Schema: bytes, ShardingState: bytes}}, + }, successV1: true, successV2: true}, + {desc: BackupDescriptor{ + ID: "1", Version: "1", ServerVersion: "1", StartedAt: timept, + Classes: []ClassDescriptor{{ + Name: "n", Schema: bytes, ShardingState: bytes, + Shards: []*ShardDescriptor{{Name: ""}}, + }}, + }}, + {desc: BackupDescriptor{ + ID: "1", Version: "1", ServerVersion: "1", StartedAt: timept, + Classes: []ClassDescriptor{{ + Name: "n", 
Schema: bytes, ShardingState: bytes, + Shards: []*ShardDescriptor{{Name: "n", Node: ""}}, + }}, + }}, + {desc: BackupDescriptor{ + ID: "1", Version: "1", ServerVersion: "1", StartedAt: timept, + Classes: []ClassDescriptor{{ + Name: "n", Schema: bytes, ShardingState: bytes, + Shards: []*ShardDescriptor{{Name: "n", Node: "n"}}, + }}, + }, successV2: true}, + {desc: BackupDescriptor{ + ID: "1", Version: "1", ServerVersion: "1", StartedAt: timept, + Classes: []ClassDescriptor{{ + Name: "n", Schema: bytes, ShardingState: bytes, + Shards: []*ShardDescriptor{{ + Name: "n", Node: "n", + PropLengthTrackerPath: "n", DocIDCounterPath: "n", ShardVersionPath: "n", + }}, + }}, + }, successV1: true, successV2: true}, + {desc: BackupDescriptor{ + ID: "1", Version: "1", ServerVersion: "1", StartedAt: timept, + Classes: []ClassDescriptor{{ + Name: "n", Schema: bytes, ShardingState: bytes, + Shards: []*ShardDescriptor{{ + Name: "n", Node: "n", + PropLengthTrackerPath: "n", DocIDCounterPath: "n", ShardVersionPath: "n", + Files: []string{"file"}, + }}, + }}, + }, successV2: true}, + {desc: BackupDescriptor{ + ID: "1", Version: "1", ServerVersion: "1", StartedAt: timept, + Classes: []ClassDescriptor{{ + Name: "n", Schema: bytes, ShardingState: bytes, + Shards: []*ShardDescriptor{{ + Name: "n", Node: "n", + PropLengthTrackerPath: "n", DocIDCounterPath: "n", ShardVersionPath: "n", + DocIDCounter: bytes, Files: []string{"file"}, + }}, + }}, + }, successV2: true}, + {desc: BackupDescriptor{ + ID: "1", Version: "1", ServerVersion: "1", StartedAt: timept, + Classes: []ClassDescriptor{{ + Name: "n", Schema: bytes, ShardingState: bytes, + Shards: []*ShardDescriptor{{ + Name: "n", Node: "n", + PropLengthTrackerPath: "n", DocIDCounterPath: "n", ShardVersionPath: "n", + DocIDCounter: bytes, Version: bytes, PropLengthTracker: bytes, Files: []string{""}, + }}, + }}, + }, successV2: true}, + {desc: BackupDescriptor{ + ID: "1", Version: "1", ServerVersion: "1", StartedAt: timept, + Classes: 
[]ClassDescriptor{{ + Name: "n", Schema: bytes, ShardingState: bytes, + Shards: []*ShardDescriptor{{ + Name: "n", Node: "n", + PropLengthTrackerPath: "n", DocIDCounterPath: "n", ShardVersionPath: "n", + DocIDCounter: bytes, Version: bytes, PropLengthTracker: bytes, Files: []string{"file"}, + }}, + }}, + }, successV1: true, successV2: true}, + } + for i, tc := range tests { + err := tc.desc.Validate(false) + if got := err == nil; got != tc.successV1 { + t.Errorf("%d. validate(%+v): want=%v got=%v err=%v", i, tc.desc, tc.successV1, got, err) + } + err = tc.desc.Validate(true) + if got := err == nil; got != tc.successV2 { + t.Errorf("%d. validate(%+v): want=%v got=%v err=%v", i, tc.desc, tc.successV1, got, err) + } + } +} + +func TestBackwardCompatibility(t *testing.T) { + timept := time.Now().UTC() + tests := []struct { + desc BackupDescriptor + success bool + }{ + // first level check + {desc: BackupDescriptor{}}, + {desc: BackupDescriptor{ID: "1"}}, + {desc: BackupDescriptor{ID: "1", Version: "1"}}, + {desc: BackupDescriptor{ID: "1", Version: "1", ServerVersion: "1"}}, + {desc: BackupDescriptor{ID: "1", Version: "1", ServerVersion: "1", StartedAt: timept}}, + {desc: BackupDescriptor{ID: "1", Version: "1", ServerVersion: "1", StartedAt: timept, Error: "err"}}, + {desc: BackupDescriptor{ + ID: "1", Version: "1", ServerVersion: "1", StartedAt: timept, + Classes: []ClassDescriptor{{ + Name: "n", + Shards: []*ShardDescriptor{{Name: "n", Node: ""}}, + }}, + }}, + {desc: BackupDescriptor{ + ID: "1", Version: "1", ServerVersion: "1", StartedAt: timept, + Classes: []ClassDescriptor{{ + Name: "n", + Shards: []*ShardDescriptor{{ + Name: "n", Node: "n", + }}, + }}, + }, success: true}, + } + for i, tc := range tests { + desc := tc.desc.ToDistributed() + err := desc.Validate() + if got := err == nil; got != tc.success { + t.Errorf("%d. 
validate(%+v): want=%v got=%v err=%v", i, tc.desc, tc.success, got, err) + } + } +} + +func TestDistributedBackup(t *testing.T) { + d := DistributedBackupDescriptor{ + Nodes: map[string]*NodeDescriptor{ + "N1": {Classes: []string{"1", "2"}}, + "N2": {Classes: []string{"3", "4"}}, + }, + } + if n := d.Len(); n != 2 { + t.Errorf("#nodes got:%v want:%v", n, 2) + } + if n := d.Count(); n != 4 { + t.Errorf("#classes got:%v want:%v", n, 4) + } + d.Exclude([]string{"3", "4"}) + d.RemoveEmpty() + if n := d.Len(); n != 1 { + t.Errorf("#nodes got:%v want:%v", n, 2) + } + if n := d.Count(); n != 2 { + t.Errorf("#classes got:%v want:%v", n, 4) + } +} + +func TestDistributedBackupExcludeClasses(t *testing.T) { + tests := []struct { + in DistributedBackupDescriptor + xs []string + out []string + }{ + { + in: DistributedBackupDescriptor{}, + xs: []string{}, + out: []string{}, + }, + { + in: DistributedBackupDescriptor{ + Nodes: map[string]*NodeDescriptor{ + "N1": {Classes: []string{"a"}}, + }, + }, + xs: []string{}, + out: []string{"a"}, + }, + { + in: DistributedBackupDescriptor{ + Nodes: map[string]*NodeDescriptor{ + "N1": {Classes: []string{"a"}}, + }, + }, + xs: []string{"a"}, + out: []string{}, + }, + { + in: DistributedBackupDescriptor{ + Nodes: map[string]*NodeDescriptor{ + "N1": {Classes: []string{"1", "2"}}, + "N2": {Classes: []string{"3", "4"}}, + }, + }, + xs: []string{"2", "3"}, + out: []string{"1", "4"}, + }, + + { + in: DistributedBackupDescriptor{ + Nodes: map[string]*NodeDescriptor{ + "N1": {Classes: []string{"1", "2"}}, + "N2": {Classes: []string{"3"}}, + }, + }, + xs: []string{"1", "3"}, + out: []string{"2"}, + }, + } + + for _, tc := range tests { + tc.in.Exclude(tc.xs) + lst := tc.in.Classes() + sort.Strings(lst) + assert.Equal(t, tc.out, lst) + } +} + +func TestDistributedBackupIncludeClasses(t *testing.T) { + tests := []struct { + in DistributedBackupDescriptor + xs []string + out []string + }{ + { + in: DistributedBackupDescriptor{}, + xs: []string{}, + 
out: []string{}, + }, + { + in: DistributedBackupDescriptor{ + Nodes: map[string]*NodeDescriptor{ + "N1": {Classes: []string{"a"}}, + }, + }, + xs: []string{}, + out: []string{"a"}, + }, + { + in: DistributedBackupDescriptor{ + Nodes: map[string]*NodeDescriptor{ + "N1": {Classes: []string{"a"}}, + }, + }, + xs: []string{"a"}, + out: []string{"a"}, + }, + { + in: DistributedBackupDescriptor{ + Nodes: map[string]*NodeDescriptor{ + "N1": {Classes: []string{"1", "2"}}, + "N2": {Classes: []string{"3", "4"}}, + }, + }, + xs: []string{"2", "3"}, + out: []string{"2", "3"}, + }, + + { + in: DistributedBackupDescriptor{ + Nodes: map[string]*NodeDescriptor{ + "N1": {Classes: []string{"1", "2"}}, + "N2": {Classes: []string{"3"}}, + }, + }, + xs: []string{"1", "3"}, + out: []string{"1", "3"}, + }, + } + for _, tc := range tests { + tc.in.Include(tc.xs) + lst := tc.in.Classes() + sort.Strings(lst) + assert.Equal(t, tc.out, lst) + } +} + +func TestDistributedBackupAllExist(t *testing.T) { + x := DistributedBackupDescriptor{Nodes: map[string]*NodeDescriptor{"N1": {Classes: []string{"a"}}}} + if y := x.AllExist(nil); y != "" { + t.Errorf("x.AllExists(nil) got=%v want=%v", y, "") + } + if y := x.AllExist([]string{"a"}); y != "" { + t.Errorf("x.AllExists(['a']) got=%v want=%v", y, "") + } + if y := x.AllExist([]string{"b"}); y != "b" { + t.Errorf("x.AllExists(['a']) got=%v want=%v", y, "b") + } +} + +func TestDistributedBackupValidate(t *testing.T) { + timept := time.Now().UTC() + tests := []struct { + desc DistributedBackupDescriptor + success bool + }{ + // first level check + {desc: DistributedBackupDescriptor{}}, + {desc: DistributedBackupDescriptor{ID: "1"}}, + {desc: DistributedBackupDescriptor{ID: "1", Version: "1"}}, + {desc: DistributedBackupDescriptor{ID: "1", Version: "1", ServerVersion: "1"}}, + {desc: DistributedBackupDescriptor{ID: "1", Version: "1", ServerVersion: "1", StartedAt: timept, Error: "err"}}, + {desc: DistributedBackupDescriptor{ID: "1", Version: "1", 
ServerVersion: "1", StartedAt: timept}}, + {desc: DistributedBackupDescriptor{ + ID: "1", Version: "1", ServerVersion: "1", StartedAt: timept, + Nodes: map[string]*NodeDescriptor{"N": {}}, + }, success: true}, + } + for i, tc := range tests { + err := tc.desc.Validate() + if got := err == nil; got != tc.success { + t.Errorf("%d. validate(%+v): want=%v got=%v err=%v", i, tc.desc, tc.success, got, err) + } + } +} + +func TestTestDistributedBackupResetStatus(t *testing.T) { + begin := time.Now().UTC().Add(-2) + desc := DistributedBackupDescriptor{ + StartedAt: begin, + CompletedAt: begin.Add(2), + ID: "1", + Version: "1", + ServerVersion: "1", + Nodes: map[string]*NodeDescriptor{ + "1": {}, + "2": {Status: Success}, + "3": {Error: "error"}, + }, + Error: "error", + } + + desc.ResetStatus() + if !desc.StartedAt.After(begin) { + t.Fatalf("!desc.StartedAt.After(begin)") + } + want := DistributedBackupDescriptor{ + StartedAt: desc.StartedAt, + ID: "1", + Version: "1", + ServerVersion: "1", + Nodes: map[string]*NodeDescriptor{ + "1": {Status: Started}, + "2": {Status: Started}, + "3": {Status: Started, Error: ""}, + }, + Status: Started, + } + assert.Equal(t, want, desc) +} + +func TestShardDescriptorClear(t *testing.T) { + s := ShardDescriptor{ + Name: "name", + Node: "node", + PropLengthTrackerPath: "a/b", + PropLengthTracker: []byte{1}, + DocIDCounterPath: "a/c", + DocIDCounter: []byte{2}, + ShardVersionPath: "a/d", + Version: []byte{3}, + Files: []string{"file"}, + Chunk: 1, + } + + want := ShardDescriptor{ + Name: "name", + Node: "node", + Files: []string{"file"}, + Chunk: 1, + } + s.ClearTemporary() + assert.Equal(t, want, s) +} diff --git a/platform/dbops/binaries/weaviate-src/entities/backup/errors.go b/platform/dbops/binaries/weaviate-src/entities/backup/errors.go new file mode 100644 index 0000000000000000000000000000000000000000..f6af310aacfa4bfbec94da512b431d7a0383e03c --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/backup/errors.go @@ -0,0 
+1,70 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package backup + +type ErrUnprocessable struct { + err error +} + +func (e ErrUnprocessable) Error() string { + return e.err.Error() +} + +func NewErrUnprocessable(err error) ErrUnprocessable { + return ErrUnprocessable{err} +} + +type ErrNotFound struct { + err error +} + +func (e ErrNotFound) Error() string { + if e.err != nil { + return e.err.Error() + } + return "" +} + +func NewErrNotFound(err error) ErrNotFound { + return ErrNotFound{err} +} + +type ErrContextExpired struct { + err error +} + +func (e ErrContextExpired) Error() string { + return e.err.Error() +} + +func NewErrContextExpired(err error) ErrContextExpired { + return ErrContextExpired{err} +} + +type ErrInternal struct { + err error +} + +func (e ErrInternal) Error() string { + return e.err.Error() +} + +func NewErrInternal(err error) ErrInternal { + return ErrInternal{err} +} + +func IsCancelled(err error, meta *DistributedBackupDescriptor) bool { + if err == nil && meta.Status == Cancelled { + return true + } + return false +} diff --git a/platform/dbops/binaries/weaviate-src/entities/backup/status.go b/platform/dbops/binaries/weaviate-src/entities/backup/status.go new file mode 100644 index 0000000000000000000000000000000000000000..c49ad0791eea0e4b989270aace338ceab260d0b3 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/backup/status.go @@ -0,0 +1,33 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package backup + +type Status string + +const ( + Started Status = "STARTED" + Transferring Status = "TRANSFERRING" + Transferred Status = "TRANSFERRED" + Success Status = "SUCCESS" + Cancelled Status = "CANCELED" + Failed Status = "FAILED" +) + +type CreateMeta struct { + Path string + Status Status +} + +type RestoreMeta struct { + Path string + Status Status +} diff --git a/platform/dbops/binaries/weaviate-src/entities/classcache/classcache.go b/platform/dbops/binaries/weaviate-src/entities/classcache/classcache.go new file mode 100644 index 0000000000000000000000000000000000000000..26ab2b7db9d043ead6eca7e7360854015c52c737 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/classcache/classcache.go @@ -0,0 +1,45 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package classcache + +import ( + "sync" + + "github.com/weaviate/weaviate/entities/models" +) + +type classCache sync.Map + +type classCacheEntry struct { + class *models.Class + version uint64 +} + +func (cc *classCache) Load(name string) (*classCacheEntry, bool) { + if e, ok := (*sync.Map)(cc).Load(name); ok { + return e.(*classCacheEntry), true + } + return nil, false +} + +func (cc *classCache) LoadOrStore(name string, entry *classCacheEntry) (*classCacheEntry, bool) { + e, ok := (*sync.Map)(cc).LoadOrStore(name, entry) + return e.(*classCacheEntry), ok +} + +// func (cc *classCache) Store(name string, entry *classCacheEntry) { +// (*sync.Map)(cc).Store(name, entry) +// } + +func (cc *classCache) Delete(name string) { + (*sync.Map)(cc).Delete(name) +} diff --git a/platform/dbops/binaries/weaviate-src/entities/classcache/context.go b/platform/dbops/binaries/weaviate-src/entities/classcache/context.go new file mode 100644 index 0000000000000000000000000000000000000000..3e8e2847e7dec03cb45bb4c3d82dd42dbd42029e --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/classcache/context.go @@ -0,0 +1,97 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package classcache + +import ( + "context" + "fmt" + "slices" + + "github.com/weaviate/weaviate/entities/versioned" +) + +const classCacheKey = "classCache" + +var errorNoClassCache = fmt.Errorf("context does not contain classCache") + +func ContextWithClassCache(ctx context.Context) context.Context { + if ctx.Value(classCacheKey) != nil { + return ctx + } + return context.WithValue(ctx, classCacheKey, &classCache{}) +} + +func RemoveClassFromContext(ctxWithClassCache context.Context, name string) error { + cache, err := extractCache(ctxWithClassCache) + if err != nil { + return err + } + + cache.Delete(name) + return nil +} + +func ClassesFromContext(ctxWithClassCache context.Context, getter func(names ...string) (map[string]versioned.Class, error), names ...string) (map[string]versioned.Class, error) { + cache, err := extractCache(ctxWithClassCache) + if err != nil { + return nil, err + } + + versionedClasses := map[string]versioned.Class{} + notFoundInCtx := []string{} + for _, name := range names { + // collect what is not in context + if entry, ok := cache.Load(name); ok { + versionedClasses[entry.class.Class] = versioned.Class{Class: entry.class, Version: entry.version} + continue + } + notFoundInCtx = append(notFoundInCtx, name) + } + + // remove dedup, empty and a void calls if there is non + slices.Sort(notFoundInCtx) + notFoundInCtx = slices.Compact(notFoundInCtx) + if len(notFoundInCtx) == 0 { + return versionedClasses, nil + } + + if len(notFoundInCtx) > 1 && notFoundInCtx[0] == "" { + notFoundInCtx = notFoundInCtx[1:] + } + + // TODO prevent concurrent getter calls for the same class if it was not loaded, + // get once and share results + vclasses, err := getter(notFoundInCtx...) 
+ if err != nil { + return versionedClasses, err + } + + for _, vclass := range vclasses { + // do not replace entry if it was loaded in the meantime by concurrent access + entry, _ := cache.LoadOrStore(vclass.Class.Class, &classCacheEntry{class: vclass.Class, version: vclass.Version}) + versionedClasses[entry.class.Class] = versioned.Class{Class: entry.class, Version: entry.version} + } + + return versionedClasses, nil +} + +func extractCache(ctx context.Context) (*classCache, error) { + value := ctx.Value(classCacheKey) + if value == nil { + return nil, errorNoClassCache + } + cache, ok := value.(*classCache) + if !ok { + return nil, errorNoClassCache + } + return cache, nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/classcache/context_test.go b/platform/dbops/binaries/weaviate-src/entities/classcache/context_test.go new file mode 100644 index 0000000000000000000000000000000000000000..a72bb38ed17c82fb9c6089084d5712e89b427063 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/classcache/context_test.go @@ -0,0 +1,194 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package classcache + +import ( + "context" + "fmt" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/versioned" +) + +func Test_ContextWithClassCache(t *testing.T) { + t.Run("adds cache to context if not present", func(t *testing.T) { + ctx := context.Background() + cacheCtx := ContextWithClassCache(ctx) + + assert.Nil(t, ctx.Value(classCacheKey)) + assert.NotNil(t, cacheCtx.Value(classCacheKey)) + }) + + t.Run("does not add cache to context if already present", func(t *testing.T) { + ctx := context.Background() + cacheCtx1 := ContextWithClassCache(ctx) + cacheCtx2 := ContextWithClassCache(cacheCtx1) + + cache1 := cacheCtx1.Value(classCacheKey) + cache2 := cacheCtx2.Value(classCacheKey) + + assert.NotNil(t, cache1) + assert.NotNil(t, cache2) + assert.True(t, cacheCtx1 == cacheCtx2) // same context instance + assert.True(t, cache1 == cache2) // same cache instance + }) +} + +func Test_ClassesFromContext(t *testing.T) { + t.Run("fails getting class from context without cache", func(t *testing.T) { + noCacheCtx := context.Background() + + vclasses, err := ClassesFromContext(noCacheCtx, noopGetter, "class1") + _, exists := vclasses["class1"] + assert.False(t, exists) + assert.NotContains(t, vclasses, "class1") + assert.ErrorContains(t, err, "context does not contain classCache") + }) + + t.Run("fails getting class from context with invalid cache", func(t *testing.T) { + invalidCacheCtx := context.WithValue(context.Background(), classCacheKey, "stringInsteadClassCache") + + vclasses, err := ClassesFromContext(invalidCacheCtx, noopGetter, "class1") + _, exists := vclasses["class1"] + assert.False(t, exists) + assert.NotContains(t, vclasses, "class1") + assert.ErrorContains(t, err, "context does not contain classCache") + }) + + t.Run("uses getter to init class cache 
if miss", func(t *testing.T) { + cacheCtx := ContextWithClassCache(context.Background()) + getter := createCounterGetter(0) + + vclasses_1, err_1 := ClassesFromContext(cacheCtx, getter, "class1", "class2") + assert.NoError(t, err_1) + + vclass1 := vclasses_1["class1"] + assert.Equal(t, uint64(1), vclass1.Version) + require.NotNil(t, vclass1.Class) + assert.Equal(t, "class1", vclass1.Class.Class) + + vclass2 := vclasses_1["class2"] + assert.Equal(t, uint64(2), vclass2.Version) + require.NotNil(t, vclass2) + assert.Equal(t, "class2", vclass2.Class.Class) + + vclasses_2, err_2 := ClassesFromContext(cacheCtx, getter, "class1", "class2") + assert.NoError(t, err_2) + + vclass1 = vclasses_2["class1"] + assert.Equal(t, uint64(1), vclass1.Version) + require.NotNil(t, vclass1.Class) + assert.Equal(t, "class1", vclass1.Class.Class) + + vclass2 = vclasses_2["class2"] + assert.Equal(t, uint64(2), vclass2.Version) + require.NotNil(t, vclass2) + assert.Equal(t, "class2", vclass2.Class.Class) + }) + + t.Run("does not cache class if getter fails", func(t *testing.T) { + cacheCtx := ContextWithClassCache(context.Background()) + getter := createErrorGetter() + + vclasses, err1_1 := ClassesFromContext(cacheCtx, getter, "class1") + class1_1, exists1_1 := vclasses["class1"] + assert.False(t, exists1_1) + assert.Equal(t, uint64(0), class1_1.Version) + assert.ErrorContains(t, err1_1, "error getting class class1, count_1") + + vclasses, err1_2 := ClassesFromContext(cacheCtx, getter, "class1") + class1_2, exists_1_2 := vclasses["class1"] + assert.False(t, exists_1_2) + assert.Equal(t, uint64(0), class1_2.Version) + assert.ErrorContains(t, err1_2, "error getting class class1, count_2") + + vclasses, err1_3 := ClassesFromContext(cacheCtx, getter, "class1") + class1_3, exists_1_3 := vclasses["class1"] + assert.False(t, exists_1_3) + assert.Equal(t, uint64(0), class1_3.Version) + assert.ErrorContains(t, err1_3, "error getting class class1, count_3") + }) + + t.Run("does not overwrite cache when 
multiple concurrent loads", func(t *testing.T) { + cacheCtx := ContextWithClassCache(context.Background()) + getter := createCounterGetter(50 * time.Millisecond) + + concurrency := 20 + classes := make([]*models.Class, concurrency) + versions := make([]uint64, concurrency) + errors := make([]error, concurrency) + + wg := new(sync.WaitGroup) + wg.Add(concurrency) + for i := 0; i < concurrency; i++ { + i := i + go func() { + vclasses, err := ClassesFromContext(cacheCtx, getter, "class1") + errors[i] = err + classes[i] = vclasses["class1"].Class + versions[i] = vclasses["class1"].Version + wg.Done() + }() + } + wg.Wait() + + // Same class for all calls, same versions for all calls, no errors. + // It is undetermined which getter call will be stored, but it should be shared + // across all results + for i := 0; i < concurrency; i++ { + assert.NoError(t, errors[i]) + assert.Equal(t, versions[0], versions[i]) + require.NotNil(t, classes[i]) + assert.Equal(t, fmt.Sprintf("description_%d", versions[0]), classes[i].Description) + } + }) +} + +func noopGetter(names ...string) (map[string]versioned.Class, error) { + return nil, nil +} + +func createErrorGetter() func(names ...string) (map[string]versioned.Class, error) { + errorCounter := uint64(0) + return func(names ...string) (map[string]versioned.Class, error) { + return nil, fmt.Errorf("error getting class %s, count_%d", names[0], atomic.AddUint64(&errorCounter, 1)) + } +} + +func createCounterGetter(sleep time.Duration) func(names ...string) (map[string]versioned.Class, error) { + versionCounter := uint64(0) + return func(names ...string) (map[string]versioned.Class, error) { + if sleep > 0 { + time.Sleep(sleep) + } + res := make(map[string]versioned.Class, len(names)) + + for _, name := range names { + version := atomic.AddUint64(&versionCounter, 1) + res[name] = versioned.Class{ + Version: version, + Class: &models.Class{ + Class: name, + Description: fmt.Sprintf("description_%d", version), + }, + } + } + + return 
res, nil + } +} diff --git a/platform/dbops/binaries/weaviate-src/entities/concurrency/budget.go b/platform/dbops/binaries/weaviate-src/entities/concurrency/budget.go new file mode 100644 index 0000000000000000000000000000000000000000..71e68c099a06feeb4dc8fe883f761f2fd7562511 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/concurrency/budget.go @@ -0,0 +1,42 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package concurrency + +import ( + "context" +) + +type budgetKey struct{} + +func (budgetKey) String() string { + return "concurrency_budget" +} + +func CtxWithBudget(ctx context.Context, budget int) context.Context { + return context.WithValue(ctx, budgetKey{}, budget) +} + +func BudgetFromCtx(ctx context.Context, fallback int) int { + budget, ok := ctx.Value(budgetKey{}).(int) + if !ok { + return fallback + } + + return budget +} + +func ContextWithFractionalBudget(ctx context.Context, factor, fallback int) context.Context { + budget := BudgetFromCtx(ctx, fallback) + newBudget := FractionOf(budget, factor) + + return CtxWithBudget(ctx, newBudget) +} diff --git a/platform/dbops/binaries/weaviate-src/entities/concurrency/budget_test.go b/platform/dbops/binaries/weaviate-src/entities/concurrency/budget_test.go new file mode 100644 index 0000000000000000000000000000000000000000..e050d894bc9a0dd6b1693532ab73024990546dff --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/concurrency/budget_test.go @@ -0,0 +1,43 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package concurrency + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestBudget(t *testing.T) { + fallback := 12 + + ctx := context.Background() + budget := BudgetFromCtx(ctx, fallback) + // no budget set + assert.Equal(t, fallback, budget) + + // extract previously set budget + ctx = CtxWithBudget(ctx, 32) + budget = BudgetFromCtx(ctx, fallback) + assert.Equal(t, 32, budget) + + // reduce budget by factor + ctx = ContextWithFractionalBudget(ctx, 2, fallback) + budget = BudgetFromCtx(ctx, fallback) + assert.Equal(t, 16, budget) + + // fractional reduction of fallback + ctx = ContextWithFractionalBudget(context.Background(), 3, fallback) + budget = BudgetFromCtx(ctx, fallback) + assert.Equal(t, 4, budget) +} diff --git a/platform/dbops/binaries/weaviate-src/entities/concurrency/numcpu.go b/platform/dbops/binaries/weaviate-src/entities/concurrency/numcpu.go new file mode 100644 index 0000000000000000000000000000000000000000..159c39f97ca689cc04433206f962fedad379c016 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/concurrency/numcpu.go @@ -0,0 +1,97 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package concurrency + +import ( + "math" + "runtime" +) + +// Use runtime.GOMAXPROCS instead of runtime.NumCPU because NumCPU returns +// the physical CPU cores. However, in a containerization context, that might +// not be what we want. The physical node could have 128 cores, but we could +// be cgroup-limited to 2 cores. In that case, we want 2 to be our limit, not +// 128. It isn't guaranteed that MAXPROCS reflects the cgroup limit, but at +// least there is a chance that it was set correctly. 
// If not, it defaults to NumCPU anyway, so we're not any worse off.
var (
	NUMCPU   = runtime.GOMAXPROCS(0)
	NUMCPUx2 = NUMCPU * 2
	NUMCPU_2 = NUMCPU / 2

	SROAR_MERGE = 0 // see init()
)

func init() {
	// GOMAXPROCS may be 1, which would make NUMCPU/2 zero; clamp to 1.
	if NUMCPU_2 == 0 {
		NUMCPU_2 = 1
	}

	SROAR_MERGE = NUMCPU_2
}

// NoMoreThanNUMCPU caps conc at NUMCPU. Non-positive values also map to
// NUMCPU.
func NoMoreThanNUMCPU(conc int) int {
	if conc > NUMCPU || conc <= 0 {
		return NUMCPU
	}
	return conc
}

// TimesNUMCPU calculates the number of goroutines based on NUMCPU
// (gomaxprocs) and the given factor. Negative factors are interpreted as
// fractions. The result is rounded down; the minimum returned value is 1.
// Examples for factors:
// * -3: NUMCPU/3
// * -2: NUMCPU/2
// * -1, 0, 1: NUMCPU
// * 2: NUMCPU*2
// * 3: NUMCPU*3
func TimesNUMCPU(factor int) int {
	return timesNUMCPU(factor, NUMCPU)
}

func timesNUMCPU(factor int, numcpu int) int {
	// factors -1, 0 and 1 all mean "exactly numcpu"
	if factor >= -1 && factor <= 1 {
		return numcpu
	}
	if factor > 1 {
		return numcpu * factor
	}
	// negative factor: interpret as a fraction, never dropping below 1
	if n := numcpu / -factor; n > 0 {
		return n
	}
	return 1
}

// TimesFloatNUMCPU calculates the number of goroutines based on NUMCPU
// (gomaxprocs) and the given factor, which should be greater than or equal
// to 0. Factors <= 0 yield NUMCPU itself; otherwise the result is
// factor*NUMCPU rounded to the nearest integer, with a minimum of 1.
func TimesFloatNUMCPU(factor float64) int {
	return timesFloatNUMCPU(factor, NUMCPU)
}

func timesFloatNUMCPU(factor float64, numcpu int) int {
	if factor <= 0 {
		return numcpu
	}

	return int(math.Max(1, math.Round(factor*float64(numcpu))))
}

// FractionOf returns original divided by factor (integer division), never
// less than 1. Non-positive factors leave original unchanged.
func FractionOf(original, factor int) int {
	if factor <= 0 {
		return original
	}

	result := original / factor
	if result < 1 {
		return 1
	}
	return result
}
+// +// CONTACT: hello@weaviate.io +// + +package concurrency + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestTimesNumcpu(t *testing.T) { + type testCase struct { + numcpu int + factor int + expectedN int + } + + testCases := []testCase{ + { + numcpu: 10, + factor: -15, + expectedN: 1, + }, + { + numcpu: 10, + factor: -4, + expectedN: 2, + }, + { + numcpu: 10, + factor: -3, + expectedN: 3, + }, + { + numcpu: 10, + factor: -2, + expectedN: 5, + }, + { + numcpu: 10, + factor: -1, + expectedN: 10, + }, + { + numcpu: 10, + factor: 0, + expectedN: 10, + }, + { + numcpu: 10, + factor: 1, + expectedN: 10, + }, + { + numcpu: 10, + factor: 2, + expectedN: 20, + }, + { + numcpu: 10, + factor: 3, + expectedN: 30, + }, + { + numcpu: 10, + factor: 4, + expectedN: 40, + }, + } + + for _, tc := range testCases { + t.Run(fmt.Sprintf("times numcpu %d factor %d", tc.numcpu, tc.factor), func(t *testing.T) { + n := timesNUMCPU(tc.factor, tc.numcpu) + assert.Equal(t, tc.expectedN, n) + }) + } +} + +func TestTimesFloatNumcpu(t *testing.T) { + type testCase struct { + numcpu int + factor float64 + expectedN int + } + + testCases := []testCase{ + { + numcpu: 10, + factor: -1, + expectedN: 10, + }, + { + numcpu: 10, + factor: 0, + expectedN: 10, + }, + { + numcpu: 10, + factor: 0.01, + expectedN: 1, + }, + { + numcpu: 10, + factor: 0.04, + expectedN: 1, + }, + { + numcpu: 10, + factor: 0.1, + expectedN: 1, + }, + { + numcpu: 10, + factor: 0.14, + expectedN: 1, + }, + { + numcpu: 10, + factor: 0.15, + expectedN: 2, + }, + + { + numcpu: 10, + factor: 0.16, + expectedN: 2, + }, + { + numcpu: 10, + factor: 0.5, + expectedN: 5, + }, + { + numcpu: 10, + factor: 1, + expectedN: 10, + }, + { + numcpu: 10, + factor: 2, + expectedN: 20, + }, + { + numcpu: 10, + factor: 3, + expectedN: 30, + }, + { + numcpu: 10, + factor: 4, + expectedN: 40, + }, + } + + for _, tc := range testCases { + t.Run(fmt.Sprintf("times numcpu %d factor %f", tc.numcpu, tc.factor), 
func(t *testing.T) { + n := timesFloatNUMCPU(tc.factor, tc.numcpu) + assert.Equal(t, tc.expectedN, n) + }) + } +} + +func TestFractionOf(t *testing.T) { + type testCase struct { + original int + factor int + expected int + } + + testCases := []testCase{ + { + original: 10, + factor: -1, + expected: 10, + }, + { + original: 10, + factor: 0, + expected: 10, + }, + { + original: 10, + factor: 1, + expected: 10, + }, + { + original: 10, + factor: 2, + expected: 5, + }, + { + original: 10, + factor: 3, + expected: 3, + }, + { + original: 10, + factor: 24, + expected: 1, + }, + } + + for _, tc := range testCases { + t.Run(fmt.Sprintf("fraction of %d factor %d", tc.original, tc.factor), func(t *testing.T) { + n := FractionOf(tc.original, tc.factor) + assert.Equal(t, tc.expected, n) + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/entities/config/helpers.go b/platform/dbops/binaries/weaviate-src/entities/config/helpers.go new file mode 100644 index 0000000000000000000000000000000000000000..160bd594792fcff793063c2e53841bb3fde63397 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/config/helpers.go @@ -0,0 +1,23 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package config + +import "strings" + +func Enabled(value string) bool { + switch strings.ToLower(value) { + case "on", "enabled", "1", "true": + return true + default: + return false + } +} diff --git a/platform/dbops/binaries/weaviate-src/entities/config/helpers_test.go b/platform/dbops/binaries/weaviate-src/entities/config/helpers_test.go new file mode 100644 index 0000000000000000000000000000000000000000..1a944d79895ecffc9a2ede10f512c0298ebbd105 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/config/helpers_test.go @@ -0,0 +1,68 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package config + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestEnabled(t *testing.T) { + tests := []struct { + name string + value string + want bool + }{ + { + name: "true", value: "true", want: true, + }, + { + name: "True", value: "True", want: true, + }, + { + name: "TRUE", value: "TRUE", want: true, + }, + { + name: "enabled", value: "enabled", want: true, + }, + { + name: "Enabled", value: "Enabled", want: true, + }, + { + name: "ENABLED", value: "ENABLED", want: true, + }, + { + name: "on", value: "on", want: true, + }, + { + name: "On", value: "On", want: true, + }, + { + name: "ON", value: "ON", want: true, + }, + { + name: "1", value: "1", want: true, + }, + { + name: "empty", value: "", want: false, + }, + { + name: "other", value: "other", want: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equal(t, tt.want, Enabled(tt.value)) + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/entities/cyclemanager/cyclecallbackctrl.go 
b/platform/dbops/binaries/weaviate-src/entities/cyclemanager/cyclecallbackctrl.go new file mode 100644 index 0000000000000000000000000000000000000000..e135fb3f0b846767232e706a7ca2874eb19fbc9b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/cyclemanager/cyclecallbackctrl.go @@ -0,0 +1,211 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package cyclemanager + +import ( + "context" + "sync" + + "github.com/sirupsen/logrus" + "github.com/weaviate/weaviate/entities/concurrency" + enterrors "github.com/weaviate/weaviate/entities/errors" + + "github.com/weaviate/weaviate/entities/errorcompounder" +) + +// Used to control of registered in CycleCallbacks container callback +// Allows deactivating and activating registered callback or unregistering it +type CycleCallbackCtrl interface { + IsActive() bool + Activate() error + Deactivate(ctx context.Context) error + Unregister(ctx context.Context) error +} + +type cycleCallbackCtrl struct { + callbackId uint32 + callbackCustomId string + + isActive func(callbackId uint32, callbackCustomId string) bool + activate func(callbackId uint32, callbackCustomId string) error + deactivate func(ctx context.Context, callbackId uint32, callbackCustomId string) error + unregister func(ctx context.Context, callbackId uint32, callbackCustomId string) error +} + +func (c *cycleCallbackCtrl) IsActive() bool { + return c.isActive(c.callbackId, c.callbackCustomId) +} + +func (c *cycleCallbackCtrl) Activate() error { + return c.activate(c.callbackId, c.callbackCustomId) +} + +func (c *cycleCallbackCtrl) Deactivate(ctx context.Context) error { + return c.deactivate(ctx, c.callbackId, c.callbackCustomId) +} + +func (c *cycleCallbackCtrl) Unregister(ctx context.Context) error { + return 
c.unregister(ctx, c.callbackId, c.callbackCustomId) +} + +type cycleCombinedCallbackCtrl struct { + routinesLimit int + ctrls []CycleCallbackCtrl + logger logrus.FieldLogger +} + +// Creates combined controller to manage all provided controllers at once as it was single instance. +// Methods (activate, deactivate, unregister) calls nested controllers' methods in parallel by number of +// goroutines given as argument. If < 1 value given, NumCPU is used. +func NewCombinedCallbackCtrl(routinesLimit int, logger logrus.FieldLogger, ctrls ...CycleCallbackCtrl) CycleCallbackCtrl { + routinesLimit = concurrency.NoMoreThanNUMCPU(routinesLimit) + return &cycleCombinedCallbackCtrl{routinesLimit: routinesLimit, logger: logger, ctrls: ctrls} +} + +func (c *cycleCombinedCallbackCtrl) IsActive() bool { + for _, ctrl := range c.ctrls { + if !ctrl.IsActive() { + return false + } + } + return true +} + +func (c *cycleCombinedCallbackCtrl) Activate() error { + return c.combineErrors(c.activate()...) +} + +func (c *cycleCombinedCallbackCtrl) activate() []error { + eg := enterrors.NewErrorGroupWrapper(c.logger) + eg.SetLimit(c.routinesLimit) + lock := new(sync.Mutex) + + errs := make([]error, 0, len(c.ctrls)) + for _, ctrl := range c.ctrls { + ctrl := ctrl + eg.Go(func() error { + if err := ctrl.Activate(); err != nil { + c.locked(lock, func() { errs = append(errs, err) }) + return err + } + return nil + }) + } + + eg.Wait() + return errs +} + +func (c *cycleCombinedCallbackCtrl) Deactivate(ctx context.Context) error { + errs, deactivated := c.deactivate(ctx) + if len(errs) == 0 { + return nil + } + + // try activating back deactivated + eg := enterrors.NewErrorGroupWrapper(c.logger) + eg.SetLimit(c.routinesLimit) + for _, id := range deactivated { + id := id + eg.Go(func() error { + return c.ctrls[id].Activate() + }) + } + + eg.Wait() + return c.combineErrors(errs...) 
+} + +func (c *cycleCombinedCallbackCtrl) deactivate(ctx context.Context) ([]error, []int) { + eg := enterrors.NewErrorGroupWrapper(c.logger) + eg.SetLimit(c.routinesLimit) + lock := new(sync.Mutex) + + errs := make([]error, 0, len(c.ctrls)) + deactivated := make([]int, 0, len(c.ctrls)) + for id, ctrl := range c.ctrls { + id, ctrl := id, ctrl + eg.Go(func() error { + if err := ctrl.Deactivate(ctx); err != nil { + c.locked(lock, func() { errs = append(errs, err) }) + return err + } + c.locked(lock, func() { deactivated = append(deactivated, id) }) + return nil + }, id, ctrl) + } + + eg.Wait() + return errs, deactivated +} + +func (c *cycleCombinedCallbackCtrl) Unregister(ctx context.Context) error { + return c.combineErrors(c.unregister(ctx)...) +} + +func (c *cycleCombinedCallbackCtrl) unregister(ctx context.Context) []error { + eg := enterrors.NewErrorGroupWrapper(c.logger) + eg.SetLimit(c.routinesLimit) + lock := new(sync.Mutex) + + errs := make([]error, 0, len(c.ctrls)) + for _, ctrl := range c.ctrls { + ctrl := ctrl + eg.Go(func() error { + if err := ctrl.Unregister(ctx); err != nil { + c.locked(lock, func() { errs = append(errs, err) }) + return err + } + return nil + }) + } + + eg.Wait() + return errs +} + +func (c *cycleCombinedCallbackCtrl) locked(lock *sync.Mutex, mutate func()) { + lock.Lock() + defer lock.Unlock() + + mutate() +} + +func (c *cycleCombinedCallbackCtrl) combineErrors(errors ...error) error { + ec := errorcompounder.New() + for _, err := range errors { + ec.Add(err) + } + return ec.ToError() +} + +type cycleCallbackCtrlNoop struct{} + +func NewCallbackCtrlNoop() CycleCallbackCtrl { + return &cycleCallbackCtrlNoop{} +} + +func (c *cycleCallbackCtrlNoop) IsActive() bool { + return false +} + +func (c *cycleCallbackCtrlNoop) Activate() error { + return nil +} + +func (c *cycleCallbackCtrlNoop) Deactivate(ctx context.Context) error { + return ctx.Err() +} + +func (c *cycleCallbackCtrlNoop) Unregister(ctx context.Context) error { + return 
ctx.Err() +} diff --git a/platform/dbops/binaries/weaviate-src/entities/cyclemanager/cyclecallbackctrl_test.go b/platform/dbops/binaries/weaviate-src/entities/cyclemanager/cyclecallbackctrl_test.go new file mode 100644 index 0000000000000000000000000000000000000000..df24eb8a6aa3745282c2be9a6621b45008a16f2c --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/cyclemanager/cyclecallbackctrl_test.go @@ -0,0 +1,254 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package cyclemanager + +import ( + "context" + "testing" + "time" + + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestCycleCombineCallbackCtrl_Unregister(t *testing.T) { + logger, _ := test.NewNullLogger() + ctx := context.Background() + + t.Run("unregisters both", func(t *testing.T) { + callback1 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(100 * time.Millisecond) + return true + } + callback2 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(100 * time.Millisecond) + return true + } + + callbacks := NewCallbackGroup("id", logger, 2) + ctrl1 := callbacks.Register("c1", callback1) + ctrl2 := callbacks.Register("c2", callback2) + combinedCtrl := NewCombinedCallbackCtrl(2, logger, ctrl1, ctrl2) + + cycle := NewManager(NewFixedTicker(100*time.Millisecond), callbacks.CycleCallback, logger) + cycle.Start() + defer cycle.StopAndWait(ctx) + + err := combinedCtrl.Unregister(ctx) + require.Nil(t, err) + + assert.False(t, combinedCtrl.IsActive()) + assert.False(t, ctrl1.IsActive()) + assert.False(t, ctrl2.IsActive()) + }) + + t.Run("does not unregister on expired context", func(t *testing.T) { + expiredCtx, cancel := context.WithDeadline(ctx, time.Now()) + 
defer cancel() + + callback1 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(100 * time.Millisecond) + return true + } + callback2 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(100 * time.Millisecond) + return true + } + + callbacks := NewCallbackGroup("id", logger, 2) + ctrl1 := callbacks.Register("c1", callback1) + ctrl2 := callbacks.Register("c2", callback2) + combinedCtrl := NewCombinedCallbackCtrl(2, logger, ctrl1, ctrl2) + + cycle := NewManager(NewFixedTicker(100*time.Millisecond), callbacks.CycleCallback, logger) + cycle.Start() + defer cycle.StopAndWait(ctx) + + err := combinedCtrl.Unregister(expiredCtx) + require.NotNil(t, err) + assert.Contains(t, err.Error(), "unregistering callback 'c1' of 'id' failed: context deadline exceeded") + assert.Contains(t, err.Error(), "unregistering callback 'c2' of 'id' failed: context deadline exceeded") + + assert.True(t, combinedCtrl.IsActive()) + assert.True(t, ctrl1.IsActive()) + assert.True(t, ctrl2.IsActive()) + }) + + t.Run("fails unregistering one", func(t *testing.T) { + callbackShort := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(100 * time.Millisecond) + return true + } + callbackLong := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(500 * time.Millisecond) + return true + } + + callbacks := NewCallbackGroup("id", logger, 2) + ctrlShort := callbacks.Register("short", callbackShort) + ctrlLong := callbacks.Register("long", callbackLong) + combinedCtrl := NewCombinedCallbackCtrl(2, logger, ctrlShort, ctrlLong) + + cycle := NewManager(NewFixedTicker(100*time.Millisecond), callbacks.CycleCallback, logger) + cycle.Start() + defer cycle.StopAndWait(ctx) + + // wait long enough to call Unregister while 2nd callback is still processed. 
+ // set timeout short enough to expire before 2nd callback finishes + time.Sleep(300 * time.Millisecond) + expirableCtx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) + defer cancel() + + err := combinedCtrl.Unregister(expirableCtx) + require.NotNil(t, err) + assert.EqualError(t, err, "unregistering callback 'long' of 'id' failed: context deadline exceeded") + + assert.False(t, combinedCtrl.IsActive()) + assert.False(t, ctrlShort.IsActive()) + assert.True(t, ctrlLong.IsActive()) + }) +} + +func TestCycleCombineCallbackCtrl_Deactivate(t *testing.T) { + logger, _ := test.NewNullLogger() + ctx := context.Background() + + t.Run("deactivates both", func(t *testing.T) { + callback1 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(100 * time.Millisecond) + return true + } + callback2 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(100 * time.Millisecond) + return true + } + + callbacks := NewCallbackGroup("id", logger, 2) + ctrl1 := callbacks.Register("c1", callback1) + ctrl2 := callbacks.Register("c2", callback2) + combinedCtrl := NewCombinedCallbackCtrl(2, logger, ctrl1, ctrl2) + + cycle := NewManager(NewFixedTicker(100*time.Millisecond), callbacks.CycleCallback, logger) + cycle.Start() + defer cycle.StopAndWait(ctx) + + err := combinedCtrl.Deactivate(ctx) + require.Nil(t, err) + + assert.False(t, combinedCtrl.IsActive()) + assert.False(t, ctrl1.IsActive()) + assert.False(t, ctrl2.IsActive()) + }) + + t.Run("does not deactivate on expired context", func(t *testing.T) { + expiredCtx, cancel := context.WithDeadline(ctx, time.Now()) + defer cancel() + + callback1 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(100 * time.Millisecond) + return true + } + callback2 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(100 * time.Millisecond) + return true + } + + callbacks := NewCallbackGroup("id", logger, 2) + ctrl1 := callbacks.Register("c1", callback1) + ctrl2 := callbacks.Register("c2", callback2) + combinedCtrl := 
NewCombinedCallbackCtrl(2, logger, ctrl1, ctrl2) + + cycle := NewManager(NewFixedTicker(100*time.Millisecond), callbacks.CycleCallback, logger) + cycle.Start() + defer cycle.StopAndWait(ctx) + + err := combinedCtrl.Deactivate(expiredCtx) + require.NotNil(t, err) + assert.Contains(t, err.Error(), "deactivating callback 'c1' of 'id' failed: context deadline exceeded") + assert.Contains(t, err.Error(), "deactivating callback 'c1' of 'id' failed: context deadline exceeded") + + assert.True(t, combinedCtrl.IsActive()) + assert.True(t, ctrl1.IsActive()) + assert.True(t, ctrl2.IsActive()) + }) + + t.Run("fails deactivating one, activates other again", func(t *testing.T) { + callbackShort := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(100 * time.Millisecond) + return true + } + callbackLong := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(500 * time.Millisecond) + return true + } + + callbacks := NewCallbackGroup("id", logger, 2) + ctrlShort := callbacks.Register("short", callbackShort) + ctrlLong := callbacks.Register("long", callbackLong) + combinedCtrl := NewCombinedCallbackCtrl(2, logger, ctrlShort, ctrlLong) + + cycle := NewManager(NewFixedTicker(100*time.Millisecond), callbacks.CycleCallback, logger) + cycle.Start() + defer cycle.StopAndWait(ctx) + + // wait long enough to call Deactivate while 2nd callback is still processed. 
+ // set timeout short enough to expire before 2nd callback finishes + time.Sleep(300 * time.Millisecond) + expirableCtx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) + defer cancel() + + err := combinedCtrl.Deactivate(expirableCtx) + require.NotNil(t, err) + assert.EqualError(t, err, "deactivating callback 'long' of 'id' failed: context deadline exceeded") + + assert.True(t, combinedCtrl.IsActive()) + assert.True(t, ctrlShort.IsActive()) + assert.True(t, ctrlLong.IsActive()) + }) +} + +func TestCycleCombineCallbackCtrl_Activate(t *testing.T) { + logger, _ := test.NewNullLogger() + ctx := context.Background() + + t.Run("activates both", func(t *testing.T) { + callback1 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(100 * time.Millisecond) + return true + } + callback2 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(100 * time.Millisecond) + return true + } + + callbacks := NewCallbackGroup("id", logger, 2) + ctrl1 := callbacks.Register("c1", callback1, AsInactive()) + ctrl2 := callbacks.Register("c2", callback2, AsInactive()) + combinedCtrl := NewCombinedCallbackCtrl(2, logger, ctrl1, ctrl2) + + cycle := NewManager(NewFixedTicker(100*time.Millisecond), callbacks.CycleCallback, logger) + cycle.Start() + defer cycle.StopAndWait(ctx) + + assert.False(t, combinedCtrl.IsActive()) + assert.False(t, ctrl1.IsActive()) + assert.False(t, ctrl2.IsActive()) + + err := combinedCtrl.Activate() + require.Nil(t, err) + + assert.True(t, combinedCtrl.IsActive()) + assert.True(t, ctrl1.IsActive()) + assert.True(t, ctrl2.IsActive()) + }) +} diff --git a/platform/dbops/binaries/weaviate-src/entities/cyclemanager/cyclecallbackgroup.go b/platform/dbops/binaries/weaviate-src/entities/cyclemanager/cyclecallbackgroup.go new file mode 100644 index 0000000000000000000000000000000000000000..2c5715cc45a2add7c6688da26af2d5c8d723dc6a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/cyclemanager/cyclecallbackgroup.go @@ -0,0 +1,452 @@ +// _ _ 
+// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package cyclemanager + +import ( + "context" + "fmt" + "runtime" + "strings" + "sync" + "time" + + "github.com/pkg/errors" + enterrors "github.com/weaviate/weaviate/entities/errors" + entsentry "github.com/weaviate/weaviate/entities/sentry" + + "github.com/sirupsen/logrus" +) + +// Container for multiple callbacks exposing CycleCallback method acting as single callback. +// Can be provided to CycleManager. +type CycleCallbackGroup interface { + // Adds CycleCallback method to container + Register(id string, cycleCallback CycleCallback, options ...RegisterOption) CycleCallbackCtrl + // Method of CycleCallback acting as single callback for all callbacks added to the container + CycleCallback(shouldAbort ShouldAbortCallback) bool +} + +type cycleCallbackGroup struct { + sync.Mutex + + logger logrus.FieldLogger + customId string + routinesLimit int + nextId uint32 + callbackIds []uint32 + callbacks map[uint32]*cycleCallbackMeta +} + +func NewCallbackGroup(id string, logger logrus.FieldLogger, routinesLimit int) CycleCallbackGroup { + return &cycleCallbackGroup{ + logger: logger, + customId: id, + routinesLimit: routinesLimit, + nextId: 0, + callbackIds: []uint32{}, + callbacks: map[uint32]*cycleCallbackMeta{}, + } +} + +func (c *cycleCallbackGroup) Register(id string, cycleCallback CycleCallback, options ...RegisterOption) CycleCallbackCtrl { + c.Lock() + defer c.Unlock() + + meta := &cycleCallbackMeta{ + customId: id, + cycleCallback: cycleCallback, + active: true, + runningCtx: nil, + started: time.Now(), + intervals: nil, + } + for _, option := range options { + if option != nil { + option(meta) + } + } + + callbackId := c.nextId + c.callbackIds = append(c.callbackIds, callbackId) + 
c.callbacks[callbackId] = meta + c.nextId++ + + return &cycleCallbackCtrl{ + callbackId: callbackId, + callbackCustomId: id, + + isActive: c.isActive, + activate: c.activate, + deactivate: c.deactivate, + unregister: c.unregister, + } +} + +func (c *cycleCallbackGroup) CycleCallback(shouldAbort ShouldAbortCallback) bool { + if c.routinesLimit <= 1 { + return c.cycleCallbackSequential(shouldAbort) + } + return c.cycleCallbackParallel(shouldAbort, c.routinesLimit) +} + +func (c *cycleCallbackGroup) cycleCallbackSequential(shouldAbort ShouldAbortCallback) bool { + anyExecuted := false + i := 0 + for { + if shouldAbort() { + break + } + + c.Lock() + // no more callbacks left, exit the loop + if i >= len(c.callbackIds) { + c.Unlock() + break + } + + callbackId := c.callbackIds[i] + meta, ok := c.callbacks[callbackId] + // callback deleted in the meantime, remove its id + // and proceed to the next one (no "i" increment required) + if !ok { + c.callbackIds = append(c.callbackIds[:i], c.callbackIds[i+1:]...) 
+ c.Unlock() + continue + } + i++ + // callback deactivated, proceed to the next one + if !meta.active { + c.Unlock() + continue + } + now := time.Now() + // not enough time passed since previous execution + if meta.intervals != nil && now.Sub(meta.started) < meta.intervals.Get() { + c.Unlock() + continue + } + // callback active, mark as running + runningCtx, cancel := context.WithCancel(context.Background()) + meta.runningCtx = runningCtx + meta.started = now + c.Unlock() + + func() { + // cancel called in recover, regardless of panic occurred or not + defer c.recover(meta.customId, cancel) + executed := meta.cycleCallback(func() bool { + if shouldAbort() { + return true + } + + c.Lock() + defer c.Unlock() + + return meta.shouldAbort + }) + anyExecuted = executed || anyExecuted + + if meta.intervals != nil { + if executed { + meta.intervals.Reset() + } else { + meta.intervals.Advance() + } + } + }() + } + + return anyExecuted +} + +func (c *cycleCallbackGroup) cycleCallbackParallel(shouldAbort ShouldAbortCallback, routinesLimit int) bool { + anyExecuted := false + ch := make(chan uint32) + lock := new(sync.Mutex) + wg := new(sync.WaitGroup) + wg.Add(routinesLimit) + + i := 0 + for r := 0; r < routinesLimit; r++ { + f := func() { + for callbackId := range ch { + if shouldAbort() { + // keep reading from channel until it is closed + continue + } + + c.Lock() + meta, ok := c.callbacks[callbackId] + // callback missing or deactivated, proceed to the next one + if !ok || !meta.active { + c.Unlock() + continue + } + now := time.Now() + // not enough time passed since previous execution + if meta.intervals != nil && now.Sub(meta.started) < meta.intervals.Get() { + c.Unlock() + continue + } + // callback active, mark as running + runningCtx, cancel := context.WithCancel(context.Background()) + meta.runningCtx = runningCtx + meta.started = now + c.Unlock() + + func() { + // cancel called in recover, regardless of panic occurred or not + defer c.recover(meta.customId, 
cancel) + executed := meta.cycleCallback(func() bool { + if shouldAbort() { + return true + } + + c.Lock() + defer c.Unlock() + + return meta.shouldAbort + }) + + if executed { + lock.Lock() + anyExecuted = true + lock.Unlock() + } + if meta.intervals != nil { + if executed { + meta.intervals.Reset() + } else { + meta.intervals.Advance() + } + } + }() + } + wg.Done() + } + enterrors.GoWrapper(f, c.logger) + } + + for { + if shouldAbort() { + close(ch) + break + } + + c.Lock() + // no more callbacks left, exit the loop + if i >= len(c.callbackIds) { + c.Unlock() + close(ch) + break + } + + callbackId := c.callbackIds[i] + _, ok := c.callbacks[callbackId] + // callback deleted in the meantime, remove its id + // and proceed to the next one (no "i" increment required) + if !ok { + c.callbackIds = append(c.callbackIds[:i], c.callbackIds[i+1:]...) + c.Unlock() + continue + } + c.Unlock() + ch <- callbackId + i++ + } + + wg.Wait() + return anyExecuted +} + +func (c *cycleCallbackGroup) recover(callbackCustomId string, cancel context.CancelFunc) { + if r := recover(); r != nil { + entsentry.Recover(r) + c.logger.WithFields(logrus.Fields{ + "action": "cyclemanager", + "callback_id": callbackCustomId, + "callbacks_id": c.customId, + "trace": trace(), + }).Errorf("callback panic: %v", r) + } + cancel() +} + +func (c *cycleCallbackGroup) mutateCallback(ctx context.Context, callbackId uint32, + onMetaNotFound func(callbackId uint32) error, + onMetaFound func(callbackId uint32, meta *cycleCallbackMeta, running bool) error, +) error { + if ctx.Err() != nil { + return ctx.Err() + } + + for { + // mutate callback in collection only if not running (not yet started of finished) + c.Lock() + meta, ok := c.callbacks[callbackId] + if !ok { + err := onMetaNotFound(callbackId) + c.Unlock() + return err + } + runningCtx := meta.runningCtx + running := runningCtx != nil && runningCtx.Err() == nil + + if err := onMetaFound(callbackId, meta, running); err != nil { + c.Unlock() + return err + 
} + if !running { + c.Unlock() + return nil + } + c.Unlock() + + // wait for callback to finish + select { + case <-runningCtx.Done(): + // get back to the beginning of the loop to make sure state.runningCtx + // was not changed. If not, loop will finish on runningCtx.Err() != nil check + continue + case <-ctx.Done(): + // in case both contexts are ready, but input ctx was selected + // check again running ctx as priority one + if runningCtx.Err() != nil { + // get back to the beginning of the loop to make sure state.runningCtx + // was not changed. If not, loop will finish on runningCtx.Err() != nil check + continue + } + // input ctx expired + return ctx.Err() + } + } +} + +func (c *cycleCallbackGroup) unregister(ctx context.Context, callbackId uint32, callbackCustomId string) error { + err := c.mutateCallback(ctx, callbackId, + func(callbackId uint32) error { + return nil + }, + func(callbackId uint32, meta *cycleCallbackMeta, running bool) error { + meta.shouldAbort = true + if !running { + meta.active = false + delete(c.callbacks, callbackId) + } + return nil + }, + ) + return errorUnregisterCallback(callbackCustomId, c.customId, err) +} + +func (c *cycleCallbackGroup) deactivate(ctx context.Context, callbackId uint32, callbackCustomId string) error { + err := c.mutateCallback(ctx, callbackId, + func(callbackId uint32) error { + return ErrorCallbackNotFound + }, + func(callbackId uint32, meta *cycleCallbackMeta, running bool) error { + meta.shouldAbort = true + if !running { + meta.active = false + } + return nil + }, + ) + return errorDeactivateCallback(callbackCustomId, c.customId, err) +} + +func (c *cycleCallbackGroup) activate(callbackId uint32, callbackCustomId string) error { + c.Lock() + defer c.Unlock() + + meta, ok := c.callbacks[callbackId] + if !ok { + return errorActivateCallback(callbackCustomId, c.customId, ErrorCallbackNotFound) + } + + meta.shouldAbort = false + meta.active = true + return nil +} + +func (c *cycleCallbackGroup) 
isActive(callbackId uint32, callbackCustomId string) bool { + c.Lock() + defer c.Unlock() + + if meta, ok := c.callbacks[callbackId]; ok { + return meta.active + } + return false +} + +type cycleCallbackMeta struct { + customId string + cycleCallback CycleCallback + active bool + // indicates whether callback is already running - context active + // or not running (already finished) - context expired + // or not running (not yet started) - context nil + runningCtx context.Context + started time.Time + intervals CycleIntervals + // true if deactivate or unregister were requested to abort callback when running + shouldAbort bool +} + +type cycleCallbackGroupNoop struct{} + +func NewCallbackGroupNoop() CycleCallbackGroup { + return &cycleCallbackGroupNoop{} +} + +func (c *cycleCallbackGroupNoop) Register(id string, cycleCallback CycleCallback, options ...RegisterOption) CycleCallbackCtrl { + return NewCallbackCtrlNoop() +} + +func (c *cycleCallbackGroupNoop) CycleCallback(shouldAbort ShouldAbortCallback) bool { + return false +} + +type RegisterOption func(meta *cycleCallbackMeta) + +func AsInactive() RegisterOption { + return func(meta *cycleCallbackMeta) { + meta.active = false + } +} + +func WithIntervals(intervals CycleIntervals) RegisterOption { + if intervals == nil { + return nil + } + return func(meta *cycleCallbackMeta) { + meta.intervals = intervals + // adjusts start time to allow for immediate callback execution without + // having to wait for interval duration to pass + meta.started = time.Now().Add(-intervals.Get()) + } +} + +func trace() string { + var sb strings.Builder + pcs := make([]uintptr, 10) + n := runtime.Callers(3, pcs) // skip self, callers and recover + pcs = pcs[:n] + for i := range pcs { + f := errors.Frame(pcs[i]) + sb.WriteString(fmt.Sprintf("%n@%s:%d", f, f, f)) + if i < n-1 { + sb.WriteString(";") + } + } + return sb.String() +} diff --git a/platform/dbops/binaries/weaviate-src/entities/cyclemanager/cyclecallbackgroup_test.go 
b/platform/dbops/binaries/weaviate-src/entities/cyclemanager/cyclecallbackgroup_test.go new file mode 100644 index 0000000000000000000000000000000000000000..a8c56311a64baadff29e676d352169fd5a7894c8 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/cyclemanager/cyclecallbackgroup_test.go @@ -0,0 +1,1984 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package cyclemanager + +import ( + "context" + "sync/atomic" + "testing" + "time" + + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestCycleCallback_Parallel(t *testing.T) { + logger, _ := test.NewNullLogger() + shouldNotAbort := func() bool { return false } + + t.Run("no callbacks", func(t *testing.T) { + var executed bool + + callbacks := NewCallbackGroup("id", logger, 2) + + executed = callbacks.CycleCallback(shouldNotAbort) + + assert.False(t, executed) + }) + + t.Run("2 executable callbacks", func(t *testing.T) { + executedCounter1 := 0 + callback1 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(50 * time.Millisecond) + executedCounter1++ + return true + } + executedCounter2 := 0 + callback2 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(25 * time.Millisecond) + executedCounter2++ + return true + } + var executed bool + var d time.Duration + + callbacks := NewCallbackGroup("id", logger, 2) + callbacks.Register("c1", callback1) + callbacks.Register("c2", callback2) + + start := time.Now() + executed = callbacks.CycleCallback(shouldNotAbort) + d = time.Since(start) + + assert.True(t, executed) + assert.Equal(t, 1, executedCounter1) + assert.Equal(t, 1, executedCounter2) + assert.GreaterOrEqual(t, d, 50*time.Millisecond) + }) + + t.Run("2 non-executable 
callbacks", func(t *testing.T) { + executedCounter1 := 0 + callback1 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(10 * time.Millisecond) + executedCounter1++ + return false + } + executedCounter2 := 0 + callback2 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(10 * time.Millisecond) + executedCounter2++ + return false + } + var executed bool + var d time.Duration + + callbacks := NewCallbackGroup("id", logger, 2) + callbacks.Register("c1", callback1) + callbacks.Register("c2", callback2) + + start := time.Now() + executed = callbacks.CycleCallback(shouldNotAbort) + d = time.Since(start) + + assert.False(t, executed) + assert.Equal(t, 1, executedCounter1) + assert.Equal(t, 1, executedCounter2) + assert.GreaterOrEqual(t, d, 10*time.Millisecond) + }) + + t.Run("3 executable callbacks, not all executed due to should abort", func(t *testing.T) { + executedCounter1 := 0 + callback1 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(25 * time.Millisecond) + executedCounter1++ + return true + } + executedCounter2 := 0 + callback2 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(25 * time.Millisecond) + executedCounter2++ + return true + } + executedCounter3 := 0 + callback3 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(25 * time.Millisecond) + executedCounter3++ + return true + } + // due to async calls of shouldAbort callback by main for loop + // and goroutines reading from shared channel it is hard to + // establish order of calls. + // with 3 callbacks and shouldAbort returning true on 6th call + // 1 or 2 callbacks should be executed, but not all 3. 
+ shouldAbortCounter := uint32(0) + shouldAbort := func() bool { + return atomic.AddUint32(&shouldAbortCounter, 1) > 5 + } + var executed bool + var d time.Duration + + callbacks := NewCallbackGroup("id", logger, 2) + callbacks.Register("c1", callback1) + callbacks.Register("c2", callback2) + callbacks.Register("c3", callback3) + + start := time.Now() + executed = callbacks.CycleCallback(shouldAbort) + d = time.Since(start) + + assert.True(t, executed) + totalExecuted := executedCounter1 + executedCounter2 + executedCounter3 + assert.Greater(t, totalExecuted, 0) + assert.Less(t, totalExecuted, 3) + assert.GreaterOrEqual(t, d, 25*time.Millisecond) + }) + + t.Run("register new while executing", func(t *testing.T) { + executedCounter1 := 0 + callback1 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(50 * time.Millisecond) + executedCounter1++ + return true + } + executedCounter2 := 0 + callback2 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(50 * time.Millisecond) + executedCounter2++ + return true + } + executedCounter3 := 0 + callback3 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(50 * time.Millisecond) + executedCounter3++ + return true + } + executedCounter4 := 0 + callback4 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(50 * time.Millisecond) + executedCounter4++ + return true + } + chStarted := make(chan struct{}, 1) + chFinished := make(chan struct{}, 1) + var executed bool + var d time.Duration + + callbacks := NewCallbackGroup("id", logger, 2) + callbacks.Register("c1", callback1) + callbacks.Register("c2", callback2) + callbacks.Register("c3", callback3) + + // register 4th callback while other are executed, + // + // while 1st and 2nd are being processed (50ms), + // 3rd is waiting for available routine (without 3rd callback loop would be finished) + // 4th is registered (25ms) to be called next along with 3rd + go func() { + chStarted <- struct{}{} + start := time.Now() + executed = 
callbacks.CycleCallback(shouldNotAbort) + d = time.Since(start) + chFinished <- struct{}{} + }() + <-chStarted + time.Sleep(25 * time.Millisecond) + callbacks.Register("c4", callback4) + <-chFinished + + assert.True(t, executed) + assert.Equal(t, 1, executedCounter1) + assert.Equal(t, 1, executedCounter2) + assert.Equal(t, 1, executedCounter3) + assert.Equal(t, 1, executedCounter4) + assert.GreaterOrEqual(t, d, 100*time.Millisecond) + }) + + t.Run("run with intervals", func(T *testing.T) { + ticker := NewFixedTicker(10 * time.Millisecond) + intervals2 := NewSeriesIntervals([]time.Duration{ + 10 * time.Millisecond, 30 * time.Millisecond, 50 * time.Millisecond, + }) + intervals3 := NewFixedIntervals(60 * time.Millisecond) + now := time.Now() + + executionTimes1 := []time.Duration{} + callback1 := func(shouldAbort ShouldAbortCallback) bool { + executionTimes1 = append(executionTimes1, time.Since(now)) + return true + } + executionCounter2 := 0 + executionTimes2 := []time.Duration{} + callback2 := func(shouldAbort ShouldAbortCallback) bool { + executionCounter2++ + executionTimes2 = append(executionTimes2, time.Since(now)) + // reports executed every 3 calls, should result in 10, 30, 50, 50, 10, 30, 50, 50, ... intervals + return executionCounter2%4 == 0 + } + executionTimes3 := []time.Duration{} + callback3 := func(shouldAbort ShouldAbortCallback) bool { + executionTimes3 = append(executionTimes3, time.Since(now)) + return true + } + + callbacks := NewCallbackGroup("id", logger, 2) + // should be called on every tick, with 10 intervals + callbacks.Register("c1", callback1) + // should be called with 10, 30, 50, 50, 10, 30, 50, 50, ... intervals + callbacks.Register("c2", callback2, WithIntervals(intervals2)) + // should be called with 60, 60, ... 
intervals + callbacks.Register("c3", callback3, WithIntervals(intervals3)) + + cm := NewManager(ticker, callbacks.CycleCallback, logger) + cm.Start() + time.Sleep(400 * time.Millisecond) + cm.StopAndWait(context.Background()) + + // within 400 ms c1 should be called at least 30x + require.GreaterOrEqual(t, len(executionTimes1), 30) + // 1st call on 1st tick after 10ms + sumDuration := time.Duration(10) + for i := 0; i < 30; i++ { + assert.GreaterOrEqual(t, executionTimes1[i], sumDuration) + sumDuration += 10 * time.Millisecond + } + + // within 400 ms c2 should be called at least 8x + require.GreaterOrEqual(t, len(executionTimes2), 8) + // 1st call on 1st tick after 10ms + sumDuration = time.Duration(0) + for i := 0; i < 8; i++ { + assert.GreaterOrEqual(t, executionTimes2[i], sumDuration) + switch (i + 1) % 4 { + case 0: + sumDuration += 10 * time.Millisecond + case 1: + sumDuration += 30 * time.Millisecond + case 2, 3: + sumDuration += 50 * time.Millisecond + } + } + + // within 400 ms c3 should be called at least 6x + require.GreaterOrEqual(t, len(executionTimes3), 6) + // 1st call on 1st tick after 10ms + sumDuration = time.Duration(0) + for i := 0; i < 6; i++ { + assert.GreaterOrEqual(t, executionTimes3[i], sumDuration) + sumDuration += 60 * time.Millisecond + } + }) +} + +func TestCycleCallback_Parallel_Unregister(t *testing.T) { + ctx := context.Background() + logger, _ := test.NewNullLogger() + shouldNotAbort := func() bool { return false } + + t.Run("1 executable callback, 1 unregistered", func(t *testing.T) { + executedCounter := 0 + callback := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(50 * time.Millisecond) + executedCounter++ + return true + } + var executed bool + var d time.Duration + + callbacks := NewCallbackGroup("id", logger, 2) + ctrl := callbacks.Register("c1", callback) + require.Nil(t, ctrl.Unregister(ctx)) + + start := time.Now() + executed = callbacks.CycleCallback(shouldNotAbort) + d = time.Since(start) + + assert.False(t, 
executed) + assert.Equal(t, 0, executedCounter) + assert.GreaterOrEqual(t, d, 0*time.Millisecond) + }) + + t.Run("2 executable callbacks, 2 unregistered", func(t *testing.T) { + executedCounter1 := 0 + callback1 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(50 * time.Millisecond) + executedCounter1++ + return true + } + executedCounter2 := 0 + callback2 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(25 * time.Millisecond) + executedCounter2++ + return true + } + var executed bool + var d time.Duration + + callbacks := NewCallbackGroup("id", logger, 2) + ctrl1 := callbacks.Register("c1", callback1) + ctrl2 := callbacks.Register("c2", callback2) + require.Nil(t, ctrl1.Unregister(ctx)) + require.Nil(t, ctrl2.Unregister(ctx)) + + start := time.Now() + executed = callbacks.CycleCallback(shouldNotAbort) + d = time.Since(start) + + assert.False(t, executed) + assert.Equal(t, 0, executedCounter1) + assert.Equal(t, 0, executedCounter2) + assert.GreaterOrEqual(t, d, 0*time.Millisecond) + }) + + t.Run("2 executable callbacks, 1 unregistered", func(t *testing.T) { + executedCounter1 := 0 + callback1 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(50 * time.Millisecond) + executedCounter1++ + return true + } + executedCounter2 := 0 + callback2 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(25 * time.Millisecond) + executedCounter2++ + return true + } + var executed bool + var d time.Duration + + callbacks := NewCallbackGroup("id", logger, 2) + ctrl1 := callbacks.Register("c1", callback1) + callbacks.Register("c2", callback2) + require.Nil(t, ctrl1.Unregister(ctx)) + + start := time.Now() + executed = callbacks.CycleCallback(shouldNotAbort) + d = time.Since(start) + + assert.True(t, executed) + assert.Equal(t, 0, executedCounter1) + assert.Equal(t, 1, executedCounter2) + assert.GreaterOrEqual(t, d, 25*time.Millisecond) + }) + + t.Run("4 executable callbacks, all unregistered at different time", func(t *testing.T) { + 
executedCounter1 := 0 + callback1 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(25 * time.Millisecond) + executedCounter1++ + return true + } + executedCounter2 := 0 + callback2 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(25 * time.Millisecond) + executedCounter2++ + return true + } + executedCounter3 := 0 + callback3 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(25 * time.Millisecond) + executedCounter3++ + return true + } + executedCounter4 := 0 + callback4 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(25 * time.Millisecond) + executedCounter4++ + return true + } + var executed1 bool + var executed2 bool + var executed3 bool + var executed4 bool + var d1 time.Duration + var d2 time.Duration + var d3 time.Duration + var d4 time.Duration + + callbacks := NewCallbackGroup("id", logger, 2) + ctrl1 := callbacks.Register("c1", callback1) + ctrl2 := callbacks.Register("c2", callback2) + ctrl3 := callbacks.Register("c3", callback3) + ctrl4 := callbacks.Register("c4", callback4) + require.Nil(t, ctrl3.Unregister(ctx)) + + start := time.Now() + executed1 = callbacks.CycleCallback(shouldNotAbort) + d1 = time.Since(start) + + require.Nil(t, ctrl1.Unregister(ctx)) + + start = time.Now() + executed2 = callbacks.CycleCallback(shouldNotAbort) + d2 = time.Since(start) + + require.Nil(t, ctrl4.Unregister(ctx)) + + start = time.Now() + executed3 = callbacks.CycleCallback(shouldNotAbort) + d3 = time.Since(start) + + require.Nil(t, ctrl2.Unregister(ctx)) + + start = time.Now() + executed4 = callbacks.CycleCallback(shouldNotAbort) + d4 = time.Since(start) + + assert.True(t, executed1) + assert.True(t, executed2) + assert.True(t, executed3) + assert.False(t, executed4) + assert.Equal(t, 1, executedCounter1) + assert.Equal(t, 3, executedCounter2) + assert.Equal(t, 0, executedCounter3) + assert.Equal(t, 2, executedCounter4) + assert.GreaterOrEqual(t, d1, 50*time.Millisecond) + assert.GreaterOrEqual(t, d2, 25*time.Millisecond) 
+ assert.GreaterOrEqual(t, d3, 25*time.Millisecond) + assert.GreaterOrEqual(t, d4, 0*time.Millisecond) + }) + + t.Run("unregister is waiting till the end of execution", func(t *testing.T) { + executedCounter := 0 + callback := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(50 * time.Millisecond) + executedCounter++ + return true + } + chStarted := make(chan struct{}, 1) + chFinished := make(chan struct{}, 1) + var executed bool + var d time.Duration + + callbacks := NewCallbackGroup("id", logger, 2) + ctrl := callbacks.Register("c", callback) + + go func() { + chStarted <- struct{}{} + start := time.Now() + executed = callbacks.CycleCallback(shouldNotAbort) + d = time.Since(start) + chFinished <- struct{}{} + }() + <-chStarted + start := time.Now() + time.Sleep(25 * time.Millisecond) + require.Nil(t, ctrl.Unregister(ctx)) + du := time.Since(start) + <-chFinished + + assert.True(t, executed) + assert.Equal(t, 1, executedCounter) + assert.GreaterOrEqual(t, d, 50*time.Millisecond) + assert.GreaterOrEqual(t, du, 40*time.Millisecond) + }) + + t.Run("unregister fails due to context timeout", func(t *testing.T) { + executedCounter := 0 + callback := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(50 * time.Millisecond) + executedCounter++ + return true + } + chStarted := make(chan struct{}, 1) + chFinished := make(chan struct{}, 1) + var executed1 bool + var executed2 bool + var d1 time.Duration + var d2 time.Duration + + callbacks := NewCallbackGroup("id", logger, 2) + ctrl := callbacks.Register("c", callback) + + go func() { + chStarted <- struct{}{} + start := time.Now() + executed1 = callbacks.CycleCallback(shouldNotAbort) + d1 = time.Since(start) + chFinished <- struct{}{} + }() + <-chStarted + start := time.Now() + time.Sleep(25 * time.Millisecond) + ctxTimeout, cancel := context.WithTimeout(ctx, 5*time.Millisecond) + defer cancel() + require.NotNil(t, ctrl.Unregister(ctxTimeout)) + du := time.Since(start) + <-chFinished + + go func() { + start 
:= time.Now() + executed2 = callbacks.CycleCallback(shouldNotAbort) + d2 = time.Since(start) + chFinished <- struct{}{} + }() + <-chFinished + + assert.True(t, executed1) + assert.True(t, executed2) + assert.Equal(t, 2, executedCounter) + assert.GreaterOrEqual(t, d1, 50*time.Millisecond) + assert.GreaterOrEqual(t, d2, 50*time.Millisecond) + assert.GreaterOrEqual(t, du, 30*time.Millisecond) + }) + + t.Run("unregister 3rd and 4th while executing", func(t *testing.T) { + executedCounter1 := 0 + callback1 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(50 * time.Millisecond) + executedCounter1++ + return true + } + executedCounter2 := 0 + callback2 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(50 * time.Millisecond) + executedCounter2++ + return true + } + executedCounter3 := 0 + callback3 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(50 * time.Millisecond) + executedCounter3++ + return true + } + executedCounter4 := 0 + callback4 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(50 * time.Millisecond) + executedCounter4++ + return true + } + chStarted := make(chan struct{}, 1) + chFinished := make(chan struct{}, 1) + var executed bool + var d time.Duration + + callbacks := NewCallbackGroup("id", logger, 2) + callbacks.Register("c1", callback1) + callbacks.Register("c2", callback2) + ctrl3 := callbacks.Register("c3", callback3) + ctrl4 := callbacks.Register("c4", callback4) + + go func() { + chStarted <- struct{}{} + start := time.Now() + executed = callbacks.CycleCallback(shouldNotAbort) + d = time.Since(start) + chFinished <- struct{}{} + }() + <-chStarted + time.Sleep(25 * time.Millisecond) + require.Nil(t, ctrl3.Unregister(ctx)) + require.Nil(t, ctrl4.Unregister(ctx)) + <-chFinished + + assert.True(t, executed) + assert.Equal(t, 1, executedCounter1) + assert.Equal(t, 1, executedCounter2) + assert.Equal(t, 0, executedCounter3) + assert.Equal(t, 0, executedCounter3) + assert.GreaterOrEqual(t, d, 
50*time.Millisecond) + }) + + t.Run("unregister while running", func(t *testing.T) { + counter1 := 0 + counter2 := 0 + max := 25 + + callback1 := func(shouldAbort ShouldAbortCallback) bool { + for { + if shouldAbort() { + return false + } + + time.Sleep(10 * time.Millisecond) + counter1++ + + // 10ms * 25 = 250ms + if counter1 > max { + return true + } + } + } + callback2 := func(shouldAbort ShouldAbortCallback) bool { + for { + if shouldAbort() { + return false + } + + time.Sleep(10 * time.Millisecond) + counter2++ + + // 10ms * 25 = 250ms + if counter2 > max { + return true + } + } + } + + chStarted := make(chan struct{}, 1) + chFinished := make(chan struct{}, 1) + var executed bool + var d time.Duration + + callbacks := NewCallbackGroup("id", logger, 2) + ctrl1 := callbacks.Register("c1", callback1) + ctrl2 := callbacks.Register("c2", callback2) + + go func() { + chStarted <- struct{}{} + start := time.Now() + executed = callbacks.CycleCallback(shouldNotAbort) + d = time.Since(start) + chFinished <- struct{}{} + }() + <-chStarted + time.Sleep(50 * time.Millisecond) + require.NoError(t, ctrl1.Unregister(ctx)) + require.NoError(t, ctrl2.Unregister(ctx)) + <-chFinished + + assert.False(t, executed) + assert.LessOrEqual(t, counter1, max) + assert.LessOrEqual(t, counter2, max) + assert.LessOrEqual(t, d, 200*time.Millisecond) + }) +} + +func TestCycleCallback_Parallel_Deactivate(t *testing.T) { + ctx := context.Background() + logger, _ := test.NewNullLogger() + shouldNotAbort := func() bool { return false } + + t.Run("1 executable callback, 1 deactivated", func(t *testing.T) { + executedCounter := 0 + callback := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(50 * time.Millisecond) + executedCounter++ + return true + } + var executed bool + var d time.Duration + + callbacks := NewCallbackGroup("id", logger, 2) + ctrl := callbacks.Register("c1", callback) + require.Nil(t, ctrl.Deactivate(ctx)) + + start := time.Now() + executed = 
callbacks.CycleCallback(shouldNotAbort) + d = time.Since(start) + + assert.False(t, executed) + assert.Equal(t, 0, executedCounter) + assert.GreaterOrEqual(t, d, 0*time.Millisecond) + }) + + t.Run("2 executable callbacks, 2 deactivated", func(t *testing.T) { + executedCounter1 := 0 + callback1 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(50 * time.Millisecond) + executedCounter1++ + return true + } + executedCounter2 := 0 + callback2 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(25 * time.Millisecond) + executedCounter2++ + return true + } + var executed bool + var d time.Duration + + callbacks := NewCallbackGroup("id", logger, 2) + ctrl1 := callbacks.Register("c1", callback1) + ctrl2 := callbacks.Register("c2", callback2) + require.Nil(t, ctrl1.Deactivate(ctx)) + require.Nil(t, ctrl2.Deactivate(ctx)) + + start := time.Now() + executed = callbacks.CycleCallback(shouldNotAbort) + d = time.Since(start) + + assert.False(t, executed) + assert.Equal(t, 0, executedCounter1) + assert.Equal(t, 0, executedCounter2) + assert.GreaterOrEqual(t, d, 0*time.Millisecond) + }) + + t.Run("2 executable callbacks, 1 deactivated", func(t *testing.T) { + executedCounter1 := 0 + callback1 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(50 * time.Millisecond) + executedCounter1++ + return true + } + executedCounter2 := 0 + callback2 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(25 * time.Millisecond) + executedCounter2++ + return true + } + var executed bool + var d time.Duration + + callbacks := NewCallbackGroup("id", logger, 2) + ctrl1 := callbacks.Register("c1", callback1) + callbacks.Register("c2", callback2) + require.Nil(t, ctrl1.Deactivate(ctx)) + + start := time.Now() + executed = callbacks.CycleCallback(shouldNotAbort) + d = time.Since(start) + + assert.True(t, executed) + assert.Equal(t, 0, executedCounter1) + assert.Equal(t, 1, executedCounter2) + assert.GreaterOrEqual(t, d, 25*time.Millisecond) + }) + + t.Run("4 
executable callbacks, all deactivated at different time", func(t *testing.T) { + executedCounter1 := 0 + callback1 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(25 * time.Millisecond) + executedCounter1++ + return true + } + executedCounter2 := 0 + callback2 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(25 * time.Millisecond) + executedCounter2++ + return true + } + executedCounter3 := 0 + callback3 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(25 * time.Millisecond) + executedCounter3++ + return true + } + executedCounter4 := 0 + callback4 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(25 * time.Millisecond) + executedCounter4++ + return true + } + var executed1 bool + var executed2 bool + var executed3 bool + var executed4 bool + var d1 time.Duration + var d2 time.Duration + var d3 time.Duration + var d4 time.Duration + + callbacks := NewCallbackGroup("id", logger, 2) + ctrl1 := callbacks.Register("c1", callback1) + ctrl2 := callbacks.Register("c2", callback2) + ctrl3 := callbacks.Register("c3", callback3) + ctrl4 := callbacks.Register("c4", callback4) + require.Nil(t, ctrl3.Deactivate(ctx)) + + start := time.Now() + executed1 = callbacks.CycleCallback(shouldNotAbort) + d1 = time.Since(start) + + require.Nil(t, ctrl1.Deactivate(ctx)) + + start = time.Now() + executed2 = callbacks.CycleCallback(shouldNotAbort) + d2 = time.Since(start) + + require.Nil(t, ctrl4.Deactivate(ctx)) + + start = time.Now() + executed3 = callbacks.CycleCallback(shouldNotAbort) + d3 = time.Since(start) + + require.Nil(t, ctrl2.Deactivate(ctx)) + + start = time.Now() + executed4 = callbacks.CycleCallback(shouldNotAbort) + d4 = time.Since(start) + + assert.True(t, executed1) + assert.True(t, executed2) + assert.True(t, executed3) + assert.False(t, executed4) + assert.Equal(t, 1, executedCounter1) + assert.Equal(t, 3, executedCounter2) + assert.Equal(t, 0, executedCounter3) + assert.Equal(t, 2, executedCounter4) + 
assert.GreaterOrEqual(t, d1, 50*time.Millisecond) + assert.GreaterOrEqual(t, d2, 25*time.Millisecond) + assert.GreaterOrEqual(t, d3, 25*time.Millisecond) + assert.GreaterOrEqual(t, d4, 0*time.Millisecond) + }) + + t.Run("deactivate is waiting till the end of execution", func(t *testing.T) { + executedCounter := 0 + callback := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(50 * time.Millisecond) + executedCounter++ + return true + } + chStarted := make(chan struct{}, 1) + chFinished := make(chan struct{}, 1) + var executed bool + var d time.Duration + + callbacks := NewCallbackGroup("id", logger, 2) + ctrl := callbacks.Register("c", callback) + + go func() { + chStarted <- struct{}{} + start := time.Now() + executed = callbacks.CycleCallback(shouldNotAbort) + d = time.Since(start) + chFinished <- struct{}{} + }() + <-chStarted + start := time.Now() + time.Sleep(25 * time.Millisecond) + require.Nil(t, ctrl.Deactivate(ctx)) + du := time.Since(start) + <-chFinished + + assert.True(t, executed) + assert.Equal(t, 1, executedCounter) + assert.GreaterOrEqual(t, d, 50*time.Millisecond) + assert.GreaterOrEqual(t, du, 40*time.Millisecond) + }) + + t.Run("deactivate fails due to context timeout", func(t *testing.T) { + executedCounter := 0 + callback := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(50 * time.Millisecond) + executedCounter++ + return true + } + chStarted := make(chan struct{}, 1) + chFinished := make(chan struct{}, 1) + var executed1 bool + var executed2 bool + var d1 time.Duration + var d2 time.Duration + + callbacks := NewCallbackGroup("id", logger, 2) + ctrl := callbacks.Register("c", callback) + + go func() { + chStarted <- struct{}{} + start := time.Now() + executed1 = callbacks.CycleCallback(shouldNotAbort) + d1 = time.Since(start) + chFinished <- struct{}{} + }() + <-chStarted + start := time.Now() + time.Sleep(25 * time.Millisecond) + ctxTimeout, cancel := context.WithTimeout(ctx, 5*time.Millisecond) + defer cancel() + 
require.NotNil(t, ctrl.Deactivate(ctxTimeout)) + du := time.Since(start) + <-chFinished + + go func() { + start := time.Now() + executed2 = callbacks.CycleCallback(shouldNotAbort) + d2 = time.Since(start) + chFinished <- struct{}{} + }() + <-chFinished + + assert.True(t, executed1) + assert.True(t, executed2) + assert.Equal(t, 2, executedCounter) + assert.GreaterOrEqual(t, d1, 50*time.Millisecond) + assert.GreaterOrEqual(t, d2, 50*time.Millisecond) + assert.GreaterOrEqual(t, du, 30*time.Millisecond) + }) + + t.Run("deactivate 3rd and 4th while executing", func(t *testing.T) { + executedCounter1 := 0 + callback1 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(50 * time.Millisecond) + executedCounter1++ + return true + } + executedCounter2 := 0 + callback2 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(50 * time.Millisecond) + executedCounter2++ + return true + } + executedCounter3 := 0 + callback3 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(50 * time.Millisecond) + executedCounter3++ + return true + } + executedCounter4 := 0 + callback4 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(50 * time.Millisecond) + executedCounter4++ + return true + } + chStarted := make(chan struct{}, 1) + chFinished := make(chan struct{}, 1) + var executed bool + var d time.Duration + + callbacks := NewCallbackGroup("id", logger, 2) + callbacks.Register("c1", callback1) + callbacks.Register("c2", callback2) + ctrl3 := callbacks.Register("c3", callback3) + ctrl4 := callbacks.Register("c4", callback4) + + go func() { + chStarted <- struct{}{} + start := time.Now() + executed = callbacks.CycleCallback(shouldNotAbort) + d = time.Since(start) + chFinished <- struct{}{} + }() + <-chStarted + time.Sleep(25 * time.Millisecond) + require.Nil(t, ctrl3.Deactivate(ctx)) + require.Nil(t, ctrl4.Deactivate(ctx)) + <-chFinished + + assert.True(t, executed) + assert.Equal(t, 1, executedCounter1) + assert.Equal(t, 1, executedCounter2) + 
assert.Equal(t, 0, executedCounter3) + assert.Equal(t, 0, executedCounter3) + assert.GreaterOrEqual(t, d, 50*time.Millisecond) + }) + + t.Run("deactivate while running", func(t *testing.T) { + counter1 := 0 + counter2 := 0 + max := 25 + + callback1 := func(shouldAbort ShouldAbortCallback) bool { + for { + if shouldAbort() { + return false + } + + time.Sleep(10 * time.Millisecond) + counter1++ + + // 10ms * 25 = 250ms + if counter1 > max { + return true + } + } + } + callback2 := func(shouldAbort ShouldAbortCallback) bool { + for { + if shouldAbort() { + return false + } + + time.Sleep(10 * time.Millisecond) + counter2++ + + // 10ms * 25 = 250ms + if counter2 > max { + return true + } + } + } + + chStarted := make(chan struct{}, 1) + chFinished := make(chan struct{}, 1) + var executed bool + var d time.Duration + + callbacks := NewCallbackGroup("id", logger, 2) + ctrl1 := callbacks.Register("c1", callback1) + ctrl2 := callbacks.Register("c2", callback2) + + go func() { + chStarted <- struct{}{} + start := time.Now() + executed = callbacks.CycleCallback(shouldNotAbort) + d = time.Since(start) + chFinished <- struct{}{} + }() + <-chStarted + time.Sleep(50 * time.Millisecond) + require.NoError(t, ctrl1.Deactivate(ctx)) + require.NoError(t, ctrl2.Deactivate(ctx)) + <-chFinished + + assert.False(t, executed) + assert.LessOrEqual(t, counter1, max) + assert.LessOrEqual(t, counter2, max) + assert.LessOrEqual(t, d, 200*time.Millisecond) + + t.Run("does not abort after activated back again", func(t *testing.T) { + require.NoError(t, ctrl1.Activate()) + require.NoError(t, ctrl2.Activate()) + + counter1 = 0 + counter2 = 0 + max = 10 + + go func() { + start := time.Now() + executed = callbacks.CycleCallback(shouldNotAbort) + d = time.Since(start) + chFinished <- struct{}{} + }() + <-chFinished + + assert.True(t, executed) + assert.Greater(t, counter1, max) + assert.Greater(t, counter2, max) + assert.GreaterOrEqual(t, d, 100*time.Millisecond) + }) + }) +} + +func 
TestCycleCallback_Sequential(t *testing.T) { + logger, _ := test.NewNullLogger() + shouldNotAbort := func() bool { return false } + + t.Run("no callbacks", func(t *testing.T) { + var executed bool + + callbacks := NewCallbackGroup("id", logger, 1) + + executed = callbacks.CycleCallback(shouldNotAbort) + + assert.False(t, executed) + }) + + t.Run("2 executable callbacks", func(t *testing.T) { + executedCounter1 := 0 + callback1 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(50 * time.Millisecond) + executedCounter1++ + return true + } + executedCounter2 := 0 + callback2 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(25 * time.Millisecond) + executedCounter2++ + return true + } + var executed bool + var d time.Duration + + callbacks := NewCallbackGroup("id", logger, 1) + callbacks.Register("c1", callback1) + callbacks.Register("c2", callback2) + + start := time.Now() + executed = callbacks.CycleCallback(shouldNotAbort) + d = time.Since(start) + + assert.True(t, executed) + assert.Equal(t, 1, executedCounter1) + assert.Equal(t, 1, executedCounter2) + assert.GreaterOrEqual(t, d, 75*time.Millisecond) + }) + + t.Run("2 non-executable callbacks", func(t *testing.T) { + executedCounter1 := 0 + callback1 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(10 * time.Millisecond) + executedCounter1++ + return false + } + executedCounter2 := 0 + callback2 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(10 * time.Millisecond) + executedCounter2++ + return false + } + var executed bool + var d time.Duration + + callbacks := NewCallbackGroup("id", logger, 1) + callbacks.Register("c1", callback1) + callbacks.Register("c2", callback2) + + start := time.Now() + executed = callbacks.CycleCallback(shouldNotAbort) + d = time.Since(start) + + assert.False(t, executed) + assert.Equal(t, 1, executedCounter1) + assert.Equal(t, 1, executedCounter2) + assert.GreaterOrEqual(t, d, 10*time.Millisecond) + }) + + t.Run("2 executable callbacks, not 
executed due to should abort", func(t *testing.T) { + executedCounter1 := 0 + callback1 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(25 * time.Millisecond) + executedCounter1++ + return true + } + executedCounter2 := 0 + callback2 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(25 * time.Millisecond) + executedCounter2++ + return true + } + shouldAbortCounter := 0 + shouldAbort := func() bool { + shouldAbortCounter++ + return shouldAbortCounter > 1 + } + + var executed bool + var d time.Duration + + callbacks := NewCallbackGroup("id", logger, 1) + callbacks.Register("c1", callback1) + callbacks.Register("c2", callback2) + + start := time.Now() + executed = callbacks.CycleCallback(shouldAbort) + d = time.Since(start) + + assert.True(t, executed) + assert.Equal(t, 1, executedCounter1) + assert.Equal(t, 0, executedCounter2) + assert.GreaterOrEqual(t, d, 25*time.Millisecond) + }) + + t.Run("register new while executing", func(t *testing.T) { + executedCounter1 := 0 + callback1 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(50 * time.Millisecond) + executedCounter1++ + return true + } + executedCounter2 := 0 + callback2 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(50 * time.Millisecond) + executedCounter2++ + return true + } + chStarted := make(chan struct{}, 1) + chFinished := make(chan struct{}, 1) + var executed bool + var d time.Duration + + callbacks := NewCallbackGroup("id", logger, 1) + callbacks.Register("c1", callback1) + + go func() { + chStarted <- struct{}{} + start := time.Now() + executed = callbacks.CycleCallback(shouldNotAbort) + d = time.Since(start) + chFinished <- struct{}{} + }() + <-chStarted + time.Sleep(25 * time.Millisecond) + callbacks.Register("c2", callback2) + <-chFinished + + assert.True(t, executed) + assert.Equal(t, 1, executedCounter1) + assert.Equal(t, 1, executedCounter2) + assert.GreaterOrEqual(t, d, 100*time.Millisecond) + }) + + t.Run("run with intervals", func(T *testing.T) 
{ + ticker := NewFixedTicker(10 * time.Millisecond) + intervals2 := NewSeriesIntervals([]time.Duration{ + 10 * time.Millisecond, 30 * time.Millisecond, 50 * time.Millisecond, + }) + intervals3 := NewFixedIntervals(60 * time.Millisecond) + now := time.Now() + + executionTimes1 := []time.Duration{} + callback1 := func(shouldAbort ShouldAbortCallback) bool { + executionTimes1 = append(executionTimes1, time.Since(now)) + return true + } + executionCounter2 := 0 + executionTimes2 := []time.Duration{} + callback2 := func(shouldAbort ShouldAbortCallback) bool { + executionCounter2++ + executionTimes2 = append(executionTimes2, time.Since(now)) + // reports executed every 3 calls, should result in 10, 30, 50, 50, 10, 30, 50, 50, ... intervals + return executionCounter2%4 == 0 + } + executionTimes3 := []time.Duration{} + callback3 := func(shouldAbort ShouldAbortCallback) bool { + executionTimes3 = append(executionTimes3, time.Since(now)) + return true + } + + callbacks := NewCallbackGroup("id", logger, 1) + // should be called on every tick, with 10 intervals + callbacks.Register("c1", callback1) + // should be called with 10, 30, 50, 50, 10, 30, 50, 50, ... intervals + callbacks.Register("c2", callback2, WithIntervals(intervals2)) + // should be called with 60, 60, ... 
intervals + callbacks.Register("c3", callback3, WithIntervals(intervals3)) + + cm := NewManager(ticker, callbacks.CycleCallback, logger) + cm.Start() + time.Sleep(400 * time.Millisecond) + cm.StopAndWait(context.Background()) + + // within 400 ms c1 should be called at least 30x + require.GreaterOrEqual(t, len(executionTimes1), 30) + // 1st call on 1st tick after 10ms + sumDuration := time.Duration(10) + for i := 0; i < 30; i++ { + assert.GreaterOrEqual(t, executionTimes1[i], sumDuration) + sumDuration += 10 * time.Millisecond + } + + // within 400 ms c2 should be called at least 8x + require.GreaterOrEqual(t, len(executionTimes2), 8) + // 1st call on 1st tick after 10ms + sumDuration = time.Duration(0) + for i := 0; i < 8; i++ { + assert.GreaterOrEqual(t, executionTimes2[i], sumDuration) + switch (i + 1) % 4 { + case 0: + sumDuration += 10 * time.Millisecond + case 1: + sumDuration += 30 * time.Millisecond + case 2, 3: + sumDuration += 50 * time.Millisecond + } + } + + // within 400 ms c3 should be called at least 6x + require.GreaterOrEqual(t, len(executionTimes3), 6) + // 1st call on 1st tick after 10ms + sumDuration = time.Duration(0) + for i := 0; i < 6; i++ { + assert.GreaterOrEqual(t, executionTimes3[i], sumDuration) + sumDuration += 60 * time.Millisecond + } + }) +} + +func TestCycleCallback_Sequential_Unregister(t *testing.T) { + ctx := context.Background() + logger, _ := test.NewNullLogger() + shouldNotAbort := func() bool { return false } + + t.Run("1 executable callback, 1 unregistered", func(t *testing.T) { + executedCounter := 0 + callback := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(50 * time.Millisecond) + executedCounter++ + return true + } + var executed bool + var d time.Duration + + callbacks := NewCallbackGroup("id", logger, 1) + ctrl := callbacks.Register("c1", callback) + require.Nil(t, ctrl.Unregister(ctx)) + + start := time.Now() + executed = callbacks.CycleCallback(shouldNotAbort) + d = time.Since(start) + + assert.False(t, 
executed) + assert.Equal(t, 0, executedCounter) + assert.GreaterOrEqual(t, d, 0*time.Millisecond) + }) + + t.Run("2 executable callbacks, 2 unregistered", func(t *testing.T) { + executedCounter1 := 0 + callback1 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(50 * time.Millisecond) + executedCounter1++ + return true + } + executedCounter2 := 0 + callback2 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(25 * time.Millisecond) + executedCounter2++ + return true + } + var executed bool + var d time.Duration + + callbacks := NewCallbackGroup("id", logger, 1) + ctrl1 := callbacks.Register("c1", callback1) + ctrl2 := callbacks.Register("c2", callback2) + require.Nil(t, ctrl1.Unregister(ctx)) + require.Nil(t, ctrl2.Unregister(ctx)) + + start := time.Now() + executed = callbacks.CycleCallback(shouldNotAbort) + d = time.Since(start) + + assert.False(t, executed) + assert.Equal(t, 0, executedCounter1) + assert.Equal(t, 0, executedCounter2) + assert.GreaterOrEqual(t, d, 0*time.Millisecond) + }) + + t.Run("2 executable callbacks, 1 unregistered", func(t *testing.T) { + executedCounter1 := 0 + callback1 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(50 * time.Millisecond) + executedCounter1++ + return true + } + executedCounter2 := 0 + callback2 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(25 * time.Millisecond) + executedCounter2++ + return true + } + var executed bool + var d time.Duration + + callbacks := NewCallbackGroup("id", logger, 1) + ctrl1 := callbacks.Register("c1", callback1) + callbacks.Register("c2", callback2) + require.Nil(t, ctrl1.Unregister(ctx)) + + start := time.Now() + executed = callbacks.CycleCallback(shouldNotAbort) + d = time.Since(start) + + assert.True(t, executed) + assert.Equal(t, 0, executedCounter1) + assert.Equal(t, 1, executedCounter2) + assert.GreaterOrEqual(t, d, 25*time.Millisecond) + }) + + t.Run("4 executable callbacks, all unregistered at different time", func(t *testing.T) { + 
executedCounter1 := 0 + callback1 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(25 * time.Millisecond) + executedCounter1++ + return true + } + executedCounter2 := 0 + callback2 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(25 * time.Millisecond) + executedCounter2++ + return true + } + executedCounter3 := 0 + callback3 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(25 * time.Millisecond) + executedCounter3++ + return true + } + executedCounter4 := 0 + callback4 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(25 * time.Millisecond) + executedCounter4++ + return true + } + var executed1 bool + var executed2 bool + var executed3 bool + var executed4 bool + var d1 time.Duration + var d2 time.Duration + var d3 time.Duration + var d4 time.Duration + + callbacks := NewCallbackGroup("id", logger, 1) + ctrl1 := callbacks.Register("c1", callback1) + ctrl2 := callbacks.Register("c2", callback2) + ctrl3 := callbacks.Register("c3", callback3) + ctrl4 := callbacks.Register("c4", callback4) + require.Nil(t, ctrl3.Unregister(ctx)) + + start := time.Now() + executed1 = callbacks.CycleCallback(shouldNotAbort) + d1 = time.Since(start) + + require.Nil(t, ctrl1.Unregister(ctx)) + + start = time.Now() + executed2 = callbacks.CycleCallback(shouldNotAbort) + d2 = time.Since(start) + + require.Nil(t, ctrl4.Unregister(ctx)) + + start = time.Now() + executed3 = callbacks.CycleCallback(shouldNotAbort) + d3 = time.Since(start) + + require.Nil(t, ctrl2.Unregister(ctx)) + + start = time.Now() + executed4 = callbacks.CycleCallback(shouldNotAbort) + d4 = time.Since(start) + + assert.True(t, executed1) + assert.True(t, executed2) + assert.True(t, executed3) + assert.False(t, executed4) + assert.Equal(t, 1, executedCounter1) + assert.Equal(t, 3, executedCounter2) + assert.Equal(t, 0, executedCounter3) + assert.Equal(t, 2, executedCounter4) + assert.GreaterOrEqual(t, d1, 75*time.Millisecond) + assert.GreaterOrEqual(t, d2, 50*time.Millisecond) 
+ assert.GreaterOrEqual(t, d3, 25*time.Millisecond) + assert.GreaterOrEqual(t, d4, 0*time.Millisecond) + }) + + t.Run("unregister is waiting till the end of execution", func(t *testing.T) { + executedCounter := 0 + callback := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(50 * time.Millisecond) + executedCounter++ + return true + } + chStarted := make(chan struct{}, 1) + chFinished := make(chan struct{}, 1) + var executed bool + var d time.Duration + + callbacks := NewCallbackGroup("id", logger, 1) + ctrl := callbacks.Register("c", callback) + + go func() { + chStarted <- struct{}{} + start := time.Now() + executed = callbacks.CycleCallback(shouldNotAbort) + d = time.Since(start) + chFinished <- struct{}{} + }() + <-chStarted + start := time.Now() + time.Sleep(25 * time.Millisecond) + require.Nil(t, ctrl.Unregister(ctx)) + du := time.Since(start) + <-chFinished + + assert.True(t, executed) + assert.Equal(t, 1, executedCounter) + assert.GreaterOrEqual(t, d, 50*time.Millisecond) + assert.GreaterOrEqual(t, du, 40*time.Millisecond) + }) + + t.Run("unregister fails due to context timeout", func(t *testing.T) { + executedCounter := 0 + callback := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(50 * time.Millisecond) + executedCounter++ + return true + } + chStarted := make(chan struct{}, 1) + chFinished := make(chan struct{}, 1) + var executed1 bool + var executed2 bool + var d1 time.Duration + var d2 time.Duration + + callbacks := NewCallbackGroup("id", logger, 1) + ctrl := callbacks.Register("c", callback) + + go func() { + chStarted <- struct{}{} + start := time.Now() + executed1 = callbacks.CycleCallback(shouldNotAbort) + d1 = time.Since(start) + chFinished <- struct{}{} + }() + <-chStarted + start := time.Now() + time.Sleep(25 * time.Millisecond) + ctxTimeout, cancel := context.WithTimeout(ctx, 5*time.Millisecond) + defer cancel() + require.NotNil(t, ctrl.Unregister(ctxTimeout)) + du := time.Since(start) + <-chFinished + + go func() { + start 
:= time.Now() + executed2 = callbacks.CycleCallback(shouldNotAbort) + d2 = time.Since(start) + chFinished <- struct{}{} + }() + <-chFinished + + assert.True(t, executed1) + assert.True(t, executed2) + assert.Equal(t, 2, executedCounter) + assert.GreaterOrEqual(t, d1, 50*time.Millisecond) + assert.GreaterOrEqual(t, d2, 50*time.Millisecond) + assert.GreaterOrEqual(t, du, 30*time.Millisecond) + }) + + t.Run("unregister 2nd and 3rd while executing", func(t *testing.T) { + executedCounter1 := 0 + callback1 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(50 * time.Millisecond) + executedCounter1++ + return true + } + executedCounter2 := 0 + callback2 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(50 * time.Millisecond) + executedCounter2++ + return true + } + executedCounter3 := 0 + callback3 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(50 * time.Millisecond) + executedCounter3++ + return true + } + chStarted := make(chan struct{}, 1) + chFinished := make(chan struct{}, 1) + var executed bool + var d time.Duration + + callbacks := NewCallbackGroup("id", logger, 1) + callbacks.Register("c1", callback1) + ctrl2 := callbacks.Register("c2", callback2) + ctrl3 := callbacks.Register("c3", callback3) + + go func() { + chStarted <- struct{}{} + start := time.Now() + executed = callbacks.CycleCallback(shouldNotAbort) + d = time.Since(start) + chFinished <- struct{}{} + }() + <-chStarted + time.Sleep(25 * time.Millisecond) + require.Nil(t, ctrl2.Unregister(ctx)) + require.Nil(t, ctrl3.Unregister(ctx)) + <-chFinished + + assert.True(t, executed) + assert.Equal(t, 1, executedCounter1) + assert.Equal(t, 0, executedCounter2) + assert.Equal(t, 0, executedCounter3) + assert.GreaterOrEqual(t, d, 50*time.Millisecond) + }) + + t.Run("unregister while running", func(t *testing.T) { + counter := 0 + max := 25 + callback := func(shouldAbort ShouldAbortCallback) bool { + for { + if shouldAbort() { + return false + } + + time.Sleep(10 * 
time.Millisecond) + counter++ + + // 10ms * 25 = 250ms + if counter > max { + return true + } + } + } + chStarted := make(chan struct{}, 1) + chFinished := make(chan struct{}, 1) + var executed bool + var d time.Duration + + callbacks := NewCallbackGroup("id", logger, 1) + ctrl := callbacks.Register("c", callback) + + go func() { + chStarted <- struct{}{} + start := time.Now() + executed = callbacks.CycleCallback(shouldNotAbort) + d = time.Since(start) + chFinished <- struct{}{} + }() + <-chStarted + time.Sleep(50 * time.Millisecond) + require.NoError(t, ctrl.Unregister(ctx)) + <-chFinished + + assert.False(t, executed) + assert.LessOrEqual(t, counter, max) + assert.LessOrEqual(t, d, 200*time.Millisecond) + }) +} + +func TestCycleCallback_Sequential_Deactivate(t *testing.T) { + ctx := context.Background() + logger, _ := test.NewNullLogger() + shouldNotAbort := func() bool { return false } + + t.Run("1 executable callback, 1 deactivated", func(t *testing.T) { + executedCounter := 0 + callback := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(50 * time.Millisecond) + executedCounter++ + return true + } + var executed bool + var d time.Duration + + callbacks := NewCallbackGroup("id", logger, 1) + ctrl := callbacks.Register("c1", callback) + require.Nil(t, ctrl.Deactivate(ctx)) + + start := time.Now() + executed = callbacks.CycleCallback(shouldNotAbort) + d = time.Since(start) + + assert.False(t, executed) + assert.Equal(t, 0, executedCounter) + assert.GreaterOrEqual(t, d, 0*time.Millisecond) + }) + + t.Run("2 executable callbacks, 2 deactivated", func(t *testing.T) { + executedCounter1 := 0 + callback1 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(50 * time.Millisecond) + executedCounter1++ + return true + } + executedCounter2 := 0 + callback2 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(25 * time.Millisecond) + executedCounter2++ + return true + } + var executed bool + var d time.Duration + + callbacks := 
NewCallbackGroup("id", logger, 1) + ctrl1 := callbacks.Register("c1", callback1) + ctrl2 := callbacks.Register("c2", callback2) + require.Nil(t, ctrl1.Deactivate(ctx)) + require.Nil(t, ctrl2.Deactivate(ctx)) + + start := time.Now() + executed = callbacks.CycleCallback(shouldNotAbort) + d = time.Since(start) + + assert.False(t, executed) + assert.Equal(t, 0, executedCounter1) + assert.Equal(t, 0, executedCounter2) + assert.GreaterOrEqual(t, d, 0*time.Millisecond) + }) + + t.Run("2 executable callbacks, 1 deactivated", func(t *testing.T) { + executedCounter1 := 0 + callback1 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(50 * time.Millisecond) + executedCounter1++ + return true + } + executedCounter2 := 0 + callback2 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(25 * time.Millisecond) + executedCounter2++ + return true + } + var executed bool + var d time.Duration + + callbacks := NewCallbackGroup("id", logger, 1) + ctrl1 := callbacks.Register("c1", callback1) + callbacks.Register("c2", callback2) + require.Nil(t, ctrl1.Deactivate(ctx)) + + start := time.Now() + executed = callbacks.CycleCallback(shouldNotAbort) + d = time.Since(start) + + assert.True(t, executed) + assert.Equal(t, 0, executedCounter1) + assert.Equal(t, 1, executedCounter2) + assert.GreaterOrEqual(t, d, 25*time.Millisecond) + }) + + t.Run("4 executable callbacks, all deactivated at different time", func(t *testing.T) { + executedCounter1 := 0 + callback1 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(25 * time.Millisecond) + executedCounter1++ + return true + } + executedCounter2 := 0 + callback2 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(25 * time.Millisecond) + executedCounter2++ + return true + } + executedCounter3 := 0 + callback3 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(25 * time.Millisecond) + executedCounter3++ + return true + } + executedCounter4 := 0 + callback4 := func(shouldAbort ShouldAbortCallback) bool { + 
time.Sleep(25 * time.Millisecond) + executedCounter4++ + return true + } + var executed1 bool + var executed2 bool + var executed3 bool + var executed4 bool + var d1 time.Duration + var d2 time.Duration + var d3 time.Duration + var d4 time.Duration + + callbacks := NewCallbackGroup("id", logger, 1) + ctrl1 := callbacks.Register("c1", callback1) + ctrl2 := callbacks.Register("c2", callback2) + ctrl3 := callbacks.Register("c3", callback3) + ctrl4 := callbacks.Register("c4", callback4) + require.Nil(t, ctrl3.Deactivate(ctx)) + + start := time.Now() + executed1 = callbacks.CycleCallback(shouldNotAbort) + d1 = time.Since(start) + + require.Nil(t, ctrl1.Deactivate(ctx)) + + start = time.Now() + executed2 = callbacks.CycleCallback(shouldNotAbort) + d2 = time.Since(start) + + require.Nil(t, ctrl4.Deactivate(ctx)) + + start = time.Now() + executed3 = callbacks.CycleCallback(shouldNotAbort) + d3 = time.Since(start) + + require.Nil(t, ctrl2.Deactivate(ctx)) + + start = time.Now() + executed4 = callbacks.CycleCallback(shouldNotAbort) + d4 = time.Since(start) + + assert.True(t, executed1) + assert.True(t, executed2) + assert.True(t, executed3) + assert.False(t, executed4) + assert.Equal(t, 1, executedCounter1) + assert.Equal(t, 3, executedCounter2) + assert.Equal(t, 0, executedCounter3) + assert.Equal(t, 2, executedCounter4) + assert.GreaterOrEqual(t, d1, 75*time.Millisecond) + assert.GreaterOrEqual(t, d2, 50*time.Millisecond) + assert.GreaterOrEqual(t, d3, 25*time.Millisecond) + assert.GreaterOrEqual(t, d4, 0*time.Millisecond) + }) + + t.Run("deactivate is waiting till the end of execution", func(t *testing.T) { + executedCounter := 0 + callback := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(50 * time.Millisecond) + executedCounter++ + return true + } + chStarted := make(chan struct{}, 1) + chFinished := make(chan struct{}, 1) + var executed bool + var d time.Duration + + callbacks := NewCallbackGroup("id", logger, 1) + ctrl := callbacks.Register("c", callback) + 
+ go func() { + chStarted <- struct{}{} + start := time.Now() + executed = callbacks.CycleCallback(shouldNotAbort) + d = time.Since(start) + chFinished <- struct{}{} + }() + <-chStarted + start := time.Now() + time.Sleep(25 * time.Millisecond) + require.Nil(t, ctrl.Deactivate(ctx)) + du := time.Since(start) + <-chFinished + + assert.True(t, executed) + assert.Equal(t, 1, executedCounter) + assert.GreaterOrEqual(t, d, 50*time.Millisecond) + assert.GreaterOrEqual(t, du, 40*time.Millisecond) + }) + + t.Run("deactivate fails due to context timeout", func(t *testing.T) { + executedCounter := 0 + callback := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(50 * time.Millisecond) + executedCounter++ + return true + } + chStarted := make(chan struct{}, 1) + chFinished := make(chan struct{}, 1) + var executed1 bool + var executed2 bool + var d1 time.Duration + var d2 time.Duration + + callbacks := NewCallbackGroup("id", logger, 1) + ctrl := callbacks.Register("c", callback) + + go func() { + chStarted <- struct{}{} + start := time.Now() + executed1 = callbacks.CycleCallback(shouldNotAbort) + d1 = time.Since(start) + chFinished <- struct{}{} + }() + <-chStarted + start := time.Now() + time.Sleep(25 * time.Millisecond) + ctxTimeout, cancel := context.WithTimeout(ctx, 5*time.Millisecond) + defer cancel() + require.NotNil(t, ctrl.Deactivate(ctxTimeout)) + du := time.Since(start) + <-chFinished + + go func() { + start := time.Now() + executed2 = callbacks.CycleCallback(shouldNotAbort) + d2 = time.Since(start) + chFinished <- struct{}{} + }() + <-chFinished + + assert.True(t, executed1) + assert.True(t, executed2) + assert.Equal(t, 2, executedCounter) + assert.GreaterOrEqual(t, d1, 50*time.Millisecond) + assert.GreaterOrEqual(t, d2, 50*time.Millisecond) + assert.GreaterOrEqual(t, du, 30*time.Millisecond) + }) + + t.Run("deactivate 2nd and 3rd while executing", func(t *testing.T) { + executedCounter1 := 0 + callback1 := func(shouldAbort ShouldAbortCallback) bool { + 
time.Sleep(50 * time.Millisecond) + executedCounter1++ + return true + } + executedCounter2 := 0 + callback2 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(50 * time.Millisecond) + executedCounter2++ + return true + } + executedCounter3 := 0 + callback3 := func(shouldAbort ShouldAbortCallback) bool { + time.Sleep(50 * time.Millisecond) + executedCounter3++ + return true + } + chStarted := make(chan struct{}, 1) + chFinished := make(chan struct{}, 1) + var executed bool + var d time.Duration + + callbacks := NewCallbackGroup("id", logger, 1) + callbacks.Register("c1", callback1) + ctrl2 := callbacks.Register("c2", callback2) + ctrl3 := callbacks.Register("c3", callback3) + + go func() { + chStarted <- struct{}{} + start := time.Now() + executed = callbacks.CycleCallback(shouldNotAbort) + d = time.Since(start) + chFinished <- struct{}{} + }() + <-chStarted + time.Sleep(25 * time.Millisecond) + require.Nil(t, ctrl2.Deactivate(ctx)) + require.Nil(t, ctrl3.Deactivate(ctx)) + <-chFinished + + assert.True(t, executed) + assert.Equal(t, 1, executedCounter1) + assert.Equal(t, 0, executedCounter2) + assert.Equal(t, 0, executedCounter3) + assert.GreaterOrEqual(t, d, 50*time.Millisecond) + }) + + t.Run("deactivate while running", func(t *testing.T) { + counter := 0 + max := 25 + callback := func(shouldAbort ShouldAbortCallback) bool { + for { + if shouldAbort() { + return false + } + + time.Sleep(10 * time.Millisecond) + counter++ + + // 10ms * 25 = 250ms + if counter > max { + return true + } + } + } + chStarted := make(chan struct{}, 1) + chFinished := make(chan struct{}, 1) + var executed bool + var d time.Duration + + callbacks := NewCallbackGroup("id", logger, 1) + ctrl := callbacks.Register("c", callback) + + go func() { + chStarted <- struct{}{} + start := time.Now() + executed = callbacks.CycleCallback(shouldNotAbort) + d = time.Since(start) + chFinished <- struct{}{} + }() + <-chStarted + time.Sleep(50 * time.Millisecond) + require.NoError(t, 
ctrl.Deactivate(ctx)) + <-chFinished + + assert.False(t, executed) + assert.LessOrEqual(t, counter, max) + assert.LessOrEqual(t, d, 200*time.Millisecond) + + t.Run("does not abort after activated back again", func(t *testing.T) { + require.NoError(t, ctrl.Activate()) + + counter = 0 + max = 10 + + go func() { + start := time.Now() + executed = callbacks.CycleCallback(shouldNotAbort) + d = time.Since(start) + chFinished <- struct{}{} + }() + <-chFinished + + assert.True(t, executed) + assert.Greater(t, counter, max) + assert.GreaterOrEqual(t, d, 100*time.Millisecond) + }) + }) +} diff --git a/platform/dbops/binaries/weaviate-src/entities/cyclemanager/cyclemanager.go b/platform/dbops/binaries/weaviate-src/entities/cyclemanager/cyclemanager.go new file mode 100644 index 0000000000000000000000000000000000000000..a2283515c270b90920f408b63e04142ddd87da9a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/cyclemanager/cyclemanager.go @@ -0,0 +1,241 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
//
// CONTACT: hello@weaviate.io
//

package cyclemanager

import (
	"context"
	"fmt"
	"sync"

	"github.com/sirupsen/logrus"
	enterrors "github.com/weaviate/weaviate/entities/errors"
)

type (
	// ShouldAbortCallback indicates whether cyclemanager's stop was requested to allow safely
	// abort execution of CycleCallback and stop cyclemanager earlier.
	ShouldAbortCallback func() bool
	// CycleCallback is the unit of work executed by the manager on each tick;
	// its return value indicates whether actual work was done in the cycle.
	CycleCallback func(shouldAbort ShouldAbortCallback) bool
)

// CycleManager periodically executes a CycleCallback, driven by a CycleTicker,
// until it is stopped.
type CycleManager interface {
	// Start launches the background cycle goroutine; it does not block.
	Start()
	// Stop requests a stop; the returned channel yields the final outcome
	// (true = stopped) exactly once and is then closed.
	Stop(ctx context.Context) chan bool
	// StopAndWait requests a stop and blocks until the manager stopped or
	// ctx expired, whichever comes first.
	StopAndWait(ctx context.Context) error
	// Running reports whether the cycle goroutine is currently active.
	Running() bool
}

type cycleManager struct {
	sync.RWMutex // guards running, stopContexts and stopResults

	cycleCallback CycleCallback
	// CycleTicker is declared elsewhere in this package; from its usage here
	// it provides Start, Stop, a receive channel C(), and CycleExecuted(bool).
	cycleTicker CycleTicker
	running     bool
	// buffered (size 1) so the first Stop caller can signal the cycle
	// goroutine without blocking
	stopSignal chan struct{}

	// pending stop requests; a stop proceeds as long as at least one of the
	// collected contexts is still valid (see Stop / shouldStop)
	stopContexts []context.Context
	stopResults  []chan bool

	logger logrus.FieldLogger
}

// NewManager builds a cycleManager around the given ticker and callback.
// The returned manager is idle until Start is called.
func NewManager(cycleTicker CycleTicker, cycleCallback CycleCallback, logger logrus.FieldLogger) CycleManager {
	return &cycleManager{
		cycleCallback: cycleCallback,
		cycleTicker:   cycleTicker,
		running:       false,
		stopSignal:    make(chan struct{}, 1),
		logger:        logger,
	}
}

// Starts instance, does not block
// Does nothing if instance is already started
func (c *cycleManager) Start() {
	c.Lock()
	defer c.Unlock()

	if c.running {
		return
	}

	enterrors.GoWrapper(func() {
		c.cycleTicker.Start()
		defer c.cycleTicker.Stop()

		for {
			if c.isStopRequested() {
				c.Lock()
				// re-check under lock: the stop contexts may all have been
				// cancelled between the signal and acquiring the lock
				if c.shouldStop() {
					c.handleStopRequest(true)
					c.Unlock()
					break
				}
				// every pending stop context was cancelled: report failure
				// to the waiters and keep cycling
				c.handleStopRequest(false)
				c.Unlock()
				continue
			}
			// run the callback and feed its "did work" result back to the
			// ticker (how CycleExecuted uses it depends on the CycleTicker
			// implementation declared elsewhere)
			c.cycleTicker.CycleExecuted(c.cycleCallback(c.shouldAbortCycleCallback))
		}
	}, c.logger)

	c.running = true
}

// Stops running instance, does not block
// Returns channel with final stop result - true / false
//
// If given context is cancelled before it is handled by stop logic, instance is not stopped
// If called multiple times, all contexts have to be cancelled to cancel stop
// (any valid will result in stopping instance)
// stopResult is the same (consistent) for multiple calls
func (c *cycleManager) Stop(ctx context.Context) (stopResult chan bool) {
	c.Lock()
	defer c.Unlock()

	stopResult = make(chan bool, 1)
	if !c.running {
		stopResult <- true
		close(stopResult)
		return stopResult
	}

	// only the first pending stop request needs to wake the cycle goroutine;
	// later requests piggyback on the already-sent signal
	if len(c.stopContexts) == 0 {
		defer func() {
			c.stopSignal <- struct{}{}
		}()
	}
	c.stopContexts = append(c.stopContexts, ctx)
	c.stopResults = append(c.stopResults, stopResult)

	return stopResult
}

// Stops running instance, waits for stop to occur or context to expire (which comes first)
// Returns error if instance was not stopped
func (c *cycleManager) StopAndWait(ctx context.Context) error {
	// if both channels are ready, chan is selected randomly, therefore regardless of
	// channel selected first, second one is also checked
	stop := c.Stop(ctx)
	done := ctx.Done()

	select {
	case <-done:
		select {
		case stopped := <-stop:
			if !stopped {
				return ctx.Err()
			}
		default:
			return ctx.Err()
		}
	case stopped := <-stop:
		if !stopped {
			if ctx.Err() != nil {
				return ctx.Err()
			}
			return fmt.Errorf("failed to stop cycle")
		}
	}
	return nil
}

// Running reports whether the cycle goroutine is active.
func (c *cycleManager) Running() bool {
	c.RLock()
	defer c.RUnlock()

	return c.running
}

// shouldStop reports whether at least one pending stop context is still
// valid. Callers must hold the lock (read or write).
func (c *cycleManager) shouldStop() bool {
	for _, ctx := range c.stopContexts {
		if ctx.Err() == nil {
			return true
		}
	}
	return false
}

// shouldAbortCycleCallback is handed to the CycleCallback so long-running
// work can poll whether a stop was requested and abort early.
func (c *cycleManager) shouldAbortCycleCallback() bool {
	c.RLock()
	defer c.RUnlock()

	return c.shouldStop()
}

// isStopRequested blocks until either a stop signal or a tick arrives and
// reports true only for a stop signal.
func (c *cycleManager) isStopRequested() bool {
	select {
	case <-c.stopSignal:
	case <-c.cycleTicker.C():
		// as stop chan has higher priority,
		// it is checked again in case of ticker was selected over stop if both were ready
		select {
		case <-c.stopSignal:
		default:
			return false
		}
	}
	return true
}

// handleStopRequest delivers the outcome to every waiting Stop caller and
// resets the pending-request state. Callers must hold the write lock.
func (c *cycleManager) handleStopRequest(stopped bool) {
	for _, stopResult := range c.stopResults {
		stopResult <- stopped
		close(stopResult)
	}
	// stopped==true ends the cycle goroutine, so running flips to false;
	// stopped==false means the cycle keeps going
	c.running = !stopped
	c.stopContexts = nil
	c.stopResults = nil
}

// NewManagerNoop returns a CycleManager that only tracks running state and
// never executes any callback.
func NewManagerNoop() CycleManager {
	return &cycleManagerNoop{running: false}
}

type cycleManagerNoop struct {
	running bool
}

func (c *cycleManagerNoop) Start() {
	c.running = true
}

// Stop mirrors cycleManager.Stop's contract: an already-cancelled ctx leaves
// the instance running and reports false.
func (c *cycleManagerNoop) Stop(ctx context.Context) chan bool {
	if !c.running {
		return c.closedChan(true)
	}
	if ctx.Err() != nil {
		return c.closedChan(false)
	}

	c.running = false
	return c.closedChan(true)
}

func (c *cycleManagerNoop) StopAndWait(ctx context.Context) error {
	if <-c.Stop(ctx) {
		return nil
	}
	return ctx.Err()
}

func (c *cycleManagerNoop) Running() bool {
	return c.running
}

// closedChan returns a closed, single-value channel carrying val.
func (c *cycleManagerNoop) closedChan(val bool) chan bool {
	ch := make(chan bool, 1)
	ch <- val
	close(ch)
	return ch
}
diff --git a/platform/dbops/binaries/weaviate-src/entities/cyclemanager/cyclemanager_test.go b/platform/dbops/binaries/weaviate-src/entities/cyclemanager/cyclemanager_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..568bf9e68a71e1409e280c863929ad0ce58fc27d
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/entities/cyclemanager/cyclemanager_test.go
@@ -0,0 +1,409 @@
+//                           _       _
+// __      _____  __ ___   ___  __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+//  \ V  V /  __/ (_| |\ V /| | (_| | ||  __/
+//   \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+// +// CONTACT: hello@weaviate.io +// + +package cyclemanager + +import ( + "context" + "sync" + "testing" + "time" + + "github.com/sirupsen/logrus/hooks/test" + + "github.com/stretchr/testify/assert" +) + +var logger, _ = test.NewNullLogger() + +type cycleCallbackProvider struct { + sync.Mutex + + firstCycleStarted chan struct{} + cycleCallback CycleCallback + results chan string +} + +func newProvider(cycleDuration time.Duration, resultsSize uint) *cycleCallbackProvider { + return newProviderAbortable(cycleDuration, resultsSize, 1) +} + +func newProviderAbortable(cycleDuration time.Duration, resultsSize uint, aborts int) *cycleCallbackProvider { + fs := false + p := &cycleCallbackProvider{} + p.results = make(chan string, resultsSize) + p.firstCycleStarted = make(chan struct{}, 1) + p.cycleCallback = func(shouldAbort ShouldAbortCallback) bool { + p.Lock() + if !fs { + p.firstCycleStarted <- struct{}{} + fs = true + } + p.Unlock() + + if aborts > 1 { + for i := 0; i < aborts; i++ { + time.Sleep(cycleDuration / time.Duration(aborts)) + if shouldAbort() { + return true + } + } + } else { + time.Sleep(cycleDuration) + } + p.results <- "something wonderful..." 
+ return true + } + return p +} + +func TestCycleManager_beforeTimeout(t *testing.T) { + cycleInterval := 5 * time.Millisecond + cycleDuration := 1 * time.Millisecond + stopTimeout := 12 * time.Millisecond + + p := newProvider(cycleDuration, 1) + var cm CycleManager + + t.Run("create new", func(t *testing.T) { + cm = NewManager(NewFixedTicker(cycleInterval), p.cycleCallback, logger) + + assert.False(t, cm.Running()) + }) + + t.Run("start", func(t *testing.T) { + cm.Start() + <-p.firstCycleStarted + + assert.True(t, cm.Running()) + }) + + t.Run("stop", func(t *testing.T) { + timeoutCtx, cancel := context.WithTimeout(context.Background(), stopTimeout) + defer cancel() + + stopResult := cm.Stop(timeoutCtx) + + select { + case <-timeoutCtx.Done(): + t.Fatal(timeoutCtx.Err().Error(), "failed to stop") + case stopped := <-stopResult: + assert.True(t, stopped) + assert.False(t, cm.Running()) + assert.Equal(t, "something wonderful...", <-p.results) + } + }) +} + +func TestCycleManager_beforeTimeoutWithWait(t *testing.T) { + cycleInterval := 5 * time.Millisecond + cycleDuration := 1 * time.Millisecond + stopTimeout := 12 * time.Millisecond + + p := newProvider(cycleDuration, 1) + var cm CycleManager + + t.Run("create new", func(t *testing.T) { + cm = NewManager(NewFixedTicker(cycleInterval), p.cycleCallback, logger) + + assert.False(t, cm.Running()) + }) + + t.Run("start", func(t *testing.T) { + cm.Start() + <-p.firstCycleStarted + + assert.True(t, cm.Running()) + }) + + t.Run("stop", func(t *testing.T) { + timeoutCtx, cancel := context.WithTimeout(context.Background(), stopTimeout) + defer cancel() + + err := cm.StopAndWait(timeoutCtx) + + assert.Nil(t, err) + assert.False(t, cm.Running()) + assert.Equal(t, "something wonderful...", <-p.results) + }) +} + +func TestCycleManager_timeout(t *testing.T) { + cycleInterval := 5 * time.Millisecond + cycleDuration := 20 * time.Millisecond + stopTimeout := 12 * time.Millisecond + + p := newProvider(cycleDuration, 1) + cm := 
NewManager(NewFixedTicker(cycleInterval), p.cycleCallback, logger) + + t.Run("timeout is reached", func(t *testing.T) { + timeoutCtx, cancel := context.WithTimeout(context.Background(), stopTimeout) + defer cancel() + + cm.Start() + <-p.firstCycleStarted + + stopResult := cm.Stop(timeoutCtx) + + select { + case <-timeoutCtx.Done(): + assert.True(t, cm.Running()) + case <-stopResult: + t.Fatal("stopped before timeout") + } + + // make sure it is still running + assert.False(t, <-stopResult) + assert.True(t, cm.Running()) + assert.Equal(t, "something wonderful...", <-p.results) + }) + + t.Run("stop", func(t *testing.T) { + stopResult := cm.Stop(context.Background()) + assert.True(t, <-stopResult) + assert.False(t, cm.Running()) + }) +} + +func TestCycleManager_timeoutWithWait(t *testing.T) { + cycleInterval := 5 * time.Millisecond + cycleDuration := 20 * time.Millisecond + stopTimeout := 12 * time.Millisecond + + p := newProvider(cycleDuration, 1) + cm := NewManager(NewFixedTicker(cycleInterval), p.cycleCallback, logger) + + t.Run("timeout is reached", func(t *testing.T) { + timeoutCtx, cancel := context.WithTimeout(context.Background(), stopTimeout) + defer cancel() + + cm.Start() + <-p.firstCycleStarted + + err := cm.StopAndWait(timeoutCtx) + + assert.NotNil(t, err) + assert.Equal(t, "context deadline exceeded", err.Error()) + assert.True(t, cm.Running()) + assert.Equal(t, "something wonderful...", <-p.results) + }) + + t.Run("stop", func(t *testing.T) { + stopResult := cm.Stop(context.Background()) + assert.True(t, <-stopResult) + assert.False(t, cm.Running()) + }) +} + +func TestCycleManager_doesNotStartMultipleTimes(t *testing.T) { + cycleInterval := 5 * time.Millisecond + cycleDuration := 1 * time.Millisecond + + startCount := 5 + + p := newProvider(cycleDuration, uint(startCount)) + cm := NewManager(NewFixedTicker(cycleInterval), p.cycleCallback, logger) + + t.Run("multiple starts", func(t *testing.T) { + for i := 0; i < startCount; i++ { + cm.Start() + } + 
<-p.firstCycleStarted + + stopResult := cm.Stop(context.Background()) + + assert.True(t, <-stopResult) + assert.False(t, cm.Running()) + // just one result produced + assert.Equal(t, 1, len(p.results)) + }) +} + +func TestCycleManager_doesNotStartMultipleTimesWithWait(t *testing.T) { + cycleInterval := 5 * time.Millisecond + cycleDuration := 1 * time.Millisecond + + startCount := 5 + + p := newProvider(cycleDuration, uint(startCount)) + cm := NewManager(NewFixedTicker(cycleInterval), p.cycleCallback, logger) + + t.Run("multiple starts", func(t *testing.T) { + for i := 0; i < startCount; i++ { + cm.Start() + } + <-p.firstCycleStarted + + err := cm.StopAndWait(context.Background()) + + assert.Nil(t, err) + assert.False(t, cm.Running()) + // just one result produced + assert.Equal(t, 1, len(p.results)) + }) +} + +func TestCycleManager_handlesMultipleStops(t *testing.T) { + cycleInterval := 5 * time.Millisecond + cycleDuration := 1 * time.Millisecond + + stopCount := 5 + + p := newProvider(cycleDuration, 1) + cm := NewManager(NewFixedTicker(cycleInterval), p.cycleCallback, logger) + + t.Run("multiple stops", func(t *testing.T) { + cm.Start() + <-p.firstCycleStarted + + stopResult := make([]chan bool, stopCount) + for i := 0; i < stopCount; i++ { + stopResult[i] = cm.Stop(context.Background()) + } + + for i := 0; i < stopCount; i++ { + assert.True(t, <-stopResult[i]) + } + assert.False(t, cm.Running()) + assert.Equal(t, "something wonderful...", <-p.results) + }) +} + +func TestCycleManager_stopsIfNotAllContextsAreCancelled(t *testing.T) { + cycleInterval := 5 * time.Millisecond + cycleDuration := 1 * time.Millisecond + stopTimeout := 5 * time.Millisecond + + p := newProvider(cycleDuration, 1) + cm := NewManager(NewFixedTicker(cycleInterval), p.cycleCallback, logger) + + t.Run("multiple stops, few cancelled", func(t *testing.T) { + timeout1Ctx, cancel1 := context.WithTimeout(context.Background(), stopTimeout) + timeout2Ctx, cancel2 := 
context.WithTimeout(context.Background(), stopTimeout) + defer cancel1() + defer cancel2() + + cm.Start() + <-p.firstCycleStarted + + stopResult1 := cm.Stop(timeout1Ctx) + stopResult2 := cm.Stop(timeout2Ctx) + stopResult3 := cm.Stop(context.Background()) + + // all produce the same result: cycle was stopped + assert.True(t, <-stopResult1) + assert.True(t, <-stopResult2) + assert.True(t, <-stopResult3) + + assert.False(t, cm.Running()) + assert.Equal(t, "something wonderful...", <-p.results) + }) +} + +func TestCycleManager_doesNotStopIfAllContextsAreCancelled(t *testing.T) { + cycleInterval := 50 * time.Millisecond + cycleDuration := 10 * time.Millisecond + stopTimeout := 50 * time.Millisecond + + p := newProvider(cycleDuration, 1) + cm := NewManager(NewFixedTicker(cycleInterval), p.cycleCallback, logger) + + t.Run("multiple stops, few cancelled", func(t *testing.T) { + timeout1Ctx, cancel1 := context.WithTimeout(context.Background(), stopTimeout) + timeout2Ctx, cancel2 := context.WithTimeout(context.Background(), stopTimeout) + timeout3Ctx, cancel3 := context.WithTimeout(context.Background(), stopTimeout) + defer cancel1() + defer cancel2() + defer cancel3() + + cm.Start() + <-p.firstCycleStarted + + stopResult1 := cm.Stop(timeout1Ctx) + stopResult2 := cm.Stop(timeout2Ctx) + stopResult3 := cm.Stop(timeout3Ctx) + + // all produce the same result: cycle was stopped + assert.False(t, <-stopResult1) + assert.False(t, <-stopResult2) + assert.False(t, <-stopResult3) + + assert.True(t, cm.Running()) + assert.Equal(t, "something wonderful...", <-p.results) + }) + + t.Run("stop", func(t *testing.T) { + stopResult := cm.Stop(context.Background()) + assert.True(t, <-stopResult) + assert.False(t, cm.Running()) + }) +} + +func TestCycleManager_cycleCallbackStoppedDueToFrequentStopChecks(t *testing.T) { + cycleInterval := 50 * time.Millisecond + cycleDuration := 300 * time.Millisecond + stopTimeout := 100 * time.Millisecond + + // despite cycleDuration is 30ms, cycle callback 
checks every 20ms (300/15) if it needs to be stopped + p := newProviderAbortable(cycleDuration, 1, 15) + cm := NewManager(NewFixedTicker(cycleInterval), p.cycleCallback, logger) + + t.Run("cycle function stopped before timeout reached", func(t *testing.T) { + timeoutCtx, cancel := context.WithTimeout(context.Background(), stopTimeout) + defer cancel() + + cm.Start() + <-p.firstCycleStarted + + err := cm.StopAndWait(timeoutCtx) + + assert.Nil(t, err) + assert.False(t, cm.Running()) + assert.Equal(t, 0, len(p.results)) + }) + + t.Run("stop", func(t *testing.T) { + stopResult := cm.Stop(context.Background()) + assert.True(t, <-stopResult) + assert.False(t, cm.Running()) + }) +} + +func TestCycleManager_cycleCallbackNotStoppedDueToRareStopChecks(t *testing.T) { + cycleInterval := 50 * time.Millisecond + cycleDuration := 300 * time.Millisecond + stopTimeout := 100 * time.Millisecond + + // despite cycleDuration is 30ms, cycle callback checks every 150ms (300/2) if it needs to be stopped + p := newProviderAbortable(cycleDuration, 1, 2) + cm := NewManager(NewFixedTicker(cycleInterval), p.cycleCallback, logger) + + t.Run("timeout reached", func(t *testing.T) { + timeoutCtx, cancel := context.WithTimeout(context.Background(), stopTimeout) + defer cancel() + + cm.Start() + <-p.firstCycleStarted + + err := cm.StopAndWait(timeoutCtx) + + assert.NotNil(t, err) + assert.Equal(t, "context deadline exceeded", err.Error()) + assert.True(t, cm.Running()) + assert.Equal(t, "something wonderful...", <-p.results) + }) + + t.Run("stop", func(t *testing.T) { + stopResult := cm.Stop(context.Background()) + assert.True(t, <-stopResult) + assert.False(t, cm.Running()) + }) +} diff --git a/platform/dbops/binaries/weaviate-src/entities/cyclemanager/errors.go b/platform/dbops/binaries/weaviate-src/entities/cyclemanager/errors.go new file mode 100644 index 0000000000000000000000000000000000000000..a4de5030e222989e68c67f95768d25ca60a8608a --- /dev/null +++ 
b/platform/dbops/binaries/weaviate-src/entities/cyclemanager/errors.go @@ -0,0 +1,45 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package cyclemanager + +import ( + "errors" + "fmt" +) + +var ErrorCallbackNotFound = errors.New("callback not found") +var ( + formatActivateCallback = "activating callback '%s' of '%s' failed: %w" + formatDeactivateCallback = "deactivating callback '%s' of '%s' failed: %w" + formatUnregisterCallback = "unregistering callback '%s' of '%s' failed: %w" +) + +func errorActivateCallback(callbackCustomId, callbacksCustomId string, err error) error { + if err == nil { + return nil + } + return fmt.Errorf(formatActivateCallback, callbackCustomId, callbacksCustomId, err) +} + +func errorDeactivateCallback(callbackCustomId, callbacksCustomId string, err error) error { + if err == nil { + return nil + } + return fmt.Errorf(formatDeactivateCallback, callbackCustomId, callbacksCustomId, err) +} + +func errorUnregisterCallback(callbackCustomId, callbacksCustomId string, err error) error { + if err == nil { + return nil + } + return fmt.Errorf(formatUnregisterCallback, callbackCustomId, callbacksCustomId, err) +} diff --git a/platform/dbops/binaries/weaviate-src/entities/cyclemanager/interval.go b/platform/dbops/binaries/weaviate-src/entities/cyclemanager/interval.go new file mode 100644 index 0000000000000000000000000000000000000000..3dc29d051e956de1c9ef559af1551c365e739dd3 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/cyclemanager/interval.go @@ -0,0 +1,90 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. 
All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package cyclemanager + +import "time" + +const ( + compactionMinInterval = 3 * time.Second + compactionMaxInterval = time.Minute + compactionBase = uint(2) + compactionSteps = uint(4) +) + +// 3s . 6.8s .. 14.4s .... 29.6s ........ 60s +func CompactionCycleIntervals() CycleIntervals { + return NewExpIntervals(compactionMinInterval, compactionMaxInterval, + compactionBase, compactionSteps) +} + +// run cycle ticker with fixed minimal interval and let each shard +// take care of its intervals +func CompactionCycleTicker() CycleTicker { + return NewFixedTicker(compactionMinInterval) +} + +const ( + memtableFlushMinInterval = 100 * time.Millisecond + memtableFlushMaxInterval = 5 * time.Second + memtableFlushBase = uint(2) + memtableFlushSteps = uint(5) +) + +// 100ms . 258ms .. 574ms .... 1.206s ........ 2.471s ................ 5s +func MemtableFlushCycleIntervals() CycleIntervals { + return NewExpIntervals(memtableFlushMinInterval, memtableFlushMaxInterval, + memtableFlushBase, memtableFlushSteps) +} + +// run cycle ticker with fixed minimal interval and let each shard +// take care of its intervals +func MemtableFlushCycleTicker() CycleTicker { + return NewFixedTicker(memtableFlushMinInterval) +} + +const ( + geoCommitLoggerMinInterval = 10 * time.Second + geoCommitLoggerMaxInterval = 60 * time.Second + geoCommitLoggerBase = uint(2) + geoCommitLoggerSteps = uint(4) +) + +// 10s . 13.3s .. 20s .... 33.3s ........ 
60s +func GeoCommitLoggerCycleIntervals() CycleIntervals { + return NewExpIntervals(geoCommitLoggerMinInterval, geoCommitLoggerMaxInterval, + geoCommitLoggerBase, geoCommitLoggerSteps) +} + +// run cycle ticker with fixed minimal interval and let each shard +// take care of its intervals +func GeoCommitLoggerCycleTicker() CycleTicker { + return NewFixedTicker(geoCommitLoggerMinInterval) +} + +const ( + hnswCommitLoggerMinInterval = 500 * time.Millisecond + hnswCommitLoggerMaxInterval = 10 * time.Second + hnswCommitLoggerBase = uint(2) + hnswCommitLoggerSteps = uint(5) +) + +// 500ms . 806ms .. 1.42s .... 2.65s ........ 5.1s ................10s +func HnswCommitLoggerCycleIntervals() CycleIntervals { + return NewExpIntervals(hnswCommitLoggerMinInterval, hnswCommitLoggerMaxInterval, + hnswCommitLoggerBase, hnswCommitLoggerSteps) +} + +// run cycle ticker with fixed minimal interval and let each shard +// take care of its intervals +func HnswCommitLoggerCycleTicker() CycleTicker { + return NewFixedTicker(hnswCommitLoggerMinInterval) +} diff --git a/platform/dbops/binaries/weaviate-src/entities/cyclemanager/ticker.go b/platform/dbops/binaries/weaviate-src/entities/cyclemanager/ticker.go new file mode 100644 index 0000000000000000000000000000000000000000..a621df49e4aaaea46ee5d29153136855e89d5b03 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/cyclemanager/ticker.go @@ -0,0 +1,258 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package cyclemanager + +import ( + "time" +) + +// ===== Tickers ===== + +type CycleTicker interface { + Start() + Stop() + C() <-chan time.Time + // called with bool value whenever cycle function finished execution + // true - indicates cycle function actually did some processing + // false - cycle function returned without doing anything + CycleExecuted(executed bool) +} + +type cycleTicker struct { + intervals CycleIntervals + ticker *time.Ticker +} + +func newCycleTicker(intervals CycleIntervals) CycleTicker { + if intervals == nil { + return NewNoopTicker() + } + ticker := time.NewTicker(time.Second) + ticker.Stop() + return &cycleTicker{ticker: ticker, intervals: intervals} +} + +func (t *cycleTicker) Start() { + t.ticker.Reset(t.intervals.Get()) +} + +func (t *cycleTicker) Stop() { + t.ticker.Stop() +} + +func (t *cycleTicker) C() <-chan time.Time { + return t.ticker.C +} + +func (t *cycleTicker) CycleExecuted(executed bool) { + if executed { + t.intervals.Reset() + } else { + t.intervals.Advance() + } + t.ticker.Reset(t.intervals.Get()) +} + +// Creates ticker with fixed interval. Interval is not changed regardless +// of execution results reported by cycle function +// +// If interval <= 0 given, ticker will not fire +func NewFixedTicker(interval time.Duration) CycleTicker { + return newCycleTicker(NewFixedIntervals(interval)) +} + +// Creates ticker with set of interval values. +// Ticker starts with intervals[0] value and with every report of executed "false" +// changes interval value to next one in given array up until last one. +// Report of executed "true" resets interval to interval[0] +// +// If any of intervals given is <= 0 given, ticker will not fire +func NewSeriesTicker(intervals []time.Duration) CycleTicker { + return newCycleTicker(NewSeriesIntervals(intervals)) +} + +// Creates ticker with intervals between minInterval and maxInterval values. 
+// Number of intervals in-between is determined by steps value. +// Ticker starts with minInterval value and with every report of executed "false" +// changes interval value to next one, up until maxInterval. +// Report of executed "true" resets interval to minInterval +// Example: for minInterval = 100ms, maxInterval = 5s, steps = 4, intervals are +// 100ms . 1325ms . 2550ms . 3775ms . 5000ms +// +// If min- or maxInterval is <= 0 or steps = 0 or min > maxInterval, ticker will not fire +func NewLinearTicker(minInterval, maxInterval time.Duration, steps uint) CycleTicker { + return newCycleTicker(NewLinearIntervals(minInterval, maxInterval, steps)) +} + +// Creates ticker with intervals between minInterval and maxInterval values. +// Number of intervals in-between is determined by steps value. +// Ticker starts with minInterval value and with every report of executed "false" +// changes interval value to next one, up until maxInterval. +// Report of executed "true" resets interval to minInterval +// Example: for minInterval = 100ms, maxInterval = 5s, base = 2, steps = 4, intervals are +// 100ms . 427ms .. 1080ms .... 2387ms ........ 
5000ms +// +// If min- or maxInterval is <= 0 or base = 0 or steps = 0 or min > maxInterval, ticker will not fire +func NewExpTicker(minInterval, maxInterval time.Duration, base, steps uint) CycleTicker { + return newCycleTicker(NewExpIntervals(minInterval, maxInterval, base, steps)) +} + +type noopTicker struct { + ch chan time.Time +} + +func NewNoopTicker() CycleTicker { + return &noopTicker{ + ch: make(chan time.Time), + } +} + +func (t *noopTicker) Start() { +} + +func (t *noopTicker) Stop() { +} + +func (t *noopTicker) C() <-chan time.Time { + return t.ch +} + +func (t *noopTicker) CycleExecuted(executed bool) { +} + +// ===== Intervals ===== + +type CycleIntervals interface { + Get() time.Duration + Reset() + Advance() +} + +type fixedIntervals struct { + interval time.Duration +} + +func (i *fixedIntervals) Get() time.Duration { + return i.interval +} + +func (i *fixedIntervals) Reset() { +} + +func (i *fixedIntervals) Advance() { +} + +type seriesIntervals struct { + intervals []time.Duration + pos int +} + +func (i *seriesIntervals) Get() time.Duration { + return i.intervals[i.pos] +} + +func (i *seriesIntervals) Reset() { + i.pos = 0 +} + +func (i *seriesIntervals) Advance() { + if i.pos < len(i.intervals)-1 { + i.pos++ + } +} + +func NewFixedIntervals(interval time.Duration) CycleIntervals { + if interval <= 0 { + return nil + } + return &fixedIntervals{interval: interval} +} + +func NewSeriesIntervals(intervals []time.Duration) CycleIntervals { + if len(intervals) == 0 { + return nil + } + allSame := true + for i := range intervals { + if intervals[i] <= 0 { + return nil + } + if intervals[i] != intervals[0] { + allSame = false + } + } + if allSame { + return &fixedIntervals{interval: intervals[0]} + } + return &seriesIntervals{intervals: intervals, pos: 0} +} + +func NewLinearIntervals(minInterval, maxInterval time.Duration, steps uint) CycleIntervals { + if minInterval <= 0 || maxInterval <= 0 || steps == 0 || minInterval > maxInterval { + return nil 
+ } + if minInterval == maxInterval { + return &fixedIntervals{interval: minInterval} + } + return &seriesIntervals{intervals: linearToIntervals(minInterval, maxInterval, steps), pos: 0} +} + +func NewExpIntervals(minInterval, maxInterval time.Duration, base, steps uint) CycleIntervals { + if minInterval <= 0 || maxInterval <= 0 || base == 0 || steps == 0 || minInterval > maxInterval { + return nil + } + if minInterval == maxInterval { + return &fixedIntervals{interval: minInterval} + } + if base == 1 { + return &seriesIntervals{intervals: linearToIntervals(minInterval, maxInterval, steps), pos: 0} + } + return &seriesIntervals{intervals: expToIntervals(minInterval, maxInterval, base, steps), pos: 0} +} + +// ===== Helper funcs ===== + +func linearToIntervals(minInterval, maxInterval time.Duration, steps uint) []time.Duration { + delta := float64(maxInterval-minInterval) / float64(steps) + floatInterval := float64(minInterval) + + intervals := make([]time.Duration, steps+1) + intervals[0] = minInterval + for i := uint(1); i <= steps; i++ { + floatInterval += delta + intervals[i] = time.Duration(floatInterval) + } + return intervals +} + +func expToIntervals(minInterval, maxInterval time.Duration, base, steps uint) []time.Duration { + sum := uint(1) + power := uint(1) + for i := uint(1); i < steps; i++ { + power *= base + sum += power + } + delta := float64(maxInterval-minInterval) / float64(sum) + floatInterval := float64(minInterval) + floatBase := float64(base) + + intervals := make([]time.Duration, steps+1) + intervals[0] = minInterval + for i := uint(1); i <= steps; i++ { + floatInterval += delta + intervals[i] = time.Duration(floatInterval) + if i < steps { + delta *= floatBase + } + } + return intervals +} diff --git a/platform/dbops/binaries/weaviate-src/entities/cyclemanager/ticker_test.go b/platform/dbops/binaries/weaviate-src/entities/cyclemanager/ticker_test.go new file mode 100644 index 
0000000000000000000000000000000000000000..327794c85bb4d0cd15a50edbd83728dcd97f6997 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/cyclemanager/ticker_test.go @@ -0,0 +1,956 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package cyclemanager + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func Test_FixedIntervalTicker(t *testing.T) { + t.Run("channel is empty before started", func(t *testing.T) { + interval := 10 * time.Millisecond + ticker := NewFixedTicker(10 * time.Millisecond) + + assertNoTick(t, ticker.C()) + + ticker.Start() + time.Sleep(2 * interval) + + assertTick(t, ticker.C()) + }) + + t.Run("interval is fixed", func(t *testing.T) { + interval := 50 * time.Millisecond + tolerance := 25 * time.Millisecond + + ticker := NewFixedTicker(interval) + ticker.Start() + + t0 := time.Now() + val1 := <-ticker.C() + t1 := time.Now() + val2 := <-ticker.C() + t2 := time.Now() + val3 := <-ticker.C() + t3 := time.Now() + val4 := <-ticker.C() + t4 := time.Now() + + ticker.Stop() + + assertTimeDiffEquals(t, val1, val2, interval, tolerance) + assertTimeDiffEquals(t, val2, val3, interval, tolerance) + assertTimeDiffEquals(t, val3, val4, interval, tolerance) + assertTimeDiffEquals(t, t0, t1, interval, tolerance) + assertTimeDiffEquals(t, t1, t2, interval, tolerance) + assertTimeDiffEquals(t, t2, t3, interval, tolerance) + assertTimeDiffEquals(t, t3, t4, interval, tolerance) + }) + + t.Run("interval does not change on CycleExecuted call", func(t *testing.T) { + interval := 50 * time.Millisecond + tolerance := 25 * time.Millisecond + + ticker := NewFixedTicker(interval) + ticker.Start() + + t0 := time.Now() + val1 := <-ticker.C() + t1 := time.Now() + val2 := <-ticker.C() + t2 
:= time.Now() + + ticker.CycleExecuted(false) + + val3 := <-ticker.C() + t3 := time.Now() + val4 := <-ticker.C() + t4 := time.Now() + + ticker.CycleExecuted(true) + + val5 := <-ticker.C() + t5 := time.Now() + val6 := <-ticker.C() + t6 := time.Now() + + ticker.Stop() + + assertTimeDiffEquals(t, val1, val2, interval, tolerance) + assertTimeDiffEquals(t, val2, val3, interval, tolerance) + assertTimeDiffEquals(t, val3, val4, interval, tolerance) + assertTimeDiffEquals(t, val4, val5, interval, tolerance) + assertTimeDiffEquals(t, val5, val6, interval, tolerance) + assertTimeDiffEquals(t, t0, t1, interval, tolerance) + assertTimeDiffEquals(t, t1, t2, interval, tolerance) + assertTimeDiffEquals(t, t2, t3, interval, tolerance) + assertTimeDiffEquals(t, t3, t4, interval, tolerance) + assertTimeDiffEquals(t, t4, t5, interval, tolerance) + assertTimeDiffEquals(t, t5, t6, interval, tolerance) + }) + + t.Run("no ticks after stop", func(t *testing.T) { + interval := 50 * time.Millisecond + tolerance := 25 * time.Millisecond + + ticker := NewFixedTicker(interval) + ticker.Start() + + t0 := time.Now() + val1 := <-ticker.C() + t1 := time.Now() + val2 := <-ticker.C() + t2 := time.Now() + + ticker.Stop() + + tickOccurred := false + ctx, cancel := context.WithTimeout(context.Background(), 2*interval) + defer cancel() + + select { + case <-ticker.C(): + tickOccurred = true + case <-ctx.Done(): + tickOccurred = false + } + + assert.False(t, tickOccurred) + + assertTimeDiffEquals(t, val1, val2, interval, tolerance) + assertTimeDiffEquals(t, t0, t1, interval, tolerance) + assertTimeDiffEquals(t, t1, t2, interval, tolerance) + }) + + t.Run("ticker starts again", func(t *testing.T) { + interval := 50 * time.Millisecond + tolerance := 25 * time.Millisecond + + ticker := NewFixedTicker(interval) + ticker.Start() + + t01 := time.Now() + val1 := <-ticker.C() + t1 := time.Now() + val2 := <-ticker.C() + t2 := time.Now() + + ticker.Stop() + ticker.Start() + + t02 := time.Now() + val3 := 
<-ticker.C() + t3 := time.Now() + val4 := <-ticker.C() + t4 := time.Now() + + ticker.Stop() + + assertTimeDiffEquals(t, val1, val2, interval, tolerance) + assertTimeDiffEquals(t, val3, val4, interval, tolerance) + assertTimeDiffEquals(t, t01, t1, interval, tolerance) + assertTimeDiffEquals(t, t1, t2, interval, tolerance) + assertTimeDiffEquals(t, t02, t3, interval, tolerance) + assertTimeDiffEquals(t, t3, t4, interval, tolerance) + }) + + t.Run("ticker does not run with <= 0 interval", func(t *testing.T) { + interval := time.Duration(0) + + ticker := NewFixedTicker(interval) + ticker.Start() + + tickOccurred := false + ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) + defer cancel() + + select { + case <-ticker.C(): + tickOccurred = true + case <-ctx.Done(): + tickOccurred = false + } + + assert.False(t, tickOccurred) + + ticker.Stop() + }) +} + +func Test_SeriesTicker(t *testing.T) { + t.Run("channel is empty before started", func(t *testing.T) { + intervals := []time.Duration{10 * time.Millisecond, 20 * time.Millisecond} + ticker := NewSeriesTicker(intervals) + + assertNoTick(t, ticker.C()) + + ticker.Start() + time.Sleep(2 * intervals[0]) + + assertTick(t, ticker.C()) + }) + + t.Run("interval is fixed between CycleExecuted calls, advances on false, resets on true", func(t *testing.T) { + intervals := []time.Duration{50 * time.Millisecond, 100 * time.Millisecond, 150 * time.Millisecond} + tolerance := 25 * time.Millisecond + + ticker := NewSeriesTicker(intervals) + ticker.Start() + + t0 := time.Now() + val1 := <-ticker.C() + t1 := time.Now() + val2 := <-ticker.C() + t2 := time.Now() + + ticker.CycleExecuted(false) + + val3 := <-ticker.C() + t3 := time.Now() + val4 := <-ticker.C() + t4 := time.Now() + + ticker.CycleExecuted(false) + + val5 := <-ticker.C() + t5 := time.Now() + val6 := <-ticker.C() + t6 := time.Now() + + ticker.CycleExecuted(false) + + val7 := <-ticker.C() + t7 := time.Now() + val8 := <-ticker.C() + t8 := time.Now() + 
+ ticker.CycleExecuted(true) + + val9 := <-ticker.C() + t9 := time.Now() + val10 := <-ticker.C() + t10 := time.Now() + + ticker.Stop() + + assertTimeDiffEquals(t, val1, val2, intervals[0], tolerance) + assertTimeDiffEquals(t, val2, val3, intervals[1], tolerance) + assertTimeDiffEquals(t, val3, val4, intervals[1], tolerance) + assertTimeDiffEquals(t, val4, val5, intervals[2], tolerance) + assertTimeDiffEquals(t, val5, val6, intervals[2], tolerance) + assertTimeDiffEquals(t, val6, val7, intervals[2], tolerance) + assertTimeDiffEquals(t, val7, val8, intervals[2], tolerance) + assertTimeDiffEquals(t, val8, val9, intervals[0], tolerance) + assertTimeDiffEquals(t, val9, val10, intervals[0], tolerance) + assertTimeDiffEquals(t, t0, t1, intervals[0], tolerance) + assertTimeDiffEquals(t, t1, t2, intervals[0], tolerance) + assertTimeDiffEquals(t, t2, t3, intervals[1], tolerance) + assertTimeDiffEquals(t, t3, t4, intervals[1], tolerance) + assertTimeDiffEquals(t, t4, t5, intervals[2], tolerance) + assertTimeDiffEquals(t, t5, t6, intervals[2], tolerance) + assertTimeDiffEquals(t, t6, t7, intervals[2], tolerance) + assertTimeDiffEquals(t, t7, t8, intervals[2], tolerance) + assertTimeDiffEquals(t, t8, t9, intervals[0], tolerance) + assertTimeDiffEquals(t, t9, t10, intervals[0], tolerance) + }) + + t.Run("no ticks after stop", func(t *testing.T) { + intervals := []time.Duration{50 * time.Millisecond} + tolerance := 25 * time.Millisecond + + ticker := NewSeriesTicker(intervals) + ticker.Start() + + t0 := time.Now() + val1 := <-ticker.C() + t1 := time.Now() + val2 := <-ticker.C() + t2 := time.Now() + + ticker.Stop() + + tickOccurred := false + ctx, cancel := context.WithTimeout(context.Background(), 2*intervals[0]) + defer cancel() + + select { + case <-ticker.C(): + tickOccurred = true + case <-ctx.Done(): + tickOccurred = false + } + + assert.False(t, tickOccurred) + + assertTimeDiffEquals(t, val1, val2, intervals[0], tolerance) + assertTimeDiffEquals(t, t0, t1, intervals[0], 
tolerance) + assertTimeDiffEquals(t, t1, t2, intervals[0], tolerance) + }) + + t.Run("ticker starts again", func(t *testing.T) { + intervals := []time.Duration{50 * time.Millisecond} + tolerance := 25 * time.Millisecond + + ticker := NewSeriesTicker(intervals) + ticker.Start() + + t01 := time.Now() + val1 := <-ticker.C() + t1 := time.Now() + val2 := <-ticker.C() + t2 := time.Now() + + ticker.Stop() + ticker.Start() + + t02 := time.Now() + val3 := <-ticker.C() + t3 := time.Now() + val4 := <-ticker.C() + t4 := time.Now() + + ticker.Stop() + + assertTimeDiffEquals(t, val1, val2, intervals[0], tolerance) + assertTimeDiffEquals(t, val3, val4, intervals[0], tolerance) + assertTimeDiffEquals(t, t01, t1, intervals[0], tolerance) + assertTimeDiffEquals(t, t1, t2, intervals[0], tolerance) + assertTimeDiffEquals(t, t02, t3, intervals[0], tolerance) + assertTimeDiffEquals(t, t3, t4, intervals[0], tolerance) + }) + + t.Run("ticker does not run with invalid params", func(t *testing.T) { + run := func(t *testing.T, ticker CycleTicker) { + ticker.Start() + + tickOccurred := false + ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) + defer cancel() + + select { + case <-ticker.C(): + tickOccurred = true + case <-ctx.Done(): + tickOccurred = false + } + + assert.False(t, tickOccurred) + + ticker.Stop() + } + + t.Run("any interval <= 0", func(t *testing.T) { + ticker := NewSeriesTicker([]time.Duration{50 * time.Millisecond, 0}) + + run(t, ticker) + }) + + t.Run("no intervals", func(t *testing.T) { + ticker := NewSeriesTicker([]time.Duration{}) + + run(t, ticker) + }) + }) +} + +func Test_LinearTicker(t *testing.T) { + t.Run("channel is empty before started", func(t *testing.T) { + minInterval := 10 * time.Millisecond + maxInterval := 50 * time.Millisecond + steps := uint(2) + ticker := NewLinearTicker(minInterval, maxInterval, steps) + + assertNoTick(t, ticker.C()) + + ticker.Start() + time.Sleep(2 * minInterval) + + assertTick(t, ticker.C()) + }) + + 
t.Run("interval is fixed between CycleExecuted calls, advances on false, resets on true", func(t *testing.T) { + ms50 := 50 * time.Millisecond + ms75 := 75 * time.Millisecond + ms100 := 100 * time.Millisecond + tolerance := 25 * time.Millisecond + + minInterval := ms50 + maxInterval := ms100 + steps := uint(2) + + ticker := NewLinearTicker(minInterval, maxInterval, steps) + ticker.Start() + + t0 := time.Now() + val1 := <-ticker.C() + t1 := time.Now() + val2 := <-ticker.C() + t2 := time.Now() + + ticker.CycleExecuted(false) + + val3 := <-ticker.C() + t3 := time.Now() + val4 := <-ticker.C() + t4 := time.Now() + + ticker.CycleExecuted(false) + + val5 := <-ticker.C() + t5 := time.Now() + val6 := <-ticker.C() + t6 := time.Now() + + ticker.CycleExecuted(false) + + val7 := <-ticker.C() + t7 := time.Now() + val8 := <-ticker.C() + t8 := time.Now() + + ticker.CycleExecuted(true) + + val9 := <-ticker.C() + t9 := time.Now() + val10 := <-ticker.C() + t10 := time.Now() + + ticker.Stop() + + assertTimeDiffEquals(t, val1, val2, ms50, tolerance) + assertTimeDiffEquals(t, val2, val3, ms75, tolerance) + assertTimeDiffEquals(t, val3, val4, ms75, tolerance) + assertTimeDiffEquals(t, val4, val5, ms100, tolerance) + assertTimeDiffEquals(t, val5, val6, ms100, tolerance) + assertTimeDiffEquals(t, val6, val7, ms100, tolerance) + assertTimeDiffEquals(t, val7, val8, ms100, tolerance) + assertTimeDiffEquals(t, val8, val9, ms50, tolerance) + assertTimeDiffEquals(t, val9, val10, ms50, tolerance) + assertTimeDiffEquals(t, t0, t1, ms50, tolerance) + assertTimeDiffEquals(t, t1, t2, ms50, tolerance) + assertTimeDiffEquals(t, t2, t3, ms75, tolerance) + assertTimeDiffEquals(t, t3, t4, ms75, tolerance) + assertTimeDiffEquals(t, t4, t5, ms100, tolerance) + assertTimeDiffEquals(t, t5, t6, ms100, tolerance) + assertTimeDiffEquals(t, t6, t7, ms100, tolerance) + assertTimeDiffEquals(t, t7, t8, ms100, tolerance) + assertTimeDiffEquals(t, t8, t9, ms50, tolerance) + assertTimeDiffEquals(t, t9, t10, ms50, 
tolerance) + }) + + t.Run("no ticks after stop", func(t *testing.T) { + minInterval := 50 * time.Millisecond + maxInterval := 100 * time.Millisecond + steps := uint(2) + tolerance := 10 * time.Millisecond + + ticker := NewLinearTicker(minInterval, maxInterval, steps) + ticker.Start() + + t0 := time.Now() + val1 := <-ticker.C() + t1 := time.Now() + val2 := <-ticker.C() + t2 := time.Now() + + ticker.Stop() + + tickOccurred := false + ctx, cancel := context.WithTimeout(context.Background(), 2*minInterval) + defer cancel() + + select { + case <-ticker.C(): + tickOccurred = true + case <-ctx.Done(): + tickOccurred = false + } + + assert.False(t, tickOccurred) + + assertTimeDiffEquals(t, val1, val2, minInterval, tolerance) + assertTimeDiffEquals(t, t0, t1, minInterval, tolerance) + assertTimeDiffEquals(t, t1, t2, minInterval, tolerance) + }) + + t.Run("ticker starts again", func(t *testing.T) { + minInterval := 50 * time.Millisecond + maxInterval := 100 * time.Millisecond + steps := uint(2) + tolerance := 25 * time.Millisecond + + ticker := NewLinearTicker(minInterval, maxInterval, steps) + ticker.Start() + + t01 := time.Now() + val1 := <-ticker.C() + t1 := time.Now() + val2 := <-ticker.C() + t2 := time.Now() + + ticker.Stop() + ticker.Start() + + t02 := time.Now() + val3 := <-ticker.C() + t3 := time.Now() + val4 := <-ticker.C() + t4 := time.Now() + + ticker.Stop() + + assertTimeDiffEquals(t, val1, val2, minInterval, tolerance) + assertTimeDiffEquals(t, val3, val4, minInterval, tolerance) + assertTimeDiffEquals(t, t01, t1, minInterval, tolerance) + assertTimeDiffEquals(t, t1, t2, minInterval, tolerance) + assertTimeDiffEquals(t, t02, t3, minInterval, tolerance) + assertTimeDiffEquals(t, t3, t4, minInterval, tolerance) + }) + + t.Run("ticker does not run with invalid params", func(t *testing.T) { + run := func(t *testing.T, ticker CycleTicker) { + ticker.Start() + + tickOccurred := false + ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) + 
defer cancel() + + select { + case <-ticker.C(): + tickOccurred = true + case <-ctx.Done(): + tickOccurred = false + } + + assert.False(t, tickOccurred) + + ticker.Stop() + } + + t.Run("minInterval <= 0", func(t *testing.T) { + ticker := NewLinearTicker(0, 100*time.Millisecond, 1) + + run(t, ticker) + }) + + t.Run("maxInterval <= 0", func(t *testing.T) { + ticker := NewLinearTicker(50*time.Millisecond, 0, 1) + + run(t, ticker) + }) + + t.Run("steps = 0", func(t *testing.T) { + ticker := NewLinearTicker(50*time.Millisecond, 100*time.Millisecond, 0) + + run(t, ticker) + }) + + t.Run("minInterval > maxInterval", func(t *testing.T) { + ticker := NewLinearTicker(100*time.Millisecond, 50*time.Millisecond, 0) + + run(t, ticker) + }) + }) +} + +func Test_ExpTicker(t *testing.T) { + t.Run("channel is empty before started", func(t *testing.T) { + minInterval := 10 * time.Millisecond + maxInterval := 20 * time.Millisecond + base := uint(2) + steps := uint(2) + ticker := NewExpTicker(minInterval, maxInterval, base, steps) + + assertNoTick(t, ticker.C()) + + ticker.Start() + time.Sleep(2 * minInterval) + + assertTick(t, ticker.C()) + }) + + t.Run("interval is fixed between CycleExecuted calls, advances on false, resets on true", func(t *testing.T) { + ms25 := 25 * time.Millisecond + ms50 := 50 * time.Millisecond + ms100 := 100 * time.Millisecond + tolerance := 25 * time.Millisecond + + minInterval := ms25 + maxInterval := ms100 + base := uint(2) + steps := uint(2) + + ticker := NewExpTicker(minInterval, maxInterval, base, steps) + ticker.Start() + + t0 := time.Now() + val1 := <-ticker.C() + t1 := time.Now() + val2 := <-ticker.C() + t2 := time.Now() + + ticker.CycleExecuted(false) + + val3 := <-ticker.C() + t3 := time.Now() + val4 := <-ticker.C() + t4 := time.Now() + + ticker.CycleExecuted(false) + + val5 := <-ticker.C() + t5 := time.Now() + val6 := <-ticker.C() + t6 := time.Now() + + ticker.CycleExecuted(false) + + val7 := <-ticker.C() + t7 := time.Now() + val8 := <-ticker.C() 
+ t8 := time.Now() + + ticker.CycleExecuted(true) + + val9 := <-ticker.C() + t9 := time.Now() + val10 := <-ticker.C() + t10 := time.Now() + + ticker.Stop() + + assertTimeDiffEquals(t, val1, val2, ms25, tolerance) + assertTimeDiffEquals(t, val2, val3, ms50, tolerance) + assertTimeDiffEquals(t, val3, val4, ms50, tolerance) + assertTimeDiffEquals(t, val4, val5, ms100, tolerance) + assertTimeDiffEquals(t, val5, val6, ms100, tolerance) + assertTimeDiffEquals(t, val6, val7, ms100, tolerance) + assertTimeDiffEquals(t, val7, val8, ms100, tolerance) + assertTimeDiffEquals(t, val8, val9, ms25, tolerance) + assertTimeDiffEquals(t, val9, val10, ms25, tolerance) + assertTimeDiffEquals(t, t0, t1, ms25, tolerance) + assertTimeDiffEquals(t, t1, t2, ms25, tolerance) + assertTimeDiffEquals(t, t2, t3, ms50, tolerance) + assertTimeDiffEquals(t, t3, t4, ms50, tolerance) + assertTimeDiffEquals(t, t4, t5, ms100, tolerance) + assertTimeDiffEquals(t, t5, t6, ms100, tolerance) + assertTimeDiffEquals(t, t6, t7, ms100, tolerance) + assertTimeDiffEquals(t, t7, t8, ms100, tolerance) + assertTimeDiffEquals(t, t8, t9, ms25, tolerance) + assertTimeDiffEquals(t, t9, t10, ms25, tolerance) + }) + + t.Run("no ticks after stop", func(t *testing.T) { + minInterval := 25 * time.Millisecond + maxInterval := 100 * time.Millisecond + base := uint(2) + steps := uint(2) + tolerance := 25 * time.Millisecond + + ticker := NewExpTicker(minInterval, maxInterval, base, steps) + ticker.Start() + + t0 := time.Now() + val1 := <-ticker.C() + t1 := time.Now() + val2 := <-ticker.C() + t2 := time.Now() + + ticker.Stop() + + tickOccurred := false + ctx, cancel := context.WithTimeout(context.Background(), 2*minInterval) + defer cancel() + + select { + case <-ticker.C(): + tickOccurred = true + case <-ctx.Done(): + tickOccurred = false + } + + assert.False(t, tickOccurred) + + assertTimeDiffEquals(t, val1, val2, minInterval, tolerance) + assertTimeDiffEquals(t, t0, t1, minInterval, tolerance) + assertTimeDiffEquals(t, t1, 
t2, minInterval, tolerance) + }) + + t.Run("ticker starts again", func(t *testing.T) { + minInterval := 25 * time.Millisecond + maxInterval := 100 * time.Millisecond + base := uint(2) + steps := uint(2) + tolerance := 25 * time.Millisecond + + ticker := NewExpTicker(minInterval, maxInterval, base, steps) + ticker.Start() + + t01 := time.Now() + val1 := <-ticker.C() + t1 := time.Now() + val2 := <-ticker.C() + t2 := time.Now() + + ticker.Stop() + ticker.Start() + + t02 := time.Now() + val3 := <-ticker.C() + t3 := time.Now() + val4 := <-ticker.C() + t4 := time.Now() + + ticker.Stop() + + assertTimeDiffEquals(t, val1, val2, minInterval, tolerance) + assertTimeDiffEquals(t, val3, val4, minInterval, tolerance) + assertTimeDiffEquals(t, t01, t1, minInterval, tolerance) + assertTimeDiffEquals(t, t1, t2, minInterval, tolerance) + assertTimeDiffEquals(t, t02, t3, minInterval, tolerance) + assertTimeDiffEquals(t, t3, t4, minInterval, tolerance) + }) + + t.Run("ticker does not run with invalid params", func(t *testing.T) { + run := func(t *testing.T, ticker CycleTicker) { + ticker.Start() + + tickOccurred := false + ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) + defer cancel() + + select { + case <-ticker.C(): + tickOccurred = true + case <-ctx.Done(): + tickOccurred = false + } + + assert.False(t, tickOccurred) + + ticker.Stop() + } + + t.Run("minInterval <= 0", func(t *testing.T) { + ticker := NewExpTicker(0, 100*time.Millisecond, 2, 2) + + run(t, ticker) + }) + + t.Run("maxInterval <= 0", func(t *testing.T) { + ticker := NewExpTicker(100*time.Millisecond, 0, 2, 2) + + run(t, ticker) + }) + + t.Run("base == 0", func(t *testing.T) { + ticker := NewExpTicker(25*time.Millisecond, 100*time.Millisecond, 0, 2) + + run(t, ticker) + }) + + t.Run("steps = 0", func(t *testing.T) { + ticker := NewExpTicker(25*time.Millisecond, 100*time.Millisecond, 2, 0) + + run(t, ticker) + }) + + t.Run("minInterval > maxInterval", func(t *testing.T) { + ticker := 
NewExpTicker(100*time.Millisecond, 25*time.Millisecond, 2, 2) + + run(t, ticker) + }) + }) +} + +func Test_LinearToIntervals(t *testing.T) { + type testCase struct { + name string + minInterval time.Duration + maxInterval time.Duration + steps uint + expected []time.Duration + } + + testCases := []testCase{ + { + name: "100 => 5000; steps 2", + minInterval: 100 * time.Millisecond, + maxInterval: 5 * time.Second, + steps: 2, + expected: []time.Duration{ + 100_000_000, + 2_550_000_000, + 5_000_000_000, + }, + }, + { + name: "100 => 5000; steps 3", + minInterval: 100 * time.Millisecond, + maxInterval: 5 * time.Second, + steps: 3, + expected: []time.Duration{ + 100_000_000, + 1_733_333_333, + 3_366_666_666, + 5_000_000_000, + }, + }, + { + name: "100 => 5000; steps 4", + minInterval: 100 * time.Millisecond, + maxInterval: 5 * time.Second, + steps: 4, + expected: []time.Duration{ + 100_000_000, + 1_325_000_000, + 2_550_000_000, + 3_775_000_000, + 5_000_000_000, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + res := linearToIntervals(tc.minInterval, tc.maxInterval, tc.steps) + + assert.ElementsMatch(t, res, tc.expected) + }) + } +} + +func Test_ExpToIntervals(t *testing.T) { + type testCase struct { + name string + minInterval time.Duration + maxInterval time.Duration + base uint + steps uint + expected []time.Duration + } + + testCases := []testCase{ + { + name: "100 => 5000; base 2; steps 2", + minInterval: 100 * time.Millisecond, + maxInterval: 5 * time.Second, + base: 2, + steps: 2, + expected: []time.Duration{ + 100_000_000, + 1_733_333_333, + 5_000_000_000, + }, + }, + { + name: "100 => 5000; base 2; steps 3", + minInterval: 100 * time.Millisecond, + maxInterval: 5 * time.Second, + base: 2, + steps: 3, + expected: []time.Duration{ + 100_000_000, + 800_000_000, + 2_200_000_000, + 5_000_000_000, + }, + }, + { + name: "100 => 5000; base 2; steps 4", + minInterval: 100 * time.Millisecond, + maxInterval: 5 * time.Second, + base: 
2, + steps: 4, + expected: []time.Duration{ + 100_000_000, + 426_666_666, + 1_080_000_000, + 2_386_666_666, + 5_000_000_000, + }, + }, + { + name: "100 => 5000; base 3; steps 2", + minInterval: 100 * time.Millisecond, + maxInterval: 5 * time.Second, + base: 3, + steps: 2, + expected: []time.Duration{ + 100_000_000, + 1_325_000_000, + 5_000_000_000, + }, + }, + { + name: "100 => 5000; base 3; steps 3", + minInterval: 100 * time.Millisecond, + maxInterval: 5 * time.Second, + base: 3, + steps: 3, + expected: []time.Duration{ + 100_000_000, + 476_923_076, + 1_607_692_307, + 5_000_000_000, + }, + }, + { + name: "100 => 5000; base 3; steps 4", + minInterval: 100 * time.Millisecond, + maxInterval: 5 * time.Second, + base: 3, + steps: 4, + expected: []time.Duration{ + 100_000_000, + 222_500_000, + 590_000_000, + 1_692_500_000, + 5_000_000_000, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + res := expToIntervals(tc.minInterval, tc.maxInterval, tc.base, tc.steps) + + assert.ElementsMatch(t, res, tc.expected) + }) + } +} + +func assertTimeDiffEquals(t *testing.T, time1, time2 time.Time, expected time.Duration, tolerance time.Duration) { + diff := time2.Sub(time1) + assert.GreaterOrEqual(t, diff, expected-tolerance) + assert.LessOrEqual(t, diff, expected+tolerance) +} + +func assertTick(t *testing.T, tickCh <-chan time.Time) { + select { + case <-tickCh: + default: + assert.Fail(t, "should have tick") + } +} + +func assertNoTick(t *testing.T, tickCh <-chan time.Time) { + select { + case <-tickCh: + assert.Fail(t, "should not have tick") + default: + } +} diff --git a/platform/dbops/binaries/weaviate-src/entities/deepcopy/models_deepcopy.go b/platform/dbops/binaries/weaviate-src/entities/deepcopy/models_deepcopy.go new file mode 100644 index 0000000000000000000000000000000000000000..8cea5229f010135773e4fd976c905087dcd59e71 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/deepcopy/models_deepcopy.go @@ -0,0 +1,104 @@ +// 
_ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package deepcopy + +import "github.com/weaviate/weaviate/entities/models" + +func Schema(s *models.Schema) *models.Schema { + classes := make([]*models.Class, len(s.Classes)) + for i, class := range s.Classes { + classes[i] = Class(class) + } + + return &models.Schema{Name: s.Name, Maintainer: s.Maintainer, Classes: classes} +} + +func Class(c *models.Class) *models.Class { + if c == nil { + return nil + } + + var properties []*models.Property = nil + if c.Properties != nil { + properties = make([]*models.Property, len(c.Properties)) + for i, prop := range c.Properties { + properties[i] = Prop(prop) + } + } + var replicationConf *models.ReplicationConfig = nil + if c.ReplicationConfig != nil { + replicationConf = &models.ReplicationConfig{ + Factor: c.ReplicationConfig.Factor, + DeletionStrategy: c.ReplicationConfig.DeletionStrategy, + } + } + + return &models.Class{ + Class: c.Class, + Description: c.Description, + ModuleConfig: c.ModuleConfig, + ShardingConfig: c.ShardingConfig, + VectorIndexConfig: c.VectorIndexConfig, + VectorIndexType: c.VectorIndexType, + ReplicationConfig: replicationConf, + Vectorizer: c.Vectorizer, + InvertedIndexConfig: InvertedIndexConfig(c.InvertedIndexConfig), + Properties: properties, + } +} + +func Prop(p *models.Property) *models.Property { + return &models.Property{ + DataType: p.DataType, + Description: p.Description, + ModuleConfig: p.ModuleConfig, + Name: p.Name, + Tokenization: p.Tokenization, + IndexFilterable: ptrBoolCopy(p.IndexFilterable), + IndexSearchable: ptrBoolCopy(p.IndexSearchable), + IndexRangeFilters: ptrBoolCopy(p.IndexRangeFilters), + } +} + +func ptrBoolCopy(ptrBool *bool) *bool { + if ptrBool != nil { + b := *ptrBool + return &b + 
} + return nil +} + +func InvertedIndexConfig(i *models.InvertedIndexConfig) *models.InvertedIndexConfig { + if i == nil { + return nil + } + + var bm25 *models.BM25Config = nil + if i.Bm25 != nil { + bm25 = &models.BM25Config{B: i.Bm25.B, K1: i.Bm25.K1} + } + + var stopwords *models.StopwordConfig = nil + if i.Stopwords != nil { + stopwords = &models.StopwordConfig{Additions: i.Stopwords.Additions, Preset: i.Stopwords.Preset, Removals: i.Stopwords.Removals} + } + + return &models.InvertedIndexConfig{ + Bm25: bm25, + CleanupIntervalSeconds: i.CleanupIntervalSeconds, + IndexNullState: i.IndexNullState, + IndexPropertyLength: i.IndexPropertyLength, + IndexTimestamps: i.IndexTimestamps, + Stopwords: stopwords, + UsingBlockMaxWAND: i.UsingBlockMaxWAND, + } +} diff --git a/platform/dbops/binaries/weaviate-src/entities/diskio/files.go b/platform/dbops/binaries/weaviate-src/entities/diskio/files.go new file mode 100644 index 0000000000000000000000000000000000000000..98abb1db2554df6c36557c09d728fd02077c4c3e --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/diskio/files.go @@ -0,0 +1,78 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package diskio + +import ( + "errors" + "io" + "os" +) + +func FileExists(file string) (bool, error) { + _, err := os.Stat(file) + if os.IsNotExist(err) { + return false, nil + } + if err != nil { + return false, err + } + return true, nil +} + +func IsDirEmpty(dir string) (bool, error) { + f, err := os.Open(dir) + if err != nil { + return false, err + } + defer f.Close() + + _, err = f.Readdirnames(1) + if errors.Is(err, io.EOF) { + return true, nil + } + + return false, err +} + +func Fsync(path string) error { + f, err := os.Open(path) + if err != nil { + return err + } + defer f.Close() + + return f.Sync() +} + +// GetFileWithSizes gets all files in a directory including their filesize +func GetFileWithSizes(dirPath string) (map[string]int64, error) { + dir, err := os.Open(dirPath) + if err != nil { + return nil, err + } + defer dir.Close() + + // Read all entries at once including file sizes + fileInfos, err := dir.Readdir(-1) + if err != nil { + return nil, err + } + + fileSizes := make(map[string]int64) + for _, info := range fileInfos { + if !info.IsDir() { // Skip directories + fileSizes[info.Name()] = info.Size() + } + } + + return fileSizes, nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/diskio/metered_reader.go b/platform/dbops/binaries/weaviate-src/entities/diskio/metered_reader.go new file mode 100644 index 0000000000000000000000000000000000000000..450c137f8bffc2ca4901aa8f54b3e05307f71a85 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/diskio/metered_reader.go @@ -0,0 +1,68 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package diskio + +import ( + "time" +) + +type Reader interface { + Read(p []byte) (n int, err error) + ReadAt(p []byte, off int64) (n int, err error) +} + +type MeteredReaderCallback func(read int64, nanoseconds int64) + +type MeteredReader struct { + file Reader + cb MeteredReaderCallback +} + +// Read passes the read through to the underlying reader. On a successful read, +// it will trigger the attached callback and provide it with metrics. If no +// callback is set, it will ignore it. +func (m *MeteredReader) Read(p []byte) (n int, err error) { + start := time.Now() + n, err = m.file.Read(p) + took := time.Since(start).Nanoseconds() + if err != nil { + return + } + + if m.cb != nil { + m.cb(int64(n), took) + } + + return +} + +// ReadAt passes the read through to the underlying reader. On a successful read, +// it will trigger the attached callback and provide it with metrics. If no +// callback is set, it will ignore it. +func (m *MeteredReader) ReadAt(p []byte, off int64) (n int, err error) { + start := time.Now() + n, err = m.file.ReadAt(p, off) + took := time.Since(start).Nanoseconds() + if err != nil { + return + } + + if m.cb != nil { + m.cb(int64(n), took) + } + + return +} + +func NewMeteredReader(file Reader, cb MeteredReaderCallback) *MeteredReader { + return &MeteredReader{file: file, cb: cb} +} diff --git a/platform/dbops/binaries/weaviate-src/entities/diskio/metered_reader_test.go b/platform/dbops/binaries/weaviate-src/entities/diskio/metered_reader_test.go new file mode 100644 index 0000000000000000000000000000000000000000..921e0798598705a5f71830d48ee05493ec53b741 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/diskio/metered_reader_test.go @@ -0,0 +1,81 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. 
All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package diskio + +import ( + "bytes" + "io" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestMeteredReader(t *testing.T) { + data := make([]byte, 128) + + t.Run("happy path - with callback", func(t *testing.T) { + var ( + read int64 + took int64 + ) + + cb := func(r int64, n int64) { + read = r + took = n + } + + mr := NewMeteredReader(bytes.NewReader(data), cb) + + target := make([]byte, 128) + n, err := mr.Read(target) + + require.Nil(t, err) + assert.Equal(t, int64(n), read) + assert.Greater(t, took, int64(0)) + }) + + t.Run("happy path - without callback", func(t *testing.T) { + mr := NewMeteredReader(bytes.NewReader(data), nil) + + target := make([]byte, 128) + _, err := mr.Read(target) + require.Nil(t, err) + }) + + t.Run("with an error", func(t *testing.T) { + var ( + read int64 + took int64 + ) + + cb := func(r int64, n int64) { + read = r + took = n + } + + underlying := bytes.NewReader(data) + // provoke EOF error by seeking to end of data + underlying.Seek(128, 0) + mr := NewMeteredReader(underlying, cb) + + target := make([]byte, 128) + _, err := mr.Read(target) + + assert.Equal(t, io.EOF, err) + + // callback should not have been called in error cases, so we expect to + // read initial values + assert.Equal(t, int64(0), read) + assert.Equal(t, int64(0), took) + }) +} diff --git a/platform/dbops/binaries/weaviate-src/entities/diskio/metered_writer_block_size.go b/platform/dbops/binaries/weaviate-src/entities/diskio/metered_writer_block_size.go new file mode 100644 index 0000000000000000000000000000000000000000..cfdcd141e017e86a937574734e383e3899441328 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/diskio/metered_writer_block_size.go @@ -0,0 +1,59 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ 
|_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package diskio + +import ( + "io" +) + +type MeteredWriterCallback func(written int64) + +type WriterSeekerCloser interface { + io.Writer + io.Seeker +} + +type MeteredWriter struct { + w WriterSeekerCloser + cb MeteredWriterCallback +} + +func (m *MeteredWriter) Write(p []byte) (n int, err error) { + n, err = m.w.Write(p) + if err != nil { + return + } + + if m.cb != nil { + m.cb(int64(n)) + } + + return +} + +func (m *MeteredWriter) Seek(offset int64, whence int) (int64, error) { + n, err := m.w.Seek(offset, whence) + if m.cb != nil { + m.cb(0) + } + + return n, err +} + +var _ = WriterSeekerCloser(&MeteredWriter{}) + +func NewMeteredWriter(w WriterSeekerCloser, cb MeteredWriterCallback) *MeteredWriter { + return &MeteredWriter{ + w: w, + cb: cb, + } +} diff --git a/platform/dbops/binaries/weaviate-src/entities/dto/dto.go b/platform/dbops/binaries/weaviate-src/entities/dto/dto.go new file mode 100644 index 0000000000000000000000000000000000000000..d9fe90e3b14eb9e84931d77a9801ced14967fc49 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/dto/dto.go @@ -0,0 +1,160 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package dto + +import ( + "encoding/json" + "fmt" + + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/search" + "github.com/weaviate/weaviate/entities/searchparams" +) + +type GroupParams struct { + Strategy string + Force float32 +} + +type TargetCombinationType int + +const ( + Sum TargetCombinationType = iota + Average + Minimum + ManualWeights + RelativeScore +) + +// no weights are set for default, needs to be added if this is changed to something else +const DefaultTargetCombinationType = Minimum + +type TargetCombination struct { + // just one of these can be set, precedence in order + Type TargetCombinationType + Weights []float32 +} + +type GetParams struct { + Filters *filters.LocalFilter + ClassName string + Pagination *filters.Pagination + Cursor *filters.Cursor + Sort []filters.Sort + Properties search.SelectProperties + NearVector *searchparams.NearVector + NearObject *searchparams.NearObject + KeywordRanking *searchparams.KeywordRanking + HybridSearch *searchparams.HybridSearch + GroupBy *searchparams.GroupBy + TargetVector string + TargetVectorCombination *TargetCombination + Group *GroupParams + ModuleParams map[string]interface{} + AdditionalProperties additional.Properties + ReplicationProperties *additional.ReplicationProperties + Tenant string + IsRefOrigin bool // is created by ref filter + Alias string // used only to transfer alias passed in search request, not used for actual search +} + +type Embedding interface { + []float32 | [][]float32 +} + +func IsVectorEmpty(vector models.Vector) (bool, error) { + switch v := vector.(type) { + case nil: + return true, nil + case models.C11yVector: + return len(v) == 0, nil + case []float32: + return len(v) == 0, nil + case [][]float32: + return len(v) == 0, nil + default: + return false, fmt.Errorf("unrecognized vector 
type: %T", vector) + } +} + +func GetVectors(in models.Vectors) (map[string][]float32, map[string][][]float32, error) { + var vectors map[string][]float32 + var multiVectors map[string][][]float32 + if len(in) > 0 { + for targetVector, vector := range in { + switch vec := vector.(type) { + case []interface{}: + if vectors == nil { + vectors = make(map[string][]float32) + } + asVectorArray := make([]float32, len(vec)) + for i := range vec { + switch v := vec[i].(type) { + case json.Number: + asFloat, err := v.Float64() + if err != nil { + return nil, nil, fmt.Errorf("parse []interface{} as vector for target vector: %s: %w", targetVector, err) + } + asVectorArray[i] = float32(asFloat) + case float64: + asVectorArray[i] = float32(v) + case float32: + asVectorArray[i] = v + default: + return nil, nil, fmt.Errorf("parse []interface{} as vector for target vector: %s, unrecognized type: %T", targetVector, vec[i]) + } + } + vectors[targetVector] = asVectorArray + case [][]interface{}: + if multiVectors == nil { + multiVectors = make(map[string][][]float32) + } + asMultiVectorArray := make([][]float32, len(vec)) + for i := range vec { + asMultiVectorArray[i] = make([]float32, len(vec[i])) + for j := range vec[i] { + switch v := vec[i][j].(type) { + case json.Number: + asFloat, err := v.Float64() + if err != nil { + return nil, nil, fmt.Errorf("parse []interface{} as multi vector for target vector: %s: %w", targetVector, err) + } + asMultiVectorArray[i][j] = float32(asFloat) + case float64: + asMultiVectorArray[i][j] = float32(v) + case float32: + asMultiVectorArray[i][j] = v + default: + return nil, nil, fmt.Errorf("parse []interface{} as multi vector for target vector: %s, unrecognized type: %T", targetVector, vec[i]) + } + } + } + multiVectors[targetVector] = asMultiVectorArray + case []float32: + if vectors == nil { + vectors = make(map[string][]float32) + } + vectors[targetVector] = vec + case [][]float32: + if multiVectors == nil { + multiVectors = 
make(map[string][][]float32) + } + multiVectors[targetVector] = vec + default: + return nil, nil, fmt.Errorf("unrecognized vector type: %T for target vector: %s", vector, targetVector) + } + } + } + return vectors, multiVectors, nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/dto/dto_test.go b/platform/dbops/binaries/weaviate-src/entities/dto/dto_test.go new file mode 100644 index 0000000000000000000000000000000000000000..5552b8cae76c47f0252e577d76b3d986d7910417 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/dto/dto_test.go @@ -0,0 +1,82 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package dto + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/models" +) + +func TestVectors(t *testing.T) { + var vectors []models.Vector + + t.Run("insert", func(t *testing.T) { + regularVector := []float32{0.1, 0.2, 0.3} + multiVector := [][]float32{{0.1, 0.2, 0.3}, {0.1, 0.2, 0.3}} + + vectors = append(vectors, regularVector) + vectors = append(vectors, multiVector) + + require.Len(t, vectors, 2) + assert.IsType(t, []float32{}, vectors[0]) + assert.IsType(t, [][]float32{}, vectors[1]) + }) + + t.Run("type check", func(t *testing.T) { + isMultiVectorFn := func(in models.Vector) (bool, error) { + switch in.(type) { + case []float32: + return false, nil + case [][]float32: + return true, nil + default: + return false, fmt.Errorf("unsupported type: %T", in) + } + } + isMultiVector, err := isMultiVectorFn(vectors[0]) + require.NoError(t, err) + assert.False(t, isMultiVector) + isMultiVector, err = isMultiVectorFn(vectors[1]) + require.NoError(t, err) + assert.True(t, isMultiVector) + }) + + t.Run("as vector 
slice", func(t *testing.T) { + searchVectors := [][]float32{{0.1, 0.2}, {0.11, 0.22}, {0.111, 0.222}} + + var asVectorSlice []models.Vector + for _, vector := range searchVectors { + asVectorSlice = append(asVectorSlice, vector) + } + + require.Len(t, asVectorSlice, len(searchVectors)) + assert.ElementsMatch(t, searchVectors, asVectorSlice) + }) + + t.Run("case to vector types", func(t *testing.T) { + searchVectors := []models.Vector{[]float32{0.1, 0.2}, [][]float32{{0.11, 0.22}, {0.111, 0.222, 0.333}}, []float32{0.111, 0.222}} + + regularVector, ok := searchVectors[0].([]float32) + require.True(t, ok) + assert.Len(t, regularVector, 2) + + multiVector, ok := searchVectors[1].([][]float32) + require.True(t, ok) + require.Len(t, multiVector, 2) + assert.Len(t, multiVector[0], 2) + assert.Len(t, multiVector[1], 3) + }) +} diff --git a/platform/dbops/binaries/weaviate-src/entities/errorcompounder/compounder.go b/platform/dbops/binaries/weaviate-src/entities/errorcompounder/compounder.go new file mode 100644 index 0000000000000000000000000000000000000000..4480fafe846936113dd48a0d248989fdf6f7be50 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/errorcompounder/compounder.go @@ -0,0 +1,64 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package errorcompounder + +import ( + "fmt" + "strings" + + "github.com/pkg/errors" +) + +type ErrorCompounder struct { + errors []error +} + +func New() *ErrorCompounder { + return &ErrorCompounder{} +} + +func (ec *ErrorCompounder) Add(err error) { + if err != nil { + ec.errors = append(ec.errors, err) + } +} + +func (ec *ErrorCompounder) Addf(msg string, args ...interface{}) { + ec.errors = append(ec.errors, fmt.Errorf(msg, args...)) +} + +func (ec *ErrorCompounder) AddWrap(err error, wrapMsg ...string) { + if err != nil { + ec.errors = append(ec.errors, errors.Wrap(err, wrapMsg[0])) + } +} + +func (ec *ErrorCompounder) ToError() error { + if len(ec.errors) == 0 { + return nil + } + + var msg strings.Builder + for i, err := range ec.errors { + if i != 0 { + msg.WriteString(", ") + } + + msg.WriteString(err.Error()) + } + + return errors.New(msg.String()) +} + +func (ec *ErrorCompounder) Len() int { + return len(ec.errors) +} diff --git a/platform/dbops/binaries/weaviate-src/entities/errorcompounder/compounder_thread_safe.go b/platform/dbops/binaries/weaviate-src/entities/errorcompounder/compounder_thread_safe.go new file mode 100644 index 0000000000000000000000000000000000000000..31a4895f2165a4937ccfcdae1cadca7fb21c2ca9 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/errorcompounder/compounder_thread_safe.go @@ -0,0 +1,69 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package errorcompounder + +import ( + "fmt" + "strings" + "sync" + + "github.com/pkg/errors" +) + +type SafeErrorCompounder struct { + sync.Mutex + errors []error +} + +func NewSafe() *SafeErrorCompounder { + return &SafeErrorCompounder{} +} + +func (ec *SafeErrorCompounder) Add(err error) { + ec.Lock() + defer ec.Unlock() + if err != nil { + ec.errors = append(ec.errors, err) + } +} + +func (ec *SafeErrorCompounder) Addf(msg string, args ...interface{}) { + ec.Lock() + defer ec.Unlock() + ec.errors = append(ec.errors, fmt.Errorf(msg, args...)) +} + +func (ec *SafeErrorCompounder) ToError() error { + ec.Lock() + defer ec.Unlock() + if len(ec.errors) == 0 { + return nil + } + + var msg strings.Builder + for i, err := range ec.errors { + if i != 0 { + msg.WriteString(", ") + } + + msg.WriteString(err.Error()) + } + + return errors.New(msg.String()) +} + +func (ec *SafeErrorCompounder) First() error { + if len(ec.errors) == 0 { + return nil + } + return ec.errors[0] +} diff --git a/platform/dbops/binaries/weaviate-src/entities/errors/error_group_wrapper.go b/platform/dbops/binaries/weaviate-src/entities/errors/error_group_wrapper.go new file mode 100644 index 0000000000000000000000000000000000000000..a96493adac1bae380bb7f3c3585bbfc13b454d49 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/errors/error_group_wrapper.go @@ -0,0 +1,136 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package errors + +import ( + "context" + "fmt" + "os" + "runtime" + "runtime/debug" + + "github.com/sirupsen/logrus" + + entcfg "github.com/weaviate/weaviate/entities/config" + entsentry "github.com/weaviate/weaviate/entities/sentry" + "golang.org/x/sync/errgroup" +) + +// ErrorGroupWrapper is a custom type that embeds errgroup.Group. +type ErrorGroupWrapper struct { + *errgroup.Group + returnError error + variables []interface{} + logger logrus.FieldLogger + deferFunc func(localVars ...interface{}) + cancelCtx func() + routineCounter int + includeStack bool + limitSet int +} + +// NewErrorGroupWrapper creates a new ErrorGroupWrapper. +func NewErrorGroupWrapper(logger logrus.FieldLogger, vars ...interface{}) *ErrorGroupWrapper { + egw := &ErrorGroupWrapper{ + Group: new(errgroup.Group), + returnError: nil, + variables: vars, + logger: logger, + + // this dummy func makes it safe to call cancelCtx even if a wrapper without a + // context is used. Avoids a nil check later on. 
+ cancelCtx: func() {}, + } + egw.setDeferFunc() + + if entcfg.Enabled(os.Getenv("LOG_STACK_TRACE_ON_ERROR_GROUP")) { + egw.includeStack = true + } + return egw +} + +// NewErrorGroupWithContextWrapper creates a new ErrorGroupWrapper +func NewErrorGroupWithContextWrapper(logger logrus.FieldLogger, ctx context.Context, vars ...interface{}) (*ErrorGroupWrapper, context.Context) { + ctx, cancel := context.WithCancel(ctx) + eg, ctx := errgroup.WithContext(ctx) + egw := &ErrorGroupWrapper{ + Group: eg, + returnError: nil, + variables: vars, + logger: logger, + cancelCtx: cancel, + } + egw.setDeferFunc() + + if entcfg.Enabled(os.Getenv("LOG_STACK_TRACE_ON_ERROR_GROUP")) { + egw.includeStack = true + } + + return egw, ctx +} + +func (egw *ErrorGroupWrapper) setDeferFunc() { + disable := entcfg.Enabled(os.Getenv("DISABLE_RECOVERY_ON_PANIC")) + if !disable { + egw.deferFunc = func(localVars ...interface{}) { + if r := recover(); r != nil { + entsentry.Recover(r) + egw.logger.WithField("panic", r).Errorf("Recovered from panic: %v, local variables %v, additional localVars %v\n", r, localVars, egw.variables) + debug.PrintStack() + egw.returnError = fmt.Errorf("panic occurred: %v", r) + egw.cancelCtx() + } + } + } else { + egw.deferFunc = func(localVars ...interface{}) {} + } +} + +// Go overrides the Go method to add panic recovery logic. +func (egw *ErrorGroupWrapper) Go(f func() error, localVars ...interface{}) { + egw.Group.Go(func() error { + defer egw.deferFunc(localVars) + return f() + }) + egw.routineCounter++ +} + +// SetLimit overrides the SetLimit method to set a limit on the number of +// goroutines and track what's set. +func (egw *ErrorGroupWrapper) SetLimit(limit int) { + egw.Group.SetLimit(limit) + egw.limitSet = limit +} + +// Wait waits for all goroutines to finish and returns the first non-nil error. 
+func (egw *ErrorGroupWrapper) Wait() error { + logBase := egw.logger.WithFields(logrus.Fields{ + "action": "error_group_wait_initiated", + "jobs_count": egw.routineCounter, + "limit": egw.limitSet, + }) + + if egw.includeStack { + stackBuf := make([]byte, 4096) + n := runtime.Stack(stackBuf, false) + stackBuf = stackBuf[:n] + + logBase = logBase.WithField("stack", string(stackBuf)) + } + + logBase.Debugf("Waiting for %d jobs to finish with limit %d", egw.routineCounter, egw.limitSet) + + if err := egw.Group.Wait(); err != nil { + return err + } + return egw.returnError +} diff --git a/platform/dbops/binaries/weaviate-src/entities/errors/error_group_wrapper_test.go b/platform/dbops/binaries/weaviate-src/entities/errors/error_group_wrapper_test.go new file mode 100644 index 0000000000000000000000000000000000000000..d274f3b084f27843fbb8e2290128db81ac56f347 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/errors/error_group_wrapper_test.go @@ -0,0 +1,112 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package errors + +import ( + "bytes" + "context" + "os" + "testing" + + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" +) + +func TestErrorGroupWrapper(t *testing.T) { + cases := []struct { + env string + set bool + }{ + {env: "something", set: true}, + {env: "something", set: false}, + {env: "", set: true}, + {env: "false", set: true}, + // {env: "true", set: true}, this will NOT recover the panic, but we cannot recover on a higher level and there + // is no way to have the test succeed + } + for _, tt := range cases { + t.Run(tt.env, func(t *testing.T) { + var buf bytes.Buffer + log := logrus.New() + log.SetOutput(&buf) + defer func() { + log.SetOutput(os.Stderr) + }() + + eg := NewErrorGroupWrapper(log) + if tt.set { + t.Setenv("DISABLE_RECOVERY_ON_PANIC", tt.env) + } + eg.Go(func() error { + slice := make([]string, 0) + slice[0] = "test" + return nil + }) + err := eg.Wait() + assert.Contains(t, buf.String(), "Recovered from panic") + assert.Contains(t, err.Error(), "index out of range") + }) + } +} + +// The assumption is that the context returned by the group will be cancelled +// as soon as one goroutine panics +func TestErrorGroupWrapperWithContext_Panics(t *testing.T) { + var buf bytes.Buffer + log := logrus.New() + log.SetOutput(&buf) + defer func() { + log.SetOutput(os.Stderr) + }() + + ctx := context.Background() + eg, ctx := NewErrorGroupWithContextWrapper(log, ctx) + + eg.Go(func() error { + slice := make([]string, 0) + slice[0] = "test" + return nil + }) + + // if the wrapper wouldn't cancel the context this line would block forever + <-ctx.Done() + assert.NotNil(t, ctx.Err()) + + err := eg.Wait() + assert.Contains(t, buf.String(), "Recovered from panic") + assert.Contains(t, err.Error(), "index out of range") +} + +// The assumption is that when the goroutine doesn't panic, the context +// does not get canceled +func TestErrorGroupWrapperWithContext_DoesNotPanic(t *testing.T) { + var buf 
bytes.Buffer + log := logrus.New() + log.SetOutput(&buf) + defer func() { + log.SetOutput(os.Stderr) + }() + + ctx := context.Background() + eg, ctx := NewErrorGroupWithContextWrapper(log, ctx) + + eg.Go(func() error { + slice := make([]string, 1) + slice[0] = "test" + return nil + }) + + assert.Nil(t, ctx.Err()) + err := eg.Wait() + assert.Nil(t, err) + assert.NotContains(t, buf.String(), "Recovered from panic") +} diff --git a/platform/dbops/binaries/weaviate-src/entities/errors/errors_graphql.go b/platform/dbops/binaries/weaviate-src/entities/errors/errors_graphql.go new file mode 100644 index 0000000000000000000000000000000000000000..ac4b23914c3ce5694bae7f3d3bf02cd6b1c1018d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/errors/errors_graphql.go @@ -0,0 +1,66 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package errors + +import ( + "errors" + "fmt" +) + +type ErrGraphQLUser struct { + err error + queryType, className string +} + +func (e ErrGraphQLUser) Error() string { + return e.err.Error() +} + +func (e ErrGraphQLUser) OriginalError() error { + return e.err +} + +func (e ErrGraphQLUser) QueryType() string { + return e.queryType +} + +func (e ErrGraphQLUser) ClassName() string { + return e.className +} + +func NewErrGraphQLUser(err error, operation, className string) ErrGraphQLUser { + return ErrGraphQLUser{err, operation, className} +} + +type ErrRateLimit struct { + err error +} + +func (e ErrRateLimit) Error() string { + return e.err.Error() +} + +func NewErrRateLimit() ErrRateLimit { + return ErrRateLimit{errors.New("429 Too many requests")} +} + +type ErrLockConnector struct { + err error +} + +func (e ErrLockConnector) Error() string { + return e.err.Error() +} + +func NewErrLockConnector(err error) ErrLockConnector { + return ErrLockConnector{fmt.Errorf("could not acquire lock: %w", err)} +} diff --git a/platform/dbops/binaries/weaviate-src/entities/errors/errors_http.go b/platform/dbops/binaries/weaviate-src/entities/errors/errors_http.go new file mode 100644 index 0000000000000000000000000000000000000000..8dc45bedb1e54e854ad6cfe6f1bf41cf375d28d6 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/errors/errors_http.go @@ -0,0 +1,63 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package errors + +type ErrUnprocessable struct { + err error +} + +func (e ErrUnprocessable) Error() string { + return e.err.Error() +} + +func NewErrUnprocessable(err error) ErrUnprocessable { + return ErrUnprocessable{err} +} + +type ErrNotFound struct { + err error +} + +func (e ErrNotFound) Error() string { + if e.err != nil { + return e.err.Error() + } + return "" +} + +func NewErrNotFound(err error) ErrNotFound { + return ErrNotFound{err} +} + +type ErrContextExpired struct { + err error +} + +func (e ErrContextExpired) Error() string { + return e.err.Error() +} + +func NewErrContextExpired(err error) ErrContextExpired { + return ErrContextExpired{err} +} + +type ErrInternal struct { + err error +} + +func (e ErrInternal) Error() string { + return e.err.Error() +} + +func NewErrInternal(err error) ErrInternal { + return ErrInternal{err} +} diff --git a/platform/dbops/binaries/weaviate-src/entities/errors/errors_multitenancy.go b/platform/dbops/binaries/weaviate-src/entities/errors/errors_multitenancy.go new file mode 100644 index 0000000000000000000000000000000000000000..e1a527fc6dd7deec6c09b5011861d29a3e324103 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/errors/errors_multitenancy.go @@ -0,0 +1,25 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package errors + +import ( + "github.com/pkg/errors" +) + +var ( + ErrTenantNotActive = errors.New("tenant not active") + ErrTenantNotFound = errors.New("tenant not found") +) + +func IsTenantNotFound(err error) bool { + return errors.Is(errors.Unwrap(err), ErrTenantNotFound) +} diff --git a/platform/dbops/binaries/weaviate-src/entities/errors/errors_remote_client.go b/platform/dbops/binaries/weaviate-src/entities/errors/errors_remote_client.go new file mode 100644 index 0000000000000000000000000000000000000000..23723ab2783aebfdbbfffea562f69917cb1449bd --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/errors/errors_remote_client.go @@ -0,0 +1,72 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package errors + +import ( + "fmt" +) + +type ErrOpenHttpRequest struct { + err error +} + +func (e ErrOpenHttpRequest) Error() string { + return e.err.Error() +} + +func NewErrOpenHttpRequest(err error) ErrOpenHttpRequest { + return ErrOpenHttpRequest{fmt.Errorf("open http request: %w", err)} +} + +type ErrSendHttpRequest struct { + err error +} + +func (e ErrSendHttpRequest) Error() string { + return e.err.Error() +} + +// Unwrap returns the original inner error, so it can be +// used with errors.Is and errors.As +func (e ErrSendHttpRequest) Unwrap() error { + return e.err +} + +func NewErrSendHttpRequest(err error) ErrSendHttpRequest { + return ErrSendHttpRequest{fmt.Errorf("send http request: %w", err)} +} + +type ErrUnexpectedStatusCode struct { + err error +} + +func (e ErrUnexpectedStatusCode) Error() string { + return e.err.Error() +} + +func NewErrUnexpectedStatusCode(statusCode int, body []byte) ErrUnexpectedStatusCode { + return ErrUnexpectedStatusCode{ + err: 
fmt.Errorf("unexpected status code %d (%s)", statusCode, body), + } +} + +type ErrUnmarshalBody struct { + err error +} + +func (e ErrUnmarshalBody) Error() string { + return e.err.Error() +} + +func NewErrUnmarshalBody(err error) ErrUnmarshalBody { + return ErrUnmarshalBody{fmt.Errorf("unmarshal body: %w", err)} +} diff --git a/platform/dbops/binaries/weaviate-src/entities/errors/go_wrapper.go b/platform/dbops/binaries/weaviate-src/entities/errors/go_wrapper.go new file mode 100644 index 0000000000000000000000000000000000000000..1444114ae9f2a81a1a76048fbf5f65846180af6d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/errors/go_wrapper.go @@ -0,0 +1,61 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package errors + +import ( + "fmt" + "os" + "runtime/debug" + + entcfg "github.com/weaviate/weaviate/entities/config" + entsentry "github.com/weaviate/weaviate/entities/sentry" + + "github.com/sirupsen/logrus" +) + +func GoWrapper(f func(), logger logrus.FieldLogger) { + go func() { + defer func() { + if !entcfg.Enabled(os.Getenv("DISABLE_RECOVERY_ON_PANIC")) { + if r := recover(); r != nil { + logger.Errorf("Recovered from panic: %v", r) + entsentry.Recover(r) + debug.PrintStack() + } + } + }() + f() + }() +} + +func GoWrapperWithErrorCh(f func(), logger logrus.FieldLogger) chan error { + errChan := make(chan error, 1) + go func() { + defer func() { + if !entcfg.Enabled(os.Getenv("DISABLE_RECOVERY_ON_PANIC")) { + if r := recover(); r != nil { + logger.Errorf("Recovered from panic: %v", r) + entsentry.Recover(r) + debug.PrintStack() + errChan <- fmt.Errorf("panic occurred: %v", r) + } + } + }() + f() + errChan <- nil + }() + return errChan +} + +func GoWrapperWithBlock(f func(), logger logrus.FieldLogger) error { 
+ return <-GoWrapperWithErrorCh(f, logger) +} diff --git a/platform/dbops/binaries/weaviate-src/entities/errors/go_wrapper_test.go b/platform/dbops/binaries/weaviate-src/entities/errors/go_wrapper_test.go new file mode 100644 index 0000000000000000000000000000000000000000..170bea4c8d693eaf8f51ca6a98f463a2e3b2bc45 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/errors/go_wrapper_test.go @@ -0,0 +1,139 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package errors + +import ( + "bytes" + "os" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" +) + +func TestGoWrapper(t *testing.T) { + cases := []struct { + env string + set bool + }{ + {env: "something", set: true}, + {env: "something", set: false}, + {env: "", set: true}, + {env: "false", set: true}, + // {env: "true", set: true}, // this will NOT recover the panic, but we cannot recover on a higher level and + // there is no way to have the test succeed + } + for _, tt := range cases { + t.Run(tt.env, func(t *testing.T) { + var buf bytes.Buffer + log := logrus.New() + log.SetOutput(&buf) + + if tt.set { + t.Setenv("DISABLE_RECOVERY_ON_PANIC", tt.env) + } + wg := sync.WaitGroup{} + wg.Add(1) + GoWrapper(func() { + defer wg.Done() + panic("test") + }, log) + wg.Wait() + + // wait for the recover function in the wrapper to write to the log. This is done after the defer function + // in the function we pass to the wrapper has been called and we have no way to block until it is done. 
+ // Note that this does not matter in normal operation as we do not depend on the log being written to + time.Sleep(100 * time.Millisecond) + log.SetOutput(os.Stderr) + assert.Contains(t, buf.String(), "Recovered from panic") + }) + } +} + +func TestGoWrapperWithBlock(t *testing.T) { + cases := []struct { + env string + set bool + }{ + {env: "something", set: true}, + {env: "something", set: false}, + {env: "", set: true}, + {env: "false", set: true}, + } + for _, tt := range cases { + t.Run(tt.env, func(t *testing.T) { + var buf bytes.Buffer + log := logrus.New() + log.SetOutput(&buf) + + if tt.set { + t.Setenv("DISABLE_RECOVERY_ON_PANIC", tt.env) + } + err := GoWrapperWithBlock(func() { + panic("test panic") + }, log) + assert.NotNil(t, err) + + // wait for the recover function in the wrapper to write to the log. This is done after the defer function + // in the function we pass to the wrapper has been called and we have no way to block until it is done. + // Note that this does not matter in normal operation as we do not depend on the log being written to + time.Sleep(100 * time.Millisecond) + log.SetOutput(os.Stderr) + assert.Contains(t, buf.String(), "Recovered from panic") + assert.Contains(t, buf.String(), "test panic") + }) + } +} + +func TestGoWrapperWithErrorCh(t *testing.T) { + cases := []struct { + env string + set bool + }{ + {env: "something", set: true}, + {env: "something", set: false}, + {env: "", set: true}, + {env: "false", set: true}, + } + for _, tt := range cases { + t.Run(tt.env, func(t *testing.T) { + var buf bytes.Buffer + log := logrus.New() + log.SetOutput(&buf) + + if tt.set { + t.Setenv("DISABLE_RECOVERY_ON_PANIC", tt.env) + } + + var a atomic.Bool + a.Store(true) + + errCh := GoWrapperWithErrorCh(func() { + time.Sleep(50 * time.Millisecond) + a.Store(false) + panic("test panic") + }, log) + + assert.True(t, a.Load()) + assert.NotNil(t, errCh) + assert.NotNil(t, <-errCh) + assert.False(t, a.Load()) + + log.SetOutput(os.Stderr) + 
assert.Contains(t, buf.String(), "Recovered from panic") + assert.Contains(t, buf.String(), "test panic") + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/entities/filters/analtyics_props.go b/platform/dbops/binaries/weaviate-src/entities/filters/analtyics_props.go new file mode 100644 index 0000000000000000000000000000000000000000..34aeb76f36a62d380f6de456ca0fada43828ed07 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/filters/analtyics_props.go @@ -0,0 +1,20 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package filters + +// AnalyticsProps will be extracted from the graphql args of analytics +// functions (such as Meta and Aggregate). They tell the connectors whether +// to use an external analytics engine if such an engine is configured. +type AnalyticsProps struct { + UseAnalyticsEngine bool + ForceRecalculate bool +} diff --git a/platform/dbops/binaries/weaviate-src/entities/filters/consts.go b/platform/dbops/binaries/weaviate-src/entities/filters/consts.go new file mode 100644 index 0000000000000000000000000000000000000000..02107a95e51a1a7447af29f9082e278a8e85796c --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/filters/consts.go @@ -0,0 +1,27 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package filters + +const ( + InternalPropBackwardsCompatID = "id" + InternalPropID = "_id" + InternalNullIndex = "_nullState" + InternalPropertyLength = "_propertyLength" + InternalPropCreationTimeUnix = "_creationTimeUnix" + InternalPropLastUpdateTimeUnix = "_lastUpdateTimeUnix" +) + +// NotNullState is encoded as 0, so it can be read with the IsNull operator and value false. +const ( + InternalNotNullState = iota + InternalNullState +) diff --git a/platform/dbops/binaries/weaviate-src/entities/filters/cursor.go b/platform/dbops/binaries/weaviate-src/entities/filters/cursor.go new file mode 100644 index 0000000000000000000000000000000000000000..a1c88e02132d21440a45480f97d6ceb5bf9f6485 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/filters/cursor.go @@ -0,0 +1,37 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package filters + +type Cursor struct { + After string `json:"after"` + Limit int `json:"limit"` +} + +// ExtractCursorFromArgs gets the limit key out of a map. 
Not specific to +// GQL, but can be used from GQL +func ExtractCursorFromArgs(args map[string]interface{}) (*Cursor, error) { + after, afterOk := args["after"] + + limit, limitOk := args["limit"] + if !limitOk || limit.(int) < 0 { + limit = LimitFlagNotSet + } + + if !afterOk && !limitOk || after == nil { + return nil, nil + } + + return &Cursor{ + After: after.(string), + Limit: limit.(int), + }, nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/filters/cursor_validator.go b/platform/dbops/binaries/weaviate-src/entities/filters/cursor_validator.go new file mode 100644 index 0000000000000000000000000000000000000000..c919cd8e57dbb22cd92d484ba1360b46827dab11 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/filters/cursor_validator.go @@ -0,0 +1,49 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package filters + +import ( + "fmt" + "strings" + + "github.com/google/uuid" + "github.com/pkg/errors" + "github.com/weaviate/weaviate/entities/schema" +) + +func ValidateCursor(className schema.ClassName, cursor *Cursor, offset int, filters *LocalFilter, sort []Sort) error { + if className == "" { + return fmt.Errorf("class parameter cannot be empty") + } + if offset > 0 || filters != nil || sort != nil { + var params []string + if offset > 0 { + params = append(params, "offset") + } + if filters != nil { + params = append(params, "where") + } + if sort != nil { + params = append(params, "sort") + } + return fmt.Errorf("%s cannot be set with after and limit parameters", strings.Join(params, ",")) + } + if cursor.After != "" { + if _, err := uuid.Parse(cursor.After); err != nil { + return errors.Wrapf(err, "after parameter '%s' is not a valid uuid", cursor.After) + } + } + if cursor.Limit < 0 { + return fmt.Errorf("limit parameter must be set") + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/filters/filters.go b/platform/dbops/binaries/weaviate-src/entities/filters/filters.go new file mode 100644 index 0000000000000000000000000000000000000000..46fe13b7daca88df6d0c9b1f29f5d19fa9701c21 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/filters/filters.go @@ -0,0 +1,164 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package filters + +import ( + "encoding/json" + + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" +) + +type Operator int + +const ( + OperatorEqual Operator = iota + 1 + OperatorNotEqual + OperatorGreaterThan + OperatorGreaterThanEqual + OperatorLessThan + OperatorLessThanEqual + OperatorAnd + OperatorOr + OperatorWithinGeoRange + OperatorLike + OperatorIsNull + ContainsAny + ContainsAll + ContainsNone + OperatorNot +) + +func (o Operator) OnValue() bool { + switch o { + case OperatorEqual, + OperatorNotEqual, + OperatorGreaterThan, + OperatorGreaterThanEqual, + OperatorLessThan, + OperatorLessThanEqual, + OperatorWithinGeoRange, + OperatorLike, + OperatorIsNull, + ContainsAny, + ContainsAll, + ContainsNone: + return true + + case OperatorOr, OperatorAnd, OperatorNot: + return false + + default: + return false + } +} + +func (o Operator) Name() string { + switch o { + case OperatorEqual: + return "Equal" + case OperatorNotEqual: + return "NotEqual" + case OperatorGreaterThan: + return "GreaterThan" + case OperatorGreaterThanEqual: + return "GreaterThanEqual" + case OperatorLessThan: + return "LessThan" + case OperatorLessThanEqual: + return "LessThanEqual" + case OperatorAnd: + return "And" + case OperatorOr: + return "Or" + case OperatorWithinGeoRange: + return "WithinGeoRange" + case OperatorLike: + return "Like" + case OperatorIsNull: + return "IsNull" + case ContainsAny: + return "ContainsAny" + case ContainsAll: + return "ContainsAll" + case ContainsNone: + return "ContainsNone" + case OperatorNot: + return "Not" + default: + panic("Unknown operator") + } +} + +func (o Operator) IsContains() bool { + switch o { + case ContainsAny, ContainsAll, ContainsNone: + return true + default: + return false + } +} + +type LocalFilter struct { + Root *Clause `json:"root"` +} + +type Value struct { + Value interface{} `json:"value"` + Type schema.DataType `json:"type"` +} + +func (v 
*Value) UnmarshalJSON(data []byte) error { + type Alias Value + aux := struct { + *Alias + }{ + Alias: (*Alias)(v), + } + + err := json.Unmarshal(data, &aux) + if err != nil { + return err + } + + asFloat, ok := v.Value.(float64) + if v.Type == schema.DataTypeInt && ok { + v.Value = int(asFloat) + } + + if v.Type == schema.DataTypeGeoCoordinates { + temp := struct { + Value GeoRange `json:"value"` + }{} + + if err := json.Unmarshal(data, &temp); err != nil { + return err + } + v.Value = temp.Value + } + + return nil +} + +type Clause struct { + Operator Operator `json:"operator"` + On *Path `json:"on"` + Value *Value `json:"value"` + Operands []Clause `json:"operands"` +} + +// GeoRange to be used with fields of type GeoCoordinates. Identifies a point +// and a maximum distance from that point. +type GeoRange struct { + *models.GeoCoordinates + Distance float32 `json:"distance"` +} diff --git a/platform/dbops/binaries/weaviate-src/entities/filters/filters_serialization_test.go b/platform/dbops/binaries/weaviate-src/entities/filters/filters_serialization_test.go new file mode 100644 index 0000000000000000000000000000000000000000..3c6c1bf396196e6ae69ad18ee084182e1b6feeb3 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/filters/filters_serialization_test.go @@ -0,0 +1,82 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package filters + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" +) + +func TestSerializeValue(t *testing.T) { + t.Run("with a float value", func(t *testing.T) { + before := Value{ + Value: float64(3), + Type: schema.DataTypeNumber, + } + + bytes, err := json.Marshal(before) + require.Nil(t, err) + + var after Value + err = json.Unmarshal(bytes, &after) + require.Nil(t, err) + + assert.Equal(t, before, after) + }) + + t.Run("with an int value", func(t *testing.T) { + before := Value{ + Value: int(3), + Type: schema.DataTypeInt, + } + + bytes, err := json.Marshal(before) + require.Nil(t, err) + + var after Value + err = json.Unmarshal(bytes, &after) + require.Nil(t, err) + + assert.Equal(t, before, after) + }) + + t.Run("with a geo value", func(t *testing.T) { + before := Value{ + Value: GeoRange{ + GeoCoordinates: &models.GeoCoordinates{ + Latitude: ptFloat32(51.51), + Longitude: ptFloat32(-0.09), + }, + Distance: 2000, + }, + Type: schema.DataTypeGeoCoordinates, + } + + bytes, err := json.Marshal(before) + require.Nil(t, err) + + var after Value + err = json.Unmarshal(bytes, &after) + require.Nil(t, err) + + assert.Equal(t, before, after) + }) +} + +func ptFloat32(v float32) *float32 { + return &v +} diff --git a/platform/dbops/binaries/weaviate-src/entities/filters/filters_test.go b/platform/dbops/binaries/weaviate-src/entities/filters/filters_test.go new file mode 100644 index 0000000000000000000000000000000000000000..f39dfa80c4f83b93f0324b3b19612ee394ab20d4 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/filters/filters_test.go @@ -0,0 +1,50 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// 
Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package filters + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestOperators(t *testing.T) { + type test struct { + op Operator + expectedName string + expectedOnValue bool + } + + tests := []test{ + {op: OperatorEqual, expectedName: "Equal", expectedOnValue: true}, + {op: OperatorNotEqual, expectedName: "NotEqual", expectedOnValue: true}, + {op: OperatorGreaterThan, expectedName: "GreaterThan", expectedOnValue: true}, + {op: OperatorGreaterThanEqual, expectedName: "GreaterThanEqual", expectedOnValue: true}, + {op: OperatorLessThanEqual, expectedName: "LessThanEqual", expectedOnValue: true}, + {op: OperatorLessThan, expectedName: "LessThan", expectedOnValue: true}, + {op: OperatorWithinGeoRange, expectedName: "WithinGeoRange", expectedOnValue: true}, + {op: OperatorLike, expectedName: "Like", expectedOnValue: true}, + {op: OperatorAnd, expectedName: "And", expectedOnValue: false}, + {op: OperatorOr, expectedName: "Or", expectedOnValue: false}, + {op: ContainsAny, expectedName: "ContainsAny", expectedOnValue: true}, + {op: ContainsAll, expectedName: "ContainsAll", expectedOnValue: true}, + {op: ContainsNone, expectedName: "ContainsNone", expectedOnValue: true}, + {op: OperatorNot, expectedName: "Not", expectedOnValue: false}, + } + + for _, test := range tests { + t.Run(test.expectedName, func(t *testing.T) { + assert.Equal(t, test.expectedName, test.op.Name(), "name must match") + assert.Equal(t, test.expectedOnValue, test.op.OnValue(), "onValue must match") + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/entities/filters/filters_validator.go b/platform/dbops/binaries/weaviate-src/entities/filters/filters_validator.go new file mode 100644 index 0000000000000000000000000000000000000000..099b32a1925abaedf1e6f0ffba4377e8c4b18ac0 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/filters/filters_validator.go @@ 
-0,0 +1,289 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package filters + +import ( + "fmt" + "strings" + + "github.com/pkg/errors" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" +) + +// string and stringArray are deprecated as of v1.19 +// however they are allowed in filters and considered aliases +// for text and textArray +var deprecatedDataTypeAliases map[schema.DataType]schema.DataType = map[schema.DataType]schema.DataType{ + schema.DataTypeString: schema.DataTypeText, + schema.DataTypeStringArray: schema.DataTypeTextArray, +} + +func ValidateFilters(authorizedGetClass func(string) (*models.Class, error), filters *LocalFilter) error { + if filters == nil { + return errors.New("empty where") + } + cw := newClauseWrapper(filters.Root) + if err := validateClause(authorizedGetClass, cw); err != nil { + return err + } + cw.updateClause() + return nil +} + +func validateClause(authorizedGetClass func(string) (*models.Class, error), cw *clauseWrapper) error { + // check if nested + if cw.getOperands() != nil { + var errs []error + + for i, child := range cw.getOperands() { + if err := validateClause(authorizedGetClass, child); err != nil { + errs = append(errs, errors.Wrapf(err, "child operand at position %d", i)) + } + } + + if len(errs) > 0 { + return mergeErrs(errs) + } + return nil + } + + // validate current + + className := cw.getClassName() + propName := cw.getPropertyName() + + if IsInternalProperty(propName) { + return validateInternalPropertyClause(propName, cw) + } + + class, err := authorizedGetClass(className.String()) + if err != nil { + return err + } + if class == nil { + return errors.Errorf("class %q does not exist in schema", + className) + } + + 
propNameTyped := string(propName) + lengthPropName, isPropLengthFilter := schema.IsPropertyLength(propNameTyped, 0) + if isPropLengthFilter { + propName = schema.PropertyName(lengthPropName) + } + + prop, err := schema.GetPropertyByName(class, propName.String()) + if err != nil { + return err + } + + if cw.getOperator() == OperatorIsNull { + if !cw.isType(schema.DataTypeBoolean) { + return errors.Errorf("operator IsNull requires a booleanValue, got %q instead", + cw.getValueNameFromType()) + } + return nil + } + + if isPropLengthFilter { + if !cw.isType(schema.DataTypeInt) { + return errors.Errorf("Filtering for property length requires IntValue, got %q instead", + cw.getValueNameFromType()) + } + switch op := cw.getOperator(); op { + case OperatorEqual, OperatorNotEqual, OperatorGreaterThan, OperatorGreaterThanEqual, + OperatorLessThan, OperatorLessThanEqual: + // ok + default: + return errors.Errorf("Filtering for property length supports operators (not) equal and greater/less than (equal), got %q instead", + op) + } + if val := cw.getValue(); val.(int) < 0 { + return errors.Errorf("Can only filter for positive property length got %v instead", val) + } + return nil + } + + if isUUIDType(prop.DataType[0]) { + return validateUUIDType(propName, cw) + } + + if schema.IsRefDataType(prop.DataType) { + // bit of an edge case, directly on refs (i.e. not on a primitive prop of a + // ref) we only allow valueInt which is what's used to count references + if cw.isType(schema.DataTypeInt) { + return nil + } + return errors.Errorf("Property %q is a ref prop to the class %q. Only "+ + "\"valueInt\" can be used on a ref prop directly to count the number of refs. "+ + "Or did you mean to filter on a primitive prop of the referenced class? 
"+ + "In this case make sure your path contains 3 elements in the form of "+ + "[, , ]", + propName, prop.DataType[0]) + } else if baseType, ok := schema.IsArrayType(schema.DataType(prop.DataType[0])); ok { + if !cw.isType(baseType) { + return errors.Errorf("data type filter cannot use %q on type %q, use %q instead", + cw.getValueNameFromType(), + schema.DataType(prop.DataType[0]), + valueNameFromDataType(baseType)) + } + } else if !cw.isType(schema.DataType(prop.DataType[0])) { + return errors.Errorf("data type filter cannot use %q on type %q, use %q instead", + cw.getValueNameFromType(), + schema.DataType(prop.DataType[0]), + valueNameFromDataType(schema.DataType(prop.DataType[0]))) + } + + return nil +} + +func valueNameFromDataType(dt schema.DataType) string { + return "value" + strings.ToUpper(string(dt[0])) + string(dt[1:]) +} + +func mergeErrs(errs []error) error { + msgs := make([]string, len(errs)) + for i, err := range errs { + msgs[i] = err.Error() + } + + return errors.Errorf("%s", strings.Join(msgs, ", ")) +} + +func IsInternalProperty(propName schema.PropertyName) bool { + switch propName { + case InternalPropBackwardsCompatID, + InternalPropID, + InternalPropCreationTimeUnix, + InternalPropLastUpdateTimeUnix: + return true + default: + return false + } +} + +func validateInternalPropertyClause(propName schema.PropertyName, cw *clauseWrapper) error { + switch propName { + case InternalPropBackwardsCompatID, InternalPropID: + if cw.isType(schema.DataTypeText) { + return nil + } + return errors.Errorf( + `using ["_id"] to filter by uuid: must use "valueText" to specify the id`) + case InternalPropCreationTimeUnix, InternalPropLastUpdateTimeUnix: + if cw.isType(schema.DataTypeDate) || cw.isType(schema.DataTypeText) { + return nil + } + return errors.Errorf( + `using ["%s"] to filter by timestamp: must use "valueText" or "valueDate"`, propName) + default: + return errors.Errorf("unsupported internal property: %s", propName) + } +} + +func 
isUUIDType(dtString string) bool { + dt := schema.DataType(dtString) + return dt == schema.DataTypeUUID || dt == schema.DataTypeUUIDArray +} + +func validateUUIDType(propName schema.PropertyName, cw *clauseWrapper) error { + if cw.isType(schema.DataTypeText) { + return validateUUIDOperators(propName, cw) + } + + return fmt.Errorf("property %q is of type \"uuid\" or \"uuid[]\": "+ + "specify uuid as string using \"valueText\"", propName) +} + +func validateUUIDOperators(propName schema.PropertyName, cw *clauseWrapper) error { + op := cw.getOperator() + + switch op { + case OperatorEqual, OperatorNotEqual, + OperatorLessThan, OperatorLessThanEqual, + OperatorGreaterThan, OperatorGreaterThanEqual, + ContainsAll, ContainsAny, ContainsNone: + return nil + default: + return fmt.Errorf("operator %q cannot be used on uuid/uuid[] props", op.Name()) + } +} + +type clauseWrapper struct { + clause *Clause + origType schema.DataType + aliasType schema.DataType + operands []*clauseWrapper +} + +func newClauseWrapper(clause *Clause) *clauseWrapper { + w := &clauseWrapper{clause: clause} + if clause.Operands != nil { + w.operands = make([]*clauseWrapper, len(clause.Operands)) + for i := range clause.Operands { + w.operands[i] = newClauseWrapper(&clause.Operands[i]) + } + } else { + w.origType = clause.Value.Type + w.aliasType = deprecatedDataTypeAliases[clause.Value.Type] + } + return w +} + +func (w *clauseWrapper) isType(dt schema.DataType) bool { + if w.operands != nil { + return false + } + return dt == w.origType || (dt == w.aliasType && w.aliasType != "") +} + +func (w *clauseWrapper) getValueNameFromType() string { + return valueNameFromDataType(w.origType) +} + +func (w *clauseWrapper) getOperands() []*clauseWrapper { + return w.operands +} + +func (w *clauseWrapper) getOperator() Operator { + return w.clause.Operator +} + +func (w *clauseWrapper) getValue() interface{} { + return w.clause.Value.Value +} + +func (w *clauseWrapper) getClassName() schema.ClassName { + if 
w.operands != nil { + return "" + } + return w.clause.On.GetInnerMost().Class +} + +func (w *clauseWrapper) getPropertyName() schema.PropertyName { + if w.operands != nil { + return "" + } + return w.clause.On.GetInnerMost().Property +} + +func (w *clauseWrapper) updateClause() { + if w.operands != nil { + for i := range w.operands { + w.operands[i].updateClause() + } + } else { + if w.aliasType != "" { + w.clause.Value.Type = w.aliasType + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/entities/filters/filters_validator_test.go b/platform/dbops/binaries/weaviate-src/entities/filters/filters_validator_test.go new file mode 100644 index 0000000000000000000000000000000000000000..c2fdc000b3d7bd84d37a3d0d9dad597b9445ba3a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/filters/filters_validator_test.go @@ -0,0 +1,322 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package filters + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" +) + +type fakeFinder struct { + mock.Mock +} + +func (f *fakeFinder) ReadOnlyClass(name string) (*models.Class, error) { + args := f.Called(name) + model := args.Get(0) + if model == nil { + return nil, nil + } + return model.(*models.Class), nil +} + +func TestValidateIsNullOperator(t *testing.T) { + tests := []struct { + name string + schemaType schema.DataType + valid bool + }{ + { + name: "Valid datatype", + schemaType: schema.DataTypeBoolean, + valid: true, + }, + { + name: "Invalid datatype (array)", + schemaType: schema.DataTypeBooleanArray, + valid: false, + }, + { + name: "Invalid datatype (text)", + schemaType: schema.DataTypeText, + valid: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cl := Clause{ + Operator: OperatorIsNull, + Value: &Value{Value: true, Type: tt.schemaType}, + On: &Path{Class: "Car", Property: "horsepower"}, + } + + f := &fakeFinder{} + f.On("ReadOnlyClass", mock.Anything).Return( + &models.Class{ + Class: "Car", + Properties: []*models.Property{ + {Name: "modelName", DataType: schema.DataTypeText.PropString(), Tokenization: models.PropertyTokenizationWhitespace}, + {Name: "manufacturerName", DataType: schema.DataTypeText.PropString(), Tokenization: models.PropertyTokenizationWhitespace}, + {Name: "horsepower", DataType: []string{"int"}}, + }, + }, + ) + err := validateClause(f.ReadOnlyClass, newClauseWrapper(&cl)) + if tt.valid { + require.Nil(t, err) + } else { + require.NotNil(t, err) + } + }) + } +} + +func TestValidatePropertyLength(t *testing.T) { + tests := []struct { + name string + schemaType schema.DataType + valid bool + operator Operator + value int + }{ + { + name: "Valid datatype and 
operator", + schemaType: schema.DataTypeInt, + valid: true, + operator: OperatorEqual, + value: 0, + }, + { + name: "Invalid datatype (array)", + schemaType: schema.DataTypeBooleanArray, + valid: false, + operator: OperatorEqual, + value: 1, + }, + { + name: "Invalid datatype (text)", + schemaType: schema.DataTypeText, + valid: false, + operator: OperatorEqual, + value: 2, + }, + { + name: "Invalid operator (Or)", + schemaType: schema.DataTypeText, + valid: false, + operator: OperatorOr, + value: 10, + }, + { + name: "Invalid value (negative)", + schemaType: schema.DataTypeText, + valid: false, + operator: OperatorEqual, + value: -5, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cl := Clause{ + Operator: OperatorEqual, + Value: &Value{Value: tt.value, Type: tt.schemaType}, + On: &Path{Class: "Car", Property: "len(horsepower)"}, + } + + f := &fakeFinder{} + f.On("ReadOnlyClass", mock.Anything).Return( + &models.Class{ + Class: "Car", + Properties: []*models.Property{ + {Name: "horsepower", DataType: []string{"int"}}, + }, + }, + ) + err := validateClause(f.ReadOnlyClass, newClauseWrapper(&cl)) + if tt.valid { + require.Nil(t, err) + } else { + require.NotNil(t, err) + } + }) + } +} + +func TestValidateUUIDFilter(t *testing.T) { + tests := []struct { + name string + schemaType schema.DataType + valid bool + operator Operator + value int + }{ + { + name: "Valid datatype and operator", + schemaType: schema.DataTypeText, + valid: true, + operator: OperatorEqual, + value: 0, + }, + { + name: "Wrong data type (int)", + schemaType: schema.DataTypeInt, + valid: false, + operator: OperatorEqual, + value: 0, + }, + { + name: "Wrong operator (Like)", + schemaType: schema.DataTypeText, + valid: false, + operator: OperatorLike, + value: 0, + }, + + { + name: "[deprecated string] Valid datatype and operator", + schemaType: schema.DataTypeString, + valid: true, + operator: OperatorEqual, + value: 0, + }, + } + + for _, tt := range tests { + 
t.Run(tt.name, func(t *testing.T) { + for _, prop := range []schema.PropertyName{"my_id", "my_idz"} { + cl := Clause{ + Operator: tt.operator, + Value: &Value{Value: tt.value, Type: tt.schemaType}, + On: &Path{Class: "Car", Property: prop}, + } + + f := &fakeFinder{} + f.On("ReadOnlyClass", mock.Anything).Return( + &models.Class{ + Class: "Car", + Properties: []*models.Property{ + {Name: "my_id", DataType: []string{string(schema.DataTypeUUID)}}, + {Name: "my_idz", DataType: []string{string(schema.DataTypeUUIDArray)}}, + }, + }, + ) + err := validateClause(f.ReadOnlyClass, newClauseWrapper(&cl)) + if tt.valid { + require.Nil(t, err) + } else { + require.NotNil(t, err) + } + } + }) + } +} + +func TestClauseWrapper(t *testing.T) { + type testCase struct { + name string + valueType schema.DataType + requiredType schema.DataType + + expectedValid bool + expectedValueName string + } + + testCases := []testCase{ + { + name: "string accepted where text is required", + valueType: schema.DataTypeString, + requiredType: schema.DataTypeText, + expectedValid: true, + expectedValueName: "valueString", + }, + { + name: "text accepted where text is required", + valueType: schema.DataTypeText, + requiredType: schema.DataTypeText, + expectedValid: true, + expectedValueName: "valueText", + }, + { + name: "string[] accepted where text[] is required", + valueType: schema.DataTypeStringArray, + requiredType: schema.DataTypeTextArray, + expectedValid: true, + expectedValueName: "valueString[]", + }, + { + name: "text[] accepted where text[] is required", + valueType: schema.DataTypeTextArray, + requiredType: schema.DataTypeTextArray, + expectedValid: true, + expectedValueName: "valueText[]", + }, + { + name: "text not accepted where string is required", + valueType: schema.DataTypeText, + requiredType: schema.DataTypeString, + expectedValid: false, + expectedValueName: "valueText", + }, + { + name: "text[] not accepted where string[] is required", + valueType: schema.DataTypeTextArray, + 
requiredType: schema.DataTypeStringArray, + expectedValid: false, + expectedValueName: "valueText[]", + }, + { + name: "int not accepted where boolean is required", + valueType: schema.DataTypeInt, + requiredType: schema.DataTypeBoolean, + expectedValid: false, + expectedValueName: "valueInt", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + clause := Clause{ + Operator: OperatorEqual, + Value: &Value{Value: "someValue", Type: tc.valueType}, + On: &Path{Class: "SomeClass", Property: "someProperty"}, + } + + cw := newClauseWrapper(&clause) + + assert.Equal(t, tc.expectedValid, cw.isType(tc.requiredType)) + assert.Equal(t, tc.expectedValueName, cw.getValueNameFromType()) + + assert.Equal(t, "someValue", cw.getValue()) + assert.Equal(t, schema.ClassName("SomeClass"), cw.getClassName()) + assert.Equal(t, schema.PropertyName("someProperty"), cw.getPropertyName()) + assert.Equal(t, OperatorEqual, cw.getOperator()) + assert.Nil(t, cw.getOperands()) + + t.Run("clause is updated to required type if valid", func(t *testing.T) { + cw.updateClause() + + if tc.expectedValid { + assert.Equal(t, tc.requiredType, clause.Value.Type) + } else { + assert.Equal(t, tc.valueType, clause.Value.Type) + } + }) + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/entities/filters/pagination.go b/platform/dbops/binaries/weaviate-src/entities/filters/pagination.go new file mode 100644 index 0000000000000000000000000000000000000000..9d8734d43f0d993173b3d5dd9b6261cc1ba55014 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/filters/pagination.go @@ -0,0 +1,58 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package filters + +const ( + // LimitFlagSearchByDist indicates that the + // vector search should be conducted by + // distance, without limit + LimitFlagSearchByDist int = iota - 2 + + // LimitFlagNotSet indicates that no limit + // was provided by the client + LimitFlagNotSet +) + +type Pagination struct { + Offset int + Limit int + Autocut int +} + +// ExtractPaginationFromArgs gets the limit key out of a map. Not specific to +// GQL, but can be used from GQL +func ExtractPaginationFromArgs(args map[string]interface{}) (*Pagination, error) { + offset, offsetOk := args["offset"] + if !offsetOk { + offset = 0 + } + + limit, limitOk := args["limit"] + if !limitOk || limit.(int) < 0 { + limit = LimitFlagNotSet + } + + autocut, autocutOk := args["autocut"] + if !autocutOk { + autocut = 0 // disabled + } + + if !offsetOk && !limitOk && !autocutOk { + return nil, nil + } + + return &Pagination{ + Offset: offset.(int), + Limit: limit.(int), + Autocut: autocut.(int), + }, nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/filters/pagination_test.go b/platform/dbops/binaries/weaviate-src/entities/filters/pagination_test.go new file mode 100644 index 0000000000000000000000000000000000000000..5750c3e984a65f8bb4b2c6e55c7aa7caeb811313 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/filters/pagination_test.go @@ -0,0 +1,58 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package filters + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestExtractPagination(t *testing.T) { + t.Run("without a limit present", func(t *testing.T) { + p, err := ExtractPaginationFromArgs(map[string]interface{}{}) + require.Nil(t, err) + assert.Nil(t, p) + }) + + t.Run("with a limit present", func(t *testing.T) { + p, err := ExtractPaginationFromArgs(map[string]interface{}{ + "limit": 25, + }) + require.Nil(t, err) + require.NotNil(t, p) + assert.Equal(t, 0, p.Offset) + assert.Equal(t, 25, p.Limit) + }) + + t.Run("with a offset present", func(t *testing.T) { + p, err := ExtractPaginationFromArgs(map[string]interface{}{ + "offset": 11, + }) + require.Nil(t, err) + require.NotNil(t, p) + assert.Equal(t, 11, p.Offset) + assert.Equal(t, -1, p.Limit) + }) + + t.Run("with offset and limit present", func(t *testing.T) { + p, err := ExtractPaginationFromArgs(map[string]interface{}{ + "offset": 11, + "limit": 25, + }) + require.Nil(t, err) + require.NotNil(t, p) + assert.Equal(t, 11, p.Offset) + assert.Equal(t, 25, p.Limit) + }) +} diff --git a/platform/dbops/binaries/weaviate-src/entities/filters/path.go b/platform/dbops/binaries/weaviate-src/entities/filters/path.go new file mode 100644 index 0000000000000000000000000000000000000000..c1986aecc8c635c5537374e84e3ecf20d59bdac3 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/filters/path.go @@ -0,0 +1,145 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package filters + +import ( + "fmt" + "strings" + + "github.com/weaviate/weaviate/entities/schema" +) + +// Represents the path in a filter. 
+// Either RelationProperty or PrimitiveProperty must be empty (e.g. ""). +type Path struct { + Class schema.ClassName `json:"class"` + Property schema.PropertyName `json:"property"` + + // If nil, then this is the property we're interested in. + // If a pointer to another Path, the constraint applies to that one. + Child *Path `json:"child"` +} + +// GetInnerMost recursively searches for child paths, only when no more +// children can be found will the path be returned +func (p *Path) GetInnerMost() *Path { + if p.Child == nil { + return p + } + + return p.Child.GetInnerMost() +} + +// Slice flattens the nested path into a slice of segments +func (p *Path) Slice() []string { + return appendNestedPath(p, true) +} + +func (p *Path) SliceInterface() []interface{} { + path := appendNestedPath(p, true) + out := make([]interface{}, len(path)) + for i, element := range path { + out[i] = element + } + return out +} + +func appendNestedPath(p *Path, omitClass bool) []string { + result := []string{} + if !omitClass { + result = append(result, string(p.Class)) + } + + if p.Child != nil { + property := string(p.Property) + result = append(result, property) + result = append(result, appendNestedPath(p.Child, false)...) + } else { + result = append(result, string(p.Property)) + } + + return result +} + +// ParsePath Parses the path +// It parses an array of strings in this format +// [0] ClassName -> The root class name we're drilling down from +// [1] propertyName -> The property name we're interested in. +func ParsePath(pathElements []interface{}, rootClass string) (*Path, error) { + // we need to manually insert the root class, as that is omitted from the user + pathElements = append([]interface{}{rootClass}, pathElements...) + + // The sentinel is used to bootstrap the inlined recursion. + // we return sentinel.Child at the end. + var sentinel Path + + // Keep track of where we are in the path (e.g. 
always points to latest Path segment) + current := &sentinel + + // Now go through the path elements, step over it in increments of two. + // Simple case: ClassName -> property + // Nested path case: ClassName -> HasRef -> ClassOfRef -> Property + for i := 0; i < len(pathElements); i += 2 { + lengthRemaining := len(pathElements) - i + if lengthRemaining < 2 { + return nil, fmt.Errorf("missing an argument after '%s'", pathElements[i]) + } + + rawClassName, ok := pathElements[i].(string) + if !ok { + return nil, fmt.Errorf("element %v is not a string", i+1) + } + + rawPropertyName, ok := pathElements[i+1].(string) + if !ok { + return nil, fmt.Errorf("element %v is not a string", i+2) + } + + className, err := schema.ValidateClassName(rawClassName) + if err != nil { + return nil, fmt.Errorf("expected a valid class name in 'path' field for the filter but got '%s'", rawClassName) + } + + var propertyName schema.PropertyName + lengthPropName, isPropLengthFilter := schema.IsPropertyLength(rawPropertyName, 0) + if isPropLengthFilter { + // check if property in len(PROPERTY) is valid + _, err = schema.ValidatePropertyName(lengthPropName) + if err != nil { + return nil, fmt.Errorf("expected a valid property name in 'path' field for the filter, but got '%s'", lengthPropName) + } + propertyName = schema.PropertyName(rawPropertyName) + } else { + propertyName, err = schema.ValidatePropertyName(rawPropertyName) + // Invalid property name? + // Try to parse it as as a reference or a length. + if err != nil { + untitlizedPropertyName := strings.ToLower(rawPropertyName[0:1]) + rawPropertyName[1:] + propertyName, err = schema.ValidatePropertyName(untitlizedPropertyName) + if err != nil { + return nil, fmt.Errorf("expected a valid property name in 'path' field for the filter, but got '%s'", rawPropertyName) + } + } + + } + + current.Child = &Path{ + Class: className, + Property: propertyName, + } + + // And down we go. 
+ current = current.Child + } + + return sentinel.Child, nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/filters/path_test.go b/platform/dbops/binaries/weaviate-src/entities/filters/path_test.go new file mode 100644 index 0000000000000000000000000000000000000000..76880a1be01632b08814244ecdc5fbbae8e88c7e --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/filters/path_test.go @@ -0,0 +1,146 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package filters + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_ParsePath(t *testing.T) { + t.Run("with a primitive prop", func(t *testing.T) { + rootClass := "City" + segments := []interface{}{"population"} + expectedPath := &Path{ + Class: "City", + Property: "population", + } + + path, err := ParsePath(segments, rootClass) + + require.Nil(t, err, "should not error") + assert.Equal(t, expectedPath, path, "should parse the path correctly") + }) + + t.Run("with len prop", func(t *testing.T) { + rootClass := "City" + segments := []interface{}{"len(population)"} + expectedPath := &Path{ + Class: "City", + Property: "len(population)", + } + + path, err := ParsePath(segments, rootClass) + + require.Nil(t, err, "should not error") + assert.Equal(t, expectedPath, path, "should parse the path correctly") + }) + + t.Run("with nested refs", func(t *testing.T) { + rootClass := "City" + segments := []interface{}{"inCountry", "Country", "inContinent", "Continent", "onPlanet", "Planet", "name"} + expectedPath := &Path{ + Class: "City", + Property: "inCountry", + Child: &Path{ + Class: "Country", + Property: "inContinent", + Child: &Path{ + Class: "Continent", + Property: "onPlanet", + Child: &Path{ + 
Class: "Planet", + Property: "name", + }, + }, + }, + } + + path, err := ParsePath(segments, rootClass) + + require.Nil(t, err, "should not error") + assert.Equal(t, expectedPath, path, "should parse the path correctly") + + // Extract innermost path element + innerMost := path.GetInnerMost() + assert.Equal(t, innerMost, &Path{Class: "Planet", Property: "name"}) + + // Print Slice + }) + + t.Run("with non-valid prop", func(t *testing.T) { + rootClass := "City" + segments := []interface{}{"populatS356()ion"} + _, err := ParsePath(segments, rootClass) + require.NotNil(t, err, "should error") + }) + + t.Run("with non-valid len prop", func(t *testing.T) { + rootClass := "City" + segments := []interface{}{"len(populatS356()ion)"} + _, err := ParsePath(segments, rootClass) + require.NotNil(t, err, "should error") + }) +} + +func Test_SlicePath(t *testing.T) { + t.Run("with a primitive prop", func(t *testing.T) { + path := &Path{ + Class: "City", + Property: "population", + } + expectedSegments := []interface{}{"population"} + + segments := path.SliceInterface() + + assert.Equal(t, expectedSegments, segments, "should slice the path correctly") + }) + + t.Run("with nested refs", func(t *testing.T) { + path := &Path{ + Class: "City", + Property: "inCountry", + Child: &Path{ + Class: "Country", + Property: "inContinent", + Child: &Path{ + Class: "Continent", + Property: "onPlanet", + Child: &Path{ + Class: "Planet", + Property: "name", + }, + }, + }, + } + + t.Run("as []interface{}", func(t *testing.T) { + expectedSegments := []interface{}{"inCountry", "Country", "inContinent", "Continent", "onPlanet", "Planet", "name"} + segments := path.SliceInterface() + assert.Equal(t, expectedSegments, segments, "should slice the path correctly") + }) + + t.Run("as []string titleized", func(t *testing.T) { + expectedSegments := []string{"inCountry", "Country", "inContinent", "Continent", "onPlanet", "Planet", "name"} + segments := path.Slice() + assert.Equal(t, expectedSegments, 
segments, "should slice the path correctly") + }) + + t.Run("as []string non-titleized", func(t *testing.T) { + expectedSegments := []string{"inCountry", "Country", "inContinent", "Continent", "onPlanet", "Planet", "name"} + segments := path.Slice() + assert.Equal(t, expectedSegments, segments, "should slice the path correctly") + }) + }) +} diff --git a/platform/dbops/binaries/weaviate-src/entities/filters/sort.go b/platform/dbops/binaries/weaviate-src/entities/filters/sort.go new file mode 100644 index 0000000000000000000000000000000000000000..8fd7a7b7be7547f5ed164a27509ebed2042de0ae --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/filters/sort.go @@ -0,0 +1,45 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package filters + +// Sort contains path and order (asc, desc) information +type Sort struct { + Path []string `json:"path"` + Order string `json:"order"` +} + +// ExtractSortFromArgs gets the sort parameters +func ExtractSortFromArgs(in []interface{}) []Sort { + var args []Sort + + for i := range in { + sortFilter, ok := in[i].(map[string]interface{}) + if ok { + var path []string + pathParam, ok := sortFilter["path"].([]interface{}) + if ok { + path = make([]string, len(pathParam)) + for i, value := range pathParam { + path[i] = value.(string) + } + } + var order string + orderParam, ok := sortFilter["order"] + if ok { + order = orderParam.(string) + } + args = append(args, Sort{path, order}) + } + } + + return args +} diff --git a/platform/dbops/binaries/weaviate-src/entities/filters/sort_validator.go b/platform/dbops/binaries/weaviate-src/entities/filters/sort_validator.go new file mode 100644 index 0000000000000000000000000000000000000000..1a1214de6ec2570a836303d7748bee87f4c137dc --- /dev/null +++ 
b/platform/dbops/binaries/weaviate-src/entities/filters/sort_validator.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package filters + +import ( + "fmt" + + "github.com/pkg/errors" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" +) + +func ValidateSort(getClass func(string) *models.Class, className schema.ClassName, sort []Sort) error { + if len(sort) == 0 { + return errors.New("empty sort") + } + + var errs []error + for i := range sort { + if err := validateSortClause(getClass, className, sort[i]); err != nil { + errs = append(errs, errors.Wrapf(err, "sort parameter at position %d", i)) + } + } + + if len(errs) > 0 { + return mergeErrs(errs) + } else { + return nil + } +} + +func validateSortClause(getClass func(string) *models.Class, className schema.ClassName, sort Sort) error { + // validate current + path, order := sort.Path, sort.Order + + if len(order) > 0 && order != "asc" && order != "desc" { + return errors.Errorf(`invalid order parameter, `+ + `possible values are: ["asc", "desc"] not: "%s"`, order) + } + + switch len(path) { + case 0: + return errors.New("path parameter cannot be empty") + case 1: + class := getClass(className.String()) + if class == nil { + return errors.Errorf("class %q does not exist in schema", className) + } + propName := schema.PropertyName(path[0]) + if IsInternalProperty(propName) { + // handle internal properties + return nil + } + + prop, err := schema.GetPropertyByName(class, string(propName)) + if err != nil { + return err + } + + if isUUIDType(prop.DataType[0]) { + return fmt.Errorf("prop %q is of type uuid/uuid[]: "+ + "sorting by uuid is currently not supported - if you believe it should be, "+ + "please open a 
feature request on github.com/weaviate/weaviate", prop.Name) + } + + if schema.IsRefDataType(prop.DataType) { + return errors.Errorf("sorting by reference not supported, "+ + "property %q is a ref prop to the class %q", propName, prop.DataType[0]) + } + return nil + default: + return errors.New("sorting by reference not supported, " + + "path must have exactly one argument") + } +} diff --git a/platform/dbops/binaries/weaviate-src/entities/filters/sort_validator_test.go b/platform/dbops/binaries/weaviate-src/entities/filters/sort_validator_test.go new file mode 100644 index 0000000000000000000000000000000000000000..4acf989fcdcb30acd1d23566200439cf5dfcf21b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/filters/sort_validator_test.go @@ -0,0 +1,85 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package filters + +import ( + "testing" + + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" +) + +func TestSortValidation(t *testing.T) { + tests := []struct { + name string + prop string + valid bool + }{ + { + name: "existing prop - string", + valid: true, + prop: "modelName", + }, + { + name: "existing prop - int", + valid: true, + prop: "horsepower", + }, + { + name: "invalid prop", + valid: false, + prop: "idontexist", + }, + { + name: "uuid prop", + valid: false, + prop: "my_id", + }, + { + name: "uuid[] prop", + valid: false, + prop: "my_idz", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + sch := &schema.Schema{Objects: &models.Schema{ + Classes: []*models.Class{ + { + Class: "Car", + Properties: []*models.Property{ + {Name: "modelName", DataType: schema.DataTypeText.PropString(), Tokenization: models.PropertyTokenizationWhitespace}, + {Name: "manufacturerName", DataType: schema.DataTypeText.PropString(), Tokenization: models.PropertyTokenizationWhitespace}, + {Name: "horsepower", DataType: []string{"int"}}, + {Name: "my_id", DataType: []string{"uuid"}}, + {Name: "my_idz", DataType: []string{"uuid[]"}}, + }, + }, + }, + }} + + sort := []Sort{{ + Path: []string{tt.prop}, + Order: "asc", + }} + + err := ValidateSort(sch.GetClass, schema.ClassName("Car"), sort) + if tt.valid { + require.Nil(t, err) + } else { + require.NotNil(t, err) + } + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/entities/interval/backoff.go b/platform/dbops/binaries/weaviate-src/entities/interval/backoff.go new file mode 100644 index 0000000000000000000000000000000000000000..48ec01c674fd853f7e9445bef821d71acda86e57 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/interval/backoff.go @@ -0,0 +1,78 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V 
V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package interval + +import ( + "sort" + "time" +) + +var defaultBackoffs = []time.Duration{ + time.Duration(0), + 30 * time.Second, + 2 * time.Minute, + 10 * time.Minute, + 1 * time.Hour, + 12 * time.Hour, +} + +// BackoffTimer tracks a given range of intervals with increasing duration +type BackoffTimer struct { + backoffLevel int + backoffs []time.Duration + lastInterval time.Time +} + +// NewBackoffTimer constructs and returns a *BackoffTimer instance +// If no backoffs are provided, defaultBackoffs is used. When the +// last backoff duration has elapsed, the timer will use the final +// duration for the remainder of the BackoffTimer's lifetime +func NewBackoffTimer(backoffs ...time.Duration) *BackoffTimer { + boff := &BackoffTimer{backoffs: backoffs} + if len(backoffs) == 0 { + boff.backoffs = defaultBackoffs + } else { + sort.Slice(backoffs, func(i, j int) bool { + return backoffs[i] < backoffs[j] + }) + } + return boff +} + +// IncreaseInterval bumps the duration of the interval up to the next given value +func (b *BackoffTimer) IncreaseInterval() { + b.lastInterval = time.Now() + if b.backoffLevel < len(b.backoffs) { + b.backoffLevel += 1 + } +} + +// IntervalElapsed returns if the current interval has elapsed +func (b *BackoffTimer) IntervalElapsed() bool { + return time.Since(b.lastInterval) > b.CurrentInterval() +} + +// Reset returns BackoffTimer to its original empty state +func (b *BackoffTimer) Reset() { + b.lastInterval = time.Time{} + b.backoffLevel = 0 +} + +func (b *BackoffTimer) CurrentInterval() time.Duration { + if b.backoffLevel >= len(b.backoffs) { + return b.backoffs[len(b.backoffs)-1] + } + + interval := b.backoffs[b.backoffLevel] + + return interval +} diff --git a/platform/dbops/binaries/weaviate-src/entities/interval/backoff_test.go 
b/platform/dbops/binaries/weaviate-src/entities/interval/backoff_test.go new file mode 100644 index 0000000000000000000000000000000000000000..ae6eec929b040c06ddd0d8ecc60ec2c3c0bfa001 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/interval/backoff_test.go @@ -0,0 +1,78 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package interval + +import ( + "sort" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestBackoffInterval(t *testing.T) { + t.Run("with default backoffs", func(t *testing.T) { + boff := NewBackoffTimer() + + assert.Equal(t, boff.backoffs, defaultBackoffs) + assert.Zero(t, boff.backoffLevel) + assert.Zero(t, boff.lastInterval) + assert.Equal(t, time.Duration(0), boff.CurrentInterval()) + assert.True(t, boff.IntervalElapsed()) + + i := 1 + for ; i < len(defaultBackoffs); i++ { + boff.IncreaseInterval() + assert.False(t, boff.IntervalElapsed()) + assert.Equal(t, i, boff.backoffLevel) + assert.Equal(t, defaultBackoffs[i], boff.CurrentInterval()) + } + + boff.IncreaseInterval() + assert.False(t, boff.IntervalElapsed()) + assert.Equal(t, i, boff.backoffLevel) + assert.Equal(t, defaultBackoffs[len(defaultBackoffs)-1], boff.CurrentInterval()) + }) + + t.Run("with custom backoffs", func(t *testing.T) { + var ( + durations = []time.Duration{time.Second, time.Nanosecond, time.Millisecond} + sorted = make([]time.Duration, len(durations)) + ) + + copy(sorted, durations) + sort.Slice(sorted, func(i, j int) bool { + return sorted[i] < sorted[j] + }) + + boff := NewBackoffTimer(durations...) 
+ assert.Equal(t, boff.backoffs, sorted) + assert.True(t, boff.IntervalElapsed()) + assert.Equal(t, sorted[0], boff.CurrentInterval()) + + boff.IncreaseInterval() + time.Sleep(time.Millisecond) + assert.True(t, boff.IntervalElapsed()) + assert.Equal(t, sorted[1], boff.CurrentInterval()) + + boff.IncreaseInterval() + assert.False(t, boff.IntervalElapsed()) + time.Sleep(time.Second) + assert.True(t, boff.IntervalElapsed()) + assert.Equal(t, sorted[2], boff.CurrentInterval()) + + boff.IncreaseInterval() + assert.False(t, boff.IntervalElapsed()) + assert.False(t, boff.IntervalElapsed()) + assert.Equal(t, sorted[len(sorted)-1], boff.CurrentInterval()) + }) +} diff --git a/platform/dbops/binaries/weaviate-src/entities/inverted/errors.go b/platform/dbops/binaries/weaviate-src/entities/inverted/errors.go new file mode 100644 index 0000000000000000000000000000000000000000..07563202ac11b514a47d6ef5ec8759c65f5df3e3 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/inverted/errors.go @@ -0,0 +1,47 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package inverted + +import "fmt" + +type MissingIndexError struct { + format string + args []any +} + +func NewMissingFilterableIndexError(propName string) error { + return MissingIndexError{missingFilterableFormat, []any{propName, propName}} +} + +func NewMissingSearchableIndexError(propName string) error { + return MissingIndexError{missingSearchableFormat, []any{propName, propName}} +} + +func NewMissingFilterableMetaCountIndexError(propName string) error { + return MissingIndexError{missingFilterableMetaCountFormat, []any{propName, propName}} +} + +func (e MissingIndexError) Error() string { + return fmt.Sprintf(e.format, e.args...) 
+} + +const ( + missingFilterableFormat = "Filtering by property '%s' requires inverted index. " + + "Is `indexFilterable` option of property '%s' enabled? " + + "Set it to `true` or leave empty" + missingSearchableFormat = "Searching by property '%s' requires inverted index. " + + "Is `indexSearchable` option of property '%s' enabled? " + + "Set it to `true` or leave empty" + missingFilterableMetaCountFormat = "Searching by property '%s' count requires inverted index. " + + "Is `indexFilterable` option of property '%s' enabled? " + + "Set it to `true` or leave empty" +) diff --git a/platform/dbops/binaries/weaviate-src/entities/inverted/serialization.go b/platform/dbops/binaries/weaviate-src/entities/inverted/serialization.go new file mode 100644 index 0000000000000000000000000000000000000000..7287c0fac609511baf40372ac343e650779e172a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/inverted/serialization.go @@ -0,0 +1,155 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package inverted + +import ( + "bytes" + "encoding/binary" + "fmt" + "math" + + "github.com/pkg/errors" +) + +// LexicographicallySortableFloat64 transforms a conversion to a +// lexicographically sortable byte slice. In general, for lexicographical +// sorting big endian notatino is required. 
Additionally the sign needs to be +// flipped in any case, but additionally each remaining byte also needs to be +// flipped if the number is negative +func LexicographicallySortableFloat64(in float64) ([]byte, error) { + buf := bytes.NewBuffer(nil) + + err := binary.Write(buf, binary.BigEndian, in) + if err != nil { + return nil, errors.Wrap(err, "serialize float64 value as big endian") + } + + var out []byte + if in >= 0 { + // on positive numbers only flip the sign + out = buf.Bytes() + firstByte := out[0] ^ 0x80 + out = append([]byte{firstByte}, out[1:]...) + } else { + // on negative numbers flip every bit + out = make([]byte, 8) + for i, b := range buf.Bytes() { + out[i] = b ^ 0xFF + } + } + + return out, nil +} + +// ParseLexicographicallySortableFloat64 reverses the changes in +// LexicographicallySortableFloat64 +func ParseLexicographicallySortableFloat64(in []byte) (float64, error) { + if len(in) != 8 { + return 0, fmt.Errorf("float64 must be 8 bytes long, got: %d", len(in)) + } + + flipped := make([]byte, 8) + if in[0]&0x80 == 0x80 { + // encoded as negative means it was originally positive, so we only need to + // flip the sign + flipped[0] = in[0] ^ 0x80 + + // the remainder can be copied + for i := 1; i < 8; i++ { + flipped[i] = in[i] + } + } else { + // encoded as positive means it was originally negative, so we need to flip + // everything + for i := 0; i < 8; i++ { + flipped[i] = in[i] ^ 0xFF + } + } + + r := bytes.NewReader(flipped) + var value float64 + + err := binary.Read(r, binary.BigEndian, &value) + if err != nil { + return 0, errors.Wrap(err, "deserialize float64 value as big endian") + } + + return value, nil +} + +// LexicographicallySortableInt64 performs a conversion to a lexicographically +// sortable byte slice. 
For this, big endian notation is required and the sign +// must be flipped +func LexicographicallySortableInt64(in int64) ([]byte, error) { + buf := bytes.NewBuffer(nil) + asInt64 := int64(in) + + // flip the sign + asInt64 = asInt64 ^ math.MinInt64 + + err := binary.Write(buf, binary.BigEndian, asInt64) + if err != nil { + return nil, errors.Wrap(err, "serialize int value as big endian") + } + + return buf.Bytes(), nil +} + +// ParseLexicographicallySortableInt64 reverses the changes in +// LexicographicallySortableInt64 +func ParseLexicographicallySortableInt64(in []byte) (int64, error) { + if len(in) != 8 { + return 0, fmt.Errorf("int64 must be 8 bytes long, got: %d", len(in)) + } + + r := bytes.NewReader(in) + var value int64 + + err := binary.Read(r, binary.BigEndian, &value) + if err != nil { + return 0, errors.Wrap(err, "deserialize int64 value as big endian") + } + + return value ^ math.MinInt64, nil +} + +// LexicographicallySortableUint64 performs a conversion to a lexicographically +// sortable byte slice. For this, big endian notation is required. 
+func LexicographicallySortableUint64(in uint64) ([]byte, error) { + buf := bytes.NewBuffer(nil) + + // no signs to flip as this is a uint + err := binary.Write(buf, binary.BigEndian, in) + if err != nil { + return nil, errors.Wrap(err, "serialize int value as big endian") + } + + return buf.Bytes(), nil +} + +// ParseLexicographicallySortableUint64 reverses the changes in +// LexicographicallySortableUint64 +func ParseLexicographicallySortableUint64(in []byte) (uint64, error) { + if len(in) != 8 { + return 0, fmt.Errorf("uint64 must be 8 bytes long, got: %d", len(in)) + } + + r := bytes.NewReader(in) + var value uint64 + + err := binary.Read(r, binary.BigEndian, &value) + if err != nil { + return 0, errors.Wrap(err, "deserialize uint64 value as big endian") + } + + return value, nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/inverted/serialization_test.go b/platform/dbops/binaries/weaviate-src/entities/inverted/serialization_test.go new file mode 100644 index 0000000000000000000000000000000000000000..62c5928778df81f8320e09edf39aa52fbcea5e44 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/inverted/serialization_test.go @@ -0,0 +1,96 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package inverted + +import ( + "fmt" + "math" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// TestSerialization makes sure that writing and reading into the +// lexicographically sortable types byte slices ends up with the same values as +// original. 
There is no focus on the sortability itself, as that is already +// tested extensively in analyzer_test.go +func TestSerialization(t *testing.T) { + t.Run("float64", func(t *testing.T) { + subjects := []float64{ + math.SmallestNonzeroFloat64, + -400.0001, + -21, + 0, + 21, + 400.0001, + math.MaxFloat64, + } + + for _, sub := range subjects { + t.Run(fmt.Sprintf("with %f", sub), func(t *testing.T) { + bytes, err := LexicographicallySortableFloat64(sub) + require.Nil(t, err) + + parsed, err := ParseLexicographicallySortableFloat64(bytes) + require.Nil(t, err) + + assert.Equal(t, sub, parsed, "before and after must match") + }) + } + }) + + t.Run("int64", func(t *testing.T) { + subjects := []int64{ + math.MinInt64, + -400, + -21, + 0, + 21, + 400, + math.MaxInt64, + } + + for _, sub := range subjects { + t.Run(fmt.Sprintf("with %d", sub), func(t *testing.T) { + bytes, err := LexicographicallySortableInt64(sub) + require.Nil(t, err) + + parsed, err := ParseLexicographicallySortableInt64(bytes) + require.Nil(t, err) + + assert.Equal(t, sub, parsed, "before and after must match") + }) + } + }) + + t.Run("uint64", func(t *testing.T) { + subjects := []uint64{ + 0, + 21, + 400, + math.MaxUint64, + } + + for _, sub := range subjects { + t.Run(fmt.Sprintf("with %d", sub), func(t *testing.T) { + bytes, err := LexicographicallySortableUint64(sub) + require.Nil(t, err) + + parsed, err := ParseLexicographicallySortableUint64(bytes) + require.Nil(t, err) + + assert.Equal(t, sub, parsed, "before and after must match") + }) + } + }) +} diff --git a/platform/dbops/binaries/weaviate-src/entities/lsmkv/errors.go b/platform/dbops/binaries/weaviate-src/entities/lsmkv/errors.go new file mode 100644 index 0000000000000000000000000000000000000000..557ec32ebc0ed8826d1632196f5eed26d272cdb6 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/lsmkv/errors.go @@ -0,0 +1,44 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ 
(_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "errors" + "fmt" + "time" +) + +var ( + NotFound = errors.New("not found") + Deleted = errors.New("deleted") +) + +type ErrDeleted struct { + deletionTime time.Time +} + +func NewErrDeleted(deletionTime time.Time) ErrDeleted { + return ErrDeleted{deletionTime: deletionTime} +} + +func (e ErrDeleted) DeletionTime() time.Time { + return e.deletionTime +} + +func (e ErrDeleted) Error() string { + return fmt.Sprintf("%v: deletion time %s", Deleted, e.deletionTime) +} + +// Unwrap returns Deleted error so to satisfy checks like errors.Is(err, lsmkv.Deleted) +func (e ErrDeleted) Unwrap() error { + return Deleted +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/additional_properties.go b/platform/dbops/binaries/weaviate-src/entities/models/additional_properties.go new file mode 100644 index 0000000000000000000000000000000000000000..7727aeda2e99efdd4401ce949ce7aa6331efbdfc --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/additional_properties.go @@ -0,0 +1,38 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" +) + +// AdditionalProperties (Response only) Additional meta information about a single object. 
+// +// swagger:model AdditionalProperties +type AdditionalProperties map[string]interface{} + +// Validate validates this additional properties +func (m AdditionalProperties) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this additional properties based on context it is used +func (m AdditionalProperties) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/alias.go b/platform/dbops/binaries/weaviate-src/entities/models/alias.go new file mode 100644 index 0000000000000000000000000000000000000000..c41ce5b17faec909ec70a01f96f048b385af146a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/alias.go @@ -0,0 +1,64 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// Alias Represents the mapping between an alias name and a collection. An alias provides an alternative name for accessing a collection. +// +// swagger:model Alias +type Alias struct { + + // The unique name of the alias that serves as an alternative identifier for the collection. + Alias string `json:"alias,omitempty"` + + // The name of the collection (class) to which this alias is mapped. 
+ Class string `json:"class,omitempty"` +} + +// Validate validates this alias +func (m *Alias) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this alias based on context it is used +func (m *Alias) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *Alias) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Alias) UnmarshalBinary(b []byte) error { + var res Alias + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/alias_response.go b/platform/dbops/binaries/weaviate-src/entities/models/alias_response.go new file mode 100644 index 0000000000000000000000000000000000000000..9bee3073984a2cf8eb6a9c49461865f3b4cdd3a8 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/alias_response.go @@ -0,0 +1,127 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// AliasResponse Response object containing a list of alias mappings. +// +// swagger:model AliasResponse +type AliasResponse struct { + + // Array of alias objects, each containing an alias-to-collection mapping. 
+ Aliases []*Alias `json:"aliases"` +} + +// Validate validates this alias response +func (m *AliasResponse) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAliases(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *AliasResponse) validateAliases(formats strfmt.Registry) error { + if swag.IsZero(m.Aliases) { // not required + return nil + } + + for i := 0; i < len(m.Aliases); i++ { + if swag.IsZero(m.Aliases[i]) { // not required + continue + } + + if m.Aliases[i] != nil { + if err := m.Aliases[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("aliases" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("aliases" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// ContextValidate validate this alias response based on the context it is used +func (m *AliasResponse) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateAliases(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *AliasResponse) contextValidateAliases(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.Aliases); i++ { + + if m.Aliases[i] != nil { + if err := m.Aliases[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("aliases" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("aliases" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *AliasResponse) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *AliasResponse) UnmarshalBinary(b []byte) error { + var res AliasResponse + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/async_replication_status.go b/platform/dbops/binaries/weaviate-src/entities/models/async_replication_status.go new file mode 100644 index 0000000000000000000000000000000000000000..1e451668e333f3449c2d9a6fe5639727c964de21 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/async_replication_status.go @@ -0,0 +1,67 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// AsyncReplicationStatus The status of the async replication. +// +// swagger:model AsyncReplicationStatus +type AsyncReplicationStatus struct { + + // The number of objects propagated in the most recent iteration. + ObjectsPropagated uint64 `json:"objectsPropagated,omitempty"` + + // The start time of the most recent iteration. + StartDiffTimeUnixMillis int64 `json:"startDiffTimeUnixMillis,omitempty"` + + // The target node of the replication, if set, otherwise empty. 
+ TargetNode string `json:"targetNode,omitempty"` +} + +// Validate validates this async replication status +func (m *AsyncReplicationStatus) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this async replication status based on context it is used +func (m *AsyncReplicationStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *AsyncReplicationStatus) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *AsyncReplicationStatus) UnmarshalBinary(b []byte) error { + var res AsyncReplicationStatus + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/b_m25_config.go b/platform/dbops/binaries/weaviate-src/entities/models/b_m25_config.go new file mode 100644 index 0000000000000000000000000000000000000000..d73cccaa4321ad6f6a9205f80fbffc635439eee4 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/b_m25_config.go @@ -0,0 +1,64 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// BM25Config tuning parameters for the BM25 algorithm +// +// swagger:model BM25Config +type BM25Config struct { + + // Calibrates term-weight scaling based on the document length (default: 0.75). 
+ B float32 `json:"b,omitempty"` + + // Calibrates term-weight scaling based on the term frequency within a document (default: 1.2). + K1 float32 `json:"k1,omitempty"` +} + +// Validate validates this b m25 config +func (m *BM25Config) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this b m25 config based on context it is used +func (m *BM25Config) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *BM25Config) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *BM25Config) UnmarshalBinary(b []byte) error { + var res BM25Config + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/backup_config.go b/platform/dbops/binaries/weaviate-src/entities/models/backup_config.go new file mode 100644 index 0000000000000000000000000000000000000000..80b083a08e1273b8172e8b3cef644463288e21b5 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/backup_config.go @@ -0,0 +1,178 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// BackupConfig Backup custom configuration +// +// swagger:model BackupConfig +type BackupConfig struct { + + // Name of the bucket, container, volume, etc + Bucket string `json:"Bucket,omitempty"` + + // Desired CPU core utilization ranging from 1%-80% + // Maximum: 80 + // Minimum: 1 + CPUPercentage int64 `json:"CPUPercentage,omitempty"` + + // Aimed chunk size, with a minimum of 2MB, default of 128MB, and a maximum of 512MB. The actual chunk size may vary. + // Maximum: 512 + // Minimum: 2 + ChunkSize int64 `json:"ChunkSize,omitempty"` + + // compression level used by compression algorithm + // Enum: [DefaultCompression BestSpeed BestCompression] + CompressionLevel string `json:"CompressionLevel,omitempty"` + + // name of the endpoint, e.g. s3.amazonaws.com + Endpoint string `json:"Endpoint,omitempty"` + + // Path or key within the bucket + Path string `json:"Path,omitempty"` +} + +// Validate validates this backup config +func (m *BackupConfig) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateCPUPercentage(formats); err != nil { + res = append(res, err) + } + + if err := m.validateChunkSize(formats); err != nil { + res = append(res, err) + } + + if err := m.validateCompressionLevel(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *BackupConfig) validateCPUPercentage(formats strfmt.Registry) error { + if swag.IsZero(m.CPUPercentage) { // not required + return nil + } + + if err := validate.MinimumInt("CPUPercentage", "body", m.CPUPercentage, 1, false); err != nil { + return err + } + + if err := validate.MaximumInt("CPUPercentage", "body", m.CPUPercentage, 80, false); err != nil { + return err + } + + return nil +} + +func (m *BackupConfig) validateChunkSize(formats strfmt.Registry) error { + if swag.IsZero(m.ChunkSize) { // not required + return nil + } + + if err := validate.MinimumInt("ChunkSize", "body", m.ChunkSize, 2, false); err != nil { + return err + } + + if err := validate.MaximumInt("ChunkSize", "body", m.ChunkSize, 512, false); err != nil { + return err + } + + return nil +} + +var backupConfigTypeCompressionLevelPropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["DefaultCompression","BestSpeed","BestCompression"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + backupConfigTypeCompressionLevelPropEnum = append(backupConfigTypeCompressionLevelPropEnum, v) + } +} + +const ( + + // BackupConfigCompressionLevelDefaultCompression captures enum value "DefaultCompression" + BackupConfigCompressionLevelDefaultCompression string = "DefaultCompression" + + // BackupConfigCompressionLevelBestSpeed captures enum value "BestSpeed" + BackupConfigCompressionLevelBestSpeed string = "BestSpeed" + + // BackupConfigCompressionLevelBestCompression captures enum value "BestCompression" + BackupConfigCompressionLevelBestCompression string = "BestCompression" +) + +// prop value enum +func (m *BackupConfig) validateCompressionLevelEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, backupConfigTypeCompressionLevelPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *BackupConfig) validateCompressionLevel(formats strfmt.Registry) error { 
+ if swag.IsZero(m.CompressionLevel) { // not required + return nil + } + + // value enum + if err := m.validateCompressionLevelEnum("CompressionLevel", "body", m.CompressionLevel); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this backup config based on context it is used +func (m *BackupConfig) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *BackupConfig) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *BackupConfig) UnmarshalBinary(b []byte) error { + var res BackupConfig + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/backup_create_request.go b/platform/dbops/binaries/weaviate-src/entities/models/backup_create_request.go new file mode 100644 index 0000000000000000000000000000000000000000..5ad75a94ab2cc1b6a9079117a2dec2e2e9d9d169 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/backup_create_request.go @@ -0,0 +1,124 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// BackupCreateRequest Request body for creating a backup of a set of classes +// +// swagger:model BackupCreateRequest +type BackupCreateRequest struct { + + // Custom configuration for the backup creation process + Config *BackupConfig `json:"config,omitempty"` + + // List of collections to exclude from the backup creation process. If not set, all collections are included. Cannot be used together with `include`. + Exclude []string `json:"exclude"` + + // The ID of the backup (required). Must be URL-safe and work as a filesystem path, only lowercase, numbers, underscore, minus characters allowed. + ID string `json:"id,omitempty"` + + // List of collections to include in the backup creation process. If not set, all collections are included. Cannot be used together with `exclude`. + Include []string `json:"include"` +} + +// Validate validates this backup create request +func (m *BackupCreateRequest) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateConfig(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *BackupCreateRequest) validateConfig(formats strfmt.Registry) error { + if swag.IsZero(m.Config) { // not required + return nil + } + + if m.Config != nil { + if err := m.Config.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("config") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("config") + } + return err + } + } + + return nil +} + +// ContextValidate validate this backup create request based on the context it is used +func (m *BackupCreateRequest) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateConfig(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *BackupCreateRequest) contextValidateConfig(ctx context.Context, formats strfmt.Registry) error { + + if m.Config != nil { + if err := m.Config.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("config") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("config") + } + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *BackupCreateRequest) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *BackupCreateRequest) UnmarshalBinary(b []byte) error { + var res BackupCreateRequest + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/backup_create_response.go b/platform/dbops/binaries/weaviate-src/entities/models/backup_create_response.go new file mode 100644 index 0000000000000000000000000000000000000000..e4f67504ba302433c959af9c0c0df6aa855a9a1b --- /dev/null +++ 
b/platform/dbops/binaries/weaviate-src/entities/models/backup_create_response.go @@ -0,0 +1,146 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// BackupCreateResponse The definition of a backup create response body +// +// swagger:model BackupCreateResponse +type BackupCreateResponse struct { + + // Backup backend name e.g. filesystem, gcs, s3. + Backend string `json:"backend,omitempty"` + + // Name of the bucket, container, volume, etc + Bucket string `json:"bucket,omitempty"` + + // The list of classes for which the backup creation process was started + Classes []string `json:"classes"` + + // error message if creation failed + Error string `json:"error,omitempty"` + + // The ID of the backup. Must be URL-safe and work as a filesystem path, only lowercase, numbers, underscore, minus characters allowed. 
+ ID string `json:"id,omitempty"` + + // Path within bucket of backup + Path string `json:"path,omitempty"` + + // phase of backup creation process + // Enum: [STARTED TRANSFERRING TRANSFERRED SUCCESS FAILED CANCELED] + Status *string `json:"status,omitempty"` +} + +// Validate validates this backup create response +func (m *BackupCreateResponse) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateStatus(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +var backupCreateResponseTypeStatusPropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["STARTED","TRANSFERRING","TRANSFERRED","SUCCESS","FAILED","CANCELED"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + backupCreateResponseTypeStatusPropEnum = append(backupCreateResponseTypeStatusPropEnum, v) + } +} + +const ( + + // BackupCreateResponseStatusSTARTED captures enum value "STARTED" + BackupCreateResponseStatusSTARTED string = "STARTED" + + // BackupCreateResponseStatusTRANSFERRING captures enum value "TRANSFERRING" + BackupCreateResponseStatusTRANSFERRING string = "TRANSFERRING" + + // BackupCreateResponseStatusTRANSFERRED captures enum value "TRANSFERRED" + BackupCreateResponseStatusTRANSFERRED string = "TRANSFERRED" + + // BackupCreateResponseStatusSUCCESS captures enum value "SUCCESS" + BackupCreateResponseStatusSUCCESS string = "SUCCESS" + + // BackupCreateResponseStatusFAILED captures enum value "FAILED" + BackupCreateResponseStatusFAILED string = "FAILED" + + // BackupCreateResponseStatusCANCELED captures enum value "CANCELED" + BackupCreateResponseStatusCANCELED string = "CANCELED" +) + +// prop value enum +func (m *BackupCreateResponse) validateStatusEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, backupCreateResponseTypeStatusPropEnum, true); err != nil { + return 
err + } + return nil +} + +func (m *BackupCreateResponse) validateStatus(formats strfmt.Registry) error { + if swag.IsZero(m.Status) { // not required + return nil + } + + // value enum + if err := m.validateStatusEnum("status", "body", *m.Status); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this backup create response based on context it is used +func (m *BackupCreateResponse) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *BackupCreateResponse) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *BackupCreateResponse) UnmarshalBinary(b []byte) error { + var res BackupCreateResponse + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/backup_create_status_response.go b/platform/dbops/binaries/weaviate-src/entities/models/backup_create_status_response.go new file mode 100644 index 0000000000000000000000000000000000000000..3196fabf2c717d3226b2f09608b9004e75ea865b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/backup_create_status_response.go @@ -0,0 +1,140 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// BackupCreateStatusResponse The definition of a backup create metadata +// +// swagger:model BackupCreateStatusResponse +type BackupCreateStatusResponse struct { + + // Backup backend name e.g. filesystem, gcs, s3. + Backend string `json:"backend,omitempty"` + + // error message if creation failed + Error string `json:"error,omitempty"` + + // The ID of the backup. Must be URL-safe and work as a filesystem path, only lowercase, numbers, underscore, minus characters allowed. + ID string `json:"id,omitempty"` + + // destination path of backup files proper to selected backend + Path string `json:"path,omitempty"` + + // phase of backup creation process + // Enum: [STARTED TRANSFERRING TRANSFERRED SUCCESS FAILED CANCELED] + Status *string `json:"status,omitempty"` +} + +// Validate validates this backup create status response +func (m *BackupCreateStatusResponse) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateStatus(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +var backupCreateStatusResponseTypeStatusPropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["STARTED","TRANSFERRING","TRANSFERRED","SUCCESS","FAILED","CANCELED"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + backupCreateStatusResponseTypeStatusPropEnum = append(backupCreateStatusResponseTypeStatusPropEnum, v) + } +} + +const ( + + // BackupCreateStatusResponseStatusSTARTED captures enum value "STARTED" + BackupCreateStatusResponseStatusSTARTED string = "STARTED" + + // BackupCreateStatusResponseStatusTRANSFERRING captures enum value "TRANSFERRING" + BackupCreateStatusResponseStatusTRANSFERRING string = "TRANSFERRING" + + // BackupCreateStatusResponseStatusTRANSFERRED captures enum value "TRANSFERRED" + BackupCreateStatusResponseStatusTRANSFERRED string = "TRANSFERRED" + + // BackupCreateStatusResponseStatusSUCCESS captures enum value "SUCCESS" + BackupCreateStatusResponseStatusSUCCESS string = "SUCCESS" + + // BackupCreateStatusResponseStatusFAILED captures enum value "FAILED" + BackupCreateStatusResponseStatusFAILED string = "FAILED" + + // BackupCreateStatusResponseStatusCANCELED captures enum value "CANCELED" + BackupCreateStatusResponseStatusCANCELED string = "CANCELED" +) + +// prop value enum +func (m *BackupCreateStatusResponse) validateStatusEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, backupCreateStatusResponseTypeStatusPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *BackupCreateStatusResponse) validateStatus(formats strfmt.Registry) error { + if swag.IsZero(m.Status) { // not required + return nil + } + + // value enum + if err := m.validateStatusEnum("status", "body", *m.Status); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this backup create status response based on context it is used +func (m *BackupCreateStatusResponse) ContextValidate(ctx 
context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *BackupCreateStatusResponse) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *BackupCreateStatusResponse) UnmarshalBinary(b []byte) error { + var res BackupCreateStatusResponse + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/backup_list_response.go b/platform/dbops/binaries/weaviate-src/entities/models/backup_list_response.go new file mode 100644 index 0000000000000000000000000000000000000000..dd3b26c5c84df822b1d86f709bd17587505d0ac2 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/backup_list_response.go @@ -0,0 +1,193 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// BackupListResponse The definition of a backup create response body +// +// swagger:model BackupListResponse +type BackupListResponse []*BackupListResponseItems0 + +// Validate validates this backup list response +func (m BackupListResponse) Validate(formats strfmt.Registry) error { + var res []error + + for i := 0; i < len(m); i++ { + if swag.IsZero(m[i]) { // not required + continue + } + + if m[i] != nil { + if err := m[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName(strconv.Itoa(i)) + } + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// ContextValidate validate this backup list response based on the context it is used +func (m BackupListResponse) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + for i := 0; i < len(m); i++ { + + if m[i] != nil { + if err := m[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName(strconv.Itoa(i)) + } + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// BackupListResponseItems0 backup list response items0 +// +// swagger:model BackupListResponseItems0 +type BackupListResponseItems0 struct { + + // The list of classes for which the existed backup process + Classes []string `json:"classes"` + + // The ID of the backup. 
Must be URL-safe and work as a filesystem path, only lowercase, numbers, underscore, minus characters allowed. + ID string `json:"id,omitempty"` + + // status of backup process + // Enum: [STARTED TRANSFERRING TRANSFERRED SUCCESS FAILED CANCELED] + Status string `json:"status,omitempty"` +} + +// Validate validates this backup list response items0 +func (m *BackupListResponseItems0) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateStatus(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +var backupListResponseItems0TypeStatusPropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["STARTED","TRANSFERRING","TRANSFERRED","SUCCESS","FAILED","CANCELED"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + backupListResponseItems0TypeStatusPropEnum = append(backupListResponseItems0TypeStatusPropEnum, v) + } +} + +const ( + + // BackupListResponseItems0StatusSTARTED captures enum value "STARTED" + BackupListResponseItems0StatusSTARTED string = "STARTED" + + // BackupListResponseItems0StatusTRANSFERRING captures enum value "TRANSFERRING" + BackupListResponseItems0StatusTRANSFERRING string = "TRANSFERRING" + + // BackupListResponseItems0StatusTRANSFERRED captures enum value "TRANSFERRED" + BackupListResponseItems0StatusTRANSFERRED string = "TRANSFERRED" + + // BackupListResponseItems0StatusSUCCESS captures enum value "SUCCESS" + BackupListResponseItems0StatusSUCCESS string = "SUCCESS" + + // BackupListResponseItems0StatusFAILED captures enum value "FAILED" + BackupListResponseItems0StatusFAILED string = "FAILED" + + // BackupListResponseItems0StatusCANCELED captures enum value "CANCELED" + BackupListResponseItems0StatusCANCELED string = "CANCELED" +) + +// prop value enum +func (m *BackupListResponseItems0) validateStatusEnum(path, location string, value string) error { + if err := 
validate.EnumCase(path, location, value, backupListResponseItems0TypeStatusPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *BackupListResponseItems0) validateStatus(formats strfmt.Registry) error { + if swag.IsZero(m.Status) { // not required + return nil + } + + // value enum + if err := m.validateStatusEnum("status", "body", m.Status); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this backup list response items0 based on context it is used +func (m *BackupListResponseItems0) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *BackupListResponseItems0) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *BackupListResponseItems0) UnmarshalBinary(b []byte) error { + var res BackupListResponseItems0 + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/backup_restore_request.go b/platform/dbops/binaries/weaviate-src/entities/models/backup_restore_request.go new file mode 100644 index 0000000000000000000000000000000000000000..36724266018b9796db79c67ef9b27d3d5ac0dc98 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/backup_restore_request.go @@ -0,0 +1,127 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// BackupRestoreRequest Request body for restoring a backup for a set of classes +// +// swagger:model BackupRestoreRequest +type BackupRestoreRequest struct { + + // Custom configuration for the backup restoration process + Config *RestoreConfig `json:"config,omitempty"` + + // List of classes to exclude from the backup restoration process + Exclude []string `json:"exclude"` + + // List of classes to include in the backup restoration process + Include []string `json:"include"` + + // Allows overriding the node names stored in the backup with different ones. Useful when restoring backups to a different environment. + NodeMapping map[string]string `json:"node_mapping,omitempty"` + + // Allows ovewriting the collection alias if there is a conflict + OverwriteAlias bool `json:"overwriteAlias,omitempty"` +} + +// Validate validates this backup restore request +func (m *BackupRestoreRequest) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateConfig(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *BackupRestoreRequest) validateConfig(formats strfmt.Registry) error { + if swag.IsZero(m.Config) { // not required + return nil + } + + if m.Config != nil { + if err := m.Config.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("config") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("config") + } + return err + } + } + + return nil +} + +// ContextValidate validate this backup restore request based on the context it is used +func (m *BackupRestoreRequest) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateConfig(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *BackupRestoreRequest) contextValidateConfig(ctx context.Context, formats strfmt.Registry) error { + + if m.Config != nil { + if err := m.Config.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("config") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("config") + } + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *BackupRestoreRequest) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *BackupRestoreRequest) UnmarshalBinary(b []byte) error { + var res BackupRestoreRequest + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/backup_restore_response.go b/platform/dbops/binaries/weaviate-src/entities/models/backup_restore_response.go new file mode 100644 index 0000000000000000000000000000000000000000..f185b36bcf7c70b6615398dd6d605bed7a782591 --- /dev/null +++ 
b/platform/dbops/binaries/weaviate-src/entities/models/backup_restore_response.go @@ -0,0 +1,143 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// BackupRestoreResponse The definition of a backup restore response body +// +// swagger:model BackupRestoreResponse +type BackupRestoreResponse struct { + + // Backup backend name e.g. filesystem, gcs, s3. + Backend string `json:"backend,omitempty"` + + // The list of classes for which the backup restoration process was started + Classes []string `json:"classes"` + + // error message if restoration failed + Error string `json:"error,omitempty"` + + // The ID of the backup. Must be URL-safe and work as a filesystem path, only lowercase, numbers, underscore, minus characters allowed. + ID string `json:"id,omitempty"` + + // destination path of backup files proper to selected backend + Path string `json:"path,omitempty"` + + // phase of backup restoration process + // Enum: [STARTED TRANSFERRING TRANSFERRED SUCCESS FAILED CANCELED] + Status *string `json:"status,omitempty"` +} + +// Validate validates this backup restore response +func (m *BackupRestoreResponse) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateStatus(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +var backupRestoreResponseTypeStatusPropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["STARTED","TRANSFERRING","TRANSFERRED","SUCCESS","FAILED","CANCELED"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + backupRestoreResponseTypeStatusPropEnum = append(backupRestoreResponseTypeStatusPropEnum, v) + } +} + +const ( + + // BackupRestoreResponseStatusSTARTED captures enum value "STARTED" + BackupRestoreResponseStatusSTARTED string = "STARTED" + + // BackupRestoreResponseStatusTRANSFERRING captures enum value "TRANSFERRING" + BackupRestoreResponseStatusTRANSFERRING string = "TRANSFERRING" + + // BackupRestoreResponseStatusTRANSFERRED captures enum value "TRANSFERRED" + BackupRestoreResponseStatusTRANSFERRED string = "TRANSFERRED" + + // BackupRestoreResponseStatusSUCCESS captures enum value "SUCCESS" + BackupRestoreResponseStatusSUCCESS string = "SUCCESS" + + // BackupRestoreResponseStatusFAILED captures enum value "FAILED" + BackupRestoreResponseStatusFAILED string = "FAILED" + + // BackupRestoreResponseStatusCANCELED captures enum value "CANCELED" + BackupRestoreResponseStatusCANCELED string = "CANCELED" +) + +// prop value enum +func (m *BackupRestoreResponse) validateStatusEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, backupRestoreResponseTypeStatusPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *BackupRestoreResponse) validateStatus(formats strfmt.Registry) error { + if swag.IsZero(m.Status) { // not required + return nil + } + + // value enum + if err := m.validateStatusEnum("status", "body", *m.Status); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this backup restore response based on context it is used +func (m *BackupRestoreResponse) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface 
implementation +func (m *BackupRestoreResponse) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *BackupRestoreResponse) UnmarshalBinary(b []byte) error { + var res BackupRestoreResponse + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/backup_restore_status_response.go b/platform/dbops/binaries/weaviate-src/entities/models/backup_restore_status_response.go new file mode 100644 index 0000000000000000000000000000000000000000..078eff9eec7f604f6d621e4e8ba74deb20351ced --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/backup_restore_status_response.go @@ -0,0 +1,140 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// BackupRestoreStatusResponse The definition of a backup restore metadata +// +// swagger:model BackupRestoreStatusResponse +type BackupRestoreStatusResponse struct { + + // Backup backend name e.g. filesystem, gcs, s3. + Backend string `json:"backend,omitempty"` + + // error message if restoration failed + Error string `json:"error,omitempty"` + + // The ID of the backup. Must be URL-safe and work as a filesystem path, only lowercase, numbers, underscore, minus characters allowed. 
+ ID string `json:"id,omitempty"` + + // destination path of backup files proper to selected backup backend, contains bucket and path + Path string `json:"path,omitempty"` + + // phase of backup restoration process + // Enum: [STARTED TRANSFERRING TRANSFERRED SUCCESS FAILED CANCELED] + Status *string `json:"status,omitempty"` +} + +// Validate validates this backup restore status response +func (m *BackupRestoreStatusResponse) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateStatus(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +var backupRestoreStatusResponseTypeStatusPropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["STARTED","TRANSFERRING","TRANSFERRED","SUCCESS","FAILED","CANCELED"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + backupRestoreStatusResponseTypeStatusPropEnum = append(backupRestoreStatusResponseTypeStatusPropEnum, v) + } +} + +const ( + + // BackupRestoreStatusResponseStatusSTARTED captures enum value "STARTED" + BackupRestoreStatusResponseStatusSTARTED string = "STARTED" + + // BackupRestoreStatusResponseStatusTRANSFERRING captures enum value "TRANSFERRING" + BackupRestoreStatusResponseStatusTRANSFERRING string = "TRANSFERRING" + + // BackupRestoreStatusResponseStatusTRANSFERRED captures enum value "TRANSFERRED" + BackupRestoreStatusResponseStatusTRANSFERRED string = "TRANSFERRED" + + // BackupRestoreStatusResponseStatusSUCCESS captures enum value "SUCCESS" + BackupRestoreStatusResponseStatusSUCCESS string = "SUCCESS" + + // BackupRestoreStatusResponseStatusFAILED captures enum value "FAILED" + BackupRestoreStatusResponseStatusFAILED string = "FAILED" + + // BackupRestoreStatusResponseStatusCANCELED captures enum value "CANCELED" + BackupRestoreStatusResponseStatusCANCELED string = "CANCELED" +) + +// prop value enum +func (m 
*BackupRestoreStatusResponse) validateStatusEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, backupRestoreStatusResponseTypeStatusPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *BackupRestoreStatusResponse) validateStatus(formats strfmt.Registry) error { + if swag.IsZero(m.Status) { // not required + return nil + } + + // value enum + if err := m.validateStatusEnum("status", "body", *m.Status); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this backup restore status response based on context it is used +func (m *BackupRestoreStatusResponse) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *BackupRestoreStatusResponse) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *BackupRestoreStatusResponse) UnmarshalBinary(b []byte) error { + var res BackupRestoreStatusResponse + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/batch_delete.go b/platform/dbops/binaries/weaviate-src/entities/models/batch_delete.go new file mode 100644 index 0000000000000000000000000000000000000000..09b8997b86ce96aa9d06c3096c463c9fb38f75d9 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/batch_delete.go @@ -0,0 +1,218 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// BatchDelete batch delete +// +// swagger:model BatchDelete +type BatchDelete struct { + + // Timestamp of deletion in milliseconds since epoch UTC. + DeletionTimeUnixMilli *int64 `json:"deletionTimeUnixMilli,omitempty"` + + // If true, the call will show which objects would be matched using the specified filter without deleting any objects.

Depending on the configured verbosity, you will either receive a count of affected objects, or a list of IDs. + DryRun *bool `json:"dryRun,omitempty"` + + // match + Match *BatchDeleteMatch `json:"match,omitempty"` + + // Controls the verbosity of the output, possible values are: "minimal", "verbose". Defaults to "minimal". + Output *string `json:"output,omitempty"` +} + +// Validate validates this batch delete +func (m *BatchDelete) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateMatch(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *BatchDelete) validateMatch(formats strfmt.Registry) error { + if swag.IsZero(m.Match) { // not required + return nil + } + + if m.Match != nil { + if err := m.Match.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("match") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("match") + } + return err + } + } + + return nil +} + +// ContextValidate validate this batch delete based on the context it is used +func (m *BatchDelete) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateMatch(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *BatchDelete) contextValidateMatch(ctx context.Context, formats strfmt.Registry) error { + + if m.Match != nil { + if err := m.Match.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("match") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("match") + } + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *BatchDelete) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *BatchDelete) UnmarshalBinary(b []byte) error { + var res BatchDelete + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// BatchDeleteMatch Outlines how to find the objects to be deleted. +// +// swagger:model BatchDeleteMatch +type BatchDeleteMatch struct { + + // Class (name) which objects will be deleted. + // Example: City + Class string `json:"class,omitempty"` + + // Filter to limit the objects to be deleted. + Where *WhereFilter `json:"where,omitempty"` +} + +// Validate validates this batch delete match +func (m *BatchDeleteMatch) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateWhere(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *BatchDeleteMatch) validateWhere(formats strfmt.Registry) error { + if swag.IsZero(m.Where) { // not required + return nil + } + + if m.Where != nil { + if err := m.Where.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("match" + "." + "where") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("match" + "." 
+ "where") + } + return err + } + } + + return nil +} + +// ContextValidate validate this batch delete match based on the context it is used +func (m *BatchDeleteMatch) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateWhere(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *BatchDeleteMatch) contextValidateWhere(ctx context.Context, formats strfmt.Registry) error { + + if m.Where != nil { + if err := m.Where.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("match" + "." + "where") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("match" + "." + "where") + } + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *BatchDeleteMatch) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *BatchDeleteMatch) UnmarshalBinary(b []byte) error { + var res BatchDeleteMatch + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/batch_delete_response.go b/platform/dbops/binaries/weaviate-src/entities/models/batch_delete_response.go new file mode 100644 index 0000000000000000000000000000000000000000..60d6ae46146c14403585193d0991b83d11100480 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/batch_delete_response.go @@ -0,0 +1,543 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// BatchDeleteResponse Delete Objects response. +// +// swagger:model BatchDeleteResponse +type BatchDeleteResponse struct { + + // Timestamp of deletion in milliseconds since epoch UTC. + DeletionTimeUnixMilli *int64 `json:"deletionTimeUnixMilli,omitempty"` + + // If true, objects will not be deleted yet, but merely listed. Defaults to false. + DryRun *bool `json:"dryRun,omitempty"` + + // match + Match *BatchDeleteResponseMatch `json:"match,omitempty"` + + // Controls the verbosity of the output, possible values are: "minimal", "verbose". Defaults to "minimal". + Output *string `json:"output,omitempty"` + + // results + Results *BatchDeleteResponseResults `json:"results,omitempty"` +} + +// Validate validates this batch delete response +func (m *BatchDeleteResponse) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateMatch(formats); err != nil { + res = append(res, err) + } + + if err := m.validateResults(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *BatchDeleteResponse) validateMatch(formats strfmt.Registry) error { + if swag.IsZero(m.Match) { // not required + return nil + } + + if m.Match != nil { + if err := m.Match.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("match") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("match") + } + return err + } + } + + return nil +} + +func (m *BatchDeleteResponse) validateResults(formats strfmt.Registry) error { + if swag.IsZero(m.Results) { // not required + return nil + } + + if m.Results != nil { + if err := m.Results.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("results") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("results") + } + return err + } + } + + return nil +} + +// ContextValidate validate this batch delete response based on the context it is used +func (m *BatchDeleteResponse) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateMatch(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateResults(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *BatchDeleteResponse) contextValidateMatch(ctx context.Context, formats strfmt.Registry) error { + + if m.Match != nil { + if err := m.Match.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("match") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("match") + } + return err + } + } + + return nil +} + +func (m *BatchDeleteResponse) contextValidateResults(ctx context.Context, formats strfmt.Registry) error { + + if m.Results != nil { + if err := m.Results.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("results") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("results") + } + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *BatchDeleteResponse) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *BatchDeleteResponse) UnmarshalBinary(b []byte) error { + var res BatchDeleteResponse + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// BatchDeleteResponseMatch Outlines how to find the objects to be deleted. +// +// swagger:model BatchDeleteResponseMatch +type BatchDeleteResponseMatch struct { + + // Class (name) which objects will be deleted. + // Example: City + Class string `json:"class,omitempty"` + + // Filter to limit the objects to be deleted. + Where *WhereFilter `json:"where,omitempty"` +} + +// Validate validates this batch delete response match +func (m *BatchDeleteResponseMatch) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateWhere(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *BatchDeleteResponseMatch) validateWhere(formats strfmt.Registry) error { + if swag.IsZero(m.Where) { // not required + return nil + } + + if m.Where != nil { + if err := m.Where.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("match" + "." + "where") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("match" + "." + "where") + } + return err + } + } + + return nil +} + +// ContextValidate validate this batch delete response match based on the context it is used +func (m *BatchDeleteResponseMatch) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateWhere(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *BatchDeleteResponseMatch) contextValidateWhere(ctx context.Context, formats strfmt.Registry) error { + + if m.Where != nil { + if err := m.Where.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("match" + "." + "where") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("match" + "." + "where") + } + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *BatchDeleteResponseMatch) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *BatchDeleteResponseMatch) UnmarshalBinary(b []byte) error { + var res BatchDeleteResponseMatch + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// BatchDeleteResponseResults batch delete response results +// +// swagger:model BatchDeleteResponseResults +type BatchDeleteResponseResults struct { + + // How many objects should have been deleted but could not be deleted. 
+ Failed int64 `json:"failed"` + + // The most amount of objects that can be deleted in a single query, equals QUERY_MAXIMUM_RESULTS. + Limit int64 `json:"limit"` + + // How many objects were matched by the filter. + Matches int64 `json:"matches"` + + // With output set to "minimal" only objects with error occurred will the be described. Successfully deleted objects would be omitted. Output set to "verbose" will list all of the objets with their respective statuses. + Objects []*BatchDeleteResponseResultsObjectsItems0 `json:"objects"` + + // How many objects were successfully deleted in this round. + Successful int64 `json:"successful"` +} + +// Validate validates this batch delete response results +func (m *BatchDeleteResponseResults) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateObjects(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *BatchDeleteResponseResults) validateObjects(formats strfmt.Registry) error { + if swag.IsZero(m.Objects) { // not required + return nil + } + + for i := 0; i < len(m.Objects); i++ { + if swag.IsZero(m.Objects[i]) { // not required + continue + } + + if m.Objects[i] != nil { + if err := m.Objects[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("results" + "." + "objects" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("results" + "." + "objects" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// ContextValidate validate this batch delete response results based on the context it is used +func (m *BatchDeleteResponseResults) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateObjects(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *BatchDeleteResponseResults) contextValidateObjects(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.Objects); i++ { + + if m.Objects[i] != nil { + if err := m.Objects[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("results" + "." + "objects" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("results" + "." + "objects" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *BatchDeleteResponseResults) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *BatchDeleteResponseResults) UnmarshalBinary(b []byte) error { + var res BatchDeleteResponseResults + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// BatchDeleteResponseResultsObjectsItems0 Results for this specific Object. +// +// swagger:model BatchDeleteResponseResultsObjectsItems0 +type BatchDeleteResponseResultsObjectsItems0 struct { + + // errors + Errors *ErrorResponse `json:"errors,omitempty"` + + // ID of the Object. 
+ // Format: uuid + ID strfmt.UUID `json:"id,omitempty"` + + // status + // Enum: [SUCCESS DRYRUN FAILED] + Status *string `json:"status,omitempty"` +} + +// Validate validates this batch delete response results objects items0 +func (m *BatchDeleteResponseResultsObjectsItems0) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateErrors(formats); err != nil { + res = append(res, err) + } + + if err := m.validateID(formats); err != nil { + res = append(res, err) + } + + if err := m.validateStatus(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *BatchDeleteResponseResultsObjectsItems0) validateErrors(formats strfmt.Registry) error { + if swag.IsZero(m.Errors) { // not required + return nil + } + + if m.Errors != nil { + if err := m.Errors.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("errors") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("errors") + } + return err + } + } + + return nil +} + +func (m *BatchDeleteResponseResultsObjectsItems0) validateID(formats strfmt.Registry) error { + if swag.IsZero(m.ID) { // not required + return nil + } + + if err := validate.FormatOf("id", "body", "uuid", m.ID.String(), formats); err != nil { + return err + } + + return nil +} + +var batchDeleteResponseResultsObjectsItems0TypeStatusPropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["SUCCESS","DRYRUN","FAILED"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + batchDeleteResponseResultsObjectsItems0TypeStatusPropEnum = append(batchDeleteResponseResultsObjectsItems0TypeStatusPropEnum, v) + } +} + +const ( + + // BatchDeleteResponseResultsObjectsItems0StatusSUCCESS captures enum value "SUCCESS" + BatchDeleteResponseResultsObjectsItems0StatusSUCCESS string = "SUCCESS" + + // 
BatchDeleteResponseResultsObjectsItems0StatusDRYRUN captures enum value "DRYRUN" + BatchDeleteResponseResultsObjectsItems0StatusDRYRUN string = "DRYRUN" + + // BatchDeleteResponseResultsObjectsItems0StatusFAILED captures enum value "FAILED" + BatchDeleteResponseResultsObjectsItems0StatusFAILED string = "FAILED" +) + +// prop value enum +func (m *BatchDeleteResponseResultsObjectsItems0) validateStatusEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, batchDeleteResponseResultsObjectsItems0TypeStatusPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *BatchDeleteResponseResultsObjectsItems0) validateStatus(formats strfmt.Registry) error { + if swag.IsZero(m.Status) { // not required + return nil + } + + // value enum + if err := m.validateStatusEnum("status", "body", *m.Status); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this batch delete response results objects items0 based on the context it is used +func (m *BatchDeleteResponseResultsObjectsItems0) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateErrors(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *BatchDeleteResponseResultsObjectsItems0) contextValidateErrors(ctx context.Context, formats strfmt.Registry) error { + + if m.Errors != nil { + if err := m.Errors.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("errors") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("errors") + } + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *BatchDeleteResponseResultsObjectsItems0) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *BatchDeleteResponseResultsObjectsItems0) UnmarshalBinary(b []byte) error { + var res BatchDeleteResponseResultsObjectsItems0 + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/batch_reference.go b/platform/dbops/binaries/weaviate-src/entities/models/batch_reference.go new file mode 100644 index 0000000000000000000000000000000000000000..1e6fd173b6d080191fc90445c519b23c8ab7eaf3 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/batch_reference.go @@ -0,0 +1,110 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// BatchReference batch reference +// +// swagger:model BatchReference +type BatchReference struct { + + // Long-form beacon-style URI to identify the source of the cross-ref including the property name. Should be in the form of weaviate://localhost////, where must be one of 'objects', 'objects' and and must represent the cross-ref property of source class to be used. + // Example: weaviate://localhost/Zoo/a5d09582-4239-4702-81c9-92a6e0122bb4/hasAnimals + // Format: uri + From strfmt.URI `json:"from,omitempty"` + + // Name of the reference tenant. + Tenant string `json:"tenant,omitempty"` + + // Short-form URI to point to the cross-ref. Should be in the form of weaviate://localhost/ for the example of a local cross-ref to an object + // Example: weaviate://localhost/97525810-a9a5-4eb0-858a-71449aeb007f + // Format: uri + To strfmt.URI `json:"to,omitempty"` +} + +// Validate validates this batch reference +func (m *BatchReference) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateFrom(formats); err != nil { + res = append(res, err) + } + + if err := m.validateTo(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *BatchReference) validateFrom(formats strfmt.Registry) error { + if swag.IsZero(m.From) { // not required + return nil + } + + if err := validate.FormatOf("from", "body", "uri", m.From.String(), formats); err != nil { + return err + } + + return nil +} + +func (m *BatchReference) validateTo(formats strfmt.Registry) error { + if swag.IsZero(m.To) { // not required + return nil + } + + if err := validate.FormatOf("to", "body", "uri", m.To.String(), formats); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this batch reference based on context it is used +func (m *BatchReference) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *BatchReference) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *BatchReference) UnmarshalBinary(b []byte) error { + var res BatchReference + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/batch_reference_response.go b/platform/dbops/binaries/weaviate-src/entities/models/batch_reference_response.go new file mode 100644 index 0000000000000000000000000000000000000000..d5636ebad9aa47516a394e7b33ff735f3baadfd2 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/batch_reference_response.go @@ -0,0 +1,314 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// BatchReferenceResponse batch reference response +// +// swagger:model BatchReferenceResponse +type BatchReferenceResponse struct { + BatchReference + + // result + Result *BatchReferenceResponseAO1Result `json:"result,omitempty"` +} + +// UnmarshalJSON unmarshals this object from a JSON structure +func (m *BatchReferenceResponse) UnmarshalJSON(raw []byte) error { + // AO0 + var aO0 BatchReference + if err := swag.ReadJSON(raw, &aO0); err != nil { + return err + } + m.BatchReference = aO0 + + // AO1 + var dataAO1 struct { + Result *BatchReferenceResponseAO1Result `json:"result,omitempty"` + } + if err := swag.ReadJSON(raw, &dataAO1); err != nil { + return err + } + + m.Result = dataAO1.Result + + return nil +} + +// MarshalJSON marshals this object to a JSON structure +func (m BatchReferenceResponse) MarshalJSON() ([]byte, error) { + _parts := make([][]byte, 0, 2) + + aO0, err := swag.WriteJSON(m.BatchReference) + if err != nil { + return nil, err + } + _parts = append(_parts, aO0) + var dataAO1 struct { + Result *BatchReferenceResponseAO1Result `json:"result,omitempty"` + } + + dataAO1.Result = m.Result + + jsonDataAO1, errAO1 := swag.WriteJSON(dataAO1) + if errAO1 != nil { + return nil, errAO1 + } + _parts = append(_parts, jsonDataAO1) + return swag.ConcatJSON(_parts...), nil +} + +// Validate validates this batch reference response +func (m *BatchReferenceResponse) Validate(formats strfmt.Registry) error { + var res []error + + // validation for a type composition with BatchReference + if err := m.BatchReference.Validate(formats); err != nil { + res = append(res, err) + } + + if err := m.validateResult(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return 
errors.CompositeValidationError(res...) + } + return nil +} + +func (m *BatchReferenceResponse) validateResult(formats strfmt.Registry) error { + + if swag.IsZero(m.Result) { // not required + return nil + } + + if m.Result != nil { + if err := m.Result.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("result") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("result") + } + return err + } + } + + return nil +} + +// ContextValidate validate this batch reference response based on the context it is used +func (m *BatchReferenceResponse) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + // validation for a type composition with BatchReference + if err := m.BatchReference.ContextValidate(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateResult(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *BatchReferenceResponse) contextValidateResult(ctx context.Context, formats strfmt.Registry) error { + + if m.Result != nil { + if err := m.Result.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("result") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("result") + } + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *BatchReferenceResponse) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *BatchReferenceResponse) UnmarshalBinary(b []byte) error { + var res BatchReferenceResponse + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// BatchReferenceResponseAO1Result Results for this specific reference. 
+// +// swagger:model BatchReferenceResponseAO1Result +type BatchReferenceResponseAO1Result struct { + + // errors + Errors *ErrorResponse `json:"errors,omitempty"` + + // status + // Enum: [SUCCESS FAILED] + Status *string `json:"status,omitempty"` +} + +// Validate validates this batch reference response a o1 result +func (m *BatchReferenceResponseAO1Result) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateErrors(formats); err != nil { + res = append(res, err) + } + + if err := m.validateStatus(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *BatchReferenceResponseAO1Result) validateErrors(formats strfmt.Registry) error { + if swag.IsZero(m.Errors) { // not required + return nil + } + + if m.Errors != nil { + if err := m.Errors.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("result" + "." + "errors") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("result" + "." 
+ "errors") + } + return err + } + } + + return nil +} + +var batchReferenceResponseAO1ResultTypeStatusPropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["SUCCESS","FAILED"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + batchReferenceResponseAO1ResultTypeStatusPropEnum = append(batchReferenceResponseAO1ResultTypeStatusPropEnum, v) + } +} + +const ( + + // BatchReferenceResponseAO1ResultStatusSUCCESS captures enum value "SUCCESS" + BatchReferenceResponseAO1ResultStatusSUCCESS string = "SUCCESS" + + // BatchReferenceResponseAO1ResultStatusFAILED captures enum value "FAILED" + BatchReferenceResponseAO1ResultStatusFAILED string = "FAILED" +) + +// prop value enum +func (m *BatchReferenceResponseAO1Result) validateStatusEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, batchReferenceResponseAO1ResultTypeStatusPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *BatchReferenceResponseAO1Result) validateStatus(formats strfmt.Registry) error { + if swag.IsZero(m.Status) { // not required + return nil + } + + // value enum + if err := m.validateStatusEnum("result"+"."+"status", "body", *m.Status); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this batch reference response a o1 result based on the context it is used +func (m *BatchReferenceResponseAO1Result) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateErrors(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *BatchReferenceResponseAO1Result) contextValidateErrors(ctx context.Context, formats strfmt.Registry) error { + + if m.Errors != nil { + if err := m.Errors.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("result" + "." + "errors") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("result" + "." + "errors") + } + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *BatchReferenceResponseAO1Result) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *BatchReferenceResponseAO1Result) UnmarshalBinary(b []byte) error { + var res BatchReferenceResponseAO1Result + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/batch_stats.go b/platform/dbops/binaries/weaviate-src/entities/models/batch_stats.go new file mode 100644 index 0000000000000000000000000000000000000000..a82c82e70fc1968ba47998803201cb521185b57f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/batch_stats.go @@ -0,0 +1,64 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// BatchStats The summary of a nodes batch queue congestion status. 
+// +// swagger:model BatchStats +type BatchStats struct { + + // How many objects are currently in the batch queue. + QueueLength *int64 `json:"queueLength,omitempty"` + + // How many objects are approximately processed from the batch queue per second. + RatePerSecond int64 `json:"ratePerSecond"` +} + +// Validate validates this batch stats +func (m *BatchStats) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this batch stats based on context it is used +func (m *BatchStats) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *BatchStats) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *BatchStats) UnmarshalBinary(b []byte) error { + var res BatchStats + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/c11y_extension.go b/platform/dbops/binaries/weaviate-src/entities/models/c11y_extension.go new file mode 100644 index 0000000000000000000000000000000000000000..3a2db64ed31e3a02eaa54bcd10eacc7dfb9490ab --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/c11y_extension.go @@ -0,0 +1,68 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// C11yExtension A resource describing an extension to the contextinoary, containing both the identifier and the definition of the extension +// +// swagger:model C11yExtension +type C11yExtension struct { + + // The new concept you want to extend. Must be an all-lowercase single word, or a space delimited compound word. Examples: 'foobarium', 'my custom concept' + // Example: foobarium + Concept string `json:"concept,omitempty"` + + // A list of space-delimited words or a sentence describing what the custom concept is about. Avoid using the custom concept itself. An Example definition for the custom concept 'foobarium': would be 'a naturally occurring element which can only be seen by programmers' + Definition string `json:"definition,omitempty"` + + // Weight of the definition of the new concept where 1='override existing definition entirely' and 0='ignore custom definition'. Note that if the custom concept is not present in the contextionary yet, the weight cannot be less than 1. 
+ Weight float32 `json:"weight,omitempty"` +} + +// Validate validates this c11y extension +func (m *C11yExtension) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this c11y extension based on context it is used +func (m *C11yExtension) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *C11yExtension) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *C11yExtension) UnmarshalBinary(b []byte) error { + var res C11yExtension + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/c11y_nearest_neighbors.go b/platform/dbops/binaries/weaviate-src/entities/models/c11y_nearest_neighbors.go new file mode 100644 index 0000000000000000000000000000000000000000..f44257a49c9c35533d77038607661e70cc687f3b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/c11y_nearest_neighbors.go @@ -0,0 +1,124 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// C11yNearestNeighbors C11y function to show the nearest neighbors to a word. 
+// +// swagger:model C11yNearestNeighbors +type C11yNearestNeighbors []*C11yNearestNeighborsItems0 + +// Validate validates this c11y nearest neighbors +func (m C11yNearestNeighbors) Validate(formats strfmt.Registry) error { + var res []error + + for i := 0; i < len(m); i++ { + if swag.IsZero(m[i]) { // not required + continue + } + + if m[i] != nil { + if err := m[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName(strconv.Itoa(i)) + } + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// ContextValidate validate this c11y nearest neighbors based on the context it is used +func (m C11yNearestNeighbors) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + for i := 0; i < len(m); i++ { + + if m[i] != nil { + if err := m[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName(strconv.Itoa(i)) + } + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +// C11yNearestNeighborsItems0 c11y nearest neighbors items0 +// +// swagger:model C11yNearestNeighborsItems0 +type C11yNearestNeighborsItems0 struct { + + // distance + Distance float32 `json:"distance,omitempty"` + + // word + Word string `json:"word,omitempty"` +} + +// Validate validates this c11y nearest neighbors items0 +func (m *C11yNearestNeighborsItems0) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this c11y nearest neighbors items0 based on context it is used +func (m *C11yNearestNeighborsItems0) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *C11yNearestNeighborsItems0) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *C11yNearestNeighborsItems0) UnmarshalBinary(b []byte) error { + var res C11yNearestNeighborsItems0 + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/c11y_vector.go b/platform/dbops/binaries/weaviate-src/entities/models/c11y_vector.go new file mode 100644 index 0000000000000000000000000000000000000000..ed14ce5a5e0fc1a30a2b72298f8a85725f26ff77 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/c11y_vector.go @@ -0,0 +1,38 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" +) + +// C11yVector A vector representation of the object in the Contextionary. If provided at object creation, this wil take precedence over any vectorizer setting. +// +// swagger:model C11yVector +type C11yVector []float32 + +// Validate validates this c11y vector +func (m C11yVector) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this c11y vector based on context it is used +func (m C11yVector) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/c11y_vector_based_question.go b/platform/dbops/binaries/weaviate-src/entities/models/c11y_vector_based_question.go new file mode 100644 index 0000000000000000000000000000000000000000..c3925ee47e180741d60af9c623269b20ceaa6980 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/c11y_vector_based_question.go @@ -0,0 +1,265 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// C11yVectorBasedQuestion Receive question based on array of classes, properties and values. 
+// +// swagger:model C11yVectorBasedQuestion +type C11yVectorBasedQuestion []*C11yVectorBasedQuestionItems0 + +// Validate validates this c11y vector based question +func (m C11yVectorBasedQuestion) Validate(formats strfmt.Registry) error { + var res []error + + for i := 0; i < len(m); i++ { + if swag.IsZero(m[i]) { // not required + continue + } + + if m[i] != nil { + if err := m[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName(strconv.Itoa(i)) + } + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// ContextValidate validate this c11y vector based question based on the context it is used +func (m C11yVectorBasedQuestion) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + for i := 0; i < len(m); i++ { + + if m[i] != nil { + if err := m[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName(strconv.Itoa(i)) + } + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// C11yVectorBasedQuestionItems0 c11y vector based question items0 +// +// swagger:model C11yVectorBasedQuestionItems0 +type C11yVectorBasedQuestionItems0 struct { + + // Vectorized properties. + // Max Items: 300 + // Min Items: 300 + ClassProps []*C11yVectorBasedQuestionItems0ClassPropsItems0 `json:"classProps"` + + // Vectorized classname. 
+ // Max Items: 300 + // Min Items: 300 + ClassVectors []float32 `json:"classVectors"` +} + +// Validate validates this c11y vector based question items0 +func (m *C11yVectorBasedQuestionItems0) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateClassProps(formats); err != nil { + res = append(res, err) + } + + if err := m.validateClassVectors(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *C11yVectorBasedQuestionItems0) validateClassProps(formats strfmt.Registry) error { + if swag.IsZero(m.ClassProps) { // not required + return nil + } + + iClassPropsSize := int64(len(m.ClassProps)) + + if err := validate.MinItems("classProps", "body", iClassPropsSize, 300); err != nil { + return err + } + + if err := validate.MaxItems("classProps", "body", iClassPropsSize, 300); err != nil { + return err + } + + for i := 0; i < len(m.ClassProps); i++ { + if swag.IsZero(m.ClassProps[i]) { // not required + continue + } + + if m.ClassProps[i] != nil { + if err := m.ClassProps[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("classProps" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("classProps" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *C11yVectorBasedQuestionItems0) validateClassVectors(formats strfmt.Registry) error { + if swag.IsZero(m.ClassVectors) { // not required + return nil + } + + iClassVectorsSize := int64(len(m.ClassVectors)) + + if err := validate.MinItems("classVectors", "body", iClassVectorsSize, 300); err != nil { + return err + } + + if err := validate.MaxItems("classVectors", "body", iClassVectorsSize, 300); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this c11y vector based question items0 based on the context it is used +func (m *C11yVectorBasedQuestionItems0) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateClassProps(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *C11yVectorBasedQuestionItems0) contextValidateClassProps(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.ClassProps); i++ { + + if m.ClassProps[i] != nil { + if err := m.ClassProps[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("classProps" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("classProps" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *C11yVectorBasedQuestionItems0) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *C11yVectorBasedQuestionItems0) UnmarshalBinary(b []byte) error { + var res C11yVectorBasedQuestionItems0 + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// C11yVectorBasedQuestionItems0ClassPropsItems0 c11y vector based question items0 class props items0 +// +// swagger:model C11yVectorBasedQuestionItems0ClassPropsItems0 +type C11yVectorBasedQuestionItems0ClassPropsItems0 struct { + + // props vectors + PropsVectors []float32 `json:"propsVectors"` + + // String with valuename. + Value string `json:"value,omitempty"` +} + +// Validate validates this c11y vector based question items0 class props items0 +func (m *C11yVectorBasedQuestionItems0ClassPropsItems0) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this c11y vector based question items0 class props items0 based on context it is used +func (m *C11yVectorBasedQuestionItems0ClassPropsItems0) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *C11yVectorBasedQuestionItems0ClassPropsItems0) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *C11yVectorBasedQuestionItems0ClassPropsItems0) UnmarshalBinary(b []byte) error { + var res C11yVectorBasedQuestionItems0ClassPropsItems0 + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/c11y_words_response.go 
b/platform/dbops/binaries/weaviate-src/entities/models/c11y_words_response.go new file mode 100644 index 0000000000000000000000000000000000000000..3646d3d66aec06697e2cc872c3c59c734a10f6d3 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/c11y_words_response.go @@ -0,0 +1,531 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// C11yWordsResponse An array of available words and contexts. +// +// swagger:model C11yWordsResponse +type C11yWordsResponse struct { + + // concatenated word + ConcatenatedWord *C11yWordsResponseConcatenatedWord `json:"concatenatedWord,omitempty"` + + // Weighted results for per individual word + IndividualWords []*C11yWordsResponseIndividualWordsItems0 `json:"individualWords"` +} + +// Validate validates this c11y words response +func (m *C11yWordsResponse) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateConcatenatedWord(formats); err != nil { + res = append(res, err) + } + + if err := m.validateIndividualWords(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *C11yWordsResponse) validateConcatenatedWord(formats strfmt.Registry) error { + if swag.IsZero(m.ConcatenatedWord) { // not required + return nil + } + + if m.ConcatenatedWord != nil { + if err := m.ConcatenatedWord.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("concatenatedWord") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("concatenatedWord") + } + return err + } + } + + return nil +} + +func (m *C11yWordsResponse) validateIndividualWords(formats strfmt.Registry) error { + if swag.IsZero(m.IndividualWords) { // not required + return nil + } + + for i := 0; i < len(m.IndividualWords); i++ { + if swag.IsZero(m.IndividualWords[i]) { // not required + continue + } + + if m.IndividualWords[i] != nil { + if err := m.IndividualWords[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("individualWords" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("individualWords" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// ContextValidate validate this c11y words response based on the context it is used +func (m *C11yWordsResponse) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateConcatenatedWord(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateIndividualWords(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *C11yWordsResponse) contextValidateConcatenatedWord(ctx context.Context, formats strfmt.Registry) error { + + if m.ConcatenatedWord != nil { + if err := m.ConcatenatedWord.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("concatenatedWord") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("concatenatedWord") + } + return err + } + } + + return nil +} + +func (m *C11yWordsResponse) contextValidateIndividualWords(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.IndividualWords); i++ { + + if m.IndividualWords[i] != nil { + if err := m.IndividualWords[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("individualWords" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("individualWords" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *C11yWordsResponse) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *C11yWordsResponse) UnmarshalBinary(b []byte) error { + var res C11yWordsResponse + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// C11yWordsResponseConcatenatedWord Weighted results for all words +// +// swagger:model C11yWordsResponseConcatenatedWord +type C11yWordsResponseConcatenatedWord struct { + + // concatenated nearest neighbors + ConcatenatedNearestNeighbors C11yNearestNeighbors `json:"concatenatedNearestNeighbors,omitempty"` + + // concatenated vector + ConcatenatedVector C11yVector `json:"concatenatedVector,omitempty"` + + // concatenated word + ConcatenatedWord string `json:"concatenatedWord,omitempty"` + + // single words + SingleWords []string 
`json:"singleWords"` +} + +// Validate validates this c11y words response concatenated word +func (m *C11yWordsResponseConcatenatedWord) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateConcatenatedNearestNeighbors(formats); err != nil { + res = append(res, err) + } + + if err := m.validateConcatenatedVector(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *C11yWordsResponseConcatenatedWord) validateConcatenatedNearestNeighbors(formats strfmt.Registry) error { + if swag.IsZero(m.ConcatenatedNearestNeighbors) { // not required + return nil + } + + if err := m.ConcatenatedNearestNeighbors.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("concatenatedWord" + "." + "concatenatedNearestNeighbors") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("concatenatedWord" + "." + "concatenatedNearestNeighbors") + } + return err + } + + return nil +} + +func (m *C11yWordsResponseConcatenatedWord) validateConcatenatedVector(formats strfmt.Registry) error { + if swag.IsZero(m.ConcatenatedVector) { // not required + return nil + } + + if err := m.ConcatenatedVector.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("concatenatedWord" + "." + "concatenatedVector") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("concatenatedWord" + "." 
+ "concatenatedVector") + } + return err + } + + return nil +} + +// ContextValidate validate this c11y words response concatenated word based on the context it is used +func (m *C11yWordsResponseConcatenatedWord) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateConcatenatedNearestNeighbors(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateConcatenatedVector(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *C11yWordsResponseConcatenatedWord) contextValidateConcatenatedNearestNeighbors(ctx context.Context, formats strfmt.Registry) error { + + if err := m.ConcatenatedNearestNeighbors.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("concatenatedWord" + "." + "concatenatedNearestNeighbors") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("concatenatedWord" + "." + "concatenatedNearestNeighbors") + } + return err + } + + return nil +} + +func (m *C11yWordsResponseConcatenatedWord) contextValidateConcatenatedVector(ctx context.Context, formats strfmt.Registry) error { + + if err := m.ConcatenatedVector.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("concatenatedWord" + "." + "concatenatedVector") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("concatenatedWord" + "." 
+ "concatenatedVector") + } + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *C11yWordsResponseConcatenatedWord) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *C11yWordsResponseConcatenatedWord) UnmarshalBinary(b []byte) error { + var res C11yWordsResponseConcatenatedWord + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// C11yWordsResponseIndividualWordsItems0 c11y words response individual words items0 +// +// swagger:model C11yWordsResponseIndividualWordsItems0 +type C11yWordsResponseIndividualWordsItems0 struct { + + // info + Info *C11yWordsResponseIndividualWordsItems0Info `json:"info,omitempty"` + + // present + Present bool `json:"present,omitempty"` + + // word + Word string `json:"word,omitempty"` +} + +// Validate validates this c11y words response individual words items0 +func (m *C11yWordsResponseIndividualWordsItems0) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateInfo(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *C11yWordsResponseIndividualWordsItems0) validateInfo(formats strfmt.Registry) error { + if swag.IsZero(m.Info) { // not required + return nil + } + + if m.Info != nil { + if err := m.Info.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("info") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("info") + } + return err + } + } + + return nil +} + +// ContextValidate validate this c11y words response individual words items0 based on the context it is used +func (m *C11yWordsResponseIndividualWordsItems0) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateInfo(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *C11yWordsResponseIndividualWordsItems0) contextValidateInfo(ctx context.Context, formats strfmt.Registry) error { + + if m.Info != nil { + if err := m.Info.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("info") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("info") + } + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *C11yWordsResponseIndividualWordsItems0) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *C11yWordsResponseIndividualWordsItems0) UnmarshalBinary(b []byte) error { + var res C11yWordsResponseIndividualWordsItems0 + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// C11yWordsResponseIndividualWordsItems0Info c11y words response individual words items0 info +// +// swagger:model C11yWordsResponseIndividualWordsItems0Info +type 
C11yWordsResponseIndividualWordsItems0Info struct { + + // nearest neighbors + NearestNeighbors C11yNearestNeighbors `json:"nearestNeighbors,omitempty"` + + // vector + Vector C11yVector `json:"vector,omitempty"` +} + +// Validate validates this c11y words response individual words items0 info +func (m *C11yWordsResponseIndividualWordsItems0Info) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateNearestNeighbors(formats); err != nil { + res = append(res, err) + } + + if err := m.validateVector(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *C11yWordsResponseIndividualWordsItems0Info) validateNearestNeighbors(formats strfmt.Registry) error { + if swag.IsZero(m.NearestNeighbors) { // not required + return nil + } + + if err := m.NearestNeighbors.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("info" + "." + "nearestNeighbors") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("info" + "." + "nearestNeighbors") + } + return err + } + + return nil +} + +func (m *C11yWordsResponseIndividualWordsItems0Info) validateVector(formats strfmt.Registry) error { + if swag.IsZero(m.Vector) { // not required + return nil + } + + if err := m.Vector.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("info" + "." + "vector") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("info" + "." 
+ "vector") + } + return err + } + + return nil +} + +// ContextValidate validate this c11y words response individual words items0 info based on the context it is used +func (m *C11yWordsResponseIndividualWordsItems0Info) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateNearestNeighbors(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateVector(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *C11yWordsResponseIndividualWordsItems0Info) contextValidateNearestNeighbors(ctx context.Context, formats strfmt.Registry) error { + + if err := m.NearestNeighbors.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("info" + "." + "nearestNeighbors") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("info" + "." + "nearestNeighbors") + } + return err + } + + return nil +} + +func (m *C11yWordsResponseIndividualWordsItems0Info) contextValidateVector(ctx context.Context, formats strfmt.Registry) error { + + if err := m.Vector.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("info" + "." + "vector") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("info" + "." 
+ "vector") + } + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *C11yWordsResponseIndividualWordsItems0Info) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *C11yWordsResponseIndividualWordsItems0Info) UnmarshalBinary(b []byte) error { + var res C11yWordsResponseIndividualWordsItems0Info + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/class.go b/platform/dbops/binaries/weaviate-src/entities/models/class.go new file mode 100644 index 0000000000000000000000000000000000000000..7eba527563b5c102a0edc1da360f6249afd4f545 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/class.go @@ -0,0 +1,339 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// Class class +// +// swagger:model Class +type Class struct { + + // Name of the class (a.k.a. 'collection') (required). Multiple words should be concatenated in CamelCase, e.g. `ArticleAuthor`. + Class string `json:"class,omitempty"` + + // Description of the collection for metadata purposes. 
+ Description string `json:"description,omitempty"` + + // inverted index config + InvertedIndexConfig *InvertedIndexConfig `json:"invertedIndexConfig,omitempty"` + + // Configuration specific to modules in a collection context. + ModuleConfig interface{} `json:"moduleConfig,omitempty"` + + // multi tenancy config + MultiTenancyConfig *MultiTenancyConfig `json:"multiTenancyConfig,omitempty"` + + // Define properties of the collection. + Properties []*Property `json:"properties"` + + // replication config + ReplicationConfig *ReplicationConfig `json:"replicationConfig,omitempty"` + + // Manage how the index should be sharded and distributed in the cluster + ShardingConfig interface{} `json:"shardingConfig,omitempty"` + + // Configure named vectors. Either use this field or `vectorizer`, `vectorIndexType`, and `vectorIndexConfig` fields. Available from `v1.24.0`. + VectorConfig map[string]VectorConfig `json:"vectorConfig,omitempty"` + + // Vector-index config, that is specific to the type of index selected in vectorIndexType + VectorIndexConfig interface{} `json:"vectorIndexConfig,omitempty"` + + // Name of the vector index to use, eg. (HNSW) + VectorIndexType string `json:"vectorIndexType,omitempty"` + + // Specify how the vectors for this class should be determined. The options are either 'none' - this means you have to import a vector with each object yourself - or the name of a module that provides vectorization capabilities, such as 'text2vec-contextionary'. If left empty, it will use the globally configured default which can itself either be 'none' or a specific module. 
+ Vectorizer string `json:"vectorizer,omitempty"` +} + +// Validate validates this class +func (m *Class) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateInvertedIndexConfig(formats); err != nil { + res = append(res, err) + } + + if err := m.validateMultiTenancyConfig(formats); err != nil { + res = append(res, err) + } + + if err := m.validateProperties(formats); err != nil { + res = append(res, err) + } + + if err := m.validateReplicationConfig(formats); err != nil { + res = append(res, err) + } + + if err := m.validateVectorConfig(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *Class) validateInvertedIndexConfig(formats strfmt.Registry) error { + if swag.IsZero(m.InvertedIndexConfig) { // not required + return nil + } + + if m.InvertedIndexConfig != nil { + if err := m.InvertedIndexConfig.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("invertedIndexConfig") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("invertedIndexConfig") + } + return err + } + } + + return nil +} + +func (m *Class) validateMultiTenancyConfig(formats strfmt.Registry) error { + if swag.IsZero(m.MultiTenancyConfig) { // not required + return nil + } + + if m.MultiTenancyConfig != nil { + if err := m.MultiTenancyConfig.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("multiTenancyConfig") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("multiTenancyConfig") + } + return err + } + } + + return nil +} + +func (m *Class) validateProperties(formats strfmt.Registry) error { + if swag.IsZero(m.Properties) { // not required + return nil + } + + for i := 0; i < len(m.Properties); i++ { + if swag.IsZero(m.Properties[i]) { // not required + continue + } + + if m.Properties[i] != nil { + if 
err := m.Properties[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("properties" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("properties" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *Class) validateReplicationConfig(formats strfmt.Registry) error { + if swag.IsZero(m.ReplicationConfig) { // not required + return nil + } + + if m.ReplicationConfig != nil { + if err := m.ReplicationConfig.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("replicationConfig") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("replicationConfig") + } + return err + } + } + + return nil +} + +func (m *Class) validateVectorConfig(formats strfmt.Registry) error { + if swag.IsZero(m.VectorConfig) { // not required + return nil + } + + for k := range m.VectorConfig { + + if err := validate.Required("vectorConfig"+"."+k, "body", m.VectorConfig[k]); err != nil { + return err + } + if val, ok := m.VectorConfig[k]; ok { + if err := val.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("vectorConfig" + "." + k) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("vectorConfig" + "." 
+ k) + } + return err + } + } + + } + + return nil +} + +// ContextValidate validate this class based on the context it is used +func (m *Class) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateInvertedIndexConfig(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateMultiTenancyConfig(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateProperties(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateReplicationConfig(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateVectorConfig(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *Class) contextValidateInvertedIndexConfig(ctx context.Context, formats strfmt.Registry) error { + + if m.InvertedIndexConfig != nil { + if err := m.InvertedIndexConfig.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("invertedIndexConfig") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("invertedIndexConfig") + } + return err + } + } + + return nil +} + +func (m *Class) contextValidateMultiTenancyConfig(ctx context.Context, formats strfmt.Registry) error { + + if m.MultiTenancyConfig != nil { + if err := m.MultiTenancyConfig.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("multiTenancyConfig") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("multiTenancyConfig") + } + return err + } + } + + return nil +} + +func (m *Class) contextValidateProperties(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.Properties); i++ { + + if m.Properties[i] != nil { + if err := m.Properties[i].ContextValidate(ctx, 
formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("properties" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("properties" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *Class) contextValidateReplicationConfig(ctx context.Context, formats strfmt.Registry) error { + + if m.ReplicationConfig != nil { + if err := m.ReplicationConfig.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("replicationConfig") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("replicationConfig") + } + return err + } + } + + return nil +} + +func (m *Class) contextValidateVectorConfig(ctx context.Context, formats strfmt.Registry) error { + + for k := range m.VectorConfig { + + if val, ok := m.VectorConfig[k]; ok { + if err := val.ContextValidate(ctx, formats); err != nil { + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *Class) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Class) UnmarshalBinary(b []byte) error { + var res Class + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/classification.go b/platform/dbops/binaries/weaviate-src/entities/models/classification.go new file mode 100644 index 0000000000000000000000000000000000000000..a0ba2b492c4940e9ca3826f9d0dbdab9408a5dfd --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/classification.go @@ -0,0 +1,442 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 
Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// Classification Manage classifications, trigger them and view status of past classifications. +// +// swagger:model Classification +type Classification struct { + + // base the text-based classification on these fields (of type text) + // Example: ["description"] + BasedOnProperties []string `json:"basedOnProperties"` + + // class (name) which is used in this classification + // Example: City + Class string `json:"class,omitempty"` + + // which ref-property to set as part of the classification + // Example: ["inCountry"] + ClassifyProperties []string `json:"classifyProperties"` + + // error message if status == failed + // Example: classify xzy: something went wrong + Error string `json:"error,omitempty"` + + // filters + Filters *ClassificationFilters `json:"filters,omitempty"` + + // ID to uniquely identify this classification run + // Example: ee722219-b8ec-4db1-8f8d-5150bb1a9e0c + // Format: uuid + ID strfmt.UUID `json:"id,omitempty"` + + // additional meta information about the classification + Meta *ClassificationMeta `json:"meta,omitempty"` + + // classification-type specific settings + Settings interface{} `json:"settings,omitempty"` + + // status of this classification + // Example: running + // Enum: [running completed failed] + Status string `json:"status,omitempty"` + + // which algorithm to use for classifications + Type string `json:"type,omitempty"` +} + +// Validate validates this classification +func (m *Classification) Validate(formats strfmt.Registry) error { + var res []error + + if err := 
m.validateFilters(formats); err != nil { + res = append(res, err) + } + + if err := m.validateID(formats); err != nil { + res = append(res, err) + } + + if err := m.validateMeta(formats); err != nil { + res = append(res, err) + } + + if err := m.validateStatus(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *Classification) validateFilters(formats strfmt.Registry) error { + if swag.IsZero(m.Filters) { // not required + return nil + } + + if m.Filters != nil { + if err := m.Filters.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("filters") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("filters") + } + return err + } + } + + return nil +} + +func (m *Classification) validateID(formats strfmt.Registry) error { + if swag.IsZero(m.ID) { // not required + return nil + } + + if err := validate.FormatOf("id", "body", "uuid", m.ID.String(), formats); err != nil { + return err + } + + return nil +} + +func (m *Classification) validateMeta(formats strfmt.Registry) error { + if swag.IsZero(m.Meta) { // not required + return nil + } + + if m.Meta != nil { + if err := m.Meta.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("meta") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("meta") + } + return err + } + } + + return nil +} + +var classificationTypeStatusPropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["running","completed","failed"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + classificationTypeStatusPropEnum = append(classificationTypeStatusPropEnum, v) + } +} + +const ( + + // ClassificationStatusRunning captures enum value "running" + ClassificationStatusRunning string = "running" + + // ClassificationStatusCompleted 
captures enum value "completed" + ClassificationStatusCompleted string = "completed" + + // ClassificationStatusFailed captures enum value "failed" + ClassificationStatusFailed string = "failed" +) + +// prop value enum +func (m *Classification) validateStatusEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, classificationTypeStatusPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *Classification) validateStatus(formats strfmt.Registry) error { + if swag.IsZero(m.Status) { // not required + return nil + } + + // value enum + if err := m.validateStatusEnum("status", "body", m.Status); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this classification based on the context it is used +func (m *Classification) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateFilters(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateMeta(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *Classification) contextValidateFilters(ctx context.Context, formats strfmt.Registry) error { + + if m.Filters != nil { + if err := m.Filters.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("filters") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("filters") + } + return err + } + } + + return nil +} + +func (m *Classification) contextValidateMeta(ctx context.Context, formats strfmt.Registry) error { + + if m.Meta != nil { + if err := m.Meta.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("meta") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("meta") + } + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *Classification) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Classification) UnmarshalBinary(b []byte) error { + var res Classification + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// ClassificationFilters classification filters +// +// swagger:model ClassificationFilters +type ClassificationFilters struct { + + // limit the objects to be classified + SourceWhere *WhereFilter `json:"sourceWhere,omitempty"` + + // Limit the possible sources when using an algorithm which doesn't really on training data, e.g. 'contextual'. When using an algorithm with a training set, such as 'knn', limit the training set instead + TargetWhere *WhereFilter `json:"targetWhere,omitempty"` + + // Limit the training objects to be considered during the classification. 
Can only be used on types with explicit training sets, such as 'knn' + TrainingSetWhere *WhereFilter `json:"trainingSetWhere,omitempty"` +} + +// Validate validates this classification filters +func (m *ClassificationFilters) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateSourceWhere(formats); err != nil { + res = append(res, err) + } + + if err := m.validateTargetWhere(formats); err != nil { + res = append(res, err) + } + + if err := m.validateTrainingSetWhere(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *ClassificationFilters) validateSourceWhere(formats strfmt.Registry) error { + if swag.IsZero(m.SourceWhere) { // not required + return nil + } + + if m.SourceWhere != nil { + if err := m.SourceWhere.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("filters" + "." + "sourceWhere") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("filters" + "." + "sourceWhere") + } + return err + } + } + + return nil +} + +func (m *ClassificationFilters) validateTargetWhere(formats strfmt.Registry) error { + if swag.IsZero(m.TargetWhere) { // not required + return nil + } + + if m.TargetWhere != nil { + if err := m.TargetWhere.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("filters" + "." + "targetWhere") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("filters" + "." 
+ "targetWhere") + } + return err + } + } + + return nil +} + +func (m *ClassificationFilters) validateTrainingSetWhere(formats strfmt.Registry) error { + if swag.IsZero(m.TrainingSetWhere) { // not required + return nil + } + + if m.TrainingSetWhere != nil { + if err := m.TrainingSetWhere.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("filters" + "." + "trainingSetWhere") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("filters" + "." + "trainingSetWhere") + } + return err + } + } + + return nil +} + +// ContextValidate validate this classification filters based on the context it is used +func (m *ClassificationFilters) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateSourceWhere(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateTargetWhere(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateTrainingSetWhere(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *ClassificationFilters) contextValidateSourceWhere(ctx context.Context, formats strfmt.Registry) error { + + if m.SourceWhere != nil { + if err := m.SourceWhere.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("filters" + "." + "sourceWhere") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("filters" + "." + "sourceWhere") + } + return err + } + } + + return nil +} + +func (m *ClassificationFilters) contextValidateTargetWhere(ctx context.Context, formats strfmt.Registry) error { + + if m.TargetWhere != nil { + if err := m.TargetWhere.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("filters" + "." 
+ "targetWhere") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("filters" + "." + "targetWhere") + } + return err + } + } + + return nil +} + +func (m *ClassificationFilters) contextValidateTrainingSetWhere(ctx context.Context, formats strfmt.Registry) error { + + if m.TrainingSetWhere != nil { + if err := m.TrainingSetWhere.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("filters" + "." + "trainingSetWhere") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("filters" + "." + "trainingSetWhere") + } + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *ClassificationFilters) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ClassificationFilters) UnmarshalBinary(b []byte) error { + var res ClassificationFilters + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/classification_meta.go b/platform/dbops/binaries/weaviate-src/entities/models/classification_meta.go new file mode 100644 index 0000000000000000000000000000000000000000..53d8d4a9709ca9e074a12aec7a367fcd45d73ce0 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/classification_meta.go @@ -0,0 +1,119 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// ClassificationMeta Additional information to a specific classification +// +// swagger:model ClassificationMeta +type ClassificationMeta struct { + + // time when this classification finished + // Example: 2017-07-21T17:32:28Z + // Format: date-time + Completed strfmt.DateTime `json:"completed,omitempty"` + + // number of objects which were taken into consideration for classification + // Example: 147 + Count int64 `json:"count,omitempty"` + + // number of objects which could not be classified - see error message for details + // Example: 7 + CountFailed int64 `json:"countFailed,omitempty"` + + // number of objects successfully classified + // Example: 140 + CountSucceeded int64 `json:"countSucceeded,omitempty"` + + // time when this classification was started + // Example: 2017-07-21T17:32:28Z + // Format: date-time + Started strfmt.DateTime `json:"started,omitempty"` +} + +// Validate validates this classification meta +func (m *ClassificationMeta) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateCompleted(formats); err != nil { + res = append(res, err) + } + + if err := m.validateStarted(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *ClassificationMeta) validateCompleted(formats strfmt.Registry) error { + if swag.IsZero(m.Completed) { // not required + return nil + } + + if err := validate.FormatOf("completed", "body", "date-time", m.Completed.String(), formats); err != nil { + return err + } + + return nil +} + +func (m *ClassificationMeta) validateStarted(formats strfmt.Registry) error { + if swag.IsZero(m.Started) { // not required + return nil + } + + if err := validate.FormatOf("started", "body", "date-time", m.Started.String(), formats); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this classification meta based on context it is used +func (m *ClassificationMeta) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *ClassificationMeta) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ClassificationMeta) UnmarshalBinary(b []byte) error { + var res ClassificationMeta + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/cluster_statistics_response.go b/platform/dbops/binaries/weaviate-src/entities/models/cluster_statistics_response.go new file mode 100644 index 0000000000000000000000000000000000000000..8f82c567c6e148d619af9046ea7a8c7242190e00 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/cluster_statistics_response.go @@ -0,0 +1,130 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. 
+ +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// ClusterStatisticsResponse The cluster statistics of all of the Weaviate nodes +// +// swagger:model ClusterStatisticsResponse +type ClusterStatisticsResponse struct { + + // statistics + Statistics []*Statistics `json:"statistics"` + + // synchronized + Synchronized bool `json:"synchronized"` +} + +// Validate validates this cluster statistics response +func (m *ClusterStatisticsResponse) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateStatistics(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *ClusterStatisticsResponse) validateStatistics(formats strfmt.Registry) error { + if swag.IsZero(m.Statistics) { // not required + return nil + } + + for i := 0; i < len(m.Statistics); i++ { + if swag.IsZero(m.Statistics[i]) { // not required + continue + } + + if m.Statistics[i] != nil { + if err := m.Statistics[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("statistics" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("statistics" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// ContextValidate validate this cluster statistics response based on the context it is used +func (m *ClusterStatisticsResponse) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateStatistics(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *ClusterStatisticsResponse) contextValidateStatistics(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.Statistics); i++ { + + if m.Statistics[i] != nil { + if err := m.Statistics[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("statistics" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("statistics" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *ClusterStatisticsResponse) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ClusterStatisticsResponse) UnmarshalBinary(b []byte) error { + var res ClusterStatisticsResponse + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/d_b_user_info.go b/platform/dbops/binaries/weaviate-src/entities/models/d_b_user_info.go new file mode 100644 index 0000000000000000000000000000000000000000..9094b7cc0af716cb196e5f8148a7bf598857f647 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/d_b_user_info.go @@ -0,0 +1,229 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// DBUserInfo d b user info +// +// swagger:model DBUserInfo +type DBUserInfo struct { + + // activity status of the returned user + // Required: true + Active *bool `json:"active"` + + // First 3 letters of the associated API-key + // Max Length: 3 + APIKeyFirstLetters string `json:"apiKeyFirstLetters,omitempty"` + + // Date and time in ISO 8601 format (YYYY-MM-DDTHH:MM:SSZ) + // Format: date-time + CreatedAt strfmt.DateTime `json:"createdAt,omitempty"` + + // type of the returned user + // Required: true + // Enum: [db_user db_env_user] + DbUserType *string `json:"dbUserType"` + + // Date and time in ISO 8601 format (YYYY-MM-DDTHH:MM:SSZ) + // Format: date-time + LastUsedAt strfmt.DateTime `json:"lastUsedAt,omitempty"` + + // The role names associated to the user + // Required: true + Roles []string `json:"roles"` + + // The user id of the given user + // Required: true + UserID *string `json:"userId"` +} + +// Validate validates this d b user info +func (m *DBUserInfo) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateActive(formats); err != nil { + res = append(res, err) + } + + if err := m.validateAPIKeyFirstLetters(formats); err != nil { + res = append(res, err) + } + + if err := m.validateCreatedAt(formats); err != nil { + res = append(res, err) + } + + if err := m.validateDbUserType(formats); err != nil { + res = append(res, err) + } + + if err := m.validateLastUsedAt(formats); err != nil { + res = append(res, err) + } + + if err := m.validateRoles(formats); err != nil { + res = append(res, err) + } + + if err := m.validateUserID(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *DBUserInfo) validateActive(formats strfmt.Registry) error { + + if err := validate.Required("active", "body", m.Active); err != nil { + return err + } + + return nil +} + +func (m *DBUserInfo) validateAPIKeyFirstLetters(formats strfmt.Registry) error { + if swag.IsZero(m.APIKeyFirstLetters) { // not required + return nil + } + + if err := validate.MaxLength("apiKeyFirstLetters", "body", m.APIKeyFirstLetters, 3); err != nil { + return err + } + + return nil +} + +func (m *DBUserInfo) validateCreatedAt(formats strfmt.Registry) error { + if swag.IsZero(m.CreatedAt) { // not required + return nil + } + + if err := validate.FormatOf("createdAt", "body", "date-time", m.CreatedAt.String(), formats); err != nil { + return err + } + + return nil +} + +var dBUserInfoTypeDbUserTypePropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["db_user","db_env_user"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + dBUserInfoTypeDbUserTypePropEnum = append(dBUserInfoTypeDbUserTypePropEnum, v) + } +} + +const ( + + // DBUserInfoDbUserTypeDbUser captures enum value "db_user" + DBUserInfoDbUserTypeDbUser string = "db_user" + + // DBUserInfoDbUserTypeDbEnvUser captures enum value "db_env_user" + DBUserInfoDbUserTypeDbEnvUser string = "db_env_user" +) + +// prop value enum +func (m *DBUserInfo) validateDbUserTypeEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, dBUserInfoTypeDbUserTypePropEnum, true); err != nil { + return err + } + return nil +} + +func (m *DBUserInfo) validateDbUserType(formats strfmt.Registry) error { + + if err := validate.Required("dbUserType", "body", m.DbUserType); err != nil { + return err + } + + // value enum + if err := m.validateDbUserTypeEnum("dbUserType", "body", *m.DbUserType); err != nil { + return err + } + + return nil +} + +func (m *DBUserInfo) validateLastUsedAt(formats strfmt.Registry) error { + if 
swag.IsZero(m.LastUsedAt) { // not required + return nil + } + + if err := validate.FormatOf("lastUsedAt", "body", "date-time", m.LastUsedAt.String(), formats); err != nil { + return err + } + + return nil +} + +func (m *DBUserInfo) validateRoles(formats strfmt.Registry) error { + + if err := validate.Required("roles", "body", m.Roles); err != nil { + return err + } + + return nil +} + +func (m *DBUserInfo) validateUserID(formats strfmt.Registry) error { + + if err := validate.Required("userId", "body", m.UserID); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this d b user info based on context it is used +func (m *DBUserInfo) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *DBUserInfo) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *DBUserInfo) UnmarshalBinary(b []byte) error { + var res DBUserInfo + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/deprecation.go b/platform/dbops/binaries/weaviate-src/entities/models/deprecation.go new file mode 100644 index 0000000000000000000000000000000000000000..a4ed9083a2e821e3c75710359036a4a7998ca2ca --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/deprecation.go @@ -0,0 +1,132 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// Deprecation deprecation +// +// swagger:model Deprecation +type Deprecation struct { + + // Describes which API is effected, usually one of: REST, GraphQL + APIType string `json:"apiType,omitempty"` + + // The id that uniquely identifies this particular deprecations (mostly used internally) + ID string `json:"id,omitempty"` + + // The locations within the specified API affected by this deprecation + Locations []string `json:"locations"` + + // User-required object to not be affected by the (planned) removal + Mitigation string `json:"mitigation,omitempty"` + + // What this deprecation is about + Msg string `json:"msg,omitempty"` + + // A best-effort guess of which upcoming version will remove the feature entirely + PlannedRemovalVersion string `json:"plannedRemovalVersion,omitempty"` + + // If the feature has already been removed, it was removed in this version + RemovedIn *string `json:"removedIn,omitempty"` + + // If the feature has already been removed, it was removed at this timestamp + // Format: date-time + RemovedTime *strfmt.DateTime `json:"removedTime,omitempty"` + + // The deprecation was introduced in this version + // Format: date-time + SinceTime strfmt.DateTime `json:"sinceTime,omitempty"` + + // The deprecation was introduced in this version + SinceVersion string `json:"sinceVersion,omitempty"` + + // Whether the problematic API functionality is deprecated (planned to be removed) or already removed + Status string `json:"status,omitempty"` +} + +// Validate validates this deprecation +func (m *Deprecation) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateRemovedTime(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSinceTime(formats); err != 
nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *Deprecation) validateRemovedTime(formats strfmt.Registry) error { + if swag.IsZero(m.RemovedTime) { // not required + return nil + } + + if err := validate.FormatOf("removedTime", "body", "date-time", m.RemovedTime.String(), formats); err != nil { + return err + } + + return nil +} + +func (m *Deprecation) validateSinceTime(formats strfmt.Registry) error { + if swag.IsZero(m.SinceTime) { // not required + return nil + } + + if err := validate.FormatOf("sinceTime", "body", "date-time", m.SinceTime.String(), formats); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this deprecation based on context it is used +func (m *Deprecation) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *Deprecation) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Deprecation) UnmarshalBinary(b []byte) error { + var res Deprecation + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/distributed_task.go b/platform/dbops/binaries/weaviate-src/entities/models/distributed_task.go new file mode 100644 index 0000000000000000000000000000000000000000..b45b471fb218ca326fe0908132bd7a2623c36985 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/distributed_task.go @@ -0,0 +1,123 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. 
+ +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// DistributedTask Distributed task metadata. +// +// swagger:model DistributedTask +type DistributedTask struct { + + // The high level reason why the task failed. + Error string `json:"error,omitempty"` + + // The time when the task was finished. + // Format: date-time + FinishedAt strfmt.DateTime `json:"finishedAt,omitempty"` + + // The nodes that finished the task. + FinishedNodes []string `json:"finishedNodes"` + + // The ID of the task. + ID string `json:"id,omitempty"` + + // The payload of the task. + Payload interface{} `json:"payload,omitempty"` + + // The time when the task was created. + // Format: date-time + StartedAt strfmt.DateTime `json:"startedAt,omitempty"` + + // The status of the task. + Status string `json:"status,omitempty"` + + // The version of the task. + Version int64 `json:"version,omitempty"` +} + +// Validate validates this distributed task +func (m *DistributedTask) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateFinishedAt(formats); err != nil { + res = append(res, err) + } + + if err := m.validateStartedAt(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *DistributedTask) validateFinishedAt(formats strfmt.Registry) error { + if swag.IsZero(m.FinishedAt) { // not required + return nil + } + + if err := validate.FormatOf("finishedAt", "body", "date-time", m.FinishedAt.String(), formats); err != nil { + return err + } + + return nil +} + +func (m *DistributedTask) validateStartedAt(formats strfmt.Registry) error { + if swag.IsZero(m.StartedAt) { // not required + return nil + } + + if err := validate.FormatOf("startedAt", "body", "date-time", m.StartedAt.String(), formats); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this distributed task based on context it is used +func (m *DistributedTask) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *DistributedTask) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *DistributedTask) UnmarshalBinary(b []byte) error { + var res DistributedTask + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/distributed_tasks.go b/platform/dbops/binaries/weaviate-src/entities/models/distributed_tasks.go new file mode 100644 index 0000000000000000000000000000000000000000..05d656d3d0bfac90d505748c714f82acd2b20ca4 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/distributed_tasks.go @@ -0,0 +1,89 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" +) + +// DistributedTasks Active distributed tasks by namespace. +// +// swagger:model DistributedTasks +type DistributedTasks map[string][]DistributedTask + +// Validate validates this distributed tasks +func (m DistributedTasks) Validate(formats strfmt.Registry) error { + var res []error + + for k := range m { + + if err := validate.Required(k, "body", m[k]); err != nil { + return err + } + + for i := 0; i < len(m[k]); i++ { + + if err := m[k][i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(k + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName(k + "." + strconv.Itoa(i)) + } + return err + } + + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// ContextValidate validate this distributed tasks based on the context it is used +func (m DistributedTasks) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + for k := range m { + + for i := 0; i < len(m[k]); i++ { + + if err := m[k][i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(k + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName(k + "." + strconv.Itoa(i)) + } + return err + } + + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/error_response.go b/platform/dbops/binaries/weaviate-src/entities/models/error_response.go new file mode 100644 index 0000000000000000000000000000000000000000..a8cfd7abe974355e9be473de387bb18ba4fdec25 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/error_response.go @@ -0,0 +1,164 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// ErrorResponse An error response given by Weaviate end-points. +// +// swagger:model ErrorResponse +type ErrorResponse struct { + + // error + Error []*ErrorResponseErrorItems0 `json:"error"` +} + +// Validate validates this error response +func (m *ErrorResponse) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateError(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *ErrorResponse) validateError(formats strfmt.Registry) error { + if swag.IsZero(m.Error) { // not required + return nil + } + + for i := 0; i < len(m.Error); i++ { + if swag.IsZero(m.Error[i]) { // not required + continue + } + + if m.Error[i] != nil { + if err := m.Error[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("error" + "." 
+ strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("error" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// ContextValidate validate this error response based on the context it is used +func (m *ErrorResponse) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateError(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *ErrorResponse) contextValidateError(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.Error); i++ { + + if m.Error[i] != nil { + if err := m.Error[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("error" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("error" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *ErrorResponse) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ErrorResponse) UnmarshalBinary(b []byte) error { + var res ErrorResponse + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// ErrorResponseErrorItems0 error response error items0 +// +// swagger:model ErrorResponseErrorItems0 +type ErrorResponseErrorItems0 struct { + + // message + Message string `json:"message,omitempty"` +} + +// Validate validates this error response error items0 +func (m *ErrorResponseErrorItems0) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this error response error items0 based on context it is used +func (m *ErrorResponseErrorItems0) ContextValidate(ctx context.Context, formats 
strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *ErrorResponseErrorItems0) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ErrorResponseErrorItems0) UnmarshalBinary(b []byte) error { + var res ErrorResponseErrorItems0 + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/geo_coordinates.go b/platform/dbops/binaries/weaviate-src/entities/models/geo_coordinates.go new file mode 100644 index 0000000000000000000000000000000000000000..6a9299d6e335c67d22e037f8300f640f9c19f90f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/geo_coordinates.go @@ -0,0 +1,64 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// GeoCoordinates geo coordinates +// +// swagger:model GeoCoordinates +type GeoCoordinates struct { + + // The latitude of the point on earth in decimal form + Latitude *float32 `json:"latitude,omitempty"` + + // The longitude of the point on earth in decimal form + Longitude *float32 `json:"longitude,omitempty"` +} + +// Validate validates this geo coordinates +func (m *GeoCoordinates) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this geo coordinates based on context it is used +func (m *GeoCoordinates) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *GeoCoordinates) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *GeoCoordinates) UnmarshalBinary(b []byte) error { + var res GeoCoordinates + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/graph_q_l_error.go b/platform/dbops/binaries/weaviate-src/entities/models/graph_q_l_error.go new file mode 100644 index 0000000000000000000000000000000000000000..852bd46819196c4d086cc03528efd58d28e77da1 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/graph_q_l_error.go @@ -0,0 +1,173 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. 
+ +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// GraphQLError An error response caused by a GraphQL query. +// +// swagger:model GraphQLError +type GraphQLError struct { + + // locations + Locations []*GraphQLErrorLocationsItems0 `json:"locations"` + + // message + Message string `json:"message,omitempty"` + + // path + Path []string `json:"path"` +} + +// Validate validates this graph q l error +func (m *GraphQLError) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateLocations(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *GraphQLError) validateLocations(formats strfmt.Registry) error { + if swag.IsZero(m.Locations) { // not required + return nil + } + + for i := 0; i < len(m.Locations); i++ { + if swag.IsZero(m.Locations[i]) { // not required + continue + } + + if m.Locations[i] != nil { + if err := m.Locations[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("locations" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("locations" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// ContextValidate validate this graph q l error based on the context it is used +func (m *GraphQLError) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateLocations(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *GraphQLError) contextValidateLocations(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.Locations); i++ { + + if m.Locations[i] != nil { + if err := m.Locations[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("locations" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("locations" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *GraphQLError) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *GraphQLError) UnmarshalBinary(b []byte) error { + var res GraphQLError + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// GraphQLErrorLocationsItems0 graph q l error locations items0 +// +// swagger:model GraphQLErrorLocationsItems0 +type GraphQLErrorLocationsItems0 struct { + + // column + Column int64 `json:"column,omitempty"` + + // line + Line int64 `json:"line,omitempty"` +} + +// Validate validates this graph q l error locations items0 +func (m *GraphQLErrorLocationsItems0) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this graph q l error locations items0 based on context it is used +func (m *GraphQLErrorLocationsItems0) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *GraphQLErrorLocationsItems0) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *GraphQLErrorLocationsItems0) UnmarshalBinary(b []byte) error { + var res GraphQLErrorLocationsItems0 + if err := swag.ReadJSON(b, &res); err != nil { + return err + } 
+ *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/graph_q_l_queries.go b/platform/dbops/binaries/weaviate-src/entities/models/graph_q_l_queries.go new file mode 100644 index 0000000000000000000000000000000000000000..4d00b51c8d6400d5b7ab80313b71c7da79329c0e --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/graph_q_l_queries.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// GraphQLQueries A list of GraphQL queries. +// +// swagger:model GraphQLQueries +type GraphQLQueries []*GraphQLQuery + +// Validate validates this graph q l queries +func (m GraphQLQueries) Validate(formats strfmt.Registry) error { + var res []error + + for i := 0; i < len(m); i++ { + if swag.IsZero(m[i]) { // not required + continue + } + + if m[i] != nil { + if err := m[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName(strconv.Itoa(i)) + } + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +// ContextValidate validate this graph q l queries based on the context it is used +func (m GraphQLQueries) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + for i := 0; i < len(m); i++ { + + if m[i] != nil { + if err := m[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName(strconv.Itoa(i)) + } + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/graph_q_l_query.go b/platform/dbops/binaries/weaviate-src/entities/models/graph_q_l_query.go new file mode 100644 index 0000000000000000000000000000000000000000..c4bf951a3eb71feea8d6072fde990f28a4047141 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/graph_q_l_query.go @@ -0,0 +1,67 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// GraphQLQuery GraphQL query based on: http://facebook.github.io/graphql/. +// +// swagger:model GraphQLQuery +type GraphQLQuery struct { + + // The name of the operation if multiple exist in the query. + OperationName string `json:"operationName,omitempty"` + + // Query based on GraphQL syntax. + Query string `json:"query,omitempty"` + + // Additional variables for the query. 
+ Variables interface{} `json:"variables,omitempty"` +} + +// Validate validates this graph q l query +func (m *GraphQLQuery) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this graph q l query based on context it is used +func (m *GraphQLQuery) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *GraphQLQuery) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *GraphQLQuery) UnmarshalBinary(b []byte) error { + var res GraphQLQuery + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/graph_q_l_response.go b/platform/dbops/binaries/weaviate-src/entities/models/graph_q_l_response.go new file mode 100644 index 0000000000000000000000000000000000000000..40beb2d743c00a69d6dac0ee7735b90e86b9082b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/graph_q_l_response.go @@ -0,0 +1,130 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// GraphQLResponse GraphQL based response: http://facebook.github.io/graphql/. +// +// swagger:model GraphQLResponse +type GraphQLResponse struct { + + // GraphQL data object. 
+ Data map[string]JSONObject `json:"data,omitempty"` + + // Array with errors. + Errors []*GraphQLError `json:"errors,omitempty"` +} + +// Validate validates this graph q l response +func (m *GraphQLResponse) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateErrors(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *GraphQLResponse) validateErrors(formats strfmt.Registry) error { + if swag.IsZero(m.Errors) { // not required + return nil + } + + for i := 0; i < len(m.Errors); i++ { + if swag.IsZero(m.Errors[i]) { // not required + continue + } + + if m.Errors[i] != nil { + if err := m.Errors[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("errors" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("errors" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// ContextValidate validate this graph q l response based on the context it is used +func (m *GraphQLResponse) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateErrors(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *GraphQLResponse) contextValidateErrors(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.Errors); i++ { + + if m.Errors[i] != nil { + if err := m.Errors[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("errors" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("errors" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *GraphQLResponse) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *GraphQLResponse) UnmarshalBinary(b []byte) error { + var res GraphQLResponse + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/graph_q_l_responses.go b/platform/dbops/binaries/weaviate-src/entities/models/graph_q_l_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..429b1ca890bcc4864bd5b180b019f329f547dad0 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/graph_q_l_responses.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// GraphQLResponses A list of GraphQL responses. 
+// +// swagger:model GraphQLResponses +type GraphQLResponses []*GraphQLResponse + +// Validate validates this graph q l responses +func (m GraphQLResponses) Validate(formats strfmt.Registry) error { + var res []error + + for i := 0; i < len(m); i++ { + if swag.IsZero(m[i]) { // not required + continue + } + + if m[i] != nil { + if err := m[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName(strconv.Itoa(i)) + } + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// ContextValidate validate this graph q l responses based on the context it is used +func (m GraphQLResponses) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + for i := 0; i < len(m); i++ { + + if m[i] != nil { + if err := m[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName(strconv.Itoa(i)) + } + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/group_type.go b/platform/dbops/binaries/weaviate-src/entities/models/group_type.go new file mode 100644 index 0000000000000000000000000000000000000000..8be0eb3700fdcde09d7b723635666f2b9f2110ac --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/group_type.go @@ -0,0 +1,89 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" +) + +// GroupType If the group contains OIDC or database users. +// +// swagger:model GroupType +type GroupType string + +func NewGroupType(value GroupType) *GroupType { + return &value +} + +// Pointer returns a pointer to a freshly-allocated GroupType. +func (m GroupType) Pointer() *GroupType { + return &m +} + +const ( + + // GroupTypeDb captures enum value "db" + GroupTypeDb GroupType = "db" + + // GroupTypeOidc captures enum value "oidc" + GroupTypeOidc GroupType = "oidc" +) + +// for schema +var groupTypeEnum []interface{} + +func init() { + var res []GroupType + if err := json.Unmarshal([]byte(`["db","oidc"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + groupTypeEnum = append(groupTypeEnum, v) + } +} + +func (m GroupType) validateGroupTypeEnum(path, location string, value GroupType) error { + if err := validate.EnumCase(path, location, value, groupTypeEnum, true); err != nil { + return err + } + return nil +} + +// Validate validates this group type +func (m GroupType) Validate(formats strfmt.Registry) error { + var res []error + + // value enum + if err := m.validateGroupTypeEnum("", "body", m); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +// ContextValidate validates this group type based on context it is used +func (m GroupType) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/inverted_index_config.go b/platform/dbops/binaries/weaviate-src/entities/models/inverted_index_config.go new file mode 100644 index 0000000000000000000000000000000000000000..09e3ce9dcc89275775054d3c0c624954f8f6d097 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/inverted_index_config.go @@ -0,0 +1,176 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// InvertedIndexConfig Configure the inverted index built into Weaviate (default: 60). +// +// swagger:model InvertedIndexConfig +type InvertedIndexConfig struct { + + // bm25 + Bm25 *BM25Config `json:"bm25,omitempty"` + + // Asynchronous index clean up happens every n seconds + CleanupIntervalSeconds int64 `json:"cleanupIntervalSeconds,omitempty"` + + // Index each object with the null state (default: 'false'). + IndexNullState bool `json:"indexNullState,omitempty"` + + // Index length of properties (default: 'false'). + IndexPropertyLength bool `json:"indexPropertyLength,omitempty"` + + // Index each object by its internal timestamps (default: 'false'). 
+ IndexTimestamps bool `json:"indexTimestamps,omitempty"` + + // stopwords + Stopwords *StopwordConfig `json:"stopwords,omitempty"` + + // Using BlockMax WAND for query execution (default: 'false', will be 'true' for new collections created after 1.30). + UsingBlockMaxWAND bool `json:"usingBlockMaxWAND,omitempty"` +} + +// Validate validates this inverted index config +func (m *InvertedIndexConfig) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateBm25(formats); err != nil { + res = append(res, err) + } + + if err := m.validateStopwords(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *InvertedIndexConfig) validateBm25(formats strfmt.Registry) error { + if swag.IsZero(m.Bm25) { // not required + return nil + } + + if m.Bm25 != nil { + if err := m.Bm25.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("bm25") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("bm25") + } + return err + } + } + + return nil +} + +func (m *InvertedIndexConfig) validateStopwords(formats strfmt.Registry) error { + if swag.IsZero(m.Stopwords) { // not required + return nil + } + + if m.Stopwords != nil { + if err := m.Stopwords.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("stopwords") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("stopwords") + } + return err + } + } + + return nil +} + +// ContextValidate validate this inverted index config based on the context it is used +func (m *InvertedIndexConfig) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateBm25(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateStopwords(ctx, formats); err != nil { + res = append(res, err) + } 
+ + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *InvertedIndexConfig) contextValidateBm25(ctx context.Context, formats strfmt.Registry) error { + + if m.Bm25 != nil { + if err := m.Bm25.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("bm25") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("bm25") + } + return err + } + } + + return nil +} + +func (m *InvertedIndexConfig) contextValidateStopwords(ctx context.Context, formats strfmt.Registry) error { + + if m.Stopwords != nil { + if err := m.Stopwords.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("stopwords") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("stopwords") + } + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *InvertedIndexConfig) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *InvertedIndexConfig) UnmarshalBinary(b []byte) error { + var res InvertedIndexConfig + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/json_object.go b/platform/dbops/binaries/weaviate-src/entities/models/json_object.go new file mode 100644 index 0000000000000000000000000000000000000000..e3de03367ecd7fd6b316a83d8280648c2674ba5e --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/json_object.go @@ -0,0 +1,22 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// JSONObject JSON object value. +// +// swagger:model JsonObject +type JSONObject interface{} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/link.go b/platform/dbops/binaries/weaviate-src/entities/models/link.go new file mode 100644 index 0000000000000000000000000000000000000000..83221bb238333f604bf1499a113ffc9287cf1f3d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/link.go @@ -0,0 +1,70 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// Link link +// +// swagger:model Link +type Link struct { + + // weaviate documentation about this resource group + DocumentationHref string `json:"documentationHref,omitempty"` + + // target of the link + Href string `json:"href,omitempty"` + + // human readable name of the resource group + Name string `json:"name,omitempty"` + + // relationship if both resources are related, e.g. 'next', 'previous', 'parent', etc. 
+ Rel string `json:"rel,omitempty"` +} + +// Validate validates this link +func (m *Link) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this link based on context it is used +func (m *Link) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *Link) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Link) UnmarshalBinary(b []byte) error { + var res Link + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/meta.go b/platform/dbops/binaries/weaviate-src/entities/models/meta.go new file mode 100644 index 0000000000000000000000000000000000000000..5ee1a056a4e327ea982d4ae32c6a71ceb3cf6451 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/meta.go @@ -0,0 +1,70 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// Meta Contains meta information of the current Weaviate instance. +// +// swagger:model Meta +type Meta struct { + + // Max message size for GRPC connection in bytes. + GrpcMaxMessageSize int64 `json:"grpcMaxMessageSize,omitempty"` + + // The url of the host. + Hostname string `json:"hostname,omitempty"` + + // Module-specific meta information. 
+ Modules interface{} `json:"modules,omitempty"` + + // The Weaviate server version. + Version string `json:"version,omitempty"` +} + +// Validate validates this meta +func (m *Meta) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this meta based on context it is used +func (m *Meta) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *Meta) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Meta) UnmarshalBinary(b []byte) error { + var res Meta + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/multi_tenancy_config.go b/platform/dbops/binaries/weaviate-src/entities/models/multi_tenancy_config.go new file mode 100644 index 0000000000000000000000000000000000000000..8b1bb08698282e33e15102814ed285dd5f07c055 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/multi_tenancy_config.go @@ -0,0 +1,67 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// MultiTenancyConfig Configuration related to multi-tenancy within a class +// +// swagger:model MultiTenancyConfig +type MultiTenancyConfig struct { + + // Existing tenants should (not) be turned HOT implicitly when they are accessed and in another activity status (default: false). + AutoTenantActivation bool `json:"autoTenantActivation"` + + // Nonexistent tenants should (not) be created implicitly (default: false). + AutoTenantCreation bool `json:"autoTenantCreation"` + + // Whether or not multi-tenancy is enabled for this class (default: false). + Enabled bool `json:"enabled"` +} + +// Validate validates this multi tenancy config +func (m *MultiTenancyConfig) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this multi tenancy config based on context it is used +func (m *MultiTenancyConfig) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *MultiTenancyConfig) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *MultiTenancyConfig) UnmarshalBinary(b []byte) error { + var res MultiTenancyConfig + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/multiple_ref.go b/platform/dbops/binaries/weaviate-src/entities/models/multiple_ref.go new file mode 100644 index 0000000000000000000000000000000000000000..f6cf7b6bf3f3c8eb389d69549d6a781ac9ba3ac6 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/multiple_ref.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / 
__/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// MultipleRef Multiple instances of references to other objects. +// +// swagger:model MultipleRef +type MultipleRef []*SingleRef + +// Validate validates this multiple ref +func (m MultipleRef) Validate(formats strfmt.Registry) error { + var res []error + + for i := 0; i < len(m); i++ { + if swag.IsZero(m[i]) { // not required + continue + } + + if m[i] != nil { + if err := m[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName(strconv.Itoa(i)) + } + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// ContextValidate validate this multiple ref based on the context it is used +func (m MultipleRef) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + for i := 0; i < len(m); i++ { + + if m[i] != nil { + if err := m[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName(strconv.Itoa(i)) + } + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/nested_property.go b/platform/dbops/binaries/weaviate-src/entities/models/nested_property.go new file mode 100644 index 0000000000000000000000000000000000000000..4d0b3acacbe5ceb910e2eb2b5ed865076dad294e --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/nested_property.go @@ -0,0 +1,218 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// NestedProperty nested property +// +// swagger:model NestedProperty +type NestedProperty struct { + + // data type + DataType []string `json:"dataType"` + + // description + Description string `json:"description,omitempty"` + + // index filterable + IndexFilterable *bool `json:"indexFilterable,omitempty"` + + // index range filters + IndexRangeFilters *bool `json:"indexRangeFilters,omitempty"` + + // index searchable + IndexSearchable *bool `json:"indexSearchable,omitempty"` + + // name + Name string `json:"name,omitempty"` + + // The properties of the nested object(s). Applies to object and object[] data types. 
+ NestedProperties []*NestedProperty `json:"nestedProperties,omitempty"` + + // tokenization + // Enum: [word lowercase whitespace field trigram gse kagome_kr kagome_ja gse_ch] + Tokenization string `json:"tokenization,omitempty"` +} + +// Validate validates this nested property +func (m *NestedProperty) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateNestedProperties(formats); err != nil { + res = append(res, err) + } + + if err := m.validateTokenization(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *NestedProperty) validateNestedProperties(formats strfmt.Registry) error { + if swag.IsZero(m.NestedProperties) { // not required + return nil + } + + for i := 0; i < len(m.NestedProperties); i++ { + if swag.IsZero(m.NestedProperties[i]) { // not required + continue + } + + if m.NestedProperties[i] != nil { + if err := m.NestedProperties[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("nestedProperties" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("nestedProperties" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +var nestedPropertyTypeTokenizationPropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["word","lowercase","whitespace","field","trigram","gse","kagome_kr","kagome_ja","gse_ch"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + nestedPropertyTypeTokenizationPropEnum = append(nestedPropertyTypeTokenizationPropEnum, v) + } +} + +const ( + + // NestedPropertyTokenizationWord captures enum value "word" + NestedPropertyTokenizationWord string = "word" + + // NestedPropertyTokenizationLowercase captures enum value "lowercase" + NestedPropertyTokenizationLowercase string = "lowercase" + + // NestedPropertyTokenizationWhitespace captures enum value "whitespace" + NestedPropertyTokenizationWhitespace string = "whitespace" + + // NestedPropertyTokenizationField captures enum value "field" + NestedPropertyTokenizationField string = "field" + + // NestedPropertyTokenizationTrigram captures enum value "trigram" + NestedPropertyTokenizationTrigram string = "trigram" + + // NestedPropertyTokenizationGse captures enum value "gse" + NestedPropertyTokenizationGse string = "gse" + + // NestedPropertyTokenizationKagomeKr captures enum value "kagome_kr" + NestedPropertyTokenizationKagomeKr string = "kagome_kr" + + // NestedPropertyTokenizationKagomeJa captures enum value "kagome_ja" + NestedPropertyTokenizationKagomeJa string = "kagome_ja" + + // NestedPropertyTokenizationGseCh captures enum value "gse_ch" + NestedPropertyTokenizationGseCh string = "gse_ch" +) + +// prop value enum +func (m *NestedProperty) validateTokenizationEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, nestedPropertyTypeTokenizationPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *NestedProperty) validateTokenization(formats strfmt.Registry) error { + if swag.IsZero(m.Tokenization) { // not required + return 
nil + } + + // value enum + if err := m.validateTokenizationEnum("tokenization", "body", m.Tokenization); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this nested property based on the context it is used +func (m *NestedProperty) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateNestedProperties(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *NestedProperty) contextValidateNestedProperties(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.NestedProperties); i++ { + + if m.NestedProperties[i] != nil { + if err := m.NestedProperties[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("nestedProperties" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("nestedProperties" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *NestedProperty) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *NestedProperty) UnmarshalBinary(b []byte) error { + var res NestedProperty + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/node_shard_status.go b/platform/dbops/binaries/weaviate-src/entities/models/node_shard_status.go new file mode 100644 index 0000000000000000000000000000000000000000..12ea96a5445d2778760e1a47b37397ee4f0c543e --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/node_shard_status.go @@ -0,0 +1,154 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NodeShardStatus The definition of a node shard status response body +// +// swagger:model NodeShardStatus +type NodeShardStatus struct { + + // The status of the async replication. + AsyncReplicationStatus []*AsyncReplicationStatus `json:"asyncReplicationStatus"` + + // The name of shard's class. + Class string `json:"class"` + + // The status of vector compression/quantization. + Compressed bool `json:"compressed"` + + // The load status of the shard. 
+ Loaded bool `json:"loaded"` + + // The name of the shard. + Name string `json:"name"` + + // Number of replicas for the shard. + NumberOfReplicas int64 `json:"numberOfReplicas,omitempty"` + + // The number of objects in shard. + ObjectCount int64 `json:"objectCount"` + + // Minimum number of replicas for the shard. + ReplicationFactor int64 `json:"replicationFactor,omitempty"` + + // The status of the vector indexing process. + VectorIndexingStatus string `json:"vectorIndexingStatus"` + + // The length of the vector indexing queue. + VectorQueueLength int64 `json:"vectorQueueLength"` +} + +// Validate validates this node shard status +func (m *NodeShardStatus) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAsyncReplicationStatus(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *NodeShardStatus) validateAsyncReplicationStatus(formats strfmt.Registry) error { + if swag.IsZero(m.AsyncReplicationStatus) { // not required + return nil + } + + for i := 0; i < len(m.AsyncReplicationStatus); i++ { + if swag.IsZero(m.AsyncReplicationStatus[i]) { // not required + continue + } + + if m.AsyncReplicationStatus[i] != nil { + if err := m.AsyncReplicationStatus[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("asyncReplicationStatus" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("asyncReplicationStatus" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// ContextValidate validate this node shard status based on the context it is used +func (m *NodeShardStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateAsyncReplicationStatus(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *NodeShardStatus) contextValidateAsyncReplicationStatus(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.AsyncReplicationStatus); i++ { + + if m.AsyncReplicationStatus[i] != nil { + if err := m.AsyncReplicationStatus[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("asyncReplicationStatus" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("asyncReplicationStatus" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *NodeShardStatus) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *NodeShardStatus) UnmarshalBinary(b []byte) error { + var res NodeShardStatus + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/node_stats.go b/platform/dbops/binaries/weaviate-src/entities/models/node_stats.go new file mode 100644 index 0000000000000000000000000000000000000000..6de39df29da18424923506441666de73224105a1 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/node_stats.go @@ -0,0 +1,64 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NodeStats The summary of Weaviate's statistics. +// +// swagger:model NodeStats +type NodeStats struct { + + // The total number of objects in DB. + ObjectCount int64 `json:"objectCount"` + + // The count of Weaviate's shards. To see this value, set `output` to `verbose`. 
+ ShardCount int64 `json:"shardCount"` +} + +// Validate validates this node stats +func (m *NodeStats) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this node stats based on context it is used +func (m *NodeStats) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *NodeStats) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *NodeStats) UnmarshalBinary(b []byte) error { + var res NodeStats + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/node_status.go b/platform/dbops/binaries/weaviate-src/entities/models/node_status.go new file mode 100644 index 0000000000000000000000000000000000000000..3f7f9a6da4c2d4fe98906d0e83e874ad6816a229 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/node_status.go @@ -0,0 +1,286 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// NodeStatus The definition of a backup node status response body +// +// swagger:model NodeStatus +type NodeStatus struct { + + // Weaviate batch statistics. 
+ BatchStats *BatchStats `json:"batchStats,omitempty"` + + // The gitHash of Weaviate. + GitHash string `json:"gitHash,omitempty"` + + // The name of the node. + Name string `json:"name,omitempty"` + + // The list of the shards with it's statistics. + Shards []*NodeShardStatus `json:"shards"` + + // Weaviate overall statistics. + Stats *NodeStats `json:"stats,omitempty"` + + // Node's status. + // Enum: [HEALTHY UNHEALTHY UNAVAILABLE TIMEOUT] + Status *string `json:"status,omitempty"` + + // The version of Weaviate. + Version string `json:"version,omitempty"` +} + +// Validate validates this node status +func (m *NodeStatus) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateBatchStats(formats); err != nil { + res = append(res, err) + } + + if err := m.validateShards(formats); err != nil { + res = append(res, err) + } + + if err := m.validateStats(formats); err != nil { + res = append(res, err) + } + + if err := m.validateStatus(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *NodeStatus) validateBatchStats(formats strfmt.Registry) error { + if swag.IsZero(m.BatchStats) { // not required + return nil + } + + if m.BatchStats != nil { + if err := m.BatchStats.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("batchStats") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("batchStats") + } + return err + } + } + + return nil +} + +func (m *NodeStatus) validateShards(formats strfmt.Registry) error { + if swag.IsZero(m.Shards) { // not required + return nil + } + + for i := 0; i < len(m.Shards); i++ { + if swag.IsZero(m.Shards[i]) { // not required + continue + } + + if m.Shards[i] != nil { + if err := m.Shards[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("shards" + "." 
+ strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("shards" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *NodeStatus) validateStats(formats strfmt.Registry) error { + if swag.IsZero(m.Stats) { // not required + return nil + } + + if m.Stats != nil { + if err := m.Stats.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("stats") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("stats") + } + return err + } + } + + return nil +} + +var nodeStatusTypeStatusPropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["HEALTHY","UNHEALTHY","UNAVAILABLE","TIMEOUT"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + nodeStatusTypeStatusPropEnum = append(nodeStatusTypeStatusPropEnum, v) + } +} + +const ( + + // NodeStatusStatusHEALTHY captures enum value "HEALTHY" + NodeStatusStatusHEALTHY string = "HEALTHY" + + // NodeStatusStatusUNHEALTHY captures enum value "UNHEALTHY" + NodeStatusStatusUNHEALTHY string = "UNHEALTHY" + + // NodeStatusStatusUNAVAILABLE captures enum value "UNAVAILABLE" + NodeStatusStatusUNAVAILABLE string = "UNAVAILABLE" + + // NodeStatusStatusTIMEOUT captures enum value "TIMEOUT" + NodeStatusStatusTIMEOUT string = "TIMEOUT" +) + +// prop value enum +func (m *NodeStatus) validateStatusEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, nodeStatusTypeStatusPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *NodeStatus) validateStatus(formats strfmt.Registry) error { + if swag.IsZero(m.Status) { // not required + return nil + } + + // value enum + if err := m.validateStatusEnum("status", "body", *m.Status); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this node status based on the context it is used +func (m *NodeStatus) 
ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateBatchStats(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateShards(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateStats(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *NodeStatus) contextValidateBatchStats(ctx context.Context, formats strfmt.Registry) error { + + if m.BatchStats != nil { + if err := m.BatchStats.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("batchStats") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("batchStats") + } + return err + } + } + + return nil +} + +func (m *NodeStatus) contextValidateShards(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.Shards); i++ { + + if m.Shards[i] != nil { + if err := m.Shards[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("shards" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("shards" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *NodeStatus) contextValidateStats(ctx context.Context, formats strfmt.Registry) error { + + if m.Stats != nil { + if err := m.Stats.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("stats") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("stats") + } + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *NodeStatus) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *NodeStatus) UnmarshalBinary(b []byte) error { + var res NodeStatus + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/nodes_status_response.go b/platform/dbops/binaries/weaviate-src/entities/models/nodes_status_response.go new file mode 100644 index 0000000000000000000000000000000000000000..7a40eefbca463fd2d07163f68066f6a10fc87a87 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/nodes_status_response.go @@ -0,0 +1,127 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NodesStatusResponse The status of all of the Weaviate nodes +// +// swagger:model NodesStatusResponse +type NodesStatusResponse struct { + + // nodes + Nodes []*NodeStatus `json:"nodes"` +} + +// Validate validates this nodes status response +func (m *NodesStatusResponse) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateNodes(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *NodesStatusResponse) validateNodes(formats strfmt.Registry) error { + if swag.IsZero(m.Nodes) { // not required + return nil + } + + for i := 0; i < len(m.Nodes); i++ { + if swag.IsZero(m.Nodes[i]) { // not required + continue + } + + if m.Nodes[i] != nil { + if err := m.Nodes[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("nodes" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("nodes" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// ContextValidate validate this nodes status response based on the context it is used +func (m *NodesStatusResponse) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateNodes(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *NodesStatusResponse) contextValidateNodes(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.Nodes); i++ { + + if m.Nodes[i] != nil { + if err := m.Nodes[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("nodes" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("nodes" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *NodesStatusResponse) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *NodesStatusResponse) UnmarshalBinary(b []byte) error { + var res NodesStatusResponse + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/object.go b/platform/dbops/binaries/weaviate-src/entities/models/object.go new file mode 100644 index 0000000000000000000000000000000000000000..d33e42ffbe9056d19350ec749723f62dc56c891e --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/object.go @@ -0,0 +1,238 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// Object object +// +// swagger:model Object +type Object struct { + + // additional + Additional AdditionalProperties `json:"additional,omitempty"` + + // Class of the Object, defined in the schema. + Class string `json:"class,omitempty"` + + // (Response only) Timestamp of creation of this object in milliseconds since epoch UTC. + CreationTimeUnix int64 `json:"creationTimeUnix,omitempty"` + + // ID of the Object. + // Format: uuid + ID strfmt.UUID `json:"id,omitempty"` + + // (Response only) Timestamp of the last object update in milliseconds since epoch UTC. + LastUpdateTimeUnix int64 `json:"lastUpdateTimeUnix,omitempty"` + + // properties + Properties PropertySchema `json:"properties,omitempty"` + + // Name of the Objects tenant. + Tenant string `json:"tenant,omitempty"` + + // This field returns vectors associated with the Object. C11yVector, Vector or Vectors values are possible. + Vector C11yVector `json:"vector,omitempty"` + + // vector weights + VectorWeights VectorWeights `json:"vectorWeights,omitempty"` + + // This field returns vectors associated with the Object. + Vectors Vectors `json:"vectors,omitempty"` +} + +// Validate validates this object +func (m *Object) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAdditional(formats); err != nil { + res = append(res, err) + } + + if err := m.validateID(formats); err != nil { + res = append(res, err) + } + + if err := m.validateVector(formats); err != nil { + res = append(res, err) + } + + if err := m.validateVectors(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *Object) validateAdditional(formats strfmt.Registry) error { + if swag.IsZero(m.Additional) { // not required + return nil + } + + if m.Additional != nil { + if err := m.Additional.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("additional") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("additional") + } + return err + } + } + + return nil +} + +func (m *Object) validateID(formats strfmt.Registry) error { + if swag.IsZero(m.ID) { // not required + return nil + } + + if err := validate.FormatOf("id", "body", "uuid", m.ID.String(), formats); err != nil { + return err + } + + return nil +} + +func (m *Object) validateVector(formats strfmt.Registry) error { + if swag.IsZero(m.Vector) { // not required + return nil + } + + if err := m.Vector.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("vector") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("vector") + } + return err + } + + return nil +} + +func (m *Object) validateVectors(formats strfmt.Registry) error { + if swag.IsZero(m.Vectors) { // not required + return nil + } + + if m.Vectors != nil { + if err := m.Vectors.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("vectors") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("vectors") + } + return err + } + } + + return nil +} + +// ContextValidate validate this object based on the context it is used +func (m *Object) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateAdditional(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateVector(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateVectors(ctx, formats); err != nil { + res = 
append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *Object) contextValidateAdditional(ctx context.Context, formats strfmt.Registry) error { + + if err := m.Additional.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("additional") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("additional") + } + return err + } + + return nil +} + +func (m *Object) contextValidateVector(ctx context.Context, formats strfmt.Registry) error { + + if err := m.Vector.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("vector") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("vector") + } + return err + } + + return nil +} + +func (m *Object) contextValidateVectors(ctx context.Context, formats strfmt.Registry) error { + + if err := m.Vectors.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("vectors") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("vectors") + } + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *Object) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Object) UnmarshalBinary(b []byte) error { + var res Object + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/objects_get_response.go b/platform/dbops/binaries/weaviate-src/entities/models/objects_get_response.go new file mode 100644 index 0000000000000000000000000000000000000000..1d73069ca255bb8589837825d86212d2ab1d43c7 --- /dev/null +++ 
b/platform/dbops/binaries/weaviate-src/entities/models/objects_get_response.go @@ -0,0 +1,394 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// ObjectsGetResponse objects get response +// +// swagger:model ObjectsGetResponse +type ObjectsGetResponse struct { + Object + + // deprecations + Deprecations []*Deprecation `json:"deprecations"` + + // result + Result *ObjectsGetResponseAO2Result `json:"result,omitempty"` +} + +// UnmarshalJSON unmarshals this object from a JSON structure +func (m *ObjectsGetResponse) UnmarshalJSON(raw []byte) error { + // AO0 + var aO0 Object + if err := swag.ReadJSON(raw, &aO0); err != nil { + return err + } + m.Object = aO0 + + // AO1 + var dataAO1 struct { + Deprecations []*Deprecation `json:"deprecations"` + } + if err := swag.ReadJSON(raw, &dataAO1); err != nil { + return err + } + + m.Deprecations = dataAO1.Deprecations + + // AO2 + var dataAO2 struct { + Result *ObjectsGetResponseAO2Result `json:"result,omitempty"` + } + if err := swag.ReadJSON(raw, &dataAO2); err != nil { + return err + } + + m.Result = dataAO2.Result + + return nil +} + +// MarshalJSON marshals this object to a JSON structure +func (m ObjectsGetResponse) MarshalJSON() ([]byte, error) { + _parts := make([][]byte, 0, 3) + + aO0, err := swag.WriteJSON(m.Object) + if err != nil { + return nil, err + } + _parts = append(_parts, aO0) + var 
dataAO1 struct { + Deprecations []*Deprecation `json:"deprecations"` + } + + dataAO1.Deprecations = m.Deprecations + + jsonDataAO1, errAO1 := swag.WriteJSON(dataAO1) + if errAO1 != nil { + return nil, errAO1 + } + _parts = append(_parts, jsonDataAO1) + var dataAO2 struct { + Result *ObjectsGetResponseAO2Result `json:"result,omitempty"` + } + + dataAO2.Result = m.Result + + jsonDataAO2, errAO2 := swag.WriteJSON(dataAO2) + if errAO2 != nil { + return nil, errAO2 + } + _parts = append(_parts, jsonDataAO2) + return swag.ConcatJSON(_parts...), nil +} + +// Validate validates this objects get response +func (m *ObjectsGetResponse) Validate(formats strfmt.Registry) error { + var res []error + + // validation for a type composition with Object + if err := m.Object.Validate(formats); err != nil { + res = append(res, err) + } + + if err := m.validateDeprecations(formats); err != nil { + res = append(res, err) + } + + if err := m.validateResult(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *ObjectsGetResponse) validateDeprecations(formats strfmt.Registry) error { + + if swag.IsZero(m.Deprecations) { // not required + return nil + } + + for i := 0; i < len(m.Deprecations); i++ { + if swag.IsZero(m.Deprecations[i]) { // not required + continue + } + + if m.Deprecations[i] != nil { + if err := m.Deprecations[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("deprecations" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("deprecations" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *ObjectsGetResponse) validateResult(formats strfmt.Registry) error { + + if swag.IsZero(m.Result) { // not required + return nil + } + + if m.Result != nil { + if err := m.Result.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("result") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("result") + } + return err + } + } + + return nil +} + +// ContextValidate validate this objects get response based on the context it is used +func (m *ObjectsGetResponse) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + // validation for a type composition with Object + if err := m.Object.ContextValidate(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateDeprecations(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateResult(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *ObjectsGetResponse) contextValidateDeprecations(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.Deprecations); i++ { + + if m.Deprecations[i] != nil { + if err := m.Deprecations[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("deprecations" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("deprecations" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *ObjectsGetResponse) contextValidateResult(ctx context.Context, formats strfmt.Registry) error { + + if m.Result != nil { + if err := m.Result.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("result") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("result") + } + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *ObjectsGetResponse) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ObjectsGetResponse) UnmarshalBinary(b []byte) error { + var res ObjectsGetResponse + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// ObjectsGetResponseAO2Result Results for this specific Object. +// +// swagger:model ObjectsGetResponseAO2Result +type ObjectsGetResponseAO2Result struct { + + // errors + Errors *ErrorResponse `json:"errors,omitempty"` + + // status + // Enum: [SUCCESS FAILED] + Status *string `json:"status,omitempty"` +} + +// Validate validates this objects get response a o2 result +func (m *ObjectsGetResponseAO2Result) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateErrors(formats); err != nil { + res = append(res, err) + } + + if err := m.validateStatus(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *ObjectsGetResponseAO2Result) validateErrors(formats strfmt.Registry) error { + if swag.IsZero(m.Errors) { // not required + return nil + } + + if m.Errors != nil { + if err := m.Errors.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("result" + "." 
+ "errors") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("result" + "." + "errors") + } + return err + } + } + + return nil +} + +var objectsGetResponseAO2ResultTypeStatusPropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["SUCCESS","FAILED"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + objectsGetResponseAO2ResultTypeStatusPropEnum = append(objectsGetResponseAO2ResultTypeStatusPropEnum, v) + } +} + +const ( + + // ObjectsGetResponseAO2ResultStatusSUCCESS captures enum value "SUCCESS" + ObjectsGetResponseAO2ResultStatusSUCCESS string = "SUCCESS" + + // ObjectsGetResponseAO2ResultStatusFAILED captures enum value "FAILED" + ObjectsGetResponseAO2ResultStatusFAILED string = "FAILED" +) + +// prop value enum +func (m *ObjectsGetResponseAO2Result) validateStatusEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, objectsGetResponseAO2ResultTypeStatusPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *ObjectsGetResponseAO2Result) validateStatus(formats strfmt.Registry) error { + if swag.IsZero(m.Status) { // not required + return nil + } + + // value enum + if err := m.validateStatusEnum("result"+"."+"status", "body", *m.Status); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this objects get response a o2 result based on the context it is used +func (m *ObjectsGetResponseAO2Result) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateErrors(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *ObjectsGetResponseAO2Result) contextValidateErrors(ctx context.Context, formats strfmt.Registry) error { + + if m.Errors != nil { + if err := m.Errors.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("result" + "." + "errors") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("result" + "." + "errors") + } + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *ObjectsGetResponseAO2Result) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ObjectsGetResponseAO2Result) UnmarshalBinary(b []byte) error { + var res ObjectsGetResponseAO2Result + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/objects_list_response.go b/platform/dbops/binaries/weaviate-src/entities/models/objects_list_response.go new file mode 100644 index 0000000000000000000000000000000000000000..3aab6f4d58c9a3c2c1f6757f07716373f4001feb --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/objects_list_response.go @@ -0,0 +1,187 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// ObjectsListResponse List of Objects. 
+// +// swagger:model ObjectsListResponse +type ObjectsListResponse struct { + + // deprecations + Deprecations []*Deprecation `json:"deprecations"` + + // The actual list of Objects. + Objects []*Object `json:"objects"` + + // The total number of Objects for the query. The number of items in a response may be smaller due to paging. + TotalResults int64 `json:"totalResults,omitempty"` +} + +// Validate validates this objects list response +func (m *ObjectsListResponse) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateDeprecations(formats); err != nil { + res = append(res, err) + } + + if err := m.validateObjects(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *ObjectsListResponse) validateDeprecations(formats strfmt.Registry) error { + if swag.IsZero(m.Deprecations) { // not required + return nil + } + + for i := 0; i < len(m.Deprecations); i++ { + if swag.IsZero(m.Deprecations[i]) { // not required + continue + } + + if m.Deprecations[i] != nil { + if err := m.Deprecations[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("deprecations" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("deprecations" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *ObjectsListResponse) validateObjects(formats strfmt.Registry) error { + if swag.IsZero(m.Objects) { // not required + return nil + } + + for i := 0; i < len(m.Objects); i++ { + if swag.IsZero(m.Objects[i]) { // not required + continue + } + + if m.Objects[i] != nil { + if err := m.Objects[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("objects" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("objects" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// ContextValidate validate this objects list response based on the context it is used +func (m *ObjectsListResponse) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateDeprecations(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateObjects(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *ObjectsListResponse) contextValidateDeprecations(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.Deprecations); i++ { + + if m.Deprecations[i] != nil { + if err := m.Deprecations[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("deprecations" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("deprecations" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *ObjectsListResponse) contextValidateObjects(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.Objects); i++ { + + if m.Objects[i] != nil { + if err := m.Objects[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("objects" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("objects" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *ObjectsListResponse) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ObjectsListResponse) UnmarshalBinary(b []byte) error { + var res ObjectsListResponse + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/patch_document_action.go b/platform/dbops/binaries/weaviate-src/entities/models/patch_document_action.go new file mode 100644 index 0000000000000000000000000000000000000000..56a3d0d7d5abb7134f287c946162467d46b85a0f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/patch_document_action.go @@ -0,0 +1,204 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// PatchDocumentAction Either a JSONPatch document as defined by RFC 6902 (from, op, path, value), or a merge document (RFC 7396). +// +// swagger:model PatchDocumentAction +type PatchDocumentAction struct { + + // A string containing a JSON Pointer value. + From string `json:"from,omitempty"` + + // merge + Merge *Object `json:"merge,omitempty"` + + // The operation to be performed. 
+ // Required: true + // Enum: [add remove replace move copy test] + Op *string `json:"op"` + + // A JSON-Pointer. + // Required: true + Path *string `json:"path"` + + // The value to be used within the operations. + Value interface{} `json:"value,omitempty"` +} + +// Validate validates this patch document action +func (m *PatchDocumentAction) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateMerge(formats); err != nil { + res = append(res, err) + } + + if err := m.validateOp(formats); err != nil { + res = append(res, err) + } + + if err := m.validatePath(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *PatchDocumentAction) validateMerge(formats strfmt.Registry) error { + if swag.IsZero(m.Merge) { // not required + return nil + } + + if m.Merge != nil { + if err := m.Merge.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("merge") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("merge") + } + return err + } + } + + return nil +} + +var patchDocumentActionTypeOpPropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["add","remove","replace","move","copy","test"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + patchDocumentActionTypeOpPropEnum = append(patchDocumentActionTypeOpPropEnum, v) + } +} + +const ( + + // PatchDocumentActionOpAdd captures enum value "add" + PatchDocumentActionOpAdd string = "add" + + // PatchDocumentActionOpRemove captures enum value "remove" + PatchDocumentActionOpRemove string = "remove" + + // PatchDocumentActionOpReplace captures enum value "replace" + PatchDocumentActionOpReplace string = "replace" + + // PatchDocumentActionOpMove captures enum value "move" + PatchDocumentActionOpMove string = "move" + + // PatchDocumentActionOpCopy captures enum value 
"copy" + PatchDocumentActionOpCopy string = "copy" + + // PatchDocumentActionOpTest captures enum value "test" + PatchDocumentActionOpTest string = "test" +) + +// prop value enum +func (m *PatchDocumentAction) validateOpEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, patchDocumentActionTypeOpPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *PatchDocumentAction) validateOp(formats strfmt.Registry) error { + + if err := validate.Required("op", "body", m.Op); err != nil { + return err + } + + // value enum + if err := m.validateOpEnum("op", "body", *m.Op); err != nil { + return err + } + + return nil +} + +func (m *PatchDocumentAction) validatePath(formats strfmt.Registry) error { + + if err := validate.Required("path", "body", m.Path); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this patch document action based on the context it is used +func (m *PatchDocumentAction) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateMerge(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *PatchDocumentAction) contextValidateMerge(ctx context.Context, formats strfmt.Registry) error { + + if m.Merge != nil { + if err := m.Merge.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("merge") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("merge") + } + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *PatchDocumentAction) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *PatchDocumentAction) UnmarshalBinary(b []byte) error { + var res PatchDocumentAction + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/patch_document_object.go b/platform/dbops/binaries/weaviate-src/entities/models/patch_document_object.go new file mode 100644 index 0000000000000000000000000000000000000000..00c1a24bf512249a0b557e58bf8e503e6f20a2f1 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/patch_document_object.go @@ -0,0 +1,204 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// PatchDocumentObject Either a JSONPatch document as defined by RFC 6902 (from, op, path, value), or a merge document (RFC 7396). +// +// swagger:model PatchDocumentObject +type PatchDocumentObject struct { + + // A string containing a JSON Pointer value. + From string `json:"from,omitempty"` + + // merge + Merge *Object `json:"merge,omitempty"` + + // The operation to be performed. + // Required: true + // Enum: [add remove replace move copy test] + Op *string `json:"op"` + + // A JSON-Pointer. + // Required: true + Path *string `json:"path"` + + // The value to be used within the operations. + Value interface{} `json:"value,omitempty"` +} + +// Validate validates this patch document object +func (m *PatchDocumentObject) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateMerge(formats); err != nil { + res = append(res, err) + } + + if err := m.validateOp(formats); err != nil { + res = append(res, err) + } + + if err := m.validatePath(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *PatchDocumentObject) validateMerge(formats strfmt.Registry) error { + if swag.IsZero(m.Merge) { // not required + return nil + } + + if m.Merge != nil { + if err := m.Merge.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("merge") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("merge") + } + return err + } + } + + return nil +} + +var patchDocumentObjectTypeOpPropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["add","remove","replace","move","copy","test"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + patchDocumentObjectTypeOpPropEnum = append(patchDocumentObjectTypeOpPropEnum, v) + } +} + +const ( + + // PatchDocumentObjectOpAdd captures enum value "add" + PatchDocumentObjectOpAdd string = "add" + + // PatchDocumentObjectOpRemove captures enum value "remove" + PatchDocumentObjectOpRemove string = "remove" + + // PatchDocumentObjectOpReplace captures enum value "replace" + PatchDocumentObjectOpReplace string = "replace" + + // PatchDocumentObjectOpMove captures enum value "move" + PatchDocumentObjectOpMove string = "move" + + // PatchDocumentObjectOpCopy captures enum value "copy" + PatchDocumentObjectOpCopy string = "copy" + + // PatchDocumentObjectOpTest captures enum value "test" + PatchDocumentObjectOpTest string = "test" +) + +// prop value enum +func (m *PatchDocumentObject) validateOpEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, patchDocumentObjectTypeOpPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *PatchDocumentObject) validateOp(formats strfmt.Registry) error { + + if err := validate.Required("op", "body", m.Op); err != nil { + return err + } + + // value enum + if err := m.validateOpEnum("op", "body", *m.Op); err != nil { + return err + } + + return nil +} + +func (m *PatchDocumentObject) 
validatePath(formats strfmt.Registry) error { + + if err := validate.Required("path", "body", m.Path); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this patch document object based on the context it is used +func (m *PatchDocumentObject) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateMerge(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *PatchDocumentObject) contextValidateMerge(ctx context.Context, formats strfmt.Registry) error { + + if m.Merge != nil { + if err := m.Merge.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("merge") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("merge") + } + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *PatchDocumentObject) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *PatchDocumentObject) UnmarshalBinary(b []byte) error { + var res PatchDocumentObject + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/peer_update.go b/platform/dbops/binaries/weaviate-src/entities/models/peer_update.go new file mode 100644 index 0000000000000000000000000000000000000000..6605208e746f21ffa5e782d80707c8612450cc2c --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/peer_update.go @@ -0,0 +1,111 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// PeerUpdate A single peer in the network. +// +// swagger:model PeerUpdate +type PeerUpdate struct { + + // The session ID of the peer. + // Format: uuid + ID strfmt.UUID `json:"id,omitempty"` + + // Human readable name. + Name string `json:"name,omitempty"` + + // The latest known hash of the peer's schema. + SchemaHash string `json:"schemaHash,omitempty"` + + // The location where the peer is exposed to the internet. + // Format: uri + URI strfmt.URI `json:"uri,omitempty"` +} + +// Validate validates this peer update +func (m *PeerUpdate) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateID(formats); err != nil { + res = append(res, err) + } + + if err := m.validateURI(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *PeerUpdate) validateID(formats strfmt.Registry) error { + if swag.IsZero(m.ID) { // not required + return nil + } + + if err := validate.FormatOf("id", "body", "uuid", m.ID.String(), formats); err != nil { + return err + } + + return nil +} + +func (m *PeerUpdate) validateURI(formats strfmt.Registry) error { + if swag.IsZero(m.URI) { // not required + return nil + } + + if err := validate.FormatOf("uri", "body", "uri", m.URI.String(), formats); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this peer update based on context it is used +func (m *PeerUpdate) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *PeerUpdate) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *PeerUpdate) UnmarshalBinary(b []byte) error { + var res PeerUpdate + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/peer_update_list.go b/platform/dbops/binaries/weaviate-src/entities/models/peer_update_list.go new file mode 100644 index 0000000000000000000000000000000000000000..2dcca483895a4ecce5e87c5c9aca4422023ac5ea --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/peer_update_list.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// PeerUpdateList List of known peers. +// +// swagger:model PeerUpdateList +type PeerUpdateList []*PeerUpdate + +// Validate validates this peer update list +func (m PeerUpdateList) Validate(formats strfmt.Registry) error { + var res []error + + for i := 0; i < len(m); i++ { + if swag.IsZero(m[i]) { // not required + continue + } + + if m[i] != nil { + if err := m[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName(strconv.Itoa(i)) + } + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// ContextValidate validate this peer update list based on the context it is used +func (m PeerUpdateList) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + for i := 0; i < len(m); i++ { + + if m[i] != nil { + if err := m[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName(strconv.Itoa(i)) + } + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/permission.go b/platform/dbops/binaries/weaviate-src/entities/models/permission.go new file mode 100644 index 0000000000000000000000000000000000000000..16c367ab0cd0c064e7ce68a471cecf7a378e2141 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/permission.go @@ -0,0 +1,1226 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// Permission permissions attached to a role. +// +// swagger:model Permission +type Permission struct { + + // allowed actions in weaviate. 
+ // Required: true + // Enum: [manage_backups read_cluster create_data read_data update_data delete_data read_nodes create_roles read_roles update_roles delete_roles create_collections read_collections update_collections delete_collections assign_and_revoke_users create_users read_users update_users delete_users create_tenants read_tenants update_tenants delete_tenants create_replicate read_replicate update_replicate delete_replicate create_aliases read_aliases update_aliases delete_aliases assign_and_revoke_groups read_groups] + Action *string `json:"action"` + + // aliases + Aliases *PermissionAliases `json:"aliases,omitempty"` + + // backups + Backups *PermissionBackups `json:"backups,omitempty"` + + // collections + Collections *PermissionCollections `json:"collections,omitempty"` + + // data + Data *PermissionData `json:"data,omitempty"` + + // groups + Groups *PermissionGroups `json:"groups,omitempty"` + + // nodes + Nodes *PermissionNodes `json:"nodes,omitempty"` + + // replicate + Replicate *PermissionReplicate `json:"replicate,omitempty"` + + // roles + Roles *PermissionRoles `json:"roles,omitempty"` + + // tenants + Tenants *PermissionTenants `json:"tenants,omitempty"` + + // users + Users *PermissionUsers `json:"users,omitempty"` +} + +// Validate validates this permission +func (m *Permission) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAction(formats); err != nil { + res = append(res, err) + } + + if err := m.validateAliases(formats); err != nil { + res = append(res, err) + } + + if err := m.validateBackups(formats); err != nil { + res = append(res, err) + } + + if err := m.validateCollections(formats); err != nil { + res = append(res, err) + } + + if err := m.validateData(formats); err != nil { + res = append(res, err) + } + + if err := m.validateGroups(formats); err != nil { + res = append(res, err) + } + + if err := m.validateNodes(formats); err != nil { + res = append(res, err) + } + + if err := 
m.validateReplicate(formats); err != nil { + res = append(res, err) + } + + if err := m.validateRoles(formats); err != nil { + res = append(res, err) + } + + if err := m.validateTenants(formats); err != nil { + res = append(res, err) + } + + if err := m.validateUsers(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +var permissionTypeActionPropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["manage_backups","read_cluster","create_data","read_data","update_data","delete_data","read_nodes","create_roles","read_roles","update_roles","delete_roles","create_collections","read_collections","update_collections","delete_collections","assign_and_revoke_users","create_users","read_users","update_users","delete_users","create_tenants","read_tenants","update_tenants","delete_tenants","create_replicate","read_replicate","update_replicate","delete_replicate","create_aliases","read_aliases","update_aliases","delete_aliases","assign_and_revoke_groups","read_groups"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + permissionTypeActionPropEnum = append(permissionTypeActionPropEnum, v) + } +} + +const ( + + // PermissionActionManageBackups captures enum value "manage_backups" + PermissionActionManageBackups string = "manage_backups" + + // PermissionActionReadCluster captures enum value "read_cluster" + PermissionActionReadCluster string = "read_cluster" + + // PermissionActionCreateData captures enum value "create_data" + PermissionActionCreateData string = "create_data" + + // PermissionActionReadData captures enum value "read_data" + PermissionActionReadData string = "read_data" + + // PermissionActionUpdateData captures enum value "update_data" + PermissionActionUpdateData string = "update_data" + + // PermissionActionDeleteData captures enum value "delete_data" + PermissionActionDeleteData string = "delete_data" + + // 
PermissionActionReadNodes captures enum value "read_nodes" + PermissionActionReadNodes string = "read_nodes" + + // PermissionActionCreateRoles captures enum value "create_roles" + PermissionActionCreateRoles string = "create_roles" + + // PermissionActionReadRoles captures enum value "read_roles" + PermissionActionReadRoles string = "read_roles" + + // PermissionActionUpdateRoles captures enum value "update_roles" + PermissionActionUpdateRoles string = "update_roles" + + // PermissionActionDeleteRoles captures enum value "delete_roles" + PermissionActionDeleteRoles string = "delete_roles" + + // PermissionActionCreateCollections captures enum value "create_collections" + PermissionActionCreateCollections string = "create_collections" + + // PermissionActionReadCollections captures enum value "read_collections" + PermissionActionReadCollections string = "read_collections" + + // PermissionActionUpdateCollections captures enum value "update_collections" + PermissionActionUpdateCollections string = "update_collections" + + // PermissionActionDeleteCollections captures enum value "delete_collections" + PermissionActionDeleteCollections string = "delete_collections" + + // PermissionActionAssignAndRevokeUsers captures enum value "assign_and_revoke_users" + PermissionActionAssignAndRevokeUsers string = "assign_and_revoke_users" + + // PermissionActionCreateUsers captures enum value "create_users" + PermissionActionCreateUsers string = "create_users" + + // PermissionActionReadUsers captures enum value "read_users" + PermissionActionReadUsers string = "read_users" + + // PermissionActionUpdateUsers captures enum value "update_users" + PermissionActionUpdateUsers string = "update_users" + + // PermissionActionDeleteUsers captures enum value "delete_users" + PermissionActionDeleteUsers string = "delete_users" + + // PermissionActionCreateTenants captures enum value "create_tenants" + PermissionActionCreateTenants string = "create_tenants" + + // PermissionActionReadTenants 
captures enum value "read_tenants" + PermissionActionReadTenants string = "read_tenants" + + // PermissionActionUpdateTenants captures enum value "update_tenants" + PermissionActionUpdateTenants string = "update_tenants" + + // PermissionActionDeleteTenants captures enum value "delete_tenants" + PermissionActionDeleteTenants string = "delete_tenants" + + // PermissionActionCreateReplicate captures enum value "create_replicate" + PermissionActionCreateReplicate string = "create_replicate" + + // PermissionActionReadReplicate captures enum value "read_replicate" + PermissionActionReadReplicate string = "read_replicate" + + // PermissionActionUpdateReplicate captures enum value "update_replicate" + PermissionActionUpdateReplicate string = "update_replicate" + + // PermissionActionDeleteReplicate captures enum value "delete_replicate" + PermissionActionDeleteReplicate string = "delete_replicate" + + // PermissionActionCreateAliases captures enum value "create_aliases" + PermissionActionCreateAliases string = "create_aliases" + + // PermissionActionReadAliases captures enum value "read_aliases" + PermissionActionReadAliases string = "read_aliases" + + // PermissionActionUpdateAliases captures enum value "update_aliases" + PermissionActionUpdateAliases string = "update_aliases" + + // PermissionActionDeleteAliases captures enum value "delete_aliases" + PermissionActionDeleteAliases string = "delete_aliases" + + // PermissionActionAssignAndRevokeGroups captures enum value "assign_and_revoke_groups" + PermissionActionAssignAndRevokeGroups string = "assign_and_revoke_groups" + + // PermissionActionReadGroups captures enum value "read_groups" + PermissionActionReadGroups string = "read_groups" +) + +// prop value enum +func (m *Permission) validateActionEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, permissionTypeActionPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *Permission) 
validateAction(formats strfmt.Registry) error { + + if err := validate.Required("action", "body", m.Action); err != nil { + return err + } + + // value enum + if err := m.validateActionEnum("action", "body", *m.Action); err != nil { + return err + } + + return nil +} + +func (m *Permission) validateAliases(formats strfmt.Registry) error { + if swag.IsZero(m.Aliases) { // not required + return nil + } + + if m.Aliases != nil { + if err := m.Aliases.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("aliases") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("aliases") + } + return err + } + } + + return nil +} + +func (m *Permission) validateBackups(formats strfmt.Registry) error { + if swag.IsZero(m.Backups) { // not required + return nil + } + + if m.Backups != nil { + if err := m.Backups.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("backups") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("backups") + } + return err + } + } + + return nil +} + +func (m *Permission) validateCollections(formats strfmt.Registry) error { + if swag.IsZero(m.Collections) { // not required + return nil + } + + if m.Collections != nil { + if err := m.Collections.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("collections") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("collections") + } + return err + } + } + + return nil +} + +func (m *Permission) validateData(formats strfmt.Registry) error { + if swag.IsZero(m.Data) { // not required + return nil + } + + if m.Data != nil { + if err := m.Data.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("data") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("data") + } + return err + } + } + + 
return nil +} + +func (m *Permission) validateGroups(formats strfmt.Registry) error { + if swag.IsZero(m.Groups) { // not required + return nil + } + + if m.Groups != nil { + if err := m.Groups.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("groups") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("groups") + } + return err + } + } + + return nil +} + +func (m *Permission) validateNodes(formats strfmt.Registry) error { + if swag.IsZero(m.Nodes) { // not required + return nil + } + + if m.Nodes != nil { + if err := m.Nodes.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("nodes") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("nodes") + } + return err + } + } + + return nil +} + +func (m *Permission) validateReplicate(formats strfmt.Registry) error { + if swag.IsZero(m.Replicate) { // not required + return nil + } + + if m.Replicate != nil { + if err := m.Replicate.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("replicate") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("replicate") + } + return err + } + } + + return nil +} + +func (m *Permission) validateRoles(formats strfmt.Registry) error { + if swag.IsZero(m.Roles) { // not required + return nil + } + + if m.Roles != nil { + if err := m.Roles.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("roles") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("roles") + } + return err + } + } + + return nil +} + +func (m *Permission) validateTenants(formats strfmt.Registry) error { + if swag.IsZero(m.Tenants) { // not required + return nil + } + + if m.Tenants != nil { + if err := m.Tenants.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + 
return ve.ValidateName("tenants") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("tenants") + } + return err + } + } + + return nil +} + +func (m *Permission) validateUsers(formats strfmt.Registry) error { + if swag.IsZero(m.Users) { // not required + return nil + } + + if m.Users != nil { + if err := m.Users.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("users") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("users") + } + return err + } + } + + return nil +} + +// ContextValidate validate this permission based on the context it is used +func (m *Permission) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateAliases(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateBackups(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateCollections(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateData(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateGroups(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateNodes(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateReplicate(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateRoles(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateTenants(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateUsers(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *Permission) contextValidateAliases(ctx context.Context, formats strfmt.Registry) error { + + if m.Aliases != nil { + if err := m.Aliases.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("aliases") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("aliases") + } + return err + } + } + + return nil +} + +func (m *Permission) contextValidateBackups(ctx context.Context, formats strfmt.Registry) error { + + if m.Backups != nil { + if err := m.Backups.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("backups") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("backups") + } + return err + } + } + + return nil +} + +func (m *Permission) contextValidateCollections(ctx context.Context, formats strfmt.Registry) error { + + if m.Collections != nil { + if err := m.Collections.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("collections") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("collections") + } + return err + } + } + + return nil +} + +func (m *Permission) contextValidateData(ctx context.Context, formats strfmt.Registry) error { + + if m.Data != nil { + if err := m.Data.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("data") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("data") + } + return err + } + } + + return nil +} + +func (m *Permission) contextValidateGroups(ctx context.Context, formats strfmt.Registry) error { + + if m.Groups != nil { + if err := m.Groups.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("groups") + } else if ce, ok := err.(*errors.CompositeError); ok { + 
return ce.ValidateName("groups") + } + return err + } + } + + return nil +} + +func (m *Permission) contextValidateNodes(ctx context.Context, formats strfmt.Registry) error { + + if m.Nodes != nil { + if err := m.Nodes.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("nodes") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("nodes") + } + return err + } + } + + return nil +} + +func (m *Permission) contextValidateReplicate(ctx context.Context, formats strfmt.Registry) error { + + if m.Replicate != nil { + if err := m.Replicate.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("replicate") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("replicate") + } + return err + } + } + + return nil +} + +func (m *Permission) contextValidateRoles(ctx context.Context, formats strfmt.Registry) error { + + if m.Roles != nil { + if err := m.Roles.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("roles") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("roles") + } + return err + } + } + + return nil +} + +func (m *Permission) contextValidateTenants(ctx context.Context, formats strfmt.Registry) error { + + if m.Tenants != nil { + if err := m.Tenants.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("tenants") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("tenants") + } + return err + } + } + + return nil +} + +func (m *Permission) contextValidateUsers(ctx context.Context, formats strfmt.Registry) error { + + if m.Users != nil { + if err := m.Users.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("users") + } else if ce, ok := 
err.(*errors.CompositeError); ok { + return ce.ValidateName("users") + } + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *Permission) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Permission) UnmarshalBinary(b []byte) error { + var res Permission + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// PermissionAliases Resource definition for alias-related actions and permissions. Used to specify which aliases and collections can be accessed or modified. +// +// swagger:model PermissionAliases +type PermissionAliases struct { + + // A string that specifies which aliases this permission applies to. Can be an exact alias name or a regex pattern. The default value `*` applies the permission to all aliases. + Alias *string `json:"alias,omitempty"` + + // A string that specifies which collections this permission applies to. Can be an exact collection name or a regex pattern. The default value `*` applies the permission to all collections. 
+ Collection *string `json:"collection,omitempty"` +} + +// Validate validates this permission aliases +func (m *PermissionAliases) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this permission aliases based on context it is used +func (m *PermissionAliases) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *PermissionAliases) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *PermissionAliases) UnmarshalBinary(b []byte) error { + var res PermissionAliases + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// PermissionBackups resources applicable for backup actions +// +// swagger:model PermissionBackups +type PermissionBackups struct { + + // string or regex. if a specific collection name, if left empty it will be ALL or * + Collection *string `json:"collection,omitempty"` +} + +// Validate validates this permission backups +func (m *PermissionBackups) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this permission backups based on context it is used +func (m *PermissionBackups) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *PermissionBackups) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *PermissionBackups) UnmarshalBinary(b []byte) error { + var res PermissionBackups + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// PermissionCollections resources applicable for collection and/or tenant actions +// +// swagger:model PermissionCollections +type PermissionCollections struct { + + // string 
or regex. if a specific collection name, if left empty it will be ALL or * + Collection *string `json:"collection,omitempty"` +} + +// Validate validates this permission collections +func (m *PermissionCollections) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this permission collections based on context it is used +func (m *PermissionCollections) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *PermissionCollections) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *PermissionCollections) UnmarshalBinary(b []byte) error { + var res PermissionCollections + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// PermissionData resources applicable for data actions +// +// swagger:model PermissionData +type PermissionData struct { + + // string or regex. if a specific collection name, if left empty it will be ALL or * + Collection *string `json:"collection,omitempty"` + + // string or regex. if a specific object ID, if left empty it will be ALL or * + Object *string `json:"object,omitempty"` + + // string or regex. 
if a specific tenant name, if left empty it will be ALL or * + Tenant *string `json:"tenant,omitempty"` +} + +// Validate validates this permission data +func (m *PermissionData) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this permission data based on context it is used +func (m *PermissionData) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *PermissionData) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *PermissionData) UnmarshalBinary(b []byte) error { + var res PermissionData + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// PermissionGroups Resources applicable for group actions. +// +// swagger:model PermissionGroups +type PermissionGroups struct { + + // A string that specifies which groups this permission applies to. Can be an exact group name or a regex pattern. The default value `*` applies the permission to all groups. + Group *string `json:"group,omitempty"` + + // group type + GroupType GroupType `json:"groupType,omitempty"` +} + +// Validate validates this permission groups +func (m *PermissionGroups) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateGroupType(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *PermissionGroups) validateGroupType(formats strfmt.Registry) error { + if swag.IsZero(m.GroupType) { // not required + return nil + } + + if err := m.GroupType.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("groups" + "." + "groupType") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("groups" + "." 
+ "groupType") + } + return err + } + + return nil +} + +// ContextValidate validate this permission groups based on the context it is used +func (m *PermissionGroups) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateGroupType(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *PermissionGroups) contextValidateGroupType(ctx context.Context, formats strfmt.Registry) error { + + if err := m.GroupType.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("groups" + "." + "groupType") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("groups" + "." + "groupType") + } + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *PermissionGroups) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *PermissionGroups) UnmarshalBinary(b []byte) error { + var res PermissionGroups + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// PermissionNodes resources applicable for cluster actions +// +// swagger:model PermissionNodes +type PermissionNodes struct { + + // string or regex. 
if a specific collection name, if left empty it will be ALL or * + Collection *string `json:"collection,omitempty"` + + // whether to allow (verbose) returning shards and stats data in the response + // Enum: [verbose minimal] + Verbosity *string `json:"verbosity,omitempty"` +} + +// Validate validates this permission nodes +func (m *PermissionNodes) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateVerbosity(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +var permissionNodesTypeVerbosityPropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["verbose","minimal"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + permissionNodesTypeVerbosityPropEnum = append(permissionNodesTypeVerbosityPropEnum, v) + } +} + +const ( + + // PermissionNodesVerbosityVerbose captures enum value "verbose" + PermissionNodesVerbosityVerbose string = "verbose" + + // PermissionNodesVerbosityMinimal captures enum value "minimal" + PermissionNodesVerbosityMinimal string = "minimal" +) + +// prop value enum +func (m *PermissionNodes) validateVerbosityEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, permissionNodesTypeVerbosityPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *PermissionNodes) validateVerbosity(formats strfmt.Registry) error { + if swag.IsZero(m.Verbosity) { // not required + return nil + } + + // value enum + if err := m.validateVerbosityEnum("nodes"+"."+"verbosity", "body", *m.Verbosity); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this permission nodes based on context it is used +func (m *PermissionNodes) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *PermissionNodes) MarshalBinary() 
([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *PermissionNodes) UnmarshalBinary(b []byte) error { + var res PermissionNodes + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// PermissionReplicate resources applicable for replicate actions +// +// swagger:model PermissionReplicate +type PermissionReplicate struct { + + // string or regex. if a specific collection name, if left empty it will be ALL or * + Collection *string `json:"collection,omitempty"` + + // string or regex. if a specific shard name, if left empty it will be ALL or * + Shard *string `json:"shard,omitempty"` +} + +// Validate validates this permission replicate +func (m *PermissionReplicate) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this permission replicate based on context it is used +func (m *PermissionReplicate) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *PermissionReplicate) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *PermissionReplicate) UnmarshalBinary(b []byte) error { + var res PermissionReplicate + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// PermissionRoles resources applicable for role actions +// +// swagger:model PermissionRoles +type PermissionRoles struct { + + // string or regex. 
if a specific role name, if left empty it will be ALL or * + Role *string `json:"role,omitempty"` + + // set the scope for the manage role permission + // Enum: [all match] + Scope *string `json:"scope,omitempty"` +} + +// Validate validates this permission roles +func (m *PermissionRoles) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateScope(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +var permissionRolesTypeScopePropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["all","match"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + permissionRolesTypeScopePropEnum = append(permissionRolesTypeScopePropEnum, v) + } +} + +const ( + + // PermissionRolesScopeAll captures enum value "all" + PermissionRolesScopeAll string = "all" + + // PermissionRolesScopeMatch captures enum value "match" + PermissionRolesScopeMatch string = "match" +) + +// prop value enum +func (m *PermissionRoles) validateScopeEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, permissionRolesTypeScopePropEnum, true); err != nil { + return err + } + return nil +} + +func (m *PermissionRoles) validateScope(formats strfmt.Registry) error { + if swag.IsZero(m.Scope) { // not required + return nil + } + + // value enum + if err := m.validateScopeEnum("roles"+"."+"scope", "body", *m.Scope); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this permission roles based on context it is used +func (m *PermissionRoles) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *PermissionRoles) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m 
*PermissionRoles) UnmarshalBinary(b []byte) error { + var res PermissionRoles + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// PermissionTenants resources applicable for tenant actions +// +// swagger:model PermissionTenants +type PermissionTenants struct { + + // string or regex. if a specific collection name, if left empty it will be ALL or * + Collection *string `json:"collection,omitempty"` + + // string or regex. if a specific tenant name, if left empty it will be ALL or * + Tenant *string `json:"tenant,omitempty"` +} + +// Validate validates this permission tenants +func (m *PermissionTenants) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this permission tenants based on context it is used +func (m *PermissionTenants) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *PermissionTenants) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *PermissionTenants) UnmarshalBinary(b []byte) error { + var res PermissionTenants + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// PermissionUsers resources applicable for user actions +// +// swagger:model PermissionUsers +type PermissionUsers struct { + + // string or regex. 
if a specific name, if left empty it will be ALL or * + Users *string `json:"users,omitempty"` +} + +// Validate validates this permission users +func (m *PermissionUsers) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this permission users based on context it is used +func (m *PermissionUsers) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *PermissionUsers) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *PermissionUsers) UnmarshalBinary(b []byte) error { + var res PermissionUsers + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/phone_number.go b/platform/dbops/binaries/weaviate-src/entities/models/phone_number.go new file mode 100644 index 0000000000000000000000000000000000000000..beb4166f90b7002575a9dcd43b8546775e4b856a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/phone_number.go @@ -0,0 +1,79 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// PhoneNumber phone number +// +// swagger:model PhoneNumber +type PhoneNumber struct { + + // Read-only. The numerical country code (e.g. 49) + CountryCode uint64 `json:"countryCode,omitempty"` + + // Optional. 
The ISO 3166-1 alpha-2 country code. This is used to figure out the correct countryCode and international format if only a national number (e.g. 0123 4567) is provided + DefaultCountry string `json:"defaultCountry,omitempty"` + + // The raw input as the phone number is present in your raw data set. It will be parsed into the standardized formats if valid. + Input string `json:"input,omitempty"` + + // Read-only. Parsed result in the international format (e.g. +49 123 ...) + InternationalFormatted string `json:"internationalFormatted,omitempty"` + + // Read-only. The numerical representation of the national part + National uint64 `json:"national,omitempty"` + + // Read-only. Parsed result in the national format (e.g. 0123 456789) + NationalFormatted string `json:"nationalFormatted,omitempty"` + + // Read-only. Indicates whether the parsed number is a valid phone number + Valid bool `json:"valid,omitempty"` +} + +// Validate validates this phone number +func (m *PhoneNumber) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this phone number based on context it is used +func (m *PhoneNumber) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *PhoneNumber) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *PhoneNumber) UnmarshalBinary(b []byte) error { + var res PhoneNumber + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/principal.go b/platform/dbops/binaries/weaviate-src/entities/models/principal.go new file mode 100644 index 0000000000000000000000000000000000000000..37e0e5c77b80d28f2bef1f9fecd867a91202e911 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/principal.go @@ -0,0 +1,117 @@ +// _ _ +// __ 
_____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// Principal principal +// +// swagger:model Principal +type Principal struct { + + // groups + Groups []string `json:"groups"` + + // user type + UserType UserTypeInput `json:"userType,omitempty"` + + // The username that was extracted either from the authentication information + Username string `json:"username,omitempty"` +} + +// Validate validates this principal +func (m *Principal) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateUserType(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *Principal) validateUserType(formats strfmt.Registry) error { + if swag.IsZero(m.UserType) { // not required + return nil + } + + if err := m.UserType.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("userType") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("userType") + } + return err + } + + return nil +} + +// ContextValidate validate this principal based on the context it is used +func (m *Principal) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateUserType(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *Principal) contextValidateUserType(ctx context.Context, formats strfmt.Registry) error { + + if err := m.UserType.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("userType") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("userType") + } + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *Principal) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Principal) UnmarshalBinary(b []byte) error { + var res Principal + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/property.go b/platform/dbops/binaries/weaviate-src/entities/models/property.go new file mode 100644 index 0000000000000000000000000000000000000000..6d55b025beaa8a027e3890aeca9d6ccac9a63118 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/property.go @@ -0,0 +1,224 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// Property property +// +// swagger:model Property +type Property struct { + + // Data type of the property (required). 
If it starts with a capital (for example Person), may be a reference to another type. + DataType []string `json:"dataType"` + + // Description of the property. + Description string `json:"description,omitempty"` + + // Whether to include this property in the filterable, Roaring Bitmap index. If `false`, this property cannot be used in `where` filters.

Note: Unrelated to vectorization behavior. + IndexFilterable *bool `json:"indexFilterable,omitempty"` + + // (Deprecated). Whether to include this property in the inverted index. If `false`, this property cannot be used in `where` filters, `bm25` or `hybrid` search.

Unrelated to vectorization behavior (deprecated as of v1.19; use indexFilterable or/and indexSearchable instead) + IndexInverted *bool `json:"indexInverted,omitempty"` + + // Whether to include this property in the filterable, range-based Roaring Bitmap index. Provides better performance for range queries compared to filterable index in large datasets. Applicable only to properties of data type int, number, date. + IndexRangeFilters *bool `json:"indexRangeFilters,omitempty"` + + // Optional. Should this property be indexed in the inverted index. Defaults to true. Applicable only to properties of data type text and text[]. If you choose false, you will not be able to use this property in bm25 or hybrid search. This property has no affect on vectorization decisions done by modules + IndexSearchable *bool `json:"indexSearchable,omitempty"` + + // Configuration specific to modules this Weaviate instance has installed + ModuleConfig interface{} `json:"moduleConfig,omitempty"` + + // The name of the property (required). Multiple words should be concatenated in camelCase, e.g. `nameOfAuthor`. + Name string `json:"name,omitempty"` + + // The properties of the nested object(s). Applies to object and object[] data types. + NestedProperties []*NestedProperty `json:"nestedProperties,omitempty"` + + // Determines tokenization of the property as separate words or whole field. Optional. Applies to text and text[] data types. Allowed values are `word` (default; splits on any non-alphanumerical, lowercases), `lowercase` (splits on white spaces, lowercases), `whitespace` (splits on white spaces), `field` (trims). 
Not supported for remaining data types + // Enum: [word lowercase whitespace field trigram gse kagome_kr kagome_ja gse_ch] + Tokenization string `json:"tokenization,omitempty"` +} + +// Validate validates this property +func (m *Property) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateNestedProperties(formats); err != nil { + res = append(res, err) + } + + if err := m.validateTokenization(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *Property) validateNestedProperties(formats strfmt.Registry) error { + if swag.IsZero(m.NestedProperties) { // not required + return nil + } + + for i := 0; i < len(m.NestedProperties); i++ { + if swag.IsZero(m.NestedProperties[i]) { // not required + continue + } + + if m.NestedProperties[i] != nil { + if err := m.NestedProperties[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("nestedProperties" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("nestedProperties" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +var propertyTypeTokenizationPropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["word","lowercase","whitespace","field","trigram","gse","kagome_kr","kagome_ja","gse_ch"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + propertyTypeTokenizationPropEnum = append(propertyTypeTokenizationPropEnum, v) + } +} + +const ( + + // PropertyTokenizationWord captures enum value "word" + PropertyTokenizationWord string = "word" + + // PropertyTokenizationLowercase captures enum value "lowercase" + PropertyTokenizationLowercase string = "lowercase" + + // PropertyTokenizationWhitespace captures enum value "whitespace" + PropertyTokenizationWhitespace string = "whitespace" + + // PropertyTokenizationField captures enum value "field" + PropertyTokenizationField string = "field" + + // PropertyTokenizationTrigram captures enum value "trigram" + PropertyTokenizationTrigram string = "trigram" + + // PropertyTokenizationGse captures enum value "gse" + PropertyTokenizationGse string = "gse" + + // PropertyTokenizationKagomeKr captures enum value "kagome_kr" + PropertyTokenizationKagomeKr string = "kagome_kr" + + // PropertyTokenizationKagomeJa captures enum value "kagome_ja" + PropertyTokenizationKagomeJa string = "kagome_ja" + + // PropertyTokenizationGseCh captures enum value "gse_ch" + PropertyTokenizationGseCh string = "gse_ch" +) + +// prop value enum +func (m *Property) validateTokenizationEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, propertyTypeTokenizationPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *Property) validateTokenization(formats strfmt.Registry) error { + if swag.IsZero(m.Tokenization) { // not required + return nil + } + + // value enum + if err := m.validateTokenizationEnum("tokenization", "body", m.Tokenization); err != nil { + return err + } + + 
return nil +} + +// ContextValidate validate this property based on the context it is used +func (m *Property) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateNestedProperties(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *Property) contextValidateNestedProperties(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.NestedProperties); i++ { + + if m.NestedProperties[i] != nil { + if err := m.NestedProperties[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("nestedProperties" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("nestedProperties" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *Property) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Property) UnmarshalBinary(b []byte) error { + var res Property + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/property_schema.go b/platform/dbops/binaries/weaviate-src/entities/models/property_schema.go new file mode 100644 index 0000000000000000000000000000000000000000..433ac3facaea2970d7e899ae6008f552fa74e0c6 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/property_schema.go @@ -0,0 +1,22 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// PropertySchema Names and values of an individual property. A returned response may also contain additional metadata, such as from classification or feature projection. +// +// swagger:model PropertySchema +type PropertySchema interface{} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/raft_statistics.go b/platform/dbops/binaries/weaviate-src/entities/models/raft_statistics.go new file mode 100644 index 0000000000000000000000000000000000000000..f87158cc9726308dcd44f0aeb839fcf9d1ed7e3c --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/raft_statistics.go @@ -0,0 +1,112 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// RaftStatistics The definition of Raft statistics. 
+// +// swagger:model RaftStatistics +type RaftStatistics struct { + + // applied index + AppliedIndex string `json:"appliedIndex,omitempty"` + + // commit index + CommitIndex string `json:"commitIndex,omitempty"` + + // fsm pending + FsmPending string `json:"fsmPending,omitempty"` + + // last contact + LastContact string `json:"lastContact,omitempty"` + + // last log index + LastLogIndex string `json:"lastLogIndex,omitempty"` + + // last log term + LastLogTerm string `json:"lastLogTerm,omitempty"` + + // last snapshot index + LastSnapshotIndex string `json:"lastSnapshotIndex,omitempty"` + + // last snapshot term + LastSnapshotTerm string `json:"lastSnapshotTerm,omitempty"` + + // Weaviate Raft nodes. + LatestConfiguration interface{} `json:"latestConfiguration,omitempty"` + + // latest configuration index + LatestConfigurationIndex string `json:"latestConfigurationIndex,omitempty"` + + // num peers + NumPeers string `json:"numPeers,omitempty"` + + // protocol version + ProtocolVersion string `json:"protocolVersion,omitempty"` + + // protocol version max + ProtocolVersionMax string `json:"protocolVersionMax,omitempty"` + + // protocol version min + ProtocolVersionMin string `json:"protocolVersionMin,omitempty"` + + // snapshot version max + SnapshotVersionMax string `json:"snapshotVersionMax,omitempty"` + + // snapshot version min + SnapshotVersionMin string `json:"snapshotVersionMin,omitempty"` + + // state + State string `json:"state,omitempty"` + + // term + Term string `json:"term,omitempty"` +} + +// Validate validates this raft statistics +func (m *RaftStatistics) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this raft statistics based on context it is used +func (m *RaftStatistics) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *RaftStatistics) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return 
swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *RaftStatistics) UnmarshalBinary(b []byte) error { + var res RaftStatistics + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/reference_meta_classification.go b/platform/dbops/binaries/weaviate-src/entities/models/reference_meta_classification.go new file mode 100644 index 0000000000000000000000000000000000000000..7c632d808e51838ac20a6a30898b70cd769bfe0a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/reference_meta_classification.go @@ -0,0 +1,88 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// ReferenceMetaClassification This meta field contains additional info about the classified reference property +// +// swagger:model ReferenceMetaClassification +type ReferenceMetaClassification struct { + + // The lowest distance of a neighbor in the losing group. Optional. 
If k equals the size of the winning group, there is no losing group + ClosestLosingDistance *float64 `json:"closestLosingDistance,omitempty"` + + // The lowest distance of any neighbor, regardless of whether they were in the winning or losing group + ClosestOverallDistance float64 `json:"closestOverallDistance,omitempty"` + + // Closest distance of a neighbor from the winning group + ClosestWinningDistance float64 `json:"closestWinningDistance,omitempty"` + + // size of the losing group, can be 0 if the winning group size equals k + LosingCount int64 `json:"losingCount,omitempty"` + + // deprecated - do not use, to be removed in 0.23.0 + LosingDistance *float64 `json:"losingDistance,omitempty"` + + // Mean distance of all neighbors from the losing group. Optional. If k equals the size of the winning group, there is no losing group. + MeanLosingDistance *float64 `json:"meanLosingDistance,omitempty"` + + // Mean distance of all neighbors from the winning group + MeanWinningDistance float64 `json:"meanWinningDistance,omitempty"` + + // overall neighbors checked as part of the classification. 
In most cases this will equal k, but could be lower than k - for example if not enough data was present + OverallCount int64 `json:"overallCount,omitempty"` + + // size of the winning group, a number between 1..k + WinningCount int64 `json:"winningCount,omitempty"` + + // deprecated - do not use, to be removed in 0.23.0 + WinningDistance float64 `json:"winningDistance,omitempty"` +} + +// Validate validates this reference meta classification +func (m *ReferenceMetaClassification) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this reference meta classification based on context it is used +func (m *ReferenceMetaClassification) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *ReferenceMetaClassification) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ReferenceMetaClassification) UnmarshalBinary(b []byte) error { + var res ReferenceMetaClassification + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/replication_config.go b/platform/dbops/binaries/weaviate-src/entities/models/replication_config.go new file mode 100644 index 0000000000000000000000000000000000000000..6fd6c9a032aa9a16f20f2372a5fb2a7837c4a67c --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/replication_config.go @@ -0,0 +1,125 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// ReplicationConfig Configure how replication is executed in a cluster +// +// swagger:model ReplicationConfig +type ReplicationConfig struct { + + // Enable asynchronous replication (default: false). + AsyncEnabled bool `json:"asyncEnabled"` + + // Conflict resolution strategy for deleted objects. + // Enum: [NoAutomatedResolution DeleteOnConflict TimeBasedResolution] + DeletionStrategy string `json:"deletionStrategy,omitempty"` + + // Number of times a class is replicated (default: 1). + Factor int64 `json:"factor,omitempty"` +} + +// Validate validates this replication config +func (m *ReplicationConfig) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateDeletionStrategy(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +var replicationConfigTypeDeletionStrategyPropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["NoAutomatedResolution","DeleteOnConflict","TimeBasedResolution"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + replicationConfigTypeDeletionStrategyPropEnum = append(replicationConfigTypeDeletionStrategyPropEnum, v) + } +} + +const ( + + // ReplicationConfigDeletionStrategyNoAutomatedResolution captures enum value "NoAutomatedResolution" + ReplicationConfigDeletionStrategyNoAutomatedResolution string = "NoAutomatedResolution" + + // ReplicationConfigDeletionStrategyDeleteOnConflict captures enum value "DeleteOnConflict" + ReplicationConfigDeletionStrategyDeleteOnConflict string = "DeleteOnConflict" + + // ReplicationConfigDeletionStrategyTimeBasedResolution captures enum value "TimeBasedResolution" + ReplicationConfigDeletionStrategyTimeBasedResolution string = "TimeBasedResolution" +) + +// prop value enum +func (m *ReplicationConfig) validateDeletionStrategyEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, replicationConfigTypeDeletionStrategyPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *ReplicationConfig) validateDeletionStrategy(formats strfmt.Registry) error { + if swag.IsZero(m.DeletionStrategy) { // not required + return nil + } + + // value enum + if err := m.validateDeletionStrategyEnum("deletionStrategy", "body", m.DeletionStrategy); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this replication config based on context it is used +func (m *ReplicationConfig) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *ReplicationConfig) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation 
+func (m *ReplicationConfig) UnmarshalBinary(b []byte) error { + var res ReplicationConfig + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/replication_delete_replica_request.go b/platform/dbops/binaries/weaviate-src/entities/models/replication_delete_replica_request.go new file mode 100644 index 0000000000000000000000000000000000000000..ebfee0ac2023365af7e6e00220a1005b9f7c6fda --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/replication_delete_replica_request.go @@ -0,0 +1,116 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// ReplicationDeleteReplicaRequest Specifies the parameters required to permanently delete a specific shard replica from a particular node. This action will remove the replica's data from the node. +// +// swagger:model ReplicationDeleteReplicaRequest +type ReplicationDeleteReplicaRequest struct { + + // The name of the collection to which the shard replica belongs. + // Required: true + Collection *string `json:"collection"` + + // The name of the Weaviate node from which the shard replica will be deleted. + // Required: true + Node *string `json:"node"` + + // The ID of the shard whose replica is to be deleted. 
+ // Required: true + Shard *string `json:"shard"` +} + +// Validate validates this replication delete replica request +func (m *ReplicationDeleteReplicaRequest) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateCollection(formats); err != nil { + res = append(res, err) + } + + if err := m.validateNode(formats); err != nil { + res = append(res, err) + } + + if err := m.validateShard(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *ReplicationDeleteReplicaRequest) validateCollection(formats strfmt.Registry) error { + + if err := validate.Required("collection", "body", m.Collection); err != nil { + return err + } + + return nil +} + +func (m *ReplicationDeleteReplicaRequest) validateNode(formats strfmt.Registry) error { + + if err := validate.Required("node", "body", m.Node); err != nil { + return err + } + + return nil +} + +func (m *ReplicationDeleteReplicaRequest) validateShard(formats strfmt.Registry) error { + + if err := validate.Required("shard", "body", m.Shard); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this replication delete replica request based on context it is used +func (m *ReplicationDeleteReplicaRequest) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *ReplicationDeleteReplicaRequest) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ReplicationDeleteReplicaRequest) UnmarshalBinary(b []byte) error { + var res ReplicationDeleteReplicaRequest + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/replication_disable_replica_request.go 
b/platform/dbops/binaries/weaviate-src/entities/models/replication_disable_replica_request.go new file mode 100644 index 0000000000000000000000000000000000000000..fa47c4ba71877ccd306ce479f6c7b4ea9fa0fc11 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/replication_disable_replica_request.go @@ -0,0 +1,116 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// ReplicationDisableReplicaRequest Specifies the parameters required to mark a specific shard replica as inactive (soft-delete) on a particular node. This action typically prevents the replica from serving requests but does not immediately remove its data. +// +// swagger:model ReplicationDisableReplicaRequest +type ReplicationDisableReplicaRequest struct { + + // The name of the collection to which the shard replica belongs. + // Required: true + Collection *string `json:"collection"` + + // The name of the Weaviate node hosting the shard replica that is to be disabled. + // Required: true + Node *string `json:"node"` + + // The ID of the shard whose replica is to be disabled. 
+ // Required: true + Shard *string `json:"shard"` +} + +// Validate validates this replication disable replica request +func (m *ReplicationDisableReplicaRequest) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateCollection(formats); err != nil { + res = append(res, err) + } + + if err := m.validateNode(formats); err != nil { + res = append(res, err) + } + + if err := m.validateShard(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *ReplicationDisableReplicaRequest) validateCollection(formats strfmt.Registry) error { + + if err := validate.Required("collection", "body", m.Collection); err != nil { + return err + } + + return nil +} + +func (m *ReplicationDisableReplicaRequest) validateNode(formats strfmt.Registry) error { + + if err := validate.Required("node", "body", m.Node); err != nil { + return err + } + + return nil +} + +func (m *ReplicationDisableReplicaRequest) validateShard(formats strfmt.Registry) error { + + if err := validate.Required("shard", "body", m.Shard); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this replication disable replica request based on context it is used +func (m *ReplicationDisableReplicaRequest) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *ReplicationDisableReplicaRequest) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ReplicationDisableReplicaRequest) UnmarshalBinary(b []byte) error { + var res ReplicationDisableReplicaRequest + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/replication_replicate_details_replica_response.go 
b/platform/dbops/binaries/weaviate-src/entities/models/replication_replicate_details_replica_response.go new file mode 100644 index 0000000000000000000000000000000000000000..cc0f32605f3ca7246faa701f61ffe9479771d08c --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/replication_replicate_details_replica_response.go @@ -0,0 +1,331 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// ReplicationReplicateDetailsReplicaResponse Provides a comprehensive overview of a specific replication operation, detailing its unique ID, the involved collection, shard, source and target nodes, transfer type, current status, and optionally, its status history. +// +// swagger:model ReplicationReplicateDetailsReplicaResponse +type ReplicationReplicateDetailsReplicaResponse struct { + + // The name of the collection to which the shard being replicated belongs. + // Required: true + Collection *string `json:"collection"` + + // The unique identifier (ID) of this specific replication operation. + // Required: true + // Format: uuid + ID *strfmt.UUID `json:"id"` + + // Whether the replica operation is scheduled for cancellation. + ScheduledForCancel bool `json:"scheduledForCancel,omitempty"` + + // Whether the replica operation is scheduled for deletion. 
+ ScheduledForDelete bool `json:"scheduledForDelete,omitempty"` + + // The name of the shard involved in this replication operation. + // Required: true + Shard *string `json:"shard"` + + // The identifier of the node from which the replica is being moved or copied (the source node). + // Required: true + SourceNode *string `json:"sourceNode"` + + // An object detailing the current operational state of the replica movement and any errors encountered. + // Required: true + Status *ReplicationReplicateDetailsReplicaStatus `json:"status"` + + // An array detailing the historical sequence of statuses the replication operation has transitioned through, if requested and available. + StatusHistory []*ReplicationReplicateDetailsReplicaStatus `json:"statusHistory"` + + // The identifier of the node to which the replica is being moved or copied (the target node). + // Required: true + TargetNode *string `json:"targetNode"` + + // Indicates whether the operation is a 'COPY' (source replica remains) or a 'MOVE' (source replica is removed after successful transfer). + // Required: true + // Enum: [COPY MOVE] + Type *string `json:"type"` + + // Whether the replica operation is uncancelable. + Uncancelable bool `json:"uncancelable,omitempty"` + + // The UNIX timestamp in ms when the replication operation was initiated. This is an approximate time and so should not be used for precise timing. 
+ WhenStartedUnixMs int64 `json:"whenStartedUnixMs,omitempty"` +} + +// Validate validates this replication replicate details replica response +func (m *ReplicationReplicateDetailsReplicaResponse) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateCollection(formats); err != nil { + res = append(res, err) + } + + if err := m.validateID(formats); err != nil { + res = append(res, err) + } + + if err := m.validateShard(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSourceNode(formats); err != nil { + res = append(res, err) + } + + if err := m.validateStatus(formats); err != nil { + res = append(res, err) + } + + if err := m.validateStatusHistory(formats); err != nil { + res = append(res, err) + } + + if err := m.validateTargetNode(formats); err != nil { + res = append(res, err) + } + + if err := m.validateType(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *ReplicationReplicateDetailsReplicaResponse) validateCollection(formats strfmt.Registry) error { + + if err := validate.Required("collection", "body", m.Collection); err != nil { + return err + } + + return nil +} + +func (m *ReplicationReplicateDetailsReplicaResponse) validateID(formats strfmt.Registry) error { + + if err := validate.Required("id", "body", m.ID); err != nil { + return err + } + + if err := validate.FormatOf("id", "body", "uuid", m.ID.String(), formats); err != nil { + return err + } + + return nil +} + +func (m *ReplicationReplicateDetailsReplicaResponse) validateShard(formats strfmt.Registry) error { + + if err := validate.Required("shard", "body", m.Shard); err != nil { + return err + } + + return nil +} + +func (m *ReplicationReplicateDetailsReplicaResponse) validateSourceNode(formats strfmt.Registry) error { + + if err := validate.Required("sourceNode", "body", m.SourceNode); err != nil { + return err + } + + return nil +} + +func (m *ReplicationReplicateDetailsReplicaResponse) validateStatus(formats strfmt.Registry) error { + + if err := validate.Required("status", "body", m.Status); err != nil { + return err + } + + if m.Status != nil { + if err := m.Status.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("status") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("status") + } + return err + } + } + + return nil +} + +func (m *ReplicationReplicateDetailsReplicaResponse) validateStatusHistory(formats strfmt.Registry) error { + if swag.IsZero(m.StatusHistory) { // not required + return nil + } + + for i := 0; i < len(m.StatusHistory); i++ { + if swag.IsZero(m.StatusHistory[i]) { // not required + continue + } + + if m.StatusHistory[i] != nil { + if err := m.StatusHistory[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("statusHistory" + "." 
+ strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("statusHistory" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *ReplicationReplicateDetailsReplicaResponse) validateTargetNode(formats strfmt.Registry) error { + + if err := validate.Required("targetNode", "body", m.TargetNode); err != nil { + return err + } + + return nil +} + +var replicationReplicateDetailsReplicaResponseTypeTypePropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["COPY","MOVE"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + replicationReplicateDetailsReplicaResponseTypeTypePropEnum = append(replicationReplicateDetailsReplicaResponseTypeTypePropEnum, v) + } +} + +const ( + + // ReplicationReplicateDetailsReplicaResponseTypeCOPY captures enum value "COPY" + ReplicationReplicateDetailsReplicaResponseTypeCOPY string = "COPY" + + // ReplicationReplicateDetailsReplicaResponseTypeMOVE captures enum value "MOVE" + ReplicationReplicateDetailsReplicaResponseTypeMOVE string = "MOVE" +) + +// prop value enum +func (m *ReplicationReplicateDetailsReplicaResponse) validateTypeEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, replicationReplicateDetailsReplicaResponseTypeTypePropEnum, true); err != nil { + return err + } + return nil +} + +func (m *ReplicationReplicateDetailsReplicaResponse) validateType(formats strfmt.Registry) error { + + if err := validate.Required("type", "body", m.Type); err != nil { + return err + } + + // value enum + if err := m.validateTypeEnum("type", "body", *m.Type); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this replication replicate details replica response based on the context it is used +func (m *ReplicationReplicateDetailsReplicaResponse) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := 
m.contextValidateStatus(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateStatusHistory(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *ReplicationReplicateDetailsReplicaResponse) contextValidateStatus(ctx context.Context, formats strfmt.Registry) error { + + if m.Status != nil { + if err := m.Status.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("status") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("status") + } + return err + } + } + + return nil +} + +func (m *ReplicationReplicateDetailsReplicaResponse) contextValidateStatusHistory(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.StatusHistory); i++ { + + if m.StatusHistory[i] != nil { + if err := m.StatusHistory[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("statusHistory" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("statusHistory" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *ReplicationReplicateDetailsReplicaResponse) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ReplicationReplicateDetailsReplicaResponse) UnmarshalBinary(b []byte) error { + var res ReplicationReplicateDetailsReplicaResponse + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/replication_replicate_details_replica_status.go b/platform/dbops/binaries/weaviate-src/entities/models/replication_replicate_details_replica_status.go new file mode 100644 index 0000000000000000000000000000000000000000..bcd385983e1b5c193b0c7315c8363b91fc950eb1 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/replication_replicate_details_replica_status.go @@ -0,0 +1,194 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// ReplicationReplicateDetailsReplicaStatus Represents the current or historical status of a shard replica involved in a replication operation, including its operational state and any associated errors. 
+// +// swagger:model ReplicationReplicateDetailsReplicaStatus +type ReplicationReplicateDetailsReplicaStatus struct { + + // A list of error messages encountered by this replica during the replication operation, if any. + Errors []*ReplicationReplicateDetailsReplicaStatusError `json:"errors"` + + // The current operational state of the replica during the replication process. + // Enum: [REGISTERED HYDRATING FINALIZING DEHYDRATING READY CANCELLED] + State string `json:"state,omitempty"` + + // The UNIX timestamp in ms when this state was first entered. This is an approximate time and so should not be used for precise timing. + WhenStartedUnixMs int64 `json:"whenStartedUnixMs,omitempty"` +} + +// Validate validates this replication replicate details replica status +func (m *ReplicationReplicateDetailsReplicaStatus) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateErrors(formats); err != nil { + res = append(res, err) + } + + if err := m.validateState(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *ReplicationReplicateDetailsReplicaStatus) validateErrors(formats strfmt.Registry) error { + if swag.IsZero(m.Errors) { // not required + return nil + } + + for i := 0; i < len(m.Errors); i++ { + if swag.IsZero(m.Errors[i]) { // not required + continue + } + + if m.Errors[i] != nil { + if err := m.Errors[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("errors" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("errors" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +var replicationReplicateDetailsReplicaStatusTypeStatePropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["REGISTERED","HYDRATING","FINALIZING","DEHYDRATING","READY","CANCELLED"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + replicationReplicateDetailsReplicaStatusTypeStatePropEnum = append(replicationReplicateDetailsReplicaStatusTypeStatePropEnum, v) + } +} + +const ( + + // ReplicationReplicateDetailsReplicaStatusStateREGISTERED captures enum value "REGISTERED" + ReplicationReplicateDetailsReplicaStatusStateREGISTERED string = "REGISTERED" + + // ReplicationReplicateDetailsReplicaStatusStateHYDRATING captures enum value "HYDRATING" + ReplicationReplicateDetailsReplicaStatusStateHYDRATING string = "HYDRATING" + + // ReplicationReplicateDetailsReplicaStatusStateFINALIZING captures enum value "FINALIZING" + ReplicationReplicateDetailsReplicaStatusStateFINALIZING string = "FINALIZING" + + // ReplicationReplicateDetailsReplicaStatusStateDEHYDRATING captures enum value "DEHYDRATING" + ReplicationReplicateDetailsReplicaStatusStateDEHYDRATING string = "DEHYDRATING" + + // ReplicationReplicateDetailsReplicaStatusStateREADY captures enum value "READY" + ReplicationReplicateDetailsReplicaStatusStateREADY string = "READY" + + // ReplicationReplicateDetailsReplicaStatusStateCANCELLED captures enum value "CANCELLED" + ReplicationReplicateDetailsReplicaStatusStateCANCELLED string = "CANCELLED" +) + +// prop value enum +func (m *ReplicationReplicateDetailsReplicaStatus) validateStateEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, replicationReplicateDetailsReplicaStatusTypeStatePropEnum, true); err != nil { + return err + } + return nil +} + +func (m *ReplicationReplicateDetailsReplicaStatus) validateState(formats strfmt.Registry) error { + if swag.IsZero(m.State) { // not required + return 
nil + } + + // value enum + if err := m.validateStateEnum("state", "body", m.State); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this replication replicate details replica status based on the context it is used +func (m *ReplicationReplicateDetailsReplicaStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateErrors(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *ReplicationReplicateDetailsReplicaStatus) contextValidateErrors(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.Errors); i++ { + + if m.Errors[i] != nil { + if err := m.Errors[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("errors" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("errors" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *ReplicationReplicateDetailsReplicaStatus) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ReplicationReplicateDetailsReplicaStatus) UnmarshalBinary(b []byte) error { + var res ReplicationReplicateDetailsReplicaStatus + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/replication_replicate_details_replica_status_error.go b/platform/dbops/binaries/weaviate-src/entities/models/replication_replicate_details_replica_status_error.go new file mode 100644 index 0000000000000000000000000000000000000000..d1daacbd3c00896a02ad11c72baa6314b9683344 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/replication_replicate_details_replica_status_error.go @@ -0,0 +1,64 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// ReplicationReplicateDetailsReplicaStatusError Represents an error encountered during a replication operation, including its timestamp and a human-readable message. +// +// swagger:model ReplicationReplicateDetailsReplicaStatusError +type ReplicationReplicateDetailsReplicaStatusError struct { + + // A human-readable message describing the error. 
+ Message string `json:"message,omitempty"` + + // The unix timestamp in ms when the error occurred. This is an approximate time and so should not be used for precise timing. + WhenErroredUnixMs int64 `json:"whenErroredUnixMs,omitempty"` +} + +// Validate validates this replication replicate details replica status error +func (m *ReplicationReplicateDetailsReplicaStatusError) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this replication replicate details replica status error based on context it is used +func (m *ReplicationReplicateDetailsReplicaStatusError) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *ReplicationReplicateDetailsReplicaStatusError) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ReplicationReplicateDetailsReplicaStatusError) UnmarshalBinary(b []byte) error { + var res ReplicationReplicateDetailsReplicaStatusError + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/replication_replicate_force_delete_request.go b/platform/dbops/binaries/weaviate-src/entities/models/replication_replicate_force_delete_request.go new file mode 100644 index 0000000000000000000000000000000000000000..0fddf4f294603e6ef2b264e5a7a413461d1fd7fe --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/replication_replicate_force_delete_request.go @@ -0,0 +1,97 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. 
+ +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// ReplicationReplicateForceDeleteRequest Specifies the parameters available when force deleting replication operations. +// +// swagger:model ReplicationReplicateForceDeleteRequest +type ReplicationReplicateForceDeleteRequest struct { + + // The name of the collection to which the shard being replicated belongs. + Collection string `json:"collection,omitempty"` + + // If true, the operation will not actually delete anything but will return the expected outcome of the deletion. + DryRun *bool `json:"dryRun,omitempty"` + + // The unique identifier (ID) of the replication operation to be forcefully deleted. + // Format: uuid + ID strfmt.UUID `json:"id,omitempty"` + + // The name of the target node where the replication operations are registered. + Node string `json:"node,omitempty"` + + // The identifier of the shard involved in the replication operations. + Shard string `json:"shard,omitempty"` +} + +// Validate validates this replication replicate force delete request +func (m *ReplicationReplicateForceDeleteRequest) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateID(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *ReplicationReplicateForceDeleteRequest) validateID(formats strfmt.Registry) error { + if swag.IsZero(m.ID) { // not required + return nil + } + + if err := validate.FormatOf("id", "body", "uuid", m.ID.String(), formats); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this replication replicate force delete request based on context it is used +func (m *ReplicationReplicateForceDeleteRequest) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *ReplicationReplicateForceDeleteRequest) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ReplicationReplicateForceDeleteRequest) UnmarshalBinary(b []byte) error { + var res ReplicationReplicateForceDeleteRequest + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/replication_replicate_force_delete_response.go b/platform/dbops/binaries/weaviate-src/entities/models/replication_replicate_force_delete_response.go new file mode 100644 index 0000000000000000000000000000000000000000..40589e9c2d507fb6aba5af849c268c43aa1ee189 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/replication_replicate_force_delete_response.go @@ -0,0 +1,92 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// ReplicationReplicateForceDeleteResponse Provides the UUIDs that were successfully force deleted as part of the replication operation. If dryRun is true, this will return the expected outcome without actually deleting anything. +// +// swagger:model ReplicationReplicateForceDeleteResponse +type ReplicationReplicateForceDeleteResponse struct { + + // The unique identifiers (IDs) of the replication operations that were forcefully deleted. + Deleted []strfmt.UUID `json:"deleted"` + + // Indicates whether the operation was a dry run (true) or an actual deletion (false). + DryRun bool `json:"dryRun,omitempty"` +} + +// Validate validates this replication replicate force delete response +func (m *ReplicationReplicateForceDeleteResponse) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateDeleted(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *ReplicationReplicateForceDeleteResponse) validateDeleted(formats strfmt.Registry) error { + if swag.IsZero(m.Deleted) { // not required + return nil + } + + for i := 0; i < len(m.Deleted); i++ { + + if err := validate.FormatOf("deleted"+"."+strconv.Itoa(i), "body", "uuid", m.Deleted[i].String(), formats); err != nil { + return err + } + + } + + return nil +} + +// ContextValidate validates this replication replicate force delete response based on context it is used +func (m *ReplicationReplicateForceDeleteResponse) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *ReplicationReplicateForceDeleteResponse) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ReplicationReplicateForceDeleteResponse) UnmarshalBinary(b []byte) error { + var res ReplicationReplicateForceDeleteResponse + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/replication_replicate_replica_request.go b/platform/dbops/binaries/weaviate-src/entities/models/replication_replicate_replica_request.go new file mode 100644 index 0000000000000000000000000000000000000000..2e504fccc31a20a4f69b3557d6418cd15f489ec0 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/replication_replicate_replica_request.go @@ -0,0 +1,184 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// ReplicationReplicateReplicaRequest Specifies the parameters required to initiate a shard replica movement operation between two nodes for a given collection and shard. This request defines the source and target node, the collection and type of transfer. +// +// swagger:model ReplicationReplicateReplicaRequest +type ReplicationReplicateReplicaRequest struct { + + // The name of the collection to which the target shard belongs. + // Required: true + Collection *string `json:"collection"` + + // The name of the shard whose replica is to be moved or copied. + // Required: true + Shard *string `json:"shard"` + + // The name of the Weaviate node currently hosting the shard replica that needs to be moved or copied. + // Required: true + SourceNode *string `json:"sourceNode"` + + // The name of the Weaviate node where the new shard replica will be created as part of the movement or copy operation. + // Required: true + TargetNode *string `json:"targetNode"` + + // Specifies the type of replication operation to perform. 'COPY' creates a new replica on the target node while keeping the source replica. 'MOVE' creates a new replica on the target node and then removes the source replica upon successful completion. Defaults to 'COPY' if omitted. 
+ // Enum: [COPY MOVE] + Type *string `json:"type,omitempty"` +} + +// Validate validates this replication replicate replica request +func (m *ReplicationReplicateReplicaRequest) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateCollection(formats); err != nil { + res = append(res, err) + } + + if err := m.validateShard(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSourceNode(formats); err != nil { + res = append(res, err) + } + + if err := m.validateTargetNode(formats); err != nil { + res = append(res, err) + } + + if err := m.validateType(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *ReplicationReplicateReplicaRequest) validateCollection(formats strfmt.Registry) error { + + if err := validate.Required("collection", "body", m.Collection); err != nil { + return err + } + + return nil +} + +func (m *ReplicationReplicateReplicaRequest) validateShard(formats strfmt.Registry) error { + + if err := validate.Required("shard", "body", m.Shard); err != nil { + return err + } + + return nil +} + +func (m *ReplicationReplicateReplicaRequest) validateSourceNode(formats strfmt.Registry) error { + + if err := validate.Required("sourceNode", "body", m.SourceNode); err != nil { + return err + } + + return nil +} + +func (m *ReplicationReplicateReplicaRequest) validateTargetNode(formats strfmt.Registry) error { + + if err := validate.Required("targetNode", "body", m.TargetNode); err != nil { + return err + } + + return nil +} + +var replicationReplicateReplicaRequestTypeTypePropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["COPY","MOVE"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + replicationReplicateReplicaRequestTypeTypePropEnum = append(replicationReplicateReplicaRequestTypeTypePropEnum, v) + } +} + +const ( + + // 
ReplicationReplicateReplicaRequestTypeCOPY captures enum value "COPY" + ReplicationReplicateReplicaRequestTypeCOPY string = "COPY" + + // ReplicationReplicateReplicaRequestTypeMOVE captures enum value "MOVE" + ReplicationReplicateReplicaRequestTypeMOVE string = "MOVE" +) + +// prop value enum +func (m *ReplicationReplicateReplicaRequest) validateTypeEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, replicationReplicateReplicaRequestTypeTypePropEnum, true); err != nil { + return err + } + return nil +} + +func (m *ReplicationReplicateReplicaRequest) validateType(formats strfmt.Registry) error { + if swag.IsZero(m.Type) { // not required + return nil + } + + // value enum + if err := m.validateTypeEnum("type", "body", *m.Type); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this replication replicate replica request based on context it is used +func (m *ReplicationReplicateReplicaRequest) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *ReplicationReplicateReplicaRequest) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ReplicationReplicateReplicaRequest) UnmarshalBinary(b []byte) error { + var res ReplicationReplicateReplicaRequest + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/replication_replicate_replica_response.go b/platform/dbops/binaries/weaviate-src/entities/models/replication_replicate_replica_response.go new file mode 100644 index 0000000000000000000000000000000000000000..2d6537c3648b2fc3758a886db3e9452b2cf94ad3 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/replication_replicate_replica_response.go @@ -0,0 +1,87 @@ +// _ _ +// __ _____ 
__ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// ReplicationReplicateReplicaResponse Contains the unique identifier for a successfully initiated asynchronous replica movement operation. This ID can be used to track the progress of the operation. +// +// swagger:model ReplicationReplicateReplicaResponse +type ReplicationReplicateReplicaResponse struct { + + // The unique identifier (ID) assigned to the registered replication operation. + // Required: true + // Format: uuid + ID *strfmt.UUID `json:"id"` +} + +// Validate validates this replication replicate replica response +func (m *ReplicationReplicateReplicaResponse) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateID(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *ReplicationReplicateReplicaResponse) validateID(formats strfmt.Registry) error { + + if err := validate.Required("id", "body", m.ID); err != nil { + return err + } + + if err := validate.FormatOf("id", "body", "uuid", m.ID.String(), formats); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this replication replicate replica response based on context it is used +func (m *ReplicationReplicateReplicaResponse) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *ReplicationReplicateReplicaResponse) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ReplicationReplicateReplicaResponse) UnmarshalBinary(b []byte) error { + var res ReplicationReplicateReplicaResponse + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/replication_shard_replicas.go b/platform/dbops/binaries/weaviate-src/entities/models/replication_shard_replicas.go new file mode 100644 index 0000000000000000000000000000000000000000..0ff417b18310296030776b78f9e450368e9f528c --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/replication_shard_replicas.go @@ -0,0 +1,64 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// ReplicationShardReplicas Represents a shard and lists the nodes that currently host its replicas. +// +// swagger:model ReplicationShardReplicas +type ReplicationShardReplicas struct { + + // replicas + Replicas []string `json:"replicas"` + + // shard + Shard string `json:"shard,omitempty"` +} + +// Validate validates this replication shard replicas +func (m *ReplicationShardReplicas) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this replication shard replicas based on context it is used +func (m *ReplicationShardReplicas) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *ReplicationShardReplicas) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ReplicationShardReplicas) UnmarshalBinary(b []byte) error { + var res ReplicationShardReplicas + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/replication_sharding_state.go b/platform/dbops/binaries/weaviate-src/entities/models/replication_sharding_state.go new file mode 100644 index 0000000000000000000000000000000000000000..0147ba7ac0ccd38a4802c416831f0d7066276a03 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/replication_sharding_state.go @@ -0,0 +1,130 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// ReplicationShardingState Details the sharding layout for a specific collection, mapping each shard to its set of replicas across the cluster. +// +// swagger:model ReplicationShardingState +type ReplicationShardingState struct { + + // The name of the collection. + Collection string `json:"collection,omitempty"` + + // An array detailing each shard within the collection and the nodes hosting its replicas. + Shards []*ReplicationShardReplicas `json:"shards"` +} + +// Validate validates this replication sharding state +func (m *ReplicationShardingState) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateShards(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *ReplicationShardingState) validateShards(formats strfmt.Registry) error { + if swag.IsZero(m.Shards) { // not required + return nil + } + + for i := 0; i < len(m.Shards); i++ { + if swag.IsZero(m.Shards[i]) { // not required + continue + } + + if m.Shards[i] != nil { + if err := m.Shards[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("shards" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("shards" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// ContextValidate validate this replication sharding state based on the context it is used +func (m *ReplicationShardingState) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateShards(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *ReplicationShardingState) contextValidateShards(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.Shards); i++ { + + if m.Shards[i] != nil { + if err := m.Shards[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("shards" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("shards" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *ReplicationShardingState) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ReplicationShardingState) UnmarshalBinary(b []byte) error { + var res ReplicationShardingState + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/replication_sharding_state_response.go b/platform/dbops/binaries/weaviate-src/entities/models/replication_sharding_state_response.go new file mode 100644 index 0000000000000000000000000000000000000000..14b9c40ae840924e35fc3b039e2112ae08505ffb --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/replication_sharding_state_response.go @@ -0,0 +1,115 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// 
\_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// ReplicationShardingStateResponse Provides the detailed sharding state for one or more collections, including the distribution of shards and their replicas across the cluster nodes. +// +// swagger:model ReplicationShardingStateResponse +type ReplicationShardingStateResponse struct { + + // sharding state + ShardingState *ReplicationShardingState `json:"shardingState,omitempty"` +} + +// Validate validates this replication sharding state response +func (m *ReplicationShardingStateResponse) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateShardingState(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *ReplicationShardingStateResponse) validateShardingState(formats strfmt.Registry) error { + if swag.IsZero(m.ShardingState) { // not required + return nil + } + + if m.ShardingState != nil { + if err := m.ShardingState.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("shardingState") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("shardingState") + } + return err + } + } + + return nil +} + +// ContextValidate validate this replication sharding state response based on the context it is used +func (m *ReplicationShardingStateResponse) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateShardingState(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *ReplicationShardingStateResponse) contextValidateShardingState(ctx context.Context, formats strfmt.Registry) error { + + if m.ShardingState != nil { + if err := m.ShardingState.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("shardingState") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("shardingState") + } + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *ReplicationShardingStateResponse) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ReplicationShardingStateResponse) UnmarshalBinary(b []byte) error { + var res ReplicationShardingStateResponse + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/restore_config.go 
b/platform/dbops/binaries/weaviate-src/entities/models/restore_config.go new file mode 100644 index 0000000000000000000000000000000000000000..2278c0ac3677dd0e50173308a0af11e6c691ac50 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/restore_config.go @@ -0,0 +1,200 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// RestoreConfig Backup custom configuration +// +// swagger:model RestoreConfig +type RestoreConfig struct { + + // Name of the bucket, container, volume, etc + Bucket string `json:"Bucket,omitempty"` + + // Desired CPU core utilization ranging from 1%-80% + // Maximum: 80 + // Minimum: 1 + CPUPercentage int64 `json:"CPUPercentage,omitempty"` + + // name of the endpoint, e.g. 
s3.amazonaws.com + Endpoint string `json:"Endpoint,omitempty"` + + // Path within the bucket + Path string `json:"Path,omitempty"` + + // How roles should be restored + // Enum: [noRestore all] + RolesOptions *string `json:"rolesOptions,omitempty"` + + // How users should be restored + // Enum: [noRestore all] + UsersOptions *string `json:"usersOptions,omitempty"` +} + +// Validate validates this restore config +func (m *RestoreConfig) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateCPUPercentage(formats); err != nil { + res = append(res, err) + } + + if err := m.validateRolesOptions(formats); err != nil { + res = append(res, err) + } + + if err := m.validateUsersOptions(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *RestoreConfig) validateCPUPercentage(formats strfmt.Registry) error { + if swag.IsZero(m.CPUPercentage) { // not required + return nil + } + + if err := validate.MinimumInt("CPUPercentage", "body", m.CPUPercentage, 1, false); err != nil { + return err + } + + if err := validate.MaximumInt("CPUPercentage", "body", m.CPUPercentage, 80, false); err != nil { + return err + } + + return nil +} + +var restoreConfigTypeRolesOptionsPropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["noRestore","all"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + restoreConfigTypeRolesOptionsPropEnum = append(restoreConfigTypeRolesOptionsPropEnum, v) + } +} + +const ( + + // RestoreConfigRolesOptionsNoRestore captures enum value "noRestore" + RestoreConfigRolesOptionsNoRestore string = "noRestore" + + // RestoreConfigRolesOptionsAll captures enum value "all" + RestoreConfigRolesOptionsAll string = "all" +) + +// prop value enum +func (m *RestoreConfig) validateRolesOptionsEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, 
value, restoreConfigTypeRolesOptionsPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *RestoreConfig) validateRolesOptions(formats strfmt.Registry) error { + if swag.IsZero(m.RolesOptions) { // not required + return nil + } + + // value enum + if err := m.validateRolesOptionsEnum("rolesOptions", "body", *m.RolesOptions); err != nil { + return err + } + + return nil +} + +var restoreConfigTypeUsersOptionsPropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["noRestore","all"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + restoreConfigTypeUsersOptionsPropEnum = append(restoreConfigTypeUsersOptionsPropEnum, v) + } +} + +const ( + + // RestoreConfigUsersOptionsNoRestore captures enum value "noRestore" + RestoreConfigUsersOptionsNoRestore string = "noRestore" + + // RestoreConfigUsersOptionsAll captures enum value "all" + RestoreConfigUsersOptionsAll string = "all" +) + +// prop value enum +func (m *RestoreConfig) validateUsersOptionsEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, restoreConfigTypeUsersOptionsPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *RestoreConfig) validateUsersOptions(formats strfmt.Registry) error { + if swag.IsZero(m.UsersOptions) { // not required + return nil + } + + // value enum + if err := m.validateUsersOptionsEnum("usersOptions", "body", *m.UsersOptions); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this restore config based on context it is used +func (m *RestoreConfig) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *RestoreConfig) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *RestoreConfig) UnmarshalBinary(b []byte) error { + var res 
RestoreConfig + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/role.go b/platform/dbops/binaries/weaviate-src/entities/models/role.go new file mode 100644 index 0000000000000000000000000000000000000000..cd3c685dcf9063990b7f5414a4e4534d52eeffd9 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/role.go @@ -0,0 +1,147 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// Role role +// +// swagger:model Role +type Role struct { + + // role name + // Required: true + Name *string `json:"name"` + + // permissions + // Required: true + Permissions []*Permission `json:"permissions"` +} + +// Validate validates this role +func (m *Role) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateName(formats); err != nil { + res = append(res, err) + } + + if err := m.validatePermissions(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *Role) validateName(formats strfmt.Registry) error { + + if err := validate.Required("name", "body", m.Name); err != nil { + return err + } + + return nil +} + +func (m *Role) validatePermissions(formats strfmt.Registry) error { + + if err := validate.Required("permissions", "body", m.Permissions); err != nil { + return err + } + + for i := 0; i < len(m.Permissions); i++ { + if swag.IsZero(m.Permissions[i]) { // not required + continue + } + + if m.Permissions[i] != nil { + if err := m.Permissions[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("permissions" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("permissions" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// ContextValidate validate this role based on the context it is used +func (m *Role) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidatePermissions(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *Role) contextValidatePermissions(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.Permissions); i++ { + + if m.Permissions[i] != nil { + if err := m.Permissions[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("permissions" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("permissions" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *Role) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Role) UnmarshalBinary(b []byte) error { + var res Role + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/roles_list_response.go b/platform/dbops/binaries/weaviate-src/entities/models/roles_list_response.go new file mode 100644 index 0000000000000000000000000000000000000000..607a51f5e0a600b21fb7e229e59b24f014fc8b3f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/roles_list_response.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// RolesListResponse list of roles +// +// swagger:model RolesListResponse +type RolesListResponse []*Role + +// Validate validates this roles list response +func (m RolesListResponse) Validate(formats strfmt.Registry) error { + var res []error + + for i := 0; i < len(m); i++ { + if swag.IsZero(m[i]) { // not required + continue + } + + if m[i] != nil { + if err := m[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName(strconv.Itoa(i)) + } + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// ContextValidate validate this roles list response based on the context it is used +func (m RolesListResponse) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + for i := 0; i < len(m); i++ { + + if m[i] != nil { + if err := m[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName(strconv.Itoa(i)) + } + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/schema.go b/platform/dbops/binaries/weaviate-src/entities/models/schema.go new file mode 100644 index 0000000000000000000000000000000000000000..f9edddc25ca6851305d5ee450073820b3cbfe4fa --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/schema.go @@ -0,0 +1,151 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// Schema Definitions of semantic schemas (also see: https://github.com/weaviate/weaviate-semantic-schemas). +// +// swagger:model Schema +type Schema struct { + + // Semantic classes that are available. + Classes []*Class `json:"classes"` + + // Email of the maintainer. + // Format: email + Maintainer strfmt.Email `json:"maintainer,omitempty"` + + // Name of the schema. + Name string `json:"name,omitempty"` +} + +// Validate validates this schema +func (m *Schema) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateClasses(formats); err != nil { + res = append(res, err) + } + + if err := m.validateMaintainer(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *Schema) validateClasses(formats strfmt.Registry) error { + if swag.IsZero(m.Classes) { // not required + return nil + } + + for i := 0; i < len(m.Classes); i++ { + if swag.IsZero(m.Classes[i]) { // not required + continue + } + + if m.Classes[i] != nil { + if err := m.Classes[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("classes" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("classes" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *Schema) validateMaintainer(formats strfmt.Registry) error { + if swag.IsZero(m.Maintainer) { // not required + return nil + } + + if err := validate.FormatOf("maintainer", "body", "email", m.Maintainer.String(), formats); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this schema based on the context it is used +func (m *Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateClasses(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *Schema) contextValidateClasses(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.Classes); i++ { + + if m.Classes[i] != nil { + if err := m.Classes[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("classes" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("classes" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *Schema) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Schema) UnmarshalBinary(b []byte) error { + var res Schema + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/schema_cluster_status.go b/platform/dbops/binaries/weaviate-src/entities/models/schema_cluster_status.go new file mode 100644 index 0000000000000000000000000000000000000000..1db75b521c74432232fdf67ac6246d1036780d91 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/schema_cluster_status.go @@ -0,0 +1,73 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// SchemaClusterStatus Indicates the health of the schema in a cluster. +// +// swagger:model SchemaClusterStatus +type SchemaClusterStatus struct { + + // Contains the sync check error if one occurred + Error string `json:"error,omitempty"` + + // True if the cluster is in sync, false if there is an issue (see error). + Healthy bool `json:"healthy"` + + // Hostname of the coordinating node, i.e. the one that received the cluster. This can be useful information if the error message contains phrases such as 'other nodes agree, but local does not', etc. 
+ Hostname string `json:"hostname,omitempty"` + + // The cluster check at startup can be ignored (to recover from an out-of-sync situation). + IgnoreSchemaSync bool `json:"ignoreSchemaSync"` + + // Number of nodes that participated in the sync check + NodeCount int64 `json:"nodeCount,omitempty"` +} + +// Validate validates this schema cluster status +func (m *SchemaClusterStatus) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this schema cluster status based on context it is used +func (m *SchemaClusterStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *SchemaClusterStatus) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *SchemaClusterStatus) UnmarshalBinary(b []byte) error { + var res SchemaClusterStatus + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/schema_history.go b/platform/dbops/binaries/weaviate-src/entities/models/schema_history.go new file mode 100644 index 0000000000000000000000000000000000000000..ec93101b8e254e6b7770188471520f7b86ea8a80 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/schema_history.go @@ -0,0 +1,22 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +// SchemaHistory This is an open object, with OpenAPI Specification 3.0 this will be more detailed. See Weaviate docs for more info. In the future this will become a key/value OR a SingleRef definition. +// +// swagger:model SchemaHistory +type SchemaHistory interface{} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/shard_status.go b/platform/dbops/binaries/weaviate-src/entities/models/shard_status.go new file mode 100644 index 0000000000000000000000000000000000000000..541b212ebefbc935d5d0fc07da281ba569174628 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/shard_status.go @@ -0,0 +1,61 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// ShardStatus The status of a single shard +// +// swagger:model ShardStatus +type ShardStatus struct { + + // Status of the shard + Status string `json:"status,omitempty"` +} + +// Validate validates this shard status +func (m *ShardStatus) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this shard status based on context it is used +func (m *ShardStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *ShardStatus) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ShardStatus) UnmarshalBinary(b []byte) error { + var res ShardStatus + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/shard_status_get_response.go b/platform/dbops/binaries/weaviate-src/entities/models/shard_status_get_response.go new file mode 100644 index 0000000000000000000000000000000000000000..127d97775aff852bf97d45f36820369b79962921 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/shard_status_get_response.go @@ -0,0 +1,67 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// ShardStatusGetResponse Response body of shard status get request +// +// swagger:model ShardStatusGetResponse +type ShardStatusGetResponse struct { + + // Name of the shard + Name string `json:"name,omitempty"` + + // Status of the shard + Status string `json:"status,omitempty"` + + // Size of the vector queue of the shard + VectorQueueSize int64 `json:"vectorQueueSize"` +} + +// Validate validates this shard status get response +func (m *ShardStatusGetResponse) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this shard status get response based on context it is used +func (m *ShardStatusGetResponse) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *ShardStatusGetResponse) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ShardStatusGetResponse) UnmarshalBinary(b []byte) error { + var res ShardStatusGetResponse + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/shard_status_list.go b/platform/dbops/binaries/weaviate-src/entities/models/shard_status_list.go new file mode 100644 index 0000000000000000000000000000000000000000..b03bd7a371d385ca2eb79e27735db37b2e4afeb8 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/shard_status_list.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// ShardStatusList The status of all the shards of a Class +// +// swagger:model ShardStatusList +type ShardStatusList []*ShardStatusGetResponse + +// Validate validates this shard status list +func (m ShardStatusList) Validate(formats strfmt.Registry) error { + var res []error + + for i := 0; i < len(m); i++ { + if swag.IsZero(m[i]) { // not required + continue + } + + if m[i] != nil { + if err := m[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName(strconv.Itoa(i)) + } + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// ContextValidate validate this shard status list based on the context it is used +func (m ShardStatusList) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + for i := 0; i < len(m); i++ { + + if m[i] != nil { + if err := m[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName(strconv.Itoa(i)) + } + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/single_ref.go b/platform/dbops/binaries/weaviate-src/entities/models/single_ref.go new file mode 100644 index 0000000000000000000000000000000000000000..179136ae499298dcb2b31ce1c0f87beba3506f52 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/single_ref.go @@ -0,0 +1,179 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// SingleRef Either set beacon (direct reference) or set class and schema (concept reference) +// +// swagger:model SingleRef +type SingleRef struct { + + // If using a direct reference, specify the URI to point to the cross-ref here. Should be in the form of weaviate://localhost/ for the example of a local cross-ref to an object + // Format: uri + Beacon strfmt.URI `json:"beacon,omitempty"` + + // If using a concept reference (rather than a direct reference), specify the desired class name here + // Format: uri + Class strfmt.URI `json:"class,omitempty"` + + // Additional Meta information about classifications if the item was part of one + Classification *ReferenceMetaClassification `json:"classification,omitempty"` + + // If using a direct reference, this read-only fields provides a link to the referenced resource. If 'origin' is globally configured, an absolute URI is shown - a relative URI otherwise. 
+ // Format: uri + Href strfmt.URI `json:"href,omitempty"` + + // If using a concept reference (rather than a direct reference), specify the desired properties here + Schema PropertySchema `json:"schema,omitempty"` +} + +// Validate validates this single ref +func (m *SingleRef) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateBeacon(formats); err != nil { + res = append(res, err) + } + + if err := m.validateClass(formats); err != nil { + res = append(res, err) + } + + if err := m.validateClassification(formats); err != nil { + res = append(res, err) + } + + if err := m.validateHref(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *SingleRef) validateBeacon(formats strfmt.Registry) error { + if swag.IsZero(m.Beacon) { // not required + return nil + } + + if err := validate.FormatOf("beacon", "body", "uri", m.Beacon.String(), formats); err != nil { + return err + } + + return nil +} + +func (m *SingleRef) validateClass(formats strfmt.Registry) error { + if swag.IsZero(m.Class) { // not required + return nil + } + + if err := validate.FormatOf("class", "body", "uri", m.Class.String(), formats); err != nil { + return err + } + + return nil +} + +func (m *SingleRef) validateClassification(formats strfmt.Registry) error { + if swag.IsZero(m.Classification) { // not required + return nil + } + + if m.Classification != nil { + if err := m.Classification.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("classification") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("classification") + } + return err + } + } + + return nil +} + +func (m *SingleRef) validateHref(formats strfmt.Registry) error { + if swag.IsZero(m.Href) { // not required + return nil + } + + if err := validate.FormatOf("href", "body", "uri", m.Href.String(), formats); err != nil { + 
return err + } + + return nil +} + +// ContextValidate validate this single ref based on the context it is used +func (m *SingleRef) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateClassification(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *SingleRef) contextValidateClassification(ctx context.Context, formats strfmt.Registry) error { + + if m.Classification != nil { + if err := m.Classification.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("classification") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("classification") + } + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *SingleRef) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *SingleRef) UnmarshalBinary(b []byte) error { + var res SingleRef + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/statistics.go b/platform/dbops/binaries/weaviate-src/entities/models/statistics.go new file mode 100644 index 0000000000000000000000000000000000000000..c09068955cffe1f7779817b064b539b1590a54db --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/statistics.go @@ -0,0 +1,206 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. 
+ +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// Statistics The definition of node statistics. +// +// swagger:model Statistics +type Statistics struct { + + // bootstrapped + Bootstrapped bool `json:"bootstrapped,omitempty"` + + // candidates + Candidates interface{} `json:"candidates,omitempty"` + + // db loaded + DbLoaded bool `json:"dbLoaded,omitempty"` + + // initial last applied index + InitialLastAppliedIndex uint64 `json:"initialLastAppliedIndex,omitempty"` + + // is voter + IsVoter bool `json:"isVoter,omitempty"` + + // last applied index + LastAppliedIndex float64 `json:"lastAppliedIndex,omitempty"` + + // leader address + LeaderAddress interface{} `json:"leaderAddress,omitempty"` + + // leader Id + LeaderID interface{} `json:"leaderId,omitempty"` + + // The name of the node. + Name string `json:"name,omitempty"` + + // open + Open bool `json:"open,omitempty"` + + // Weaviate Raft statistics. + Raft *RaftStatistics `json:"raft,omitempty"` + + // ready + Ready bool `json:"ready,omitempty"` + + // Node's status. + // Enum: [HEALTHY UNHEALTHY UNAVAILABLE TIMEOUT] + Status *string `json:"status,omitempty"` +} + +// Validate validates this statistics +func (m *Statistics) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateRaft(formats); err != nil { + res = append(res, err) + } + + if err := m.validateStatus(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *Statistics) validateRaft(formats strfmt.Registry) error { + if swag.IsZero(m.Raft) { // not required + return nil + } + + if m.Raft != nil { + if err := m.Raft.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("raft") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("raft") + } + return err + } + } + + return nil +} + +var statisticsTypeStatusPropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["HEALTHY","UNHEALTHY","UNAVAILABLE","TIMEOUT"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + statisticsTypeStatusPropEnum = append(statisticsTypeStatusPropEnum, v) + } +} + +const ( + + // StatisticsStatusHEALTHY captures enum value "HEALTHY" + StatisticsStatusHEALTHY string = "HEALTHY" + + // StatisticsStatusUNHEALTHY captures enum value "UNHEALTHY" + StatisticsStatusUNHEALTHY string = "UNHEALTHY" + + // StatisticsStatusUNAVAILABLE captures enum value "UNAVAILABLE" + StatisticsStatusUNAVAILABLE string = "UNAVAILABLE" + + // StatisticsStatusTIMEOUT captures enum value "TIMEOUT" + StatisticsStatusTIMEOUT string = "TIMEOUT" +) + +// prop value enum +func (m *Statistics) validateStatusEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, statisticsTypeStatusPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *Statistics) validateStatus(formats strfmt.Registry) error { + if swag.IsZero(m.Status) { // not required + return nil + } + + // value enum + if err := m.validateStatusEnum("status", "body", *m.Status); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this statistics based on the context it is used +func (m *Statistics) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateRaft(ctx, formats); err != nil { + res = append(res, err) + 
} + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *Statistics) contextValidateRaft(ctx context.Context, formats strfmt.Registry) error { + + if m.Raft != nil { + if err := m.Raft.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("raft") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("raft") + } + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *Statistics) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Statistics) UnmarshalBinary(b []byte) error { + var res Statistics + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/stopword_config.go b/platform/dbops/binaries/weaviate-src/entities/models/stopword_config.go new file mode 100644 index 0000000000000000000000000000000000000000..39e015acbbd444b18f8dfb74b7dcb69b9620b439 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/stopword_config.go @@ -0,0 +1,67 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// StopwordConfig fine-grained control over stopword list usage +// +// swagger:model StopwordConfig +type StopwordConfig struct { + + // Stopwords to be considered additionally (default: []). Can be any array of custom strings. + Additions []string `json:"additions"` + + // Pre-existing list of common words by language (default: 'en'). Options: ['en', 'none']. + Preset string `json:"preset,omitempty"` + + // Stopwords to be removed from consideration (default: []). Can be any array of custom strings. + Removals []string `json:"removals"` +} + +// Validate validates this stopword config +func (m *StopwordConfig) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this stopword config based on context it is used +func (m *StopwordConfig) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *StopwordConfig) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *StopwordConfig) UnmarshalBinary(b []byte) error { + var res StopwordConfig + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/tenant.go b/platform/dbops/binaries/weaviate-src/entities/models/tenant.go new file mode 100644 index 0000000000000000000000000000000000000000..0ee4c2ffc5ce92327f7cfe6c1a87efac7601e3b2 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/tenant.go @@ -0,0 +1,143 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// 
Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// Tenant attributes representing a single tenant within weaviate +// +// swagger:model Tenant +type Tenant struct { + + // activity status of the tenant's shard. Optional for creating tenant (implicit `ACTIVE`) and required for updating tenant. For creation, allowed values are `ACTIVE` - tenant is fully active and `INACTIVE` - tenant is inactive; no actions can be performed on tenant, tenant's files are stored locally. For updating, `ACTIVE`, `INACTIVE` and also `OFFLOADED` - as INACTIVE, but files are stored on cloud storage. The following values are read-only and are set by the server for internal use: `OFFLOADING` - tenant is transitioning from ACTIVE/INACTIVE to OFFLOADED, `ONLOADING` - tenant is transitioning from OFFLOADED to ACTIVE/INACTIVE. We still accept deprecated names `HOT` (now `ACTIVE`), `COLD` (now `INACTIVE`), `FROZEN` (now `OFFLOADED`), `FREEZING` (now `OFFLOADING`), `UNFREEZING` (now `ONLOADING`). + // Enum: [ACTIVE INACTIVE OFFLOADED OFFLOADING ONLOADING HOT COLD FROZEN FREEZING UNFREEZING] + ActivityStatus string `json:"activityStatus,omitempty"` + + // The name of the tenant (required). + Name string `json:"name,omitempty"` +} + +// Validate validates this tenant +func (m *Tenant) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateActivityStatus(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +var tenantTypeActivityStatusPropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["ACTIVE","INACTIVE","OFFLOADED","OFFLOADING","ONLOADING","HOT","COLD","FROZEN","FREEZING","UNFREEZING"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + tenantTypeActivityStatusPropEnum = append(tenantTypeActivityStatusPropEnum, v) + } +} + +const ( + + // TenantActivityStatusACTIVE captures enum value "ACTIVE" + TenantActivityStatusACTIVE string = "ACTIVE" + + // TenantActivityStatusINACTIVE captures enum value "INACTIVE" + TenantActivityStatusINACTIVE string = "INACTIVE" + + // TenantActivityStatusOFFLOADED captures enum value "OFFLOADED" + TenantActivityStatusOFFLOADED string = "OFFLOADED" + + // TenantActivityStatusOFFLOADING captures enum value "OFFLOADING" + TenantActivityStatusOFFLOADING string = "OFFLOADING" + + // TenantActivityStatusONLOADING captures enum value "ONLOADING" + TenantActivityStatusONLOADING string = "ONLOADING" + + // TenantActivityStatusHOT captures enum value "HOT" + TenantActivityStatusHOT string = "HOT" + + // TenantActivityStatusCOLD captures enum value "COLD" + TenantActivityStatusCOLD string = "COLD" + + // TenantActivityStatusFROZEN captures enum value "FROZEN" + TenantActivityStatusFROZEN string = "FROZEN" + + // TenantActivityStatusFREEZING captures enum value "FREEZING" + TenantActivityStatusFREEZING string = "FREEZING" + + // TenantActivityStatusUNFREEZING captures enum value "UNFREEZING" + TenantActivityStatusUNFREEZING string = "UNFREEZING" +) + +// prop value enum +func (m *Tenant) validateActivityStatusEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, tenantTypeActivityStatusPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *Tenant) validateActivityStatus(formats strfmt.Registry) error { + if swag.IsZero(m.ActivityStatus) { // not required + return nil + } + + // value enum + if err := 
m.validateActivityStatusEnum("activityStatus", "body", m.ActivityStatus); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this tenant based on context it is used +func (m *Tenant) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *Tenant) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Tenant) UnmarshalBinary(b []byte) error { + var res Tenant + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/user_api_key.go b/platform/dbops/binaries/weaviate-src/entities/models/user_api_key.go new file mode 100644 index 0000000000000000000000000000000000000000..7a4f4ca6b0dca295e378712e3660339ad65f7943 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/user_api_key.go @@ -0,0 +1,82 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// UserAPIKey user Api key +// +// swagger:model UserApiKey +type UserAPIKey struct { + + // The apikey + // Required: true + Apikey *string `json:"apikey"` +} + +// Validate validates this user Api key +func (m *UserAPIKey) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateApikey(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *UserAPIKey) validateApikey(formats strfmt.Registry) error { + + if err := validate.Required("apikey", "body", m.Apikey); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this user Api key based on context it is used +func (m *UserAPIKey) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *UserAPIKey) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *UserAPIKey) UnmarshalBinary(b []byte) error { + var res UserAPIKey + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/user_own_info.go b/platform/dbops/binaries/weaviate-src/entities/models/user_own_info.go new file mode 100644 index 0000000000000000000000000000000000000000..9ea483e30d7fd7226fe6d988669187cf6ca40d9a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/user_own_info.go @@ -0,0 +1,148 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ 
|_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// UserOwnInfo user own info +// +// swagger:model UserOwnInfo +type UserOwnInfo struct { + + // The groups associated to the user + Groups []string `json:"groups"` + + // roles + Roles []*Role `json:"roles"` + + // The username associated with the provided key + // Required: true + Username *string `json:"username"` +} + +// Validate validates this user own info +func (m *UserOwnInfo) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateRoles(formats); err != nil { + res = append(res, err) + } + + if err := m.validateUsername(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *UserOwnInfo) validateRoles(formats strfmt.Registry) error { + if swag.IsZero(m.Roles) { // not required + return nil + } + + for i := 0; i < len(m.Roles); i++ { + if swag.IsZero(m.Roles[i]) { // not required + continue + } + + if m.Roles[i] != nil { + if err := m.Roles[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("roles" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("roles" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *UserOwnInfo) validateUsername(formats strfmt.Registry) error { + + if err := validate.Required("username", "body", m.Username); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this user own info based on the context it is used +func (m *UserOwnInfo) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateRoles(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *UserOwnInfo) contextValidateRoles(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.Roles); i++ { + + if m.Roles[i] != nil { + if err := m.Roles[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("roles" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("roles" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *UserOwnInfo) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *UserOwnInfo) UnmarshalBinary(b []byte) error { + var res UserOwnInfo + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/user_type_input.go b/platform/dbops/binaries/weaviate-src/entities/models/user_type_input.go new file mode 100644 index 0000000000000000000000000000000000000000..6d6ab253d71bbb7d1fbe7f3bdc557070503d2b64 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/user_type_input.go @@ -0,0 +1,89 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" +) + +// UserTypeInput the type of user +// +// swagger:model UserTypeInput +type UserTypeInput string + +func NewUserTypeInput(value UserTypeInput) *UserTypeInput { + return &value +} + +// Pointer returns a pointer to a freshly-allocated UserTypeInput. 
+func (m UserTypeInput) Pointer() *UserTypeInput { + return &m +} + +const ( + + // UserTypeInputDb captures enum value "db" + UserTypeInputDb UserTypeInput = "db" + + // UserTypeInputOidc captures enum value "oidc" + UserTypeInputOidc UserTypeInput = "oidc" +) + +// for schema +var userTypeInputEnum []interface{} + +func init() { + var res []UserTypeInput + if err := json.Unmarshal([]byte(`["db","oidc"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + userTypeInputEnum = append(userTypeInputEnum, v) + } +} + +func (m UserTypeInput) validateUserTypeInputEnum(path, location string, value UserTypeInput) error { + if err := validate.EnumCase(path, location, value, userTypeInputEnum, true); err != nil { + return err + } + return nil +} + +// Validate validates this user type input +func (m UserTypeInput) Validate(formats strfmt.Registry) error { + var res []error + + // value enum + if err := m.validateUserTypeInputEnum("", "body", m); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// ContextValidate validates this user type input based on context it is used +func (m UserTypeInput) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/user_type_output.go b/platform/dbops/binaries/weaviate-src/entities/models/user_type_output.go new file mode 100644 index 0000000000000000000000000000000000000000..41b30b9ab1744441790e5b637570fba74366ca87 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/user_type_output.go @@ -0,0 +1,92 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. 
+ +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" +) + +// UserTypeOutput the type of user +// +// swagger:model UserTypeOutput +type UserTypeOutput string + +func NewUserTypeOutput(value UserTypeOutput) *UserTypeOutput { + return &value +} + +// Pointer returns a pointer to a freshly-allocated UserTypeOutput. +func (m UserTypeOutput) Pointer() *UserTypeOutput { + return &m +} + +const ( + + // UserTypeOutputDbUser captures enum value "db_user" + UserTypeOutputDbUser UserTypeOutput = "db_user" + + // UserTypeOutputDbEnvUser captures enum value "db_env_user" + UserTypeOutputDbEnvUser UserTypeOutput = "db_env_user" + + // UserTypeOutputOidc captures enum value "oidc" + UserTypeOutputOidc UserTypeOutput = "oidc" +) + +// for schema +var userTypeOutputEnum []interface{} + +func init() { + var res []UserTypeOutput + if err := json.Unmarshal([]byte(`["db_user","db_env_user","oidc"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + userTypeOutputEnum = append(userTypeOutputEnum, v) + } +} + +func (m UserTypeOutput) validateUserTypeOutputEnum(path, location string, value UserTypeOutput) error { + if err := validate.EnumCase(path, location, value, userTypeOutputEnum, true); err != nil { + return err + } + return nil +} + +// Validate validates this user type output +func (m UserTypeOutput) Validate(formats strfmt.Registry) error { + var res []error + + // value enum + if err := m.validateUserTypeOutputEnum("", "body", m); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +// ContextValidate validates this user type output based on context it is used +func (m UserTypeOutput) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/vector.go b/platform/dbops/binaries/weaviate-src/entities/models/vector.go new file mode 100644 index 0000000000000000000000000000000000000000..9ff89c5ecfc993de39f892f65f59c30307687cb3 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/vector.go @@ -0,0 +1,22 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// Vector A vector representation of the object. If provided at object creation, this wil take precedence over any vectorizer setting. +// +// swagger:model Vector +type Vector interface{} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/vector_config.go b/platform/dbops/binaries/weaviate-src/entities/models/vector_config.go new file mode 100644 index 0000000000000000000000000000000000000000..36f4fd95e59f9dca727493626d408c546e249171 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/vector_config.go @@ -0,0 +1,67 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. 
+ +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// VectorConfig vector config +// +// swagger:model VectorConfig +type VectorConfig struct { + + // Vector-index config, that is specific to the type of index selected in vectorIndexType + VectorIndexConfig interface{} `json:"vectorIndexConfig,omitempty"` + + // Name of the vector index to use, eg. (HNSW) + VectorIndexType string `json:"vectorIndexType,omitempty"` + + // Configuration of a specific vectorizer used by this vector + Vectorizer interface{} `json:"vectorizer,omitempty"` +} + +// Validate validates this vector config +func (m *VectorConfig) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this vector config based on context it is used +func (m *VectorConfig) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *VectorConfig) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *VectorConfig) UnmarshalBinary(b []byte) error { + var res VectorConfig + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/vector_weights.go b/platform/dbops/binaries/weaviate-src/entities/models/vector_weights.go new file mode 100644 index 0000000000000000000000000000000000000000..b2b7bd978ac01d71b36ba8c198f99c13c6a5c4cc --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/vector_weights.go @@ -0,0 +1,22 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ 
|_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// VectorWeights Allow custom overrides of vector weights as math expressions. E.g. "pancake": "7" will set the weight for the word pancake to 7 in the vectorization, whereas "w * 3" would triple the originally calculated word. This is an open object, with OpenAPI Specification 3.0 this will be more detailed. See Weaviate docs for more info. In the future this will become a key/value (string/string) object. +// +// swagger:model VectorWeights +type VectorWeights interface{} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/vectors.go b/platform/dbops/binaries/weaviate-src/entities/models/vectors.go new file mode 100644 index 0000000000000000000000000000000000000000..7fbdf5fe46192a9b0f1785238265975c4242d158 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/vectors.go @@ -0,0 +1,72 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/go-openapi/strfmt" +) + +// Vectors A map of named vectors for multi-vector representations. 
+// +// swagger:model Vectors +type Vectors map[string]Vector + +// Validate validates this vectors +func (m Vectors) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this vectors based on context it is used +func (m Vectors) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// UnmarshalJSON custom unmarshalling method +func (v *Vectors) UnmarshalJSON(data []byte) error { + var rawVectors map[string]json.RawMessage + if err := json.Unmarshal(data, &rawVectors); err != nil { + return err + } + + if len(rawVectors) > 0 { + *v = make(Vectors) + for targetVector, rawMessage := range rawVectors { + // Try unmarshaling as []float32 + var vector []float32 + if err := json.Unmarshal(rawMessage, &vector); err == nil { + if len(vector) > 0 { + (*v)[targetVector] = vector + } + continue + } + // Try unmarshaling as [][]float32 + var multiVector [][]float32 + if err := json.Unmarshal(rawMessage, &multiVector); err == nil { + if len(multiVector) > 0 { + (*v)[targetVector] = multiVector + } + continue + } + return fmt.Errorf("vectors: cannot unmarshal vector into either []float32 or [][]float32 for target vector %s", targetVector) + } + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/where_filter.go b/platform/dbops/binaries/weaviate-src/entities/models/where_filter.go new file mode 100644 index 0000000000000000000000000000000000000000..0cf19d9a025c1024c784a80a0897fe4b96088728 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/where_filter.go @@ -0,0 +1,317 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. 
+ +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// WhereFilter Filter search results using a where filter +// +// swagger:model WhereFilter +type WhereFilter struct { + + // combine multiple where filters, requires 'And' or 'Or' operator + Operands []*WhereFilter `json:"operands"` + + // operator to use + // Example: GreaterThanEqual + // Enum: [And Or Equal Like NotEqual GreaterThan GreaterThanEqual LessThan LessThanEqual WithinGeoRange IsNull ContainsAny ContainsAll ContainsNone Not] + Operator string `json:"operator,omitempty"` + + // path to the property currently being filtered + // Example: ["inCity","City","name"] + Path []string `json:"path"` + + // value as boolean + // Example: false + ValueBoolean *bool `json:"valueBoolean,omitempty"` + + // value as boolean + // Example: [true,false] + ValueBooleanArray []bool `json:"valueBooleanArray,omitempty"` + + // value as date (as string) + // Example: TODO + ValueDate *string `json:"valueDate,omitempty"` + + // value as date (as string) + // Example: TODO + ValueDateArray []string `json:"valueDateArray,omitempty"` + + // value as geo coordinates and distance + ValueGeoRange *WhereFilterGeoRange `json:"valueGeoRange,omitempty"` + + // value as integer + // Example: 2000 + ValueInt *int64 `json:"valueInt,omitempty"` + + // value as integer + // Example: [100, 200] + ValueIntArray []int64 `json:"valueIntArray,omitempty"` + + // value as number/float + // Example: 3.14 + ValueNumber *float64 `json:"valueNumber,omitempty"` + + // value as number/float + // Example: [3.14] + ValueNumberArray []float64 `json:"valueNumberArray,omitempty"` + + // value as text (deprecated as of v1.19; alias for valueText) + // Example: my 
search term + ValueString *string `json:"valueString,omitempty"` + + // value as text (deprecated as of v1.19; alias for valueText) + // Example: ["my search term"] + ValueStringArray []string `json:"valueStringArray,omitempty"` + + // value as text + // Example: my search term + ValueText *string `json:"valueText,omitempty"` + + // value as text + // Example: ["my search term"] + ValueTextArray []string `json:"valueTextArray,omitempty"` +} + +// Validate validates this where filter +func (m *WhereFilter) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateOperands(formats); err != nil { + res = append(res, err) + } + + if err := m.validateOperator(formats); err != nil { + res = append(res, err) + } + + if err := m.validateValueGeoRange(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *WhereFilter) validateOperands(formats strfmt.Registry) error { + if swag.IsZero(m.Operands) { // not required + return nil + } + + for i := 0; i < len(m.Operands); i++ { + if swag.IsZero(m.Operands[i]) { // not required + continue + } + + if m.Operands[i] != nil { + if err := m.Operands[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("operands" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("operands" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +var whereFilterTypeOperatorPropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["And","Or","Equal","Like","NotEqual","GreaterThan","GreaterThanEqual","LessThan","LessThanEqual","WithinGeoRange","IsNull","ContainsAny","ContainsAll","ContainsNone","Not"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + whereFilterTypeOperatorPropEnum = append(whereFilterTypeOperatorPropEnum, v) + } +} + +const ( + + // WhereFilterOperatorAnd captures enum value "And" + WhereFilterOperatorAnd string = "And" + + // WhereFilterOperatorOr captures enum value "Or" + WhereFilterOperatorOr string = "Or" + + // WhereFilterOperatorEqual captures enum value "Equal" + WhereFilterOperatorEqual string = "Equal" + + // WhereFilterOperatorLike captures enum value "Like" + WhereFilterOperatorLike string = "Like" + + // WhereFilterOperatorNotEqual captures enum value "NotEqual" + WhereFilterOperatorNotEqual string = "NotEqual" + + // WhereFilterOperatorGreaterThan captures enum value "GreaterThan" + WhereFilterOperatorGreaterThan string = "GreaterThan" + + // WhereFilterOperatorGreaterThanEqual captures enum value "GreaterThanEqual" + WhereFilterOperatorGreaterThanEqual string = "GreaterThanEqual" + + // WhereFilterOperatorLessThan captures enum value "LessThan" + WhereFilterOperatorLessThan string = "LessThan" + + // WhereFilterOperatorLessThanEqual captures enum value "LessThanEqual" + WhereFilterOperatorLessThanEqual string = "LessThanEqual" + + // WhereFilterOperatorWithinGeoRange captures enum value "WithinGeoRange" + WhereFilterOperatorWithinGeoRange string = "WithinGeoRange" + + // WhereFilterOperatorIsNull captures enum value "IsNull" + WhereFilterOperatorIsNull string = "IsNull" + + // WhereFilterOperatorContainsAny captures enum value "ContainsAny" + WhereFilterOperatorContainsAny string = "ContainsAny" + + // WhereFilterOperatorContainsAll captures enum value 
"ContainsAll" + WhereFilterOperatorContainsAll string = "ContainsAll" + + // WhereFilterOperatorContainsNone captures enum value "ContainsNone" + WhereFilterOperatorContainsNone string = "ContainsNone" + + // WhereFilterOperatorNot captures enum value "Not" + WhereFilterOperatorNot string = "Not" +) + +// prop value enum +func (m *WhereFilter) validateOperatorEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, whereFilterTypeOperatorPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *WhereFilter) validateOperator(formats strfmt.Registry) error { + if swag.IsZero(m.Operator) { // not required + return nil + } + + // value enum + if err := m.validateOperatorEnum("operator", "body", m.Operator); err != nil { + return err + } + + return nil +} + +func (m *WhereFilter) validateValueGeoRange(formats strfmt.Registry) error { + if swag.IsZero(m.ValueGeoRange) { // not required + return nil + } + + if m.ValueGeoRange != nil { + if err := m.ValueGeoRange.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("valueGeoRange") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("valueGeoRange") + } + return err + } + } + + return nil +} + +// ContextValidate validate this where filter based on the context it is used +func (m *WhereFilter) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateOperands(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateValueGeoRange(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *WhereFilter) contextValidateOperands(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.Operands); i++ { + + if m.Operands[i] != nil { + if err := m.Operands[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("operands" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("operands" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *WhereFilter) contextValidateValueGeoRange(ctx context.Context, formats strfmt.Registry) error { + + if m.ValueGeoRange != nil { + if err := m.ValueGeoRange.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("valueGeoRange") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("valueGeoRange") + } + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *WhereFilter) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *WhereFilter) UnmarshalBinary(b []byte) error { + var res WhereFilter + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/models/where_filter_geo_range.go b/platform/dbops/binaries/weaviate-src/entities/models/where_filter_geo_range.go new file mode 100644 index 0000000000000000000000000000000000000000..4707cd6a761691f2a0876d34dc99b875930a0c6d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/models/where_filter_geo_range.go @@ -0,0 +1,198 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. 
All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// WhereFilterGeoRange filter within a distance of a georange +// +// swagger:model WhereFilterGeoRange +type WhereFilterGeoRange struct { + + // distance + Distance *WhereFilterGeoRangeDistance `json:"distance,omitempty"` + + // geo coordinates + GeoCoordinates *GeoCoordinates `json:"geoCoordinates,omitempty"` +} + +// Validate validates this where filter geo range +func (m *WhereFilterGeoRange) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateDistance(formats); err != nil { + res = append(res, err) + } + + if err := m.validateGeoCoordinates(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *WhereFilterGeoRange) validateDistance(formats strfmt.Registry) error { + if swag.IsZero(m.Distance) { // not required + return nil + } + + if m.Distance != nil { + if err := m.Distance.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("distance") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("distance") + } + return err + } + } + + return nil +} + +func (m *WhereFilterGeoRange) validateGeoCoordinates(formats strfmt.Registry) error { + if swag.IsZero(m.GeoCoordinates) { // not required + return nil + } + + if m.GeoCoordinates != nil { + if err := m.GeoCoordinates.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("geoCoordinates") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("geoCoordinates") + } + return err + } + } + + return nil +} + +// ContextValidate validate this where filter geo range based on the context it is used +func (m *WhereFilterGeoRange) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateDistance(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateGeoCoordinates(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *WhereFilterGeoRange) contextValidateDistance(ctx context.Context, formats strfmt.Registry) error { + + if m.Distance != nil { + if err := m.Distance.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("distance") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("distance") + } + return err + } + } + + return nil +} + +func (m *WhereFilterGeoRange) contextValidateGeoCoordinates(ctx context.Context, formats strfmt.Registry) error { + + if m.GeoCoordinates != nil { + if err := m.GeoCoordinates.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("geoCoordinates") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("geoCoordinates") + } + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *WhereFilterGeoRange) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *WhereFilterGeoRange) UnmarshalBinary(b []byte) error { + var res WhereFilterGeoRange + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// WhereFilterGeoRangeDistance where filter geo range distance +// +// swagger:model WhereFilterGeoRangeDistance +type WhereFilterGeoRangeDistance struct { + + // max + Max float64 `json:"max,omitempty"` +} + +// Validate validates this where filter geo range distance +func (m *WhereFilterGeoRangeDistance) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this where filter geo range distance based on context it is used +func (m *WhereFilterGeoRangeDistance) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *WhereFilterGeoRangeDistance) 
MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *WhereFilterGeoRangeDistance) UnmarshalBinary(b []byte) error { + var res WhereFilterGeoRangeDistance + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/modelsext/class.go b/platform/dbops/binaries/weaviate-src/entities/modelsext/class.go new file mode 100644 index 0000000000000000000000000000000000000000..384569cac456787fd20affd7225f21adc4370141 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/modelsext/class.go @@ -0,0 +1,41 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package modelsext + +import "github.com/weaviate/weaviate/entities/models" + +// DefaultNamedVectorName is a default vector named used to create a named vector or to allow access +// to legacy vector through named vector API. +const DefaultNamedVectorName = "default" + +// ClassHasLegacyVectorIndex checks whether there is a legacy index configured on a class. +func ClassHasLegacyVectorIndex(class *models.Class) bool { + return class.Vectorizer != "" || class.VectorIndexConfig != nil || class.VectorIndexType != "" +} + +// ClassGetVectorConfig returns the vector config for a given class and target vector. +// There is a special case for the default vector name, which is used to access the legacy vector. 
+func ClassGetVectorConfig(class *models.Class, targetVector string) (models.VectorConfig, bool) { + if cfg, ok := class.VectorConfig[targetVector]; ok { + return cfg, ok + } + + if (ClassHasLegacyVectorIndex(class) && targetVector == DefaultNamedVectorName) || targetVector == "" { + return models.VectorConfig{ + VectorIndexConfig: class.VectorIndexConfig, + VectorIndexType: class.VectorIndexType, + Vectorizer: class.Vectorizer, + }, true + } + + return models.VectorConfig{}, false +} diff --git a/platform/dbops/binaries/weaviate-src/entities/modelsext/class_test.go b/platform/dbops/binaries/weaviate-src/entities/modelsext/class_test.go new file mode 100644 index 0000000000000000000000000000000000000000..61488b5566799872bcb9e556ec5d6e1eefa303e1 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/modelsext/class_test.go @@ -0,0 +1,164 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package modelsext + +import ( + "testing" + + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/models" +) + +func TestClassHasLegacyVectorIndex(t *testing.T) { + for _, tt := range []struct { + name string + class *models.Class + want bool + }{ + { + name: "all fields are empty or nil", + class: &models.Class{ + Vectorizer: "", + VectorIndexConfig: nil, + VectorIndexType: "", + }, + want: false, + }, + { + name: "Vectorizer is not empty", + class: &models.Class{ + Vectorizer: "some_vectorizer", + }, + want: true, + }, + { + name: "VectorIndexConfig is not nil", + class: &models.Class{ + VectorIndexConfig: map[string]interface{}{"distance": "cosine"}, + }, + want: true, + }, + { + name: "VectorIndexType is not empty", + class: &models.Class{ + VectorIndexType: "hnsw", + }, + want: true, + }, + } { + t.Run(tt.name, func(t *testing.T) { + require.Equal(t, tt.want, ClassHasLegacyVectorIndex(tt.class)) + }) + } +} + +func TestClassGetVectorConfig(t *testing.T) { + var ( + customConfig = models.VectorConfig{ + Vectorizer: "custom-vectorizer", + VectorIndexType: "flat", + VectorIndexConfig: map[string]interface{}{ + "distance": "euclidean", + }, + } + + legacyConfig = models.VectorConfig{ + Vectorizer: "legacy-vectorizer", + VectorIndexType: "hnsw", + VectorIndexConfig: map[string]interface{}{ + "distance": "cosine", + }, + } + + mixedClass = &models.Class{ + Vectorizer: "legacy-vectorizer", + VectorIndexType: "hnsw", + VectorIndexConfig: map[string]interface{}{ + "distance": "cosine", + }, + VectorConfig: map[string]models.VectorConfig{ + "custom": customConfig, + }, + } + ) + + for _, tt := range []struct { + name string + class *models.Class + targetVector string + + expectConfig *models.VectorConfig + }{ + { + name: "named vector not present", + class: mixedClass, + targetVector: "non-existent", + + expectConfig: nil, + }, + { + name: "legacy vector via empty string", + class: mixedClass, + 
targetVector: "", + + expectConfig: &legacyConfig, + }, + { + name: "legacy vector via default named target vector", + class: mixedClass, + targetVector: DefaultNamedVectorName, + + expectConfig: &legacyConfig, + }, + { + name: "named vector via its name", + class: mixedClass, + targetVector: "custom", + + expectConfig: &customConfig, + }, + { + name: "legacy vector without named vectors", + class: &models.Class{ + Vectorizer: "legacy-vectorizer", + VectorIndexType: "hnsw", + VectorIndexConfig: map[string]interface{}{ + "distance": "cosine", + }, + }, + targetVector: "", + + expectConfig: &legacyConfig, + }, + { + name: "named vector without legacy vectors", + class: &models.Class{ + VectorConfig: map[string]models.VectorConfig{ + "custom": customConfig, + }, + }, + targetVector: "custom", + + expectConfig: &customConfig, + }, + } { + t.Run(tt.name, func(t *testing.T) { + cfg, ok := ClassGetVectorConfig(tt.class, tt.targetVector) + if tt.expectConfig == nil { + require.False(t, ok) + } else { + require.True(t, ok) + require.Equal(t, *tt.expectConfig, cfg) + } + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/entities/modelsext/doc.go b/platform/dbops/binaries/weaviate-src/entities/modelsext/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..1d3fddb9772e9114cb73f4d1c4d8be023bf9ccf1 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/modelsext/doc.go @@ -0,0 +1,21 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Package modelsext provides extension methods to the structures in the models package. As this package is +// generated, we cannot put any functionality or helper methods straight into it. 
+// +// As the models package is used very widely throughout the repository, extensions package +// should not import anything outside the models package as well to avoid any circular dependencies. +// +// To keep things tidy, use these conventions: +// 1. Helpers for the struct in models/X.go should be in modelsext/X.go . +// 2. Functions should be named . +package modelsext diff --git a/platform/dbops/binaries/weaviate-src/entities/modulecapabilities/additional.go b/platform/dbops/binaries/weaviate-src/entities/modulecapabilities/additional.go new file mode 100644 index 0000000000000000000000000000000000000000..834594057a1b8eb3a21e749ddac27aa46d8624d4 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/modulecapabilities/additional.go @@ -0,0 +1,68 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package modulecapabilities + +import ( + "context" + + "github.com/weaviate/weaviate/entities/dto" + "github.com/weaviate/weaviate/entities/models" + + "github.com/tailor-inc/graphql" + "github.com/tailor-inc/graphql/language/ast" + "github.com/weaviate/weaviate/entities/moduletools" + "github.com/weaviate/weaviate/entities/search" +) + +// GraphQLFieldFn generates graphql field based on classname +type GraphQLFieldFn = func(classname string) *graphql.Field + +// ExtractAdditionalFn extracts parameters from graphql queries +type ExtractAdditionalFn = func(param []*ast.Argument, class *models.Class) interface{} + +// AdditionalPropertyWithSearchVector defines additional property params +// with the ability to pass search vector +type AdditionalPropertyWithSearchVector[T dto.Embedding] interface { + SetSearchVector(vector T) +} + +// AdditionalPropertyFn defines interface for additional property +// functions performing given logic +type AdditionalPropertyFn = func(ctx context.Context, + in []search.Result, params interface{}, limit *int, + argumentModuleParams map[string]interface{}, cfg moduletools.ClassConfig) ([]search.Result, error) + +// AdditionalSearch defines on which type of query a given +// additional logic can be performed +type AdditionalSearch struct { + ObjectGet AdditionalPropertyFn + ObjectList AdditionalPropertyFn + ExploreGet AdditionalPropertyFn + ExploreList AdditionalPropertyFn +} + +// AdditionalProperty defines all the needed settings / methods +// to be set in order to add the additional property to Weaviate +type AdditionalProperty struct { + RestNames []string + DefaultValue interface{} + GraphQLNames []string + GraphQLFieldFunction GraphQLFieldFn + GraphQLExtractFunction ExtractAdditionalFn + SearchFunctions AdditionalSearch +} + +// AdditionalProperties groups whole interface methods needed +// for adding the capability of additional properties +type AdditionalProperties interface { + 
AdditionalProperties() map[string]AdditionalProperty +} diff --git a/platform/dbops/binaries/weaviate-src/entities/modulecapabilities/backup.go b/platform/dbops/binaries/weaviate-src/entities/modulecapabilities/backup.go new file mode 100644 index 0000000000000000000000000000000000000000..871cd5463e5508f76579f0a33544fd7b95ac4407 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/modulecapabilities/backup.go @@ -0,0 +1,54 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package modulecapabilities + +import ( + "context" + "io" + + "github.com/weaviate/weaviate/entities/backup" +) + +type BackupBackend interface { + // IsExternal returns whether the storage is an external storage (e.g. gcs, s3) + IsExternal() bool + // Name returns backend's name + Name() string + // HomeDir is the base storage location of all backup files, which can be a bucket, a directory, etc. 
+ HomeDir(backupID, overrideBucket, overridePath string) string + + // GetObject giving backupID and key + GetObject(ctx context.Context, backupID, key, overrideBucket, overridePath string) ([]byte, error) + // AllBackups returns the top level metadata for all attempted backups + AllBackups(ctx context.Context) ([]*backup.DistributedBackupDescriptor, error) + + // WriteToFile writes an object in the specified file with path destPath + // The file will be created if it doesn't exist + // The file will be overwritten if it exists + WriteToFile(ctx context.Context, backupID, key, destPath, overrideBucket, overridePath string) error + + // SourceDataPath is data path of all source files + SourceDataPath() string + + // PutObject writes bytes to the object with key `key` + // bucketName and bucketPath override the initialised bucketName and bucketPath + PutObject(ctx context.Context, backupID, key, overrideBucket, overridePath string, byes []byte) error + + // Initialize initializes backup provider and make sure that app have access rights to write into the object store. 
+ Initialize(ctx context.Context, backupID, overrideBucket, overridePath string) error + + // Write writes the content of the reader to the object with key + // bucketName and bucketPath override the initialised bucketName and bucketPath + // Allows restores from a different bucket to the designated backup bucket + Write(ctx context.Context, backupID, key, overrideBucket, overridePath string, r io.ReadCloser) (int64, error) + Read(ctx context.Context, backupID, key, overrideBucket, overridePath string, w io.WriteCloser) (int64, error) +} diff --git a/platform/dbops/binaries/weaviate-src/entities/modulecapabilities/classification.go b/platform/dbops/binaries/weaviate-src/entities/modulecapabilities/classification.go new file mode 100644 index 0000000000000000000000000000000000000000..5b11bf1eb9a0473bf944046a7833680bea1d3b16 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/modulecapabilities/classification.go @@ -0,0 +1,70 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package modulecapabilities + +import ( + "context" + + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/search" +) + +type VectorClassSearchParams struct { + Filters *filters.LocalFilter + Pagination *filters.Pagination + ClassName string + Properties []string +} + +type VectorClassSearchRepo interface { + VectorClassSearch(ctx context.Context, params VectorClassSearchParams) ([]search.Result, error) +} + +type ClassifyParams struct { + GetClass func(string) *models.Class + Params models.Classification + Filters Filters + UnclassifiedItems []search.Result + VectorRepo VectorClassSearchRepo +} + +type Filters interface { + Source() *filters.LocalFilter + Target() *filters.LocalFilter + TrainingSet() *filters.LocalFilter +} + +type Writer interface { + Start() + Store(item search.Result) error + Stop() WriterResults +} + +type WriterResults interface { + SuccessCount() int64 + ErrorCount() int64 + Err() error +} + +type ClassifyItemFn func(item search.Result, itemIndex int, + params models.Classification, filters Filters, writer Writer) error + +type Classifier interface { + Name() string + ClassifyFn(params ClassifyParams) (ClassifyItemFn, error) + ParseClassifierSettings(params *models.Classification) error +} + +type ClassificationProvider interface { + Classifiers() []Classifier +} diff --git a/platform/dbops/binaries/weaviate-src/entities/modulecapabilities/client.go b/platform/dbops/binaries/weaviate-src/entities/modulecapabilities/client.go new file mode 100644 index 0000000000000000000000000000000000000000..3b2d5f0db76d2ce7c24328ddbbffab1c0a09949a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/modulecapabilities/client.go @@ -0,0 +1,29 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ 
|_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package modulecapabilities + +import "context" + +type VectorizerClient interface { + MultiVectorForWord(ctx context.Context, + words []string) ([][]float32, error) + VectorOnlyForCorpi(ctx context.Context, corpi []string, + overrides map[string]string) ([]float32, error) +} + +type MetaProvider interface { + MetaInfo() (map[string]interface{}, error) +} + +type Client interface { + Vectorizers() map[string]VectorizerClient +} diff --git a/platform/dbops/binaries/weaviate-src/entities/modulecapabilities/config.go b/platform/dbops/binaries/weaviate-src/entities/modulecapabilities/config.go new file mode 100644 index 0000000000000000000000000000000000000000..49907a0399d4be2562852aa42f2f54289c5f54cb --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/modulecapabilities/config.go @@ -0,0 +1,48 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package modulecapabilities + +import ( + "context" + + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/moduletools" + "github.com/weaviate/weaviate/entities/schema" +) + +// ClassConfigurator is an optional capability interface which a module MAY +// implement. If it is implemented, all methods will be called when the user +// adds or updates a class which has the module set as the vectorizer +type ClassConfigurator interface { + // ClassDefaults provides the defaults for a per-class module config. 
The + // module provider will merge the props into the user-specified config with + // the user-provided values taking precedence + ClassConfigDefaults() map[string]interface{} + + // PropertyConfigDefaults provides the defaults for a per-property module + // config. The module provider will merge the props into the user-specified + // config with the user-provided values taking precedence. The property's + // dataType MAY be taken into consideration when deciding defaults. + // dataType is not guaranteed to be non-nil, it might be nil in the case a + // user specified an invalid dataType, as some validation only occurs after + // defaults are set. + PropertyConfigDefaults(dataType *schema.DataType) map[string]interface{} + + // ValidateClass MAY validate anything about the class, except the config of + // another module. The specified ClassConfig can be used to easily retrieve + // the config specific for the module. For example, a module could iterate + // over class.Properties and call classConfig.Property(prop.Name) to validate + // the per-property config. A module MUST NOT extract another module's config + // from class.ModuleConfig["other-modules-name"]. + ValidateClass(ctx context.Context, class *models.Class, + classConfig moduletools.ClassConfig) error +} diff --git a/platform/dbops/binaries/weaviate-src/entities/modulecapabilities/generative.go b/platform/dbops/binaries/weaviate-src/entities/modulecapabilities/generative.go new file mode 100644 index 0000000000000000000000000000000000000000..dbd975b02b0ad7fd8553ecd4e1632da89c7726d8 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/modulecapabilities/generative.go @@ -0,0 +1,72 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package modulecapabilities + +import ( + "context" + + "github.com/tailor-inc/graphql" + "github.com/tailor-inc/graphql/language/ast" + "github.com/weaviate/weaviate/entities/moduletools" +) + +// GraphQLFieldFn generates graphql input fields +type GraphQLInputFieldFn = func(classname string) *graphql.InputObjectFieldConfig + +// ExtractRequestParamsFn extracts specific generative API parameters from graphql queries +type ExtractRequestParamsFn = func(field *ast.ObjectField) interface{} + +// GenerateDebugInformation exposes debug information +type GenerateDebugInformation struct { + Prompt string +} + +// GenerateResponse defines generative response. Params files hold module specific +// response parameters +type GenerateResponse struct { + Result *string + Params map[string]interface{} + Debug *GenerateDebugInformation +} + +// GenerateProperties defines the properties to be supplied as part of the generative request. +// They must be differentiated at this point due to the different ways third-parties handle them. +type GenerateProperties struct { + Text map[string]string + Blob map[string]*string +} + +// GenerativeClient defines generative client +type GenerativeClient interface { + GenerateSingleResult(ctx context.Context, + properties *GenerateProperties, prompt string, requestParams interface{}, debug bool, cfg moduletools.ClassConfig, + ) (*GenerateResponse, error) + GenerateAllResults(ctx context.Context, + properties []*GenerateProperties, task string, requestParams interface{}, debug bool, cfg moduletools.ClassConfig, + ) (*GenerateResponse, error) +} + +// GenerativeProperty defines all needed additional request / response parameters +// only client setting is manadatory as we can have generative modules +// that don't expose any additional request / response params. 
+type GenerativeProperty struct { + Client GenerativeClient + RequestParamsFunction GraphQLInputFieldFn + ResponseParamsFunction GraphQLFieldFn + ExtractRequestParamsFunction ExtractRequestParamsFn +} + +// AdditionalGenerativeProperties groups whole interface methods needed +// for adding the capability of additional generative properties +type AdditionalGenerativeProperties interface { + AdditionalGenerativeProperties() map[string]GenerativeProperty +} diff --git a/platform/dbops/binaries/weaviate-src/entities/modulecapabilities/graphql.go b/platform/dbops/binaries/weaviate-src/entities/modulecapabilities/graphql.go new file mode 100644 index 0000000000000000000000000000000000000000..bf5e8fde3f7134589e8f55f220e75130ed1f7ac1 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/modulecapabilities/graphql.go @@ -0,0 +1,56 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package modulecapabilities + +import ( + "github.com/tailor-inc/graphql" + "github.com/weaviate/weaviate/entities/dto" +) + +// GetArgumentsFn generates get graphql config for a given classname +type GetArgumentsFn = func(classname string) *graphql.ArgumentConfig + +// AggregateArgumentsFn generates aggregate graphql config for a given classname +type AggregateArgumentsFn = func(classname string) *graphql.ArgumentConfig + +// ExploreArgumentsFn generates explore graphql config +type ExploreArgumentsFn = func() *graphql.ArgumentConfig + +// ExtractFn extracts graphql params to given struct implementation +type ExtractFn = func(param map[string]interface{}) (interface{}, *dto.TargetCombination, error) + +// NearParam defines params with certainty information +type NearParam interface { + GetCertainty() float64 + GetDistance() float64 + GetTargetVectors() []string + SimilarityMetricProvided() bool +} + +// ValidateFn validates a given module param +type ValidateFn = func(param interface{}) error + +// GraphQLArgument defines all the needed settings / methods +// to add a module specific graphql argument +type GraphQLArgument struct { + GetArgumentsFunction GetArgumentsFn + AggregateArgumentsFunction AggregateArgumentsFn + ExploreArgumentsFunction ExploreArgumentsFn + ExtractFunction ExtractFn + ValidateFunction ValidateFn +} + +// GraphQLArguments defines the capabilities of modules to add their +// arguments to graphql API +type GraphQLArguments interface { + Arguments() map[string]GraphQLArgument +} diff --git a/platform/dbops/binaries/weaviate-src/entities/modulecapabilities/mock_backup_backend.go b/platform/dbops/binaries/weaviate-src/entities/modulecapabilities/mock_backup_backend.go new file mode 100644 index 0000000000000000000000000000000000000000..446a1c8b50f090ca5e39a1067e5a1a67e4916598 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/modulecapabilities/mock_backup_backend.go @@ -0,0 +1,627 @@ +// _ 
_ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by mockery v2.53.2. DO NOT EDIT. + +package modulecapabilities + +import ( + context "context" + + backup "github.com/weaviate/weaviate/entities/backup" + + io "io" + + mock "github.com/stretchr/testify/mock" +) + +// MockBackupBackend is an autogenerated mock type for the BackupBackend type +type MockBackupBackend struct { + mock.Mock +} + +type MockBackupBackend_Expecter struct { + mock *mock.Mock +} + +func (_m *MockBackupBackend) EXPECT() *MockBackupBackend_Expecter { + return &MockBackupBackend_Expecter{mock: &_m.Mock} +} + +// AllBackups provides a mock function with given fields: ctx +func (_m *MockBackupBackend) AllBackups(ctx context.Context) ([]*backup.DistributedBackupDescriptor, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for AllBackups") + } + + var r0 []*backup.DistributedBackupDescriptor + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) ([]*backup.DistributedBackupDescriptor, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) []*backup.DistributedBackupDescriptor); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*backup.DistributedBackupDescriptor) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockBackupBackend_AllBackups_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AllBackups' +type MockBackupBackend_AllBackups_Call struct { + *mock.Call +} + +// AllBackups is a helper method to define mock.On call +// - ctx context.Context +func (_e *MockBackupBackend_Expecter) AllBackups(ctx 
interface{}) *MockBackupBackend_AllBackups_Call { + return &MockBackupBackend_AllBackups_Call{Call: _e.mock.On("AllBackups", ctx)} +} + +func (_c *MockBackupBackend_AllBackups_Call) Run(run func(ctx context.Context)) *MockBackupBackend_AllBackups_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *MockBackupBackend_AllBackups_Call) Return(_a0 []*backup.DistributedBackupDescriptor, _a1 error) *MockBackupBackend_AllBackups_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockBackupBackend_AllBackups_Call) RunAndReturn(run func(context.Context) ([]*backup.DistributedBackupDescriptor, error)) *MockBackupBackend_AllBackups_Call { + _c.Call.Return(run) + return _c +} + +// GetObject provides a mock function with given fields: ctx, backupID, key, overrideBucket, overridePath +func (_m *MockBackupBackend) GetObject(ctx context.Context, backupID string, key string, overrideBucket string, overridePath string) ([]byte, error) { + ret := _m.Called(ctx, backupID, key, overrideBucket, overridePath) + + if len(ret) == 0 { + panic("no return value specified for GetObject") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, string, string) ([]byte, error)); ok { + return rf(ctx, backupID, key, overrideBucket, overridePath) + } + if rf, ok := ret.Get(0).(func(context.Context, string, string, string, string) []byte); ok { + r0 = rf(ctx, backupID, key, overrideBucket, overridePath) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, string, string, string) error); ok { + r1 = rf(ctx, backupID, key, overrideBucket, overridePath) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockBackupBackend_GetObject_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetObject' +type MockBackupBackend_GetObject_Call struct { + *mock.Call +} + 
+// GetObject is a helper method to define mock.On call +// - ctx context.Context +// - backupID string +// - key string +// - overrideBucket string +// - overridePath string +func (_e *MockBackupBackend_Expecter) GetObject(ctx interface{}, backupID interface{}, key interface{}, overrideBucket interface{}, overridePath interface{}) *MockBackupBackend_GetObject_Call { + return &MockBackupBackend_GetObject_Call{Call: _e.mock.On("GetObject", ctx, backupID, key, overrideBucket, overridePath)} +} + +func (_c *MockBackupBackend_GetObject_Call) Run(run func(ctx context.Context, backupID string, key string, overrideBucket string, overridePath string)) *MockBackupBackend_GetObject_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].(string), args[3].(string), args[4].(string)) + }) + return _c +} + +func (_c *MockBackupBackend_GetObject_Call) Return(_a0 []byte, _a1 error) *MockBackupBackend_GetObject_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockBackupBackend_GetObject_Call) RunAndReturn(run func(context.Context, string, string, string, string) ([]byte, error)) *MockBackupBackend_GetObject_Call { + _c.Call.Return(run) + return _c +} + +// HomeDir provides a mock function with given fields: backupID, overrideBucket, overridePath +func (_m *MockBackupBackend) HomeDir(backupID string, overrideBucket string, overridePath string) string { + ret := _m.Called(backupID, overrideBucket, overridePath) + + if len(ret) == 0 { + panic("no return value specified for HomeDir") + } + + var r0 string + if rf, ok := ret.Get(0).(func(string, string, string) string); ok { + r0 = rf(backupID, overrideBucket, overridePath) + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// MockBackupBackend_HomeDir_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'HomeDir' +type MockBackupBackend_HomeDir_Call struct { + *mock.Call +} + +// HomeDir is a helper method to define 
mock.On call +// - backupID string +// - overrideBucket string +// - overridePath string +func (_e *MockBackupBackend_Expecter) HomeDir(backupID interface{}, overrideBucket interface{}, overridePath interface{}) *MockBackupBackend_HomeDir_Call { + return &MockBackupBackend_HomeDir_Call{Call: _e.mock.On("HomeDir", backupID, overrideBucket, overridePath)} +} + +func (_c *MockBackupBackend_HomeDir_Call) Run(run func(backupID string, overrideBucket string, overridePath string)) *MockBackupBackend_HomeDir_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string), args[1].(string), args[2].(string)) + }) + return _c +} + +func (_c *MockBackupBackend_HomeDir_Call) Return(_a0 string) *MockBackupBackend_HomeDir_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockBackupBackend_HomeDir_Call) RunAndReturn(run func(string, string, string) string) *MockBackupBackend_HomeDir_Call { + _c.Call.Return(run) + return _c +} + +// Initialize provides a mock function with given fields: ctx, backupID, overrideBucket, overridePath +func (_m *MockBackupBackend) Initialize(ctx context.Context, backupID string, overrideBucket string, overridePath string) error { + ret := _m.Called(ctx, backupID, overrideBucket, overridePath) + + if len(ret) == 0 { + panic("no return value specified for Initialize") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, string) error); ok { + r0 = rf(ctx, backupID, overrideBucket, overridePath) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockBackupBackend_Initialize_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Initialize' +type MockBackupBackend_Initialize_Call struct { + *mock.Call +} + +// Initialize is a helper method to define mock.On call +// - ctx context.Context +// - backupID string +// - overrideBucket string +// - overridePath string +func (_e *MockBackupBackend_Expecter) Initialize(ctx interface{}, backupID interface{}, overrideBucket 
interface{}, overridePath interface{}) *MockBackupBackend_Initialize_Call { + return &MockBackupBackend_Initialize_Call{Call: _e.mock.On("Initialize", ctx, backupID, overrideBucket, overridePath)} +} + +func (_c *MockBackupBackend_Initialize_Call) Run(run func(ctx context.Context, backupID string, overrideBucket string, overridePath string)) *MockBackupBackend_Initialize_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].(string), args[3].(string)) + }) + return _c +} + +func (_c *MockBackupBackend_Initialize_Call) Return(_a0 error) *MockBackupBackend_Initialize_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockBackupBackend_Initialize_Call) RunAndReturn(run func(context.Context, string, string, string) error) *MockBackupBackend_Initialize_Call { + _c.Call.Return(run) + return _c +} + +// IsExternal provides a mock function with no fields +func (_m *MockBackupBackend) IsExternal() bool { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for IsExternal") + } + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// MockBackupBackend_IsExternal_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'IsExternal' +type MockBackupBackend_IsExternal_Call struct { + *mock.Call +} + +// IsExternal is a helper method to define mock.On call +func (_e *MockBackupBackend_Expecter) IsExternal() *MockBackupBackend_IsExternal_Call { + return &MockBackupBackend_IsExternal_Call{Call: _e.mock.On("IsExternal")} +} + +func (_c *MockBackupBackend_IsExternal_Call) Run(run func()) *MockBackupBackend_IsExternal_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockBackupBackend_IsExternal_Call) Return(_a0 bool) *MockBackupBackend_IsExternal_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockBackupBackend_IsExternal_Call) 
RunAndReturn(run func() bool) *MockBackupBackend_IsExternal_Call { + _c.Call.Return(run) + return _c +} + +// Name provides a mock function with no fields +func (_m *MockBackupBackend) Name() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Name") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// MockBackupBackend_Name_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Name' +type MockBackupBackend_Name_Call struct { + *mock.Call +} + +// Name is a helper method to define mock.On call +func (_e *MockBackupBackend_Expecter) Name() *MockBackupBackend_Name_Call { + return &MockBackupBackend_Name_Call{Call: _e.mock.On("Name")} +} + +func (_c *MockBackupBackend_Name_Call) Run(run func()) *MockBackupBackend_Name_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockBackupBackend_Name_Call) Return(_a0 string) *MockBackupBackend_Name_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockBackupBackend_Name_Call) RunAndReturn(run func() string) *MockBackupBackend_Name_Call { + _c.Call.Return(run) + return _c +} + +// PutObject provides a mock function with given fields: ctx, backupID, key, overrideBucket, overridePath, byes +func (_m *MockBackupBackend) PutObject(ctx context.Context, backupID string, key string, overrideBucket string, overridePath string, byes []byte) error { + ret := _m.Called(ctx, backupID, key, overrideBucket, overridePath, byes) + + if len(ret) == 0 { + panic("no return value specified for PutObject") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, string, string, []byte) error); ok { + r0 = rf(ctx, backupID, key, overrideBucket, overridePath, byes) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockBackupBackend_PutObject_Call is a *mock.Call that shadows Run/Return methods with 
type explicit version for method 'PutObject' +type MockBackupBackend_PutObject_Call struct { + *mock.Call +} + +// PutObject is a helper method to define mock.On call +// - ctx context.Context +// - backupID string +// - key string +// - overrideBucket string +// - overridePath string +// - byes []byte +func (_e *MockBackupBackend_Expecter) PutObject(ctx interface{}, backupID interface{}, key interface{}, overrideBucket interface{}, overridePath interface{}, byes interface{}) *MockBackupBackend_PutObject_Call { + return &MockBackupBackend_PutObject_Call{Call: _e.mock.On("PutObject", ctx, backupID, key, overrideBucket, overridePath, byes)} +} + +func (_c *MockBackupBackend_PutObject_Call) Run(run func(ctx context.Context, backupID string, key string, overrideBucket string, overridePath string, byes []byte)) *MockBackupBackend_PutObject_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].(string), args[3].(string), args[4].(string), args[5].([]byte)) + }) + return _c +} + +func (_c *MockBackupBackend_PutObject_Call) Return(_a0 error) *MockBackupBackend_PutObject_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockBackupBackend_PutObject_Call) RunAndReturn(run func(context.Context, string, string, string, string, []byte) error) *MockBackupBackend_PutObject_Call { + _c.Call.Return(run) + return _c +} + +// Read provides a mock function with given fields: ctx, backupID, key, overrideBucket, overridePath, w +func (_m *MockBackupBackend) Read(ctx context.Context, backupID string, key string, overrideBucket string, overridePath string, w io.WriteCloser) (int64, error) { + ret := _m.Called(ctx, backupID, key, overrideBucket, overridePath, w) + + if len(ret) == 0 { + panic("no return value specified for Read") + } + + var r0 int64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, string, string, io.WriteCloser) (int64, error)); ok { + return rf(ctx, backupID, key, overrideBucket, 
overridePath, w) + } + if rf, ok := ret.Get(0).(func(context.Context, string, string, string, string, io.WriteCloser) int64); ok { + r0 = rf(ctx, backupID, key, overrideBucket, overridePath, w) + } else { + r0 = ret.Get(0).(int64) + } + + if rf, ok := ret.Get(1).(func(context.Context, string, string, string, string, io.WriteCloser) error); ok { + r1 = rf(ctx, backupID, key, overrideBucket, overridePath, w) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockBackupBackend_Read_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Read' +type MockBackupBackend_Read_Call struct { + *mock.Call +} + +// Read is a helper method to define mock.On call +// - ctx context.Context +// - backupID string +// - key string +// - overrideBucket string +// - overridePath string +// - w io.WriteCloser +func (_e *MockBackupBackend_Expecter) Read(ctx interface{}, backupID interface{}, key interface{}, overrideBucket interface{}, overridePath interface{}, w interface{}) *MockBackupBackend_Read_Call { + return &MockBackupBackend_Read_Call{Call: _e.mock.On("Read", ctx, backupID, key, overrideBucket, overridePath, w)} +} + +func (_c *MockBackupBackend_Read_Call) Run(run func(ctx context.Context, backupID string, key string, overrideBucket string, overridePath string, w io.WriteCloser)) *MockBackupBackend_Read_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].(string), args[3].(string), args[4].(string), args[5].(io.WriteCloser)) + }) + return _c +} + +func (_c *MockBackupBackend_Read_Call) Return(_a0 int64, _a1 error) *MockBackupBackend_Read_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockBackupBackend_Read_Call) RunAndReturn(run func(context.Context, string, string, string, string, io.WriteCloser) (int64, error)) *MockBackupBackend_Read_Call { + _c.Call.Return(run) + return _c +} + +// SourceDataPath provides a mock function with no fields +func (_m 
*MockBackupBackend) SourceDataPath() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for SourceDataPath") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// MockBackupBackend_SourceDataPath_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SourceDataPath' +type MockBackupBackend_SourceDataPath_Call struct { + *mock.Call +} + +// SourceDataPath is a helper method to define mock.On call +func (_e *MockBackupBackend_Expecter) SourceDataPath() *MockBackupBackend_SourceDataPath_Call { + return &MockBackupBackend_SourceDataPath_Call{Call: _e.mock.On("SourceDataPath")} +} + +func (_c *MockBackupBackend_SourceDataPath_Call) Run(run func()) *MockBackupBackend_SourceDataPath_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockBackupBackend_SourceDataPath_Call) Return(_a0 string) *MockBackupBackend_SourceDataPath_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockBackupBackend_SourceDataPath_Call) RunAndReturn(run func() string) *MockBackupBackend_SourceDataPath_Call { + _c.Call.Return(run) + return _c +} + +// Write provides a mock function with given fields: ctx, backupID, key, overrideBucket, overridePath, r +func (_m *MockBackupBackend) Write(ctx context.Context, backupID string, key string, overrideBucket string, overridePath string, r io.ReadCloser) (int64, error) { + ret := _m.Called(ctx, backupID, key, overrideBucket, overridePath, r) + + if len(ret) == 0 { + panic("no return value specified for Write") + } + + var r0 int64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, string, string, io.ReadCloser) (int64, error)); ok { + return rf(ctx, backupID, key, overrideBucket, overridePath, r) + } + if rf, ok := ret.Get(0).(func(context.Context, string, string, string, string, io.ReadCloser) int64); ok { + r0 = 
rf(ctx, backupID, key, overrideBucket, overridePath, r) + } else { + r0 = ret.Get(0).(int64) + } + + if rf, ok := ret.Get(1).(func(context.Context, string, string, string, string, io.ReadCloser) error); ok { + r1 = rf(ctx, backupID, key, overrideBucket, overridePath, r) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockBackupBackend_Write_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Write' +type MockBackupBackend_Write_Call struct { + *mock.Call +} + +// Write is a helper method to define mock.On call +// - ctx context.Context +// - backupID string +// - key string +// - overrideBucket string +// - overridePath string +// - r io.ReadCloser +func (_e *MockBackupBackend_Expecter) Write(ctx interface{}, backupID interface{}, key interface{}, overrideBucket interface{}, overridePath interface{}, r interface{}) *MockBackupBackend_Write_Call { + return &MockBackupBackend_Write_Call{Call: _e.mock.On("Write", ctx, backupID, key, overrideBucket, overridePath, r)} +} + +func (_c *MockBackupBackend_Write_Call) Run(run func(ctx context.Context, backupID string, key string, overrideBucket string, overridePath string, r io.ReadCloser)) *MockBackupBackend_Write_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].(string), args[3].(string), args[4].(string), args[5].(io.ReadCloser)) + }) + return _c +} + +func (_c *MockBackupBackend_Write_Call) Return(_a0 int64, _a1 error) *MockBackupBackend_Write_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockBackupBackend_Write_Call) RunAndReturn(run func(context.Context, string, string, string, string, io.ReadCloser) (int64, error)) *MockBackupBackend_Write_Call { + _c.Call.Return(run) + return _c +} + +// WriteToFile provides a mock function with given fields: ctx, backupID, key, destPath, overrideBucket, overridePath +func (_m *MockBackupBackend) WriteToFile(ctx context.Context, backupID string, key string, 
destPath string, overrideBucket string, overridePath string) error { + ret := _m.Called(ctx, backupID, key, destPath, overrideBucket, overridePath) + + if len(ret) == 0 { + panic("no return value specified for WriteToFile") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, string, string, string) error); ok { + r0 = rf(ctx, backupID, key, destPath, overrideBucket, overridePath) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockBackupBackend_WriteToFile_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'WriteToFile' +type MockBackupBackend_WriteToFile_Call struct { + *mock.Call +} + +// WriteToFile is a helper method to define mock.On call +// - ctx context.Context +// - backupID string +// - key string +// - destPath string +// - overrideBucket string +// - overridePath string +func (_e *MockBackupBackend_Expecter) WriteToFile(ctx interface{}, backupID interface{}, key interface{}, destPath interface{}, overrideBucket interface{}, overridePath interface{}) *MockBackupBackend_WriteToFile_Call { + return &MockBackupBackend_WriteToFile_Call{Call: _e.mock.On("WriteToFile", ctx, backupID, key, destPath, overrideBucket, overridePath)} +} + +func (_c *MockBackupBackend_WriteToFile_Call) Run(run func(ctx context.Context, backupID string, key string, destPath string, overrideBucket string, overridePath string)) *MockBackupBackend_WriteToFile_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].(string), args[3].(string), args[4].(string), args[5].(string)) + }) + return _c +} + +func (_c *MockBackupBackend_WriteToFile_Call) Return(_a0 error) *MockBackupBackend_WriteToFile_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockBackupBackend_WriteToFile_Call) RunAndReturn(run func(context.Context, string, string, string, string, string) error) *MockBackupBackend_WriteToFile_Call { + _c.Call.Return(run) + return _c +} + +// 
NewMockBackupBackend creates a new instance of MockBackupBackend. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMockBackupBackend(t interface { + mock.TestingT + Cleanup(func()) +}) *MockBackupBackend { + mock := &MockBackupBackend{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/platform/dbops/binaries/weaviate-src/entities/modulecapabilities/module.go b/platform/dbops/binaries/weaviate-src/entities/modulecapabilities/module.go new file mode 100644 index 0000000000000000000000000000000000000000..1159c337d2ad84e4e025ef84360dce5fee7def8e --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/modulecapabilities/module.go @@ -0,0 +1,89 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package modulecapabilities + +import ( + "context" + "net/http" + + "github.com/sirupsen/logrus" + + "github.com/weaviate/weaviate/entities/dto" + "github.com/weaviate/weaviate/entities/moduletools" +) + +type ModuleType string + +const ( + Offload ModuleType = "Offload" + Backup ModuleType = "Backup" + Extension ModuleType = "Extension" + Img2Vec ModuleType = "Img2Vec" + Multi2Vec ModuleType = "Multi2Vec" + Multi2Multivec ModuleType = "Multi2Multivec" + Ref2Vec ModuleType = "Ref2Vec" + Text2ManyVec ModuleType = "Text2ManyVec" + Text2Multivec ModuleType = "Text2Multivec" + Text2TextGenerative ModuleType = "Text2TextGenerative" + Text2TextSummarize ModuleType = "Text2TextSummarize" + Text2TextReranker ModuleType = "Text2TextReranker" + Text2TextNER ModuleType = "Text2TextNER" + Text2TextQnA ModuleType = "Text2TextQnA" + Text2Vec ModuleType = "Text2Vec" + Usage ModuleType = "Usage" +) + +type Module interface { + Name() string + Init(ctx context.Context, params moduletools.ModuleInitParams) error + Type() ModuleType +} + +// ModuleWithClose is an optional capability interface for modules that need to be closed +type ModuleWithClose interface { + Module + Close() error +} + +// ModuleWithHTTPHandlers is an optional capability interface for modules that provide HTTP endpoints +type ModuleWithHTTPHandlers interface { + Module + RootHandler() http.Handler +} + +type ModuleExtension interface { + Module + InitExtension(modules []Module) error +} + +type ModuleDependency interface { + Module + InitDependency(modules []Module) error +} + +type Dependency[T dto.Embedding] interface { + ModuleName() string + Argument() string + GraphQLArgument() GraphQLArgument + VectorSearch() VectorForParams[T] +} + +type ModuleHasAltNames interface { + AltNames() []string +} + +// ModuleWithUsageService is an optional capability interface for modules that need a usage service +type ModuleWithUsageService interface { + Module + Logger() 
logrus.FieldLogger + SetUsageService(usageService any) // Using interface{} to avoid circular dependency +} diff --git a/platform/dbops/binaries/weaviate-src/entities/modulecapabilities/offload.go b/platform/dbops/binaries/weaviate-src/entities/modulecapabilities/offload.go new file mode 100644 index 0000000000000000000000000000000000000000..34db2b57b673263816da7723c4fddda7a0742285 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/modulecapabilities/offload.go @@ -0,0 +1,34 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package modulecapabilities + +import ( + "context" +) + +type OffloadCloud interface { + // VerifyBucket verify if the offload bucket is created + VerifyBucket(ctx context.Context) error + // Upload uploads the content of a shard assigned to specific node to + // cloud provider (S3, Azure Blob storage, Google cloud storage) + // {cloud_provider}://{configured_bucket}/{className}/{shardName}/{nodeName}/{shard content} + Upload(ctx context.Context, className, shardName, nodeName string) error + // Download downloads the content of a shard to desired node from + // cloud provider (S3, Azure Blob storage, Google cloud storage) + // {dataPath}/{className}/{shardName}/{content} + Download(ctx context.Context, className, shardName, nodeName string) error + // Delete deletes content of a shard assigned to specific node in + // cloud provider (S3, Azure Blob storage, Google cloud storage) + // Careful: if shardName and nodeName is passed empty it will delete all class frozen shards in cloud storage + // {cloud_provider}://{configured_bucket}/{className}/{shardName}/{nodeName}/{shard content} + Delete(ctx context.Context, className, shardName, nodeName string) error +} diff --git 
a/platform/dbops/binaries/weaviate-src/entities/modulecapabilities/searcher.go b/platform/dbops/binaries/weaviate-src/entities/modulecapabilities/searcher.go new file mode 100644 index 0000000000000000000000000000000000000000..7ceb47c20281d341de6757cd68a5c806ba9fb83a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/modulecapabilities/searcher.go @@ -0,0 +1,50 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package modulecapabilities + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/weaviate/weaviate/entities/dto" + "github.com/weaviate/weaviate/entities/moduletools" +) + +// FindVectorFn method for getting a vector of given object by its ID +// type FindVectorFn = func(ctx context.Context, className string, id strfmt.UUID, tenant, targetVector string) ([]float32, string, error) +type FindVectorFn[T dto.Embedding] interface { + FindVector(ctx context.Context, + className string, id strfmt.UUID, tenant, targetVector string) (T, string, error) +} + +// VectorForParams defines method for passing a raw searcher content to the module +// and exchanging it for a vector. Warning: Argument "cfg" +// (moduletools.ClassConfig) is not guaranteed to be non-nil. Implementations +// have to provide a nil check before using it. 
It is generally present on +// class-based action, but is not present on Cross-Class requests, such as +// Explore {} +type VectorForParams[T dto.Embedding] interface { + VectorForParams(ctx context.Context, params interface{}, + className string, findVectorFn FindVectorFn[T], cfg moduletools.ClassConfig) (T, error) +} + +// Searcher defines all methods for all searchers +// for getting a vector from a given raw searcher content +type Searcher[T dto.Embedding] interface { + VectorSearches() map[string]VectorForParams[T] +} + +// DependencySearcher defines all of the available searches loaded as a dependency +// for this time it's limited to modules providing []float32 embeddings +type DependencySearcher[T dto.Embedding] interface { + VectorSearches() map[string]map[string]VectorForParams[T] +} diff --git a/platform/dbops/binaries/weaviate-src/entities/modulecapabilities/texttransformer.go b/platform/dbops/binaries/weaviate-src/entities/modulecapabilities/texttransformer.go new file mode 100644 index 0000000000000000000000000000000000000000..a80dad903936a7d9136255b5880e53893196b355 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/modulecapabilities/texttransformer.go @@ -0,0 +1,23 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package modulecapabilities + +// TextTransform performs text transformation operation +type TextTransform interface { + Transform(in []string) ([]string, error) +} + +// TextTransformers defines all text transformers +// for given arguments +type TextTransformers interface { + TextTransformers() map[string]TextTransform +} diff --git a/platform/dbops/binaries/weaviate-src/entities/modulecapabilities/vectorizer.go b/platform/dbops/binaries/weaviate-src/entities/modulecapabilities/vectorizer.go new file mode 100644 index 0000000000000000000000000000000000000000..feeb04859a57616a127c01bcecf53c47faf65073 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/modulecapabilities/vectorizer.go @@ -0,0 +1,53 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package modulecapabilities + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/dto" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/moduletools" + "github.com/weaviate/weaviate/entities/search" +) + +type Vectorizer[T dto.Embedding] interface { + // VectorizeObject takes an object and returns a vector and - if applicable - any meta + // information as part of _additional properties + VectorizeObject(ctx context.Context, obj *models.Object, + cfg moduletools.ClassConfig) (T, models.AdditionalProperties, error) + // VectorizableProperties returns which properties the vectorizer looks at. + // If the vectorizer is capable of vectorizing all text properties, the first bool is true. 
+ // Any additional "media"-properties are explicitly mentioned in the []string return + VectorizableProperties(cfg moduletools.ClassConfig) (bool, []string, error) + VectorizeBatch(ctx context.Context, objs []*models.Object, skipObject []bool, cfg moduletools.ClassConfig) ([]T, []models.AdditionalProperties, map[int]error) +} + +type FindObjectFn = func(ctx context.Context, class string, id strfmt.UUID, + props search.SelectProperties, adds additional.Properties, tenant string) (*search.Result, error) + +// ReferenceVectorizer is implemented by ref2vec modules, which calculate a target +// object's vector based only on the vectors of its references. If the object has +// no references, the object will have a nil vector +type ReferenceVectorizer[T dto.Embedding] interface { + // VectorizeObject should mutate the object which is passed in as a pointer-type + // by extending it with the desired vector, which is calculated by the module + VectorizeObject(ctx context.Context, object *models.Object, + cfg moduletools.ClassConfig, findObjectFn FindObjectFn) (T, error) +} + +type InputVectorizer[T dto.Embedding] interface { + VectorizeInput(ctx context.Context, input string, + cfg moduletools.ClassConfig) (T, error) +} diff --git a/platform/dbops/binaries/weaviate-src/entities/moduletools/config.go b/platform/dbops/binaries/weaviate-src/entities/moduletools/config.go new file mode 100644 index 0000000000000000000000000000000000000000..64b9a1a07322dee3a7d66c4ad7ddbf011d8a35d8 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/moduletools/config.go @@ -0,0 +1,30 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package moduletools + +import ( + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/usecases/config" +) + +// ClassConfig is a helper type which is passed to the module to read it's +// per-class config. This is - among other places - used when vectorizing and +// when validation schema config +type ClassConfig interface { + TargetVector() string + Tenant() string + Class() map[string]interface{} + ClassByModuleName(moduleName string) map[string]interface{} + Property(propName string) map[string]interface{} + PropertiesDataTypes() map[string]schema.DataType + Config() *config.Config +} diff --git a/platform/dbops/binaries/weaviate-src/entities/moduletools/doc.go b/platform/dbops/binaries/weaviate-src/entities/moduletools/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..bde6d7e5b77b4d97740e4d1451ac6260a3b54aa6 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/moduletools/doc.go @@ -0,0 +1,14 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +// moduletools contains helpers that are passed to modules as part of their +// capability methods +package moduletools diff --git a/platform/dbops/binaries/weaviate-src/entities/moduletools/helper.go b/platform/dbops/binaries/weaviate-src/entities/moduletools/helper.go new file mode 100644 index 0000000000000000000000000000000000000000..373e2498546ac365fa1384ed567e3eabce1b2af7 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/moduletools/helper.go @@ -0,0 +1,41 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package moduletools + +import ( + "sort" +) + +func SortStringKeys(schemaMap map[string]interface{}) []string { + keys := make([]string, 0, len(schemaMap)) + for k := range schemaMap { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +func NormalizeWeights(weights []float32) []float32 { + if len(weights) > 0 { + var denominator float32 + for i := range weights { + denominator += weights[i] + } + normalizer := 1 / denominator + normalized := make([]float32, len(weights)) + for i := range weights { + normalized[i] = weights[i] * normalizer + } + return normalized + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/entities/moduletools/init_params.go b/platform/dbops/binaries/weaviate-src/entities/moduletools/init_params.go new file mode 100644 index 0000000000000000000000000000000000000000..ede1dcaadafd2f56bc7f9d418d511c2034c9cb01 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/moduletools/init_params.go @@ -0,0 +1,61 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// 
Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package moduletools + +import ( + "github.com/prometheus/client_golang/prometheus" + "github.com/sirupsen/logrus" + + "github.com/weaviate/weaviate/usecases/config" +) + +type ModuleInitParams interface { + GetStorageProvider() StorageProvider + GetAppState() interface{} + GetLogger() logrus.FieldLogger + GetConfig() *config.Config + GetMetricsRegisterer() prometheus.Registerer +} + +type InitParams struct { + storageProvider StorageProvider + appState interface{} + config *config.Config + logger logrus.FieldLogger + registerer prometheus.Registerer +} + +func NewInitParams(storageProvider StorageProvider, appState interface{}, + config *config.Config, logger logrus.FieldLogger, registerer prometheus.Registerer, +) ModuleInitParams { + return &InitParams{storageProvider, appState, config, logger, registerer} +} + +func (p *InitParams) GetStorageProvider() StorageProvider { + return p.storageProvider +} + +func (p *InitParams) GetAppState() interface{} { + return p.appState +} + +func (p *InitParams) GetLogger() logrus.FieldLogger { + return p.logger +} + +func (p *InitParams) GetConfig() *config.Config { + return p.config +} + +func (p *InitParams) GetMetricsRegisterer() prometheus.Registerer { + return p.registerer +} diff --git a/platform/dbops/binaries/weaviate-src/entities/moduletools/mock_module_init_params.go b/platform/dbops/binaries/weaviate-src/entities/moduletools/mock_module_init_params.go new file mode 100644 index 0000000000000000000000000000000000000000..eaaa82e09c2b51562562c5a2b94a089cd70bdd29 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/moduletools/mock_module_init_params.go @@ -0,0 +1,285 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. 
All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by mockery v2.53.2. DO NOT EDIT. + +package moduletools + +import ( + logrus "github.com/sirupsen/logrus" + config "github.com/weaviate/weaviate/usecases/config" + + mock "github.com/stretchr/testify/mock" + + prometheus "github.com/prometheus/client_golang/prometheus" +) + +// MockModuleInitParams is an autogenerated mock type for the ModuleInitParams type +type MockModuleInitParams struct { + mock.Mock +} + +type MockModuleInitParams_Expecter struct { + mock *mock.Mock +} + +func (_m *MockModuleInitParams) EXPECT() *MockModuleInitParams_Expecter { + return &MockModuleInitParams_Expecter{mock: &_m.Mock} +} + +// GetAppState provides a mock function with no fields +func (_m *MockModuleInitParams) GetAppState() interface{} { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetAppState") + } + + var r0 interface{} + if rf, ok := ret.Get(0).(func() interface{}); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(interface{}) + } + } + + return r0 +} + +// MockModuleInitParams_GetAppState_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetAppState' +type MockModuleInitParams_GetAppState_Call struct { + *mock.Call +} + +// GetAppState is a helper method to define mock.On call +func (_e *MockModuleInitParams_Expecter) GetAppState() *MockModuleInitParams_GetAppState_Call { + return &MockModuleInitParams_GetAppState_Call{Call: _e.mock.On("GetAppState")} +} + +func (_c *MockModuleInitParams_GetAppState_Call) Run(run func()) *MockModuleInitParams_GetAppState_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockModuleInitParams_GetAppState_Call) Return(_a0 interface{}) *MockModuleInitParams_GetAppState_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockModuleInitParams_GetAppState_Call) RunAndReturn(run func() interface{}) 
*MockModuleInitParams_GetAppState_Call { + _c.Call.Return(run) + return _c +} + +// GetConfig provides a mock function with no fields +func (_m *MockModuleInitParams) GetConfig() *config.Config { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetConfig") + } + + var r0 *config.Config + if rf, ok := ret.Get(0).(func() *config.Config); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*config.Config) + } + } + + return r0 +} + +// MockModuleInitParams_GetConfig_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetConfig' +type MockModuleInitParams_GetConfig_Call struct { + *mock.Call +} + +// GetConfig is a helper method to define mock.On call +func (_e *MockModuleInitParams_Expecter) GetConfig() *MockModuleInitParams_GetConfig_Call { + return &MockModuleInitParams_GetConfig_Call{Call: _e.mock.On("GetConfig")} +} + +func (_c *MockModuleInitParams_GetConfig_Call) Run(run func()) *MockModuleInitParams_GetConfig_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockModuleInitParams_GetConfig_Call) Return(_a0 *config.Config) *MockModuleInitParams_GetConfig_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockModuleInitParams_GetConfig_Call) RunAndReturn(run func() *config.Config) *MockModuleInitParams_GetConfig_Call { + _c.Call.Return(run) + return _c +} + +// GetLogger provides a mock function with no fields +func (_m *MockModuleInitParams) GetLogger() logrus.FieldLogger { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetLogger") + } + + var r0 logrus.FieldLogger + if rf, ok := ret.Get(0).(func() logrus.FieldLogger); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(logrus.FieldLogger) + } + } + + return r0 +} + +// MockModuleInitParams_GetLogger_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLogger' +type 
MockModuleInitParams_GetLogger_Call struct { + *mock.Call +} + +// GetLogger is a helper method to define mock.On call +func (_e *MockModuleInitParams_Expecter) GetLogger() *MockModuleInitParams_GetLogger_Call { + return &MockModuleInitParams_GetLogger_Call{Call: _e.mock.On("GetLogger")} +} + +func (_c *MockModuleInitParams_GetLogger_Call) Run(run func()) *MockModuleInitParams_GetLogger_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockModuleInitParams_GetLogger_Call) Return(_a0 logrus.FieldLogger) *MockModuleInitParams_GetLogger_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockModuleInitParams_GetLogger_Call) RunAndReturn(run func() logrus.FieldLogger) *MockModuleInitParams_GetLogger_Call { + _c.Call.Return(run) + return _c +} + +// GetMetricsRegisterer provides a mock function with no fields +func (_m *MockModuleInitParams) GetMetricsRegisterer() prometheus.Registerer { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetMetricsRegisterer") + } + + var r0 prometheus.Registerer + if rf, ok := ret.Get(0).(func() prometheus.Registerer); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(prometheus.Registerer) + } + } + + return r0 +} + +// MockModuleInitParams_GetMetricsRegisterer_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetMetricsRegisterer' +type MockModuleInitParams_GetMetricsRegisterer_Call struct { + *mock.Call +} + +// GetMetricsRegisterer is a helper method to define mock.On call +func (_e *MockModuleInitParams_Expecter) GetMetricsRegisterer() *MockModuleInitParams_GetMetricsRegisterer_Call { + return &MockModuleInitParams_GetMetricsRegisterer_Call{Call: _e.mock.On("GetMetricsRegisterer")} +} + +func (_c *MockModuleInitParams_GetMetricsRegisterer_Call) Run(run func()) *MockModuleInitParams_GetMetricsRegisterer_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c 
*MockModuleInitParams_GetMetricsRegisterer_Call) Return(_a0 prometheus.Registerer) *MockModuleInitParams_GetMetricsRegisterer_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockModuleInitParams_GetMetricsRegisterer_Call) RunAndReturn(run func() prometheus.Registerer) *MockModuleInitParams_GetMetricsRegisterer_Call { + _c.Call.Return(run) + return _c +} + +// GetStorageProvider provides a mock function with no fields +func (_m *MockModuleInitParams) GetStorageProvider() StorageProvider { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetStorageProvider") + } + + var r0 StorageProvider + if rf, ok := ret.Get(0).(func() StorageProvider); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(StorageProvider) + } + } + + return r0 +} + +// MockModuleInitParams_GetStorageProvider_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetStorageProvider' +type MockModuleInitParams_GetStorageProvider_Call struct { + *mock.Call +} + +// GetStorageProvider is a helper method to define mock.On call +func (_e *MockModuleInitParams_Expecter) GetStorageProvider() *MockModuleInitParams_GetStorageProvider_Call { + return &MockModuleInitParams_GetStorageProvider_Call{Call: _e.mock.On("GetStorageProvider")} +} + +func (_c *MockModuleInitParams_GetStorageProvider_Call) Run(run func()) *MockModuleInitParams_GetStorageProvider_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockModuleInitParams_GetStorageProvider_Call) Return(_a0 StorageProvider) *MockModuleInitParams_GetStorageProvider_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockModuleInitParams_GetStorageProvider_Call) RunAndReturn(run func() StorageProvider) *MockModuleInitParams_GetStorageProvider_Call { + _c.Call.Return(run) + return _c +} + +// NewMockModuleInitParams creates a new instance of MockModuleInitParams. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMockModuleInitParams(t interface { + mock.TestingT + Cleanup(func()) +}) *MockModuleInitParams { + mock := &MockModuleInitParams{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/platform/dbops/binaries/weaviate-src/entities/moduletools/storage.go b/platform/dbops/binaries/weaviate-src/entities/moduletools/storage.go new file mode 100644 index 0000000000000000000000000000000000000000..823088e40770e603af50e90ad66fedb79f6b3544 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/moduletools/storage.go @@ -0,0 +1,25 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package moduletools + +type StorageProvider interface { + Storage(name string) (Storage, error) + DataPath() string +} + +type ScanFn func(k, v []byte) (bool, error) + +type Storage interface { + Get(key []byte) ([]byte, error) + Scan(scan ScanFn) error + Put(key, value []byte) error +} diff --git a/platform/dbops/binaries/weaviate-src/entities/multi/get.go b/platform/dbops/binaries/weaviate-src/entities/multi/get.go new file mode 100644 index 0000000000000000000000000000000000000000..0c9b48f1b2d9645722e8cd10b74d160cd041accb --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/multi/get.go @@ -0,0 +1,18 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package multi + +type Identifier struct { + ID string + ClassName string + OriginalPosition int +} diff --git a/platform/dbops/binaries/weaviate-src/entities/replication/config.go b/platform/dbops/binaries/weaviate-src/entities/replication/config.go new file mode 100644 index 0000000000000000000000000000000000000000..94d05547d5408e9d1c30d374c1629f3b6cfbab73 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/replication/config.go @@ -0,0 +1,26 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package replication + +import "github.com/weaviate/weaviate/usecases/config/runtime" + +// GlobalConfig represents system-wide config that may restrict settings of an +// individual class +type GlobalConfig struct { + AsyncReplicationDisabled *runtime.DynamicValue[bool] `json:"async_replication_disabled" yaml:"async_replication_disabled"` + // MinimumFactor can enforce replication. For example, with MinimumFactor set + // to 2, users can no longer create classes with a factor of 1, therefore + // forcing them to have replicated classes. 
+ MinimumFactor int `json:"minimum_factor" yaml:"minimum_factor"` + + DeletionStrategy string `json:"deletion_strategy" yaml:"deletion_strategy"` +} diff --git a/platform/dbops/binaries/weaviate-src/entities/schema/accessors.go b/platform/dbops/binaries/weaviate-src/entities/schema/accessors.go new file mode 100644 index 0000000000000000000000000000000000000000..12f0a4b20648b7264ec7ffca00f1babebb42f138 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/schema/accessors.go @@ -0,0 +1,78 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package schema + +import ( + "github.com/weaviate/weaviate/entities/models" +) + +func (s *Schema) GetClass(className string) *models.Class { + class, err := GetClassByName(s.Objects, className) + if err != nil { + return nil + } + + return class +} + +// FindClassByName will find either a Thing or Class by name. 
+func (s *Schema) FindClassByName(className ClassName) *models.Class { + semSchemaClass, err := GetClassByName(s.Objects, string(className)) + if err == nil { + return semSchemaClass + } + + return nil +} + +// func (s *Schema) GetKindOfClass(className ClassName) (kind.Kind, bool) { +// _, err := GetClassByName(s.Objects, string(className)) +// if err == nil { +// return kind.Object, true +// } + +// return "", false +// } + +func (s *Schema) GetProperty(className ClassName, propName PropertyName) (*models.Property, error) { + semSchemaClass, err := GetClassByName(s.Objects, string(className)) + if err != nil { + return nil, err + } + + semProp, err := GetPropertyByName(semSchemaClass, string(propName)) + if err != nil { + return nil, err + } + + return semProp, nil +} + +func (s *Schema) GetPropsOfType(propType string) []ClassAndProperty { + return extractAllOfPropType(s.Objects.Classes, propType) +} + +func extractAllOfPropType(classes []*models.Class, propType string) []ClassAndProperty { + var result []ClassAndProperty + for _, class := range classes { + for _, prop := range class.Properties { + if prop.DataType[0] == propType { + result = append(result, ClassAndProperty{ + ClassName: ClassName(class.Class), + PropertyName: PropertyName(prop.Name), + }) + } + } + } + + return result +} diff --git a/platform/dbops/binaries/weaviate-src/entities/schema/accessors_test.go b/platform/dbops/binaries/weaviate-src/entities/schema/accessors_test.go new file mode 100644 index 0000000000000000000000000000000000000000..849ddca54d6a363e78981dde30374f0ef7fd300f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/schema/accessors_test.go @@ -0,0 +1,110 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package schema + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/weaviate/weaviate/entities/models" +) + +func Test_Accessors(t *testing.T) { + car := &models.Class{ + Class: "Car", + Properties: []*models.Property{ + {Name: "modelName", DataType: DataTypeText.PropString(), Tokenization: models.PropertyTokenizationWhitespace}, + {Name: "manufacturerName", DataType: DataTypeText.PropString(), Tokenization: models.PropertyTokenizationWhitespace}, + {Name: "horsepower", DataType: []string{"int"}}, + }, + } + + train := &models.Class{ + Class: "Train", + Properties: []*models.Property{ + {Name: "capacity", DataType: []string{"int"}}, + {Name: "trainCompany", DataType: DataTypeText.PropString(), Tokenization: models.PropertyTokenizationWhitespace}, + }, + } + + action := &models.Class{ + Class: "SomeAction", + Properties: []*models.Property{}, + } + + sch := Empty() + sch.Objects.Classes = []*models.Class{car, train, action} + + t.Run("GetClass by kind and name", func(t *testing.T) { + class := sch.GetClass("Car") + assert.Equal(t, car, class) + + class = sch.GetClass("Invalid") + assert.Equal(t, (*models.Class)(nil), class) + }) + + t.Run("FindClass by name (without providing the kind)", func(t *testing.T) { + class := sch.FindClassByName("Car") + assert.Equal(t, car, class) + + class = sch.FindClassByName("SomeAction") + assert.Equal(t, action, class) + + class = sch.FindClassByName("Invalid") + assert.Equal(t, (*models.Class)(nil), class) + }) + + t.Run("GetPropsOfType", func(t *testing.T) { + props := sch.GetPropsOfType(DataTypeText.String()) + + expectedProps := []ClassAndProperty{ + { + ClassName: "Car", + PropertyName: "modelName", + }, + { + ClassName: "Car", + PropertyName: "manufacturerName", + }, + { + ClassName: "Train", + PropertyName: "trainCompany", + }, + } + + assert.ElementsMatch(t, expectedProps, props) + }) + + t.Run("GetProperty by kind, classname, name", func(t 
*testing.T) { + prop, err := sch.GetProperty("Car", "modelName") + assert.Nil(t, err) + + expectedProp := &models.Property{ + Name: "modelName", + DataType: DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + } + + assert.Equal(t, expectedProp, prop) + }) + + t.Run("GetProperty for invalid class", func(t *testing.T) { + _, err := sch.GetProperty("WrongClass", "modelName") + assert.Equal(t, errors.New("no such class with name 'WrongClass' found in the schema. Check your schema files for which classes are available"), err) + }) + + t.Run("GetProperty for invalid prop", func(t *testing.T) { + _, err := sch.GetProperty("Car", "wrongProperty") + assert.Equal(t, errors.New("no such prop with name 'wrongProperty' found in class 'Car' in the schema. Check your schema files for which properties in this class are available"), err) + }) +} diff --git a/platform/dbops/binaries/weaviate-src/entities/schema/backward_compat.go b/platform/dbops/binaries/weaviate-src/entities/schema/backward_compat.go new file mode 100644 index 0000000000000000000000000000000000000000..11ca7075a7bea2cbd988c940aa543deb6f9bb356 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/schema/backward_compat.go @@ -0,0 +1,206 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package schema + +import ( + errors_ "errors" + "fmt" + "strings" + + "github.com/weaviate/weaviate/entities/models" +) + +type PropertyInterface interface { + GetName() string + GetNestedProperties() []*models.NestedProperty +} + +// GetClassByName returns the class by its name +func GetClassByName(s *models.Schema, className string) (*models.Class, error) { + if s == nil { + return nil, fmt.Errorf(ErrorNoSuchClass, className) + } + + for _, class := range s.Classes { + // Check if the name of the class is the given name, that's the class we need + if class.Class == className { + return class, nil + } + } + return nil, fmt.Errorf(ErrorNoSuchClass, className) +} + +// GetPropertyByName returns the class by its name +func GetPropertyByName(c *models.Class, propName string) (*models.Property, error) { + for _, prop := range c.Properties { + // Check if the name of the property is the given name, that's the property we need + if prop.Name == strings.Split(propName, ".")[0] { + return prop, nil + } + } + return nil, fmt.Errorf(ErrorNoSuchProperty, propName, c.Class) +} + +// GetPropertyDataType checks whether the given string is a valid data type +func GetPropertyDataType(class *models.Class, propertyName string) (*DataType, error) { + // Get the class-property + prop, err := GetPropertyByName(class, propertyName) + if err != nil { + return nil, err + } + + // Init the return value + var returnDataType DataType + + // For each data type + for _, dataType := range prop.DataType { + if len(dataType) == 0 { + return nil, fmt.Errorf("invalid-dataType") + } + // Get the first letter to see if it is a capital + firstLetter := string(dataType[0]) + if strings.ToUpper(firstLetter) == firstLetter { + returnDataType = DataTypeCRef + } else { + // Get the value-data type (non-cref), return error if there is one, otherwise assign it to return data type + valueDataType, err := GetValueDataTypeFromString(dataType) + if err != nil { + return 
nil, err + } + returnDataType = *valueDataType + } + } + return &returnDataType, nil +} + +func GetNestedPropertyByName[P PropertyInterface](p P, propName string) (*models.NestedProperty, error) { + // For each nested-property + for _, prop := range p.GetNestedProperties() { + // Check if the name of the property is the given name, that's the property we need + if prop.Name == strings.Split(propName, ".")[0] { + return prop, nil + } + } + + return nil, fmt.Errorf(ErrorNoSuchProperty, propName, p.GetName()) +} + +func GetNestedPropertyDataType[P PropertyInterface](p P, propertyName string) (*DataType, error) { + // Get the class-property + prop, err := GetNestedPropertyByName(p, propertyName) + if err != nil { + return nil, err + } + + // Init the return value + var returnDataType DataType + + // For each data type + for _, dataType := range prop.DataType { + if len(dataType) == 0 { + return nil, fmt.Errorf("invalid-dataType") + } + // Get the first letter to see if it is a capital + firstLetter := string(dataType[0]) + if strings.ToUpper(firstLetter) == firstLetter { + returnDataType = DataTypeCRef + } else { + // Get the value-data type (non-cref), return error if there is one, otherwise assign it to return data type + valueDataType, err := GetValueDataTypeFromString(dataType) + if err != nil { + return nil, err + } + returnDataType = *valueDataType + } + } + return &returnDataType, nil +} + +// GetValueDataTypeFromString checks whether the given string is a valid data type +func GetValueDataTypeFromString(dt string) (*DataType, error) { + var returnDataType DataType + + if IsValidValueDataType(dt) { + returnDataType = DataType(dt) + } else { + return nil, errors_.New(ErrorNoSuchDatatype) + } + + return &returnDataType, nil +} + +// IsValidValueDataType checks whether the given string is a valid data type +func IsValidValueDataType(dt string) bool { + switch dt { + case + string(DataTypeString), + string(DataTypeText), + string(DataTypeInt), + 
string(DataTypeNumber), + string(DataTypeBoolean), + string(DataTypeDate), + string(DataTypeGeoCoordinates), + string(DataTypePhoneNumber), + string(DataTypeBlob), + string(DataTypeUUID), + string(DataTypeUUIDArray), + string(DataTypeStringArray), + string(DataTypeTextArray), + string(DataTypeIntArray), + string(DataTypeNumberArray), + string(DataTypeBooleanArray), + string(DataTypeDateArray), + string(DataTypeObject), + string(DataTypeObjectArray): + return true + } + return false +} + +func IsRefDataType(dt []string) bool { + firstLetter := string(dt[0][0]) + return strings.ToUpper(firstLetter) == firstLetter +} + +func IsBlobDataType(dt []string) bool { + for i := range dt { + if dt[i] == string(DataTypeBlob) { + return true + } + } + return false +} + +func IsArrayDataType(dt []string) bool { + for i := range dt { + switch DataType(dt[i]) { + case DataTypeStringArray, DataTypeTextArray, DataTypeIntArray, + DataTypeNumberArray, DataTypeBooleanArray, DataTypeDateArray, + DataTypeUUIDArray: + return true + default: + // move to the next loop + } + } + return false +} + +func GetPropertyNamesFromClass(class *models.Class, includeRef bool) []string { + var propertyNames []string + for _, prop := range class.Properties { + if !includeRef && IsRefDataType(prop.DataType) { + continue + } + propertyNames = append(propertyNames, prop.Name) + } + return propertyNames +} diff --git a/platform/dbops/binaries/weaviate-src/entities/schema/backward_compat_test.go b/platform/dbops/binaries/weaviate-src/entities/schema/backward_compat_test.go new file mode 100644 index 0000000000000000000000000000000000000000..38ac6ad1de0f6f7e5e5b3f9c011bac14341b1a71 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/schema/backward_compat_test.go @@ -0,0 +1,105 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 
Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package schema + +import ( + "testing" +) + +func TestIsArrayDataType(t *testing.T) { + type args struct { + dt []string + } + tests := []struct { + name string + args args + want bool + }{ + { + name: "is string array", + args: args{ + dt: DataTypeTextArray.PropString(), + }, + want: true, + }, + { + name: "is not string array", + args: args{ + dt: DataTypeText.PropString(), + }, + want: false, + }, + { + name: "is text array", + args: args{ + dt: []string{"text[]"}, + }, + want: true, + }, + { + name: "is not text array", + args: args{ + dt: []string{"text"}, + }, + want: false, + }, + { + name: "is number array", + args: args{ + dt: []string{"number[]"}, + }, + want: true, + }, + { + name: "is not number array", + args: args{ + dt: []string{"number"}, + }, + want: false, + }, + { + name: "is int array", + args: args{ + dt: []string{"int[]"}, + }, + want: true, + }, + { + name: "is not int array", + args: args{ + dt: []string{"int"}, + }, + want: false, + }, + { + name: "is not uuid array", + args: args{ + dt: []string{"uuid"}, + }, + want: false, + }, + { + name: "is uuid array", + args: args{ + dt: []string{"uuid[]"}, + }, + want: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := IsArrayDataType(tt.args.dt); got != tt.want { + t.Errorf("IsArrayDataType() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/entities/schema/collection.go b/platform/dbops/binaries/weaviate-src/entities/schema/collection.go new file mode 100644 index 0000000000000000000000000000000000000000..a1cf482af63e0804c7f34daf2779801c65e5e906 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/schema/collection.go @@ -0,0 +1,407 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// 
Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package schema + +import ( + "fmt" + + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema/config" + vIndex "github.com/weaviate/weaviate/entities/vectorindex" + "github.com/weaviate/weaviate/entities/vectorindex/flat" + "github.com/weaviate/weaviate/entities/vectorindex/hnsw" + sharding "github.com/weaviate/weaviate/usecases/sharding/config" +) + +type Collection struct { + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + + // inverted index config + InvertedIndexConfig InvertedIndexConfig `json:"invertedIndex,omitempty"` + + // TODO-RAFT START + // Can we also get rid of the interface{} in the value side ? + // Configuration specific to modules this Weaviate instance has installed + ModuleConfig map[string]interface{} `json:"moduleConfig,omitempty"` + // TODO-RAFT END + + // multi tenancy config + MultiTenancyConfig MultiTenancyConfig `json:"multiTenancyConfig,omitempty"` + + // The properties of the class. + Properties []Property `json:"properties"` + + // replication config + ReplicationConfig ReplicationConfig `json:"replicationConfig,omitempty"` + + // Manage how the index should be sharded and distributed in the cluster + ShardingConfig ShardingConfig `json:"shardingConfig,omitempty"` + + // VectorIndexType which vector index to use + VectorIndexType VectorIndexType `json:"vectorIndexType,omitempty"` + + // VectorIndexConfig underlying implementation depends on VectorIndexType + VectorIndexConfig config.VectorIndexConfig `json:"vectorIndexConfig,omitempty"` + + // Specify how the vectors for this class should be determined. The options are either 'none' - this means you have to import a vector + // with each object yourself - or the name of a module that provides vectorization capabilities, such as 'text2vec-contextionary'. 
If + // left empty, it will use the globally configured default which can itself either be 'none' or a specific module. + Vectorizer string `json:"vectorizer,omitempty"` +} + +type VectorIndexType int + +const ( + // VectorIndexTypeEmpty is used when we parse an unexpected index type + VectorIndexTypeEmpty VectorIndexType = iota + VectorIndexTypeHNSW + VectorIndexTypeFlat +) + +var ( + vectorIndexTypeToString = map[VectorIndexType]string{ + VectorIndexTypeHNSW: vIndex.VectorIndexTypeHNSW, + VectorIndexTypeFlat: vIndex.VectorIndexTypeFLAT, + VectorIndexTypeEmpty: "", + } + stringToVectorIndexType = map[string]VectorIndexType{ + vIndex.VectorIndexTypeHNSW: VectorIndexTypeHNSW, + vIndex.VectorIndexTypeFLAT: VectorIndexTypeFlat, + "": VectorIndexTypeEmpty, + } +) + +type MultiTenancyConfig struct { + Enabled bool `json:"enabled"` +} + +type ReplicationConfig struct { + // Factor represent replication factor + Factor int64 `json:"factor,omitempty"` +} + +type ShardingConfig struct { + VirtualPerPhysical int `json:"virtualPerPhysical"` + DesiredCount int `json:"desiredCount"` + ActualCount int `json:"actualCount"` + DesiredVirtualCount int `json:"desiredVirtualCount"` + ActualVirtualCount int `json:"actualVirtualCount"` + Key string `json:"key"` + Strategy string `json:"strategy"` + Function string `json:"function"` +} + +type Property struct { + // Name of the property as URI relative to the schema URL. + Name string `json:"name,omitempty"` + + // Description of the property. + Description string `json:"description,omitempty"` + + // Can be a reference to another type when it starts with a capital (for example Person), otherwise "string" or "int". + // TODO-RAFT: Can we make DataType a slice of interface where other type and native type implements it ? + DataType []string `json:"data_type"` + + // Optional. Should this property be indexed in the inverted index. Defaults to true. If you choose false, you will not be able to use this property in where filters. 
This property has no affect on vectorization decisions done by modules + IndexFilterable bool `json:"indexFilterable,omitempty"` + + // Optional. Should this property be indexed in the inverted index. Defaults to true. If you choose false, you will not be able to use this property in where filters, bm25 or hybrid search. This property has no affect on vectorization decisions done by modules (deprecated as of v1.19; use indexFilterable or/and indexSearchable instead) + IndexInverted bool `json:"indexInverted,omitempty"` + + // Optional. Should this property be indexed in the inverted index. Defaults to true. Applicable only to properties of data type text and text[]. If you choose false, you will not be able to use this property in bm25 or hybrid search. This property has no affect on vectorization decisions done by modules + IndexSearchable bool `json:"indexSearchable,omitempty"` + + // Optional. Should this property be indexed in the inverted index. Defaults to false. Provides better performance for range queries compared to filterable index in large datasets. Applicable only to properties of data type int, number, date." + IndexRangeFilters bool `json:"indexRangeFilters,omitempty"` + + // Configuration specific to modules this Weaviate instance has installed + ModuleConfig map[string]interface{} `json:"moduleConfig,omitempty"` + + // The properties of the nested object(s). Applies to object and object[] data types. + NestedProperties []NestedProperty `json:"nestedProperties,omitempty"` + + // Determines tokenization of the property as separate words or whole field. Optional. Applies to text and text[] data types. Allowed values are `word` (default; splits on any non-alphanumerical, lowercases), `lowercase` (splits on white spaces, lowercases), `whitespace` (splits on white spaces), `field` (trims). 
Not supported for remaining data types + // Enum: [word lowercase whitespace field] + Tokenization string `json:"tokenization,omitempty"` +} + +type NestedProperty struct { + // name + Name string `json:"name,omitempty"` + // description + Description string `json:"description,omitempty"` + // data type + DataType []string `json:"data_type"` + + // index filterable + IndexFilterable bool `json:"index_filterable,omitempty"` + + // index searchable + IndexSearchable bool `json:"index_searchable,omitempty"` + + // index range filters + IndexRangeFilters bool `json:"index_range_filters,omitempty"` + + // nested properties + NestedProperties []NestedProperty `json:"nested_properties,omitempty"` + + // tokenization + // Enum: [word lowercase whitespace field] + Tokenization string `json:"tokenization,omitempty"` +} + +// NestedPropertyFromModel returns a NestedProperty copied from m. +func NestedPropertyFromModel(m models.NestedProperty) NestedProperty { + n := NestedProperty{} + + n.DataType = m.DataType + n.Description = m.Description + if m.IndexFilterable != nil { + n.IndexFilterable = *m.IndexFilterable + } else { + n.IndexFilterable = true + } + if m.IndexSearchable != nil { + n.IndexSearchable = *m.IndexSearchable + } else { + n.IndexSearchable = true + } + if m.IndexRangeFilters != nil { + n.IndexRangeFilters = *m.IndexRangeFilters + } else { + n.IndexRangeFilters = false + } + n.Name = m.Name + n.Tokenization = m.Tokenization + if len(m.NestedProperties) > 0 { + n.NestedProperties = make([]NestedProperty, 0, len(m.NestedProperties)) + for _, npm := range m.NestedProperties { + np := NestedPropertyFromModel(*npm) + + n.NestedProperties = append(n.NestedProperties, np) + } + } + + return n +} + +// NestedPropertyToModel returns a models.NestedProperty from n. If the original models.NestedProperty from which n was created had nil pointers they will +// be replaced with default initialized structs in NestedProperty. 
+func NestedPropertyToModel(n NestedProperty) models.NestedProperty { + var m models.NestedProperty + + m.DataType = n.DataType + m.Description = n.Description + indexFilterable := n.IndexFilterable + m.IndexFilterable = &indexFilterable + indexSearchable := n.IndexSearchable + m.IndexSearchable = &indexSearchable + indexRangeFilters := n.IndexRangeFilters + m.IndexRangeFilters = &indexRangeFilters + m.Name = n.Name + m.Tokenization = n.Tokenization + if len(n.NestedProperties) > 0 { + m.NestedProperties = make([]*models.NestedProperty, 0, len(n.NestedProperties)) + for _, np := range n.NestedProperties { + npm := NestedPropertyToModel(np) + m.NestedProperties = append(m.NestedProperties, &npm) + } + } + + return m +} + +// PropertyFromModel returns a Property copied from m. +func PropertyFromModel(m models.Property) Property { + p := Property{} + + p.Name = m.Name + p.DataType = m.DataType + p.Description = m.Description + if m.IndexFilterable != nil { + p.IndexFilterable = *m.IndexFilterable + } else { + p.IndexFilterable = true + } + if m.IndexInverted != nil { + p.IndexInverted = *m.IndexInverted + } else { + p.IndexInverted = true + } + if m.IndexSearchable != nil { + p.IndexSearchable = *m.IndexSearchable + } else { + p.IndexSearchable = true + } + if m.IndexRangeFilters != nil { + p.IndexRangeFilters = *m.IndexRangeFilters + } else { + p.IndexRangeFilters = false + } + if v, ok := m.ModuleConfig.(map[string]interface{}); ok { + p.ModuleConfig = v + } + p.Tokenization = m.Tokenization + if len(m.NestedProperties) > 0 { + p.NestedProperties = make([]NestedProperty, 0, len(m.NestedProperties)) + for _, npm := range m.NestedProperties { + np := NestedPropertyFromModel(*npm) + + p.NestedProperties = append(p.NestedProperties, np) + } + } + + return p +} + +// PropertyToModel returns a models.Property from p. If the original models.Property from which n was created had nil pointers they will be replaced +// with default initialized structs in Property. 
+func PropertyToModel(p Property) models.Property { + var m models.Property + + m.DataType = p.DataType + m.Description = p.Description + indexFilterable := p.IndexFilterable + m.IndexFilterable = &indexFilterable + indexInverted := p.IndexInverted + m.IndexInverted = &indexInverted + indexSearchable := p.IndexSearchable + m.IndexSearchable = &indexSearchable + indexRangeFilters := p.IndexRangeFilters + m.IndexRangeFilters = &indexRangeFilters + m.ModuleConfig = p.ModuleConfig + m.Name = p.Name + m.Tokenization = p.Tokenization + if len(p.NestedProperties) > 0 { + m.NestedProperties = make([]*models.NestedProperty, 0, len(p.NestedProperties)) + for _, np := range p.NestedProperties { + npm := NestedPropertyToModel(np) + m.NestedProperties = append(m.NestedProperties, &npm) + } + } + + return m +} + +// ShardingConfigFromModel returns a ShardingConfig copied from m. +// If m isn't a sharding.Config underneath the interface{}, a default initialized ShardingConfig will be returned. +func ShardingConfigFromModel(m interface{}) ShardingConfig { + sc := ShardingConfig{} + + v, ok := m.(sharding.Config) + if !ok { + return sc + } + + sc.ActualCount = v.ActualCount + sc.ActualVirtualCount = v.ActualVirtualCount + sc.DesiredCount = v.DesiredCount + sc.DesiredVirtualCount = v.DesiredVirtualCount + sc.Function = v.Function + sc.Key = v.Key + sc.Strategy = v.Strategy + sc.VirtualPerPhysical = v.VirtualPerPhysical + + return sc +} + +// ShardingConfigToModel returns an interface{} containing a sharding.Config from s. +func ShardingConfigToModel(s ShardingConfig) interface{} { + var m sharding.Config + + m.ActualCount = s.ActualCount + m.ActualVirtualCount = s.ActualVirtualCount + m.DesiredCount = s.DesiredCount + m.DesiredVirtualCount = s.DesiredVirtualCount + m.Function = s.Function + m.Key = s.Key + m.Strategy = s.Strategy + m.VirtualPerPhysical = s.VirtualPerPhysical + + return m +} + +// CollectionFromClass returns a Collection copied from m. 
+func CollectionFromClass(m models.Class) (Collection, error) { + c := Collection{} + + c.Name = m.Class + c.Description = m.Description + if m.InvertedIndexConfig != nil { + c.InvertedIndexConfig = InvertedIndexConfigFromModel(*m.InvertedIndexConfig) + if v, ok := m.ModuleConfig.(map[string]interface{}); ok { + c.ModuleConfig = v + } + } + if m.MultiTenancyConfig != nil { + c.MultiTenancyConfig.Enabled = m.MultiTenancyConfig.Enabled + } + c.Properties = make([]Property, len(m.Properties)) + c.Vectorizer = m.Vectorizer + + for i, mp := range m.Properties { + p := PropertyFromModel(*mp) + + c.Properties[i] = p + } + if m.ReplicationConfig != nil { + c.ReplicationConfig.Factor = m.ReplicationConfig.Factor + } + if m.ShardingConfig != nil { + c.ShardingConfig = ShardingConfigFromModel(m.ShardingConfig) + } + + vIndex, ok := stringToVectorIndexType[m.VectorIndexType] + if !ok { + return c, fmt.Errorf("unknown vector index: %s", m.VectorIndexType) + } + + c.VectorIndexType = vIndex + switch vIndex { + case VectorIndexTypeHNSW: + c.VectorIndexConfig = m.VectorIndexConfig.(hnsw.UserConfig) + case VectorIndexTypeFlat: + c.VectorIndexConfig = m.VectorIndexConfig.(flat.UserConfig) + default: + } + + return c, nil +} + +// CollectionToClass returns a models.Class from c. If the original models.Class from which c was created had nil pointers they will be replaced +// with default initialized structs in models.Class. 
+func CollectionToClass(c Collection) models.Class { + var m models.Class + + m.Class = c.Name + m.Description = c.Description + iic := InvertedIndexConfigToModel(c.InvertedIndexConfig) + m.InvertedIndexConfig = &iic + m.ModuleConfig = c.ModuleConfig + var mtc models.MultiTenancyConfig + mtc.Enabled = c.MultiTenancyConfig.Enabled + m.MultiTenancyConfig = &mtc + m.Properties = make([]*models.Property, len(c.Properties)) + for i, p := range c.Properties { + mp := PropertyToModel(p) + m.Properties[i] = &mp + } + var rc models.ReplicationConfig + rc.Factor = c.ReplicationConfig.Factor + m.ReplicationConfig = &rc + m.ShardingConfig = ShardingConfigToModel(c.ShardingConfig) + m.VectorIndexType = vectorIndexTypeToString[c.VectorIndexType] + m.VectorIndexConfig = c.VectorIndexConfig + m.Vectorizer = c.Vectorizer + return m +} diff --git a/platform/dbops/binaries/weaviate-src/entities/schema/collection_model_test.go b/platform/dbops/binaries/weaviate-src/entities/schema/collection_model_test.go new file mode 100644 index 0000000000000000000000000000000000000000..0924f7e0be9b06ea6d08c169ea36043eebd87d62 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/schema/collection_model_test.go @@ -0,0 +1,410 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package schema + +import ( + "testing" + + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/vectorindex/hnsw" + sharding "github.com/weaviate/weaviate/usecases/sharding/config" +) + +var ( + vTrue = true + vFalse = false + emptyModuleConfig map[string]interface{} +) + +func TestCollectionFromAndToModel(t *testing.T) { + tests := []struct { + name string + inputModel models.Class + outputModel models.Class + }{ + { + name: "empty", + inputModel: models.Class{}, + outputModel: models.Class{ + InvertedIndexConfig: &models.InvertedIndexConfig{ + Bm25: &models.BM25Config{B: 0, K1: 0}, + Stopwords: &models.StopwordConfig{Additions: nil, Preset: "", Removals: nil}, + }, + MultiTenancyConfig: &models.MultiTenancyConfig{}, + ModuleConfig: emptyModuleConfig, + Properties: make([]*models.Property, 0), + ReplicationConfig: &models.ReplicationConfig{}, + ShardingConfig: sharding.Config{}, + VectorIndexType: "", + }, + }, + { + name: "unknown", + inputModel: models.Class{VectorIndexType: "unknown"}, + }, + + { + name: "all elements", + inputModel: models.Class{ + Class: "class", + Description: "description", + InvertedIndexConfig: &models.InvertedIndexConfig{}, + ModuleConfig: map[string]string{}, + MultiTenancyConfig: &models.MultiTenancyConfig{}, + Properties: []*models.Property{ + { + Name: "objectProperty", + DataType: DataTypeObject.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + IndexRangeFilters: &vFalse, + Tokenization: "", + NestedProperties: []*models.NestedProperty{ + { + Name: "text", + DataType: DataTypeText.PropString(), + }, + { + Name: "texts", + DataType: DataTypeTextArray.PropString(), + }, + { + Name: "number", + DataType: DataTypeNumber.PropString(), + }, + { + Name: "numbers", + DataType: DataTypeNumberArray.PropString(), + }, + { + Name: "int", + DataType: DataTypeInt.PropString(), + }, + { + Name: "ints", + 
DataType: DataTypeIntArray.PropString(), + }, + { + Name: "date", + DataType: DataTypeDate.PropString(), + }, + { + Name: "dates", + DataType: DataTypeDateArray.PropString(), + }, + { + Name: "bool", + DataType: DataTypeBoolean.PropString(), + }, + { + Name: "bools", + DataType: DataTypeBooleanArray.PropString(), + }, + { + Name: "uuid", + DataType: DataTypeUUID.PropString(), + }, + { + Name: "uuids", + DataType: DataTypeUUIDArray.PropString(), + }, + { + Name: "nested_int", + DataType: DataTypeInt.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + IndexRangeFilters: &vFalse, + Tokenization: "", + }, + { + Name: "nested_number", + DataType: DataTypeNumber.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + IndexRangeFilters: &vFalse, + Tokenization: "", + }, + { + Name: "nested_text", + DataType: DataTypeText.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vTrue, + IndexRangeFilters: &vFalse, + Tokenization: models.PropertyTokenizationWord, + }, + { + Name: "nested_objects", + DataType: DataTypeObject.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + IndexRangeFilters: &vFalse, + Tokenization: "", + NestedProperties: []*models.NestedProperty{ + { + Name: "nested_bool_lvl2", + DataType: DataTypeBoolean.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + IndexRangeFilters: &vFalse, + Tokenization: "", + }, + { + Name: "nested_numbers_lvl2", + DataType: DataTypeNumberArray.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + IndexRangeFilters: &vFalse, + Tokenization: "", + }, + }, + }, + { + Name: "nested_array_objects", + DataType: DataTypeObjectArray.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + IndexRangeFilters: &vFalse, + Tokenization: "", + NestedProperties: []*models.NestedProperty{ + { + Name: "nested_bool_lvl2", + DataType: DataTypeBoolean.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + 
IndexRangeFilters: &vFalse, + Tokenization: "", + }, + { + Name: "nested_numbers_lvl2", + DataType: DataTypeNumberArray.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + IndexRangeFilters: &vFalse, + Tokenization: "", + }, + }, + }, + }, + }, + }, + ReplicationConfig: &models.ReplicationConfig{}, + ShardingConfig: sharding.Config{}, + VectorIndexType: "hnsw", + VectorIndexConfig: hnsw.UserConfig{}, + }, + outputModel: models.Class{ + Class: "class", + Description: "description", + InvertedIndexConfig: &models.InvertedIndexConfig{ + Bm25: &models.BM25Config{B: 0, K1: 0}, + Stopwords: &models.StopwordConfig{Additions: nil, Preset: "", Removals: nil}, + }, + ModuleConfig: emptyModuleConfig, + MultiTenancyConfig: &models.MultiTenancyConfig{}, + Properties: []*models.Property{ + { + Name: "objectProperty", + DataType: DataTypeObject.PropString(), + IndexFilterable: &vTrue, + IndexInverted: &vTrue, + IndexSearchable: &vFalse, + IndexRangeFilters: &vFalse, + Tokenization: "", + ModuleConfig: emptyModuleConfig, + NestedProperties: []*models.NestedProperty{ + { + Name: "text", + IndexFilterable: &vTrue, + IndexSearchable: &vTrue, + IndexRangeFilters: &vFalse, + DataType: DataTypeText.PropString(), + }, + { + Name: "texts", + IndexFilterable: &vTrue, + IndexSearchable: &vTrue, + IndexRangeFilters: &vFalse, + DataType: DataTypeTextArray.PropString(), + }, + { + Name: "number", + IndexFilterable: &vTrue, + IndexSearchable: &vTrue, + IndexRangeFilters: &vFalse, + DataType: DataTypeNumber.PropString(), + }, + { + Name: "numbers", + IndexFilterable: &vTrue, + IndexSearchable: &vTrue, + IndexRangeFilters: &vFalse, + DataType: DataTypeNumberArray.PropString(), + }, + { + Name: "int", + IndexFilterable: &vTrue, + IndexSearchable: &vTrue, + IndexRangeFilters: &vFalse, + DataType: DataTypeInt.PropString(), + }, + { + Name: "ints", + IndexFilterable: &vTrue, + IndexSearchable: &vTrue, + IndexRangeFilters: &vFalse, + DataType: DataTypeIntArray.PropString(), + }, + { 
+ Name: "date", + IndexFilterable: &vTrue, + IndexSearchable: &vTrue, + IndexRangeFilters: &vFalse, + DataType: DataTypeDate.PropString(), + }, + { + Name: "dates", + IndexFilterable: &vTrue, + IndexSearchable: &vTrue, + IndexRangeFilters: &vFalse, + DataType: DataTypeDateArray.PropString(), + }, + { + Name: "bool", + IndexFilterable: &vTrue, + IndexSearchable: &vTrue, + IndexRangeFilters: &vFalse, + DataType: DataTypeBoolean.PropString(), + }, + { + Name: "bools", + IndexFilterable: &vTrue, + IndexSearchable: &vTrue, + IndexRangeFilters: &vFalse, + DataType: DataTypeBooleanArray.PropString(), + }, + { + Name: "uuid", + IndexFilterable: &vTrue, + IndexSearchable: &vTrue, + IndexRangeFilters: &vFalse, + DataType: DataTypeUUID.PropString(), + }, + { + Name: "uuids", + IndexFilterable: &vTrue, + IndexSearchable: &vTrue, + IndexRangeFilters: &vFalse, + DataType: DataTypeUUIDArray.PropString(), + }, + { + Name: "nested_int", + DataType: DataTypeInt.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + IndexRangeFilters: &vFalse, + Tokenization: "", + }, + { + Name: "nested_number", + DataType: DataTypeNumber.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + IndexRangeFilters: &vFalse, + Tokenization: "", + }, + { + Name: "nested_text", + DataType: DataTypeText.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vTrue, + IndexRangeFilters: &vFalse, + Tokenization: models.PropertyTokenizationWord, + }, + { + Name: "nested_objects", + DataType: DataTypeObject.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + IndexRangeFilters: &vFalse, + Tokenization: "", + NestedProperties: []*models.NestedProperty{ + { + Name: "nested_bool_lvl2", + DataType: DataTypeBoolean.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + IndexRangeFilters: &vFalse, + Tokenization: "", + }, + { + Name: "nested_numbers_lvl2", + DataType: DataTypeNumberArray.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: 
&vFalse, + IndexRangeFilters: &vFalse, + Tokenization: "", + }, + }, + }, + { + Name: "nested_array_objects", + DataType: DataTypeObjectArray.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + IndexRangeFilters: &vFalse, + Tokenization: "", + NestedProperties: []*models.NestedProperty{ + { + Name: "nested_bool_lvl2", + DataType: DataTypeBoolean.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + IndexRangeFilters: &vFalse, + Tokenization: "", + }, + { + Name: "nested_numbers_lvl2", + DataType: DataTypeNumberArray.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + IndexRangeFilters: &vFalse, + Tokenization: "", + }, + }, + }, + }, + }, + }, + ReplicationConfig: &models.ReplicationConfig{}, + ShardingConfig: sharding.Config{}, + VectorIndexType: "hnsw", + VectorIndexConfig: hnsw.UserConfig{}, + }, + }, + } + + for _, tc := range tests { + tc := tc + t.Run(tc.name, func(t *testing.T) { + c, err := CollectionFromClass(tc.inputModel) + if vi := tc.inputModel.VectorIndexType; vi != "" && vi != "hnsw" && vi != "flat" { + require.NotNil(t, err) + return + } + require.Nil(t, err) + m := CollectionToClass(c) + + require.Equal(t, tc.outputModel, m) + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/entities/schema/data_types.go b/platform/dbops/binaries/weaviate-src/entities/schema/data_types.go new file mode 100644 index 0000000000000000000000000000000000000000..bd22b579cdd10fa6f0d66b70222befa9d686bea4 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/schema/data_types.go @@ -0,0 +1,333 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package schema + +import ( + "errors" + "fmt" + "slices" + "strings" + "unicode" + + "github.com/weaviate/weaviate/entities/models" +) + +type DataType string + +const ( + // DataTypeCRef The data type is a cross-reference, it is starting with a capital letter + DataTypeCRef DataType = "cref" + // DataTypeText The data type is a value of type string + DataTypeText DataType = "text" + // DataTypeInt The data type is a value of type int + DataTypeInt DataType = "int" + // DataTypeNumber The data type is a value of type number/float + DataTypeNumber DataType = "number" + // DataTypeBoolean The data type is a value of type boolean + DataTypeBoolean DataType = "boolean" + // DataTypeDate The data type is a value of type date + DataTypeDate DataType = "date" + // DataTypeGeoCoordinates is used to represent geo coordinates, i.e. latitude + // and longitude pairs of locations on earth + DataTypeGeoCoordinates DataType = "geoCoordinates" + // DataTypePhoneNumber represents a parsed/to-be-parsed phone number + DataTypePhoneNumber DataType = "phoneNumber" + // DataTypeBlob represents a base64 encoded data + DataTypeBlob DataType = "blob" + // DataTypeTextArray The data type is a value of type string array + DataTypeTextArray DataType = "text[]" + // DataTypeIntArray The data type is a value of type int array + DataTypeIntArray DataType = "int[]" + // DataTypeNumberArray The data type is a value of type number/float array + DataTypeNumberArray DataType = "number[]" + // DataTypeBooleanArray The data type is a value of type boolean array + DataTypeBooleanArray DataType = "boolean[]" + // DataTypeDateArray The data type is a value of type date array + DataTypeDateArray DataType = "date[]" + // DataTypeUUID is a native UUID data type. 
It is stored in it's raw byte + // representation and therefore takes up less space than storing a UUID as a + // string + DataTypeUUID DataType = "uuid" + // DataTypeUUIDArray is the array version of DataTypeUUID + DataTypeUUIDArray DataType = "uuid[]" + + DataTypeObject DataType = "object" + DataTypeObjectArray DataType = "object[]" + + // deprecated as of v1.19, replaced by DataTypeText + relevant tokenization setting + // DataTypeString The data type is a value of type string + DataTypeString DataType = "string" + // deprecated as of v1.19, replaced by DataTypeTextArray + relevant tokenization setting + // DataTypeArrayString The data type is a value of type string array + DataTypeStringArray DataType = "string[]" +) + +func (dt DataType) String() string { + return string(dt) +} + +func (dt DataType) PropString() []string { + return []string{dt.String()} +} + +func (dt DataType) AsName() string { + return strings.ReplaceAll(dt.String(), "[]", "Array") +} + +func (dt DataType) IsPrimitive() bool { + return slices.Contains(PrimitiveDataTypes, dt) +} + +var PrimitiveDataTypes []DataType = []DataType{ + DataTypeText, DataTypeInt, DataTypeNumber, DataTypeBoolean, DataTypeDate, + DataTypeGeoCoordinates, DataTypePhoneNumber, DataTypeBlob, DataTypeTextArray, + DataTypeIntArray, DataTypeNumberArray, DataTypeBooleanArray, DataTypeDateArray, + DataTypeUUID, DataTypeUUIDArray, +} + +var NestedDataTypes []DataType = []DataType{ + DataTypeObject, DataTypeObjectArray, +} + +var DeprecatedPrimitiveDataTypes []DataType = []DataType{ + // deprecated as of v1.19 + DataTypeString, DataTypeStringArray, +} + +var allPrimitiveDataTypes []DataType = append(PrimitiveDataTypes, DeprecatedPrimitiveDataTypes...) 
+ +type PropertyKind int + +const ( + PropertyKindPrimitive PropertyKind = 1 + PropertyKindRef PropertyKind = 2 + PropertyKindNested PropertyKind = 3 +) + +type PropertyDataType interface { + Kind() PropertyKind + IsPrimitive() bool + AsPrimitive() DataType + IsReference() bool + Classes() []ClassName + ContainsClass(name ClassName) bool + IsNested() bool + AsNested() DataType +} + +type propertyDataType struct { + kind PropertyKind + primitiveType DataType + classes []ClassName + nestedType DataType +} + +// IsPropertyLength returns if a string is a filters for property length. They have the form len(*PROPNAME*) +func IsPropertyLength(propName string, offset int) (string, bool) { + isPropLengthFilter := len(propName) > 4+offset && propName[offset:offset+4] == "len(" && propName[len(propName)-1:] == ")" + + if isPropLengthFilter { + return propName[offset+4 : len(propName)-1], isPropLengthFilter + } + return "", false +} + +func IsArrayType(dt DataType) (DataType, bool) { + switch dt { + case DataTypeStringArray: + return DataTypeString, true + case DataTypeTextArray: + return DataTypeText, true + case DataTypeNumberArray: + return DataTypeNumber, true + case DataTypeIntArray: + return DataTypeInt, true + case DataTypeBooleanArray: + return DataTypeBoolean, true + case DataTypeDateArray: + return DataTypeDate, true + case DataTypeUUIDArray: + return DataTypeUUID, true + case DataTypeObjectArray: + return DataTypeObject, true + default: + return "", false + } +} + +func (p *propertyDataType) Kind() PropertyKind { + return p.kind +} + +func (p *propertyDataType) IsPrimitive() bool { + return p.kind == PropertyKindPrimitive +} + +func (p *propertyDataType) AsPrimitive() DataType { + if !p.IsPrimitive() { + panic("not primitive type") + } + + return p.primitiveType +} + +func (p *propertyDataType) IsReference() bool { + return p.kind == PropertyKindRef +} + +func (p *propertyDataType) Classes() []ClassName { + if !p.IsReference() { + panic("not MultipleRef type") + } + 
+ return p.classes +} + +func (p *propertyDataType) ContainsClass(needle ClassName) bool { + if !p.IsReference() { + panic("not MultipleRef type") + } + + for _, class := range p.classes { + if class == needle { + return true + } + } + + return false +} + +func (p *propertyDataType) IsNested() bool { + return p.kind == PropertyKindNested +} + +func (p *propertyDataType) AsNested() DataType { + if !p.IsNested() { + panic("not nested type") + } + return p.nestedType +} + +// Based on the schema, return a valid description of the defined datatype +// +// Note that this function will error if referenced classes do not exist. If +// you don't want such validation, use [Schema.FindPropertyDataTypeRelaxedRefs] +// instead and set relax to true +func (s *Schema) FindPropertyDataType(dataType []string) (PropertyDataType, error) { + return FindPropertyDataTypeWithRefs(s.GetClass, dataType, false, "") +} + +// FindPropertyDataTypeWithRefs is a no auth wrapper for FindPropertyDataTypeWithRefsAndAuth +func FindPropertyDataTypeWithRefs(authorizedGetClass func(string) *models.Class, dataType []string, relaxCrossRefValidation bool, beloningToClass ClassName) (PropertyDataType, error) { + wrapperFunc := func(name string) (*models.Class, error) { return authorizedGetClass(name), nil } + return FindPropertyDataTypeWithRefsAndAuth(wrapperFunc, dataType, relaxCrossRefValidation, beloningToClass) +} + +// FindPropertyDataTypeWithRefsAndAuth Based on the schema, return a valid description of the defined datatype +// If relaxCrossRefValidation is set, there is no check if the referenced class +// exists in the schema. This can be helpful in scenarios, such as restoring +// from a backup where we have no guarantee over the order of class creation. +// If belongingToClass is set and equal to referenced class, check whether class +// exists in the schema is skipped. This is done to allow creating class schema with +// properties referencing to itself. 
Previously such properties had to be created separately +// only after creation of class schema +func FindPropertyDataTypeWithRefsAndAuth(authorizedGetClass func(string) (*models.Class, error), dataType []string, relaxCrossRefValidation bool, beloningToClass ClassName) (PropertyDataType, error) { + if len(dataType) < 1 { + return nil, errors.New("dataType must have at least one element") + } + if len(dataType) == 1 { + for _, dt := range append(PrimitiveDataTypes, DeprecatedPrimitiveDataTypes...) { + if dataType[0] == dt.String() { + return &propertyDataType{ + kind: PropertyKindPrimitive, + primitiveType: dt, + }, nil + } + } + for _, dt := range NestedDataTypes { + if dataType[0] == dt.String() { + return &propertyDataType{ + kind: PropertyKindNested, + nestedType: dt, + }, nil + } + } + if len(dataType[0]) == 0 { + return nil, fmt.Errorf("dataType cannot be an empty string") + } + firstLetter := rune(dataType[0][0]) + if unicode.IsLower(firstLetter) { + return nil, fmt.Errorf("unknown primitive data type '%s'", dataType[0]) + } + } + + /* implies len(dataType) > 1, or first element is a class already */ + var classes []ClassName + + for _, someDataType := range dataType { + className, err := ValidateClassName(someDataType) + if err != nil { + return nil, err + } + + if beloningToClass != className && !relaxCrossRefValidation { + class, err := authorizedGetClass(className.String()) + if err != nil { + return nil, err + } + if class == nil { + return nil, ErrRefToNonexistentClass + } + } + + classes = append(classes, className) + } + + return &propertyDataType{ + kind: PropertyKindRef, + classes: classes, + }, nil +} + +func AsPrimitive(dataType []string) (DataType, bool) { + if len(dataType) == 1 { + for i := range allPrimitiveDataTypes { + if dataType[0] == allPrimitiveDataTypes[i].String() { + return allPrimitiveDataTypes[i], true + } + } + if len(dataType[0]) == 0 { + return "", true + } + } + return "", false +} + +func AsNested(dataType []string) (DataType, 
bool) { + if len(dataType) == 1 { + for _, dt := range NestedDataTypes { + if dataType[0] == dt.String() { + return dt, true + } + } + } + return "", false +} + +func IsNested(dataType DataType) bool { + for _, dt := range NestedDataTypes { + if dt == dataType { + return true + } + } + return false +} diff --git a/platform/dbops/binaries/weaviate-src/entities/schema/data_types_test.go b/platform/dbops/binaries/weaviate-src/entities/schema/data_types_test.go new file mode 100644 index 0000000000000000000000000000000000000000..8e14d2cdf8cd8cbc188b6b4b3f1425eea7fd6919 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/schema/data_types_test.go @@ -0,0 +1,417 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package schema + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/models" +) + +func TestDetectPrimitiveTypes(t *testing.T) { + s := Empty() + + for _, dt := range append(PrimitiveDataTypes, DeprecatedPrimitiveDataTypes...) 
{ + pdt, err := s.FindPropertyDataType(dt.PropString()) + + assert.Nil(t, err) + assert.True(t, pdt.IsPrimitive()) + assert.Equal(t, dt, pdt.AsPrimitive()) + + assert.False(t, pdt.IsNested()) + assert.False(t, pdt.IsReference()) + } +} + +func TestDetectNestedTypes(t *testing.T) { + s := Empty() + + for _, dt := range NestedDataTypes { + ndt, err := s.FindPropertyDataType(dt.PropString()) + + assert.Nil(t, err) + assert.True(t, ndt.IsNested()) + assert.Equal(t, dt, ndt.AsNested()) + + assert.False(t, ndt.IsPrimitive()) + assert.False(t, ndt.IsReference()) + } +} + +func TestExistingClassSingleRef(t *testing.T) { + className := "ExistingClass" + s := Empty() + s.Objects.Classes = []*models.Class{{Class: className}} + + pdt, err := s.FindPropertyDataType([]string{className}) + + assert.Nil(t, err) + assert.True(t, pdt.IsReference()) + assert.True(t, pdt.ContainsClass(ClassName(className))) +} + +func TestNonExistingClassSingleRef(t *testing.T) { + className := "NonExistingClass" + s := Empty() + + pdt, err := s.FindPropertyDataType([]string{className}) + + assert.EqualError(t, err, ErrRefToNonexistentClass.Error()) + assert.Nil(t, pdt) +} + +func TestNonExistingClassRelaxedCrossValidation(t *testing.T) { + className := "NonExistingClass" + s := Empty() + + pdt, err := FindPropertyDataTypeWithRefs(s.GetClass, []string{className}, true, ClassName("AnotherNonExistingClass")) + + assert.Nil(t, err) + assert.True(t, pdt.IsReference()) + assert.True(t, pdt.ContainsClass(ClassName(className))) +} + +func TestNonExistingClassPropertyBelongsTo(t *testing.T) { + className := "NonExistingClass" + s := Empty() + + pdt, err := FindPropertyDataTypeWithRefs(s.GetClass, []string{className}, false, ClassName(className)) + + assert.Nil(t, err) + assert.True(t, pdt.IsReference()) + assert.True(t, pdt.ContainsClass(ClassName(className))) +} + +func TestGetPropertyDataType(t *testing.T) { + class := &models.Class{Class: "TestClass"} + dataTypes := []string{ + "string", "text", "int", 
"number", "boolean", + "date", "geoCoordinates", "phoneNumber", "blob", "Ref", "invalid", + "string[]", "text[]", "int[]", "number[]", "boolean[]", "date[]", + "uuid", "uuid[]", + + "object", "object[]", + } + class.Properties = make([]*models.Property, len(dataTypes)) + for i, dtString := range dataTypes { + class.Properties[i] = &models.Property{ + Name: dtString + "Prop", + DataType: []string{dtString}, + } + } + + type test struct { + propName string + expectedDataType *DataType + expectedErr error + } + + tests := []test{ + { + propName: "stringProp", + expectedDataType: ptDataType(DataTypeString), + }, + { + propName: "textProp", + expectedDataType: ptDataType(DataTypeText), + }, + { + propName: "numberProp", + expectedDataType: ptDataType(DataTypeNumber), + }, + { + propName: "intProp", + expectedDataType: ptDataType(DataTypeInt), + }, + { + propName: "booleanProp", + expectedDataType: ptDataType(DataTypeBoolean), + }, + { + propName: "dateProp", + expectedDataType: ptDataType(DataTypeDate), + }, + { + propName: "phoneNumberProp", + expectedDataType: ptDataType(DataTypePhoneNumber), + }, + { + propName: "geoCoordinatesProp", + expectedDataType: ptDataType(DataTypeGeoCoordinates), + }, + { + propName: "blobProp", + expectedDataType: ptDataType(DataTypeBlob), + }, + { + propName: "string[]Prop", + expectedDataType: ptDataType(DataTypeStringArray), + }, + { + propName: "text[]Prop", + expectedDataType: ptDataType(DataTypeTextArray), + }, + { + propName: "int[]Prop", + expectedDataType: ptDataType(DataTypeIntArray), + }, + { + propName: "number[]Prop", + expectedDataType: ptDataType(DataTypeNumberArray), + }, + { + propName: "boolean[]Prop", + expectedDataType: ptDataType(DataTypeBooleanArray), + }, + { + propName: "date[]Prop", + expectedDataType: ptDataType(DataTypeDateArray), + }, + { + propName: "uuidProp", + expectedDataType: ptDataType(DataTypeUUID), + }, + { + propName: "uuid[]Prop", + expectedDataType: ptDataType(DataTypeUUIDArray), + }, + { + propName: 
"objectProp", + expectedDataType: ptDataType(DataTypeObject), + }, + { + propName: "object[]Prop", + expectedDataType: ptDataType(DataTypeObjectArray), + }, + { + propName: "RefProp", + expectedDataType: ptDataType(DataTypeCRef), + }, + { + propName: "wrongProp", + expectedDataType: nil, + expectedErr: fmt.Errorf("no such prop with name 'wrongProp' found in class 'TestClass' in the schema. Check your schema files for which properties in this class are available"), + }, + { + propName: "invalidProp", + expectedDataType: nil, + expectedErr: fmt.Errorf("given value-DataType does not exist"), + }, + } + + for _, test := range tests { + t.Run(test.propName, func(t *testing.T) { + dt, err := GetPropertyDataType(class, test.propName) + require.Equal(t, test.expectedErr, err) + assert.Equal(t, test.expectedDataType, dt) + }) + } +} + +func Test_DataType_AsPrimitive(t *testing.T) { + type testCase struct { + name string + inputDataType []string + expectedDataType DataType + expectedIsPrimitive bool + } + + runTestCases := func(t *testing.T, testCases []testCase) { + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + dataType, ok := AsPrimitive(tc.inputDataType) + assert.Equal(t, tc.expectedDataType, dataType) + assert.Equal(t, tc.expectedIsPrimitive, ok) + }) + } + } + + t.Run("is primitive data type", func(t *testing.T) { + testCases := []testCase{} + for _, dt := range append(PrimitiveDataTypes, DeprecatedPrimitiveDataTypes...) 
{ + inputDataType := dt.PropString() + testCases = append(testCases, testCase{ + name: fmt.Sprintf("%v", inputDataType), + inputDataType: inputDataType, + expectedDataType: dt, + expectedIsPrimitive: true, + }) + } + + runTestCases(t, testCases) + }) + + t.Run("is empty data type", func(t *testing.T) { + testCases := []testCase{} + for _, dtStr := range []string{""} { + inputDataType := []string{dtStr} + testCases = append(testCases, testCase{ + name: fmt.Sprintf("%v", inputDataType), + inputDataType: inputDataType, + expectedDataType: "", + expectedIsPrimitive: true, + }) + } + + runTestCases(t, testCases) + }) + + t.Run("is non existent data type", func(t *testing.T) { + testCases := []testCase{} + for _, dtStr := range []string{"non-existent"} { + inputDataType := []string{dtStr} + testCases = append(testCases, testCase{ + name: fmt.Sprintf("%v", inputDataType), + inputDataType: inputDataType, + expectedDataType: "", + expectedIsPrimitive: false, + }) + } + + runTestCases(t, testCases) + }) + + t.Run("is nested data type", func(t *testing.T) { + testCases := []testCase{} + for _, dt := range NestedDataTypes { + inputDataType := dt.PropString() + testCases = append(testCases, testCase{ + name: fmt.Sprintf("%v", inputDataType), + inputDataType: inputDataType, + expectedDataType: "", + expectedIsPrimitive: false, + }) + } + + runTestCases(t, testCases) + }) + + t.Run("is reference data type", func(t *testing.T) { + testCases := []testCase{} + for _, inputDataType := range [][]string{ + {"SomeClass"}, + {"SomeOtherClass", "AndAnotherOne"}, + } { + testCases = append(testCases, testCase{ + name: fmt.Sprintf("%v", inputDataType), + inputDataType: inputDataType, + expectedDataType: "", + expectedIsPrimitive: false, + }) + } + + runTestCases(t, testCases) + }) +} + +func Test_DataType_AsNested(t *testing.T) { + type testCase struct { + name string + inputDataType []string + expectedDataType DataType + expectedIsNested bool + } + + runTestCases := func(t *testing.T, 
testCases []testCase) { + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + dataType, ok := AsNested(tc.inputDataType) + assert.Equal(t, tc.expectedDataType, dataType) + assert.Equal(t, tc.expectedIsNested, ok) + }) + } + } + + t.Run("is nested data type", func(t *testing.T) { + testCases := []testCase{} + for _, dt := range NestedDataTypes { + inputDataType := dt.PropString() + testCases = append(testCases, testCase{ + name: fmt.Sprintf("%v", inputDataType), + inputDataType: inputDataType, + expectedDataType: dt, + expectedIsNested: true, + }) + } + + runTestCases(t, testCases) + }) + + t.Run("is empty data type", func(t *testing.T) { + testCases := []testCase{} + for _, dtStr := range []string{""} { + inputDataType := []string{dtStr} + testCases = append(testCases, testCase{ + name: fmt.Sprintf("%v", inputDataType), + inputDataType: inputDataType, + expectedDataType: "", + expectedIsNested: false, + }) + } + + runTestCases(t, testCases) + }) + + t.Run("is non existent data type", func(t *testing.T) { + testCases := []testCase{} + for _, dtStr := range []string{"non-existent"} { + inputDataType := []string{dtStr} + testCases = append(testCases, testCase{ + name: fmt.Sprintf("%v", inputDataType), + inputDataType: inputDataType, + expectedDataType: "", + expectedIsNested: false, + }) + } + + runTestCases(t, testCases) + }) + + t.Run("is primitive data type", func(t *testing.T) { + testCases := []testCase{} + for _, dt := range append(PrimitiveDataTypes, DeprecatedPrimitiveDataTypes...) 
{ + inputDataType := dt.PropString() + testCases = append(testCases, testCase{ + name: fmt.Sprintf("%v", inputDataType), + inputDataType: inputDataType, + expectedDataType: "", + expectedIsNested: false, + }) + } + + runTestCases(t, testCases) + }) + + t.Run("is reference data type", func(t *testing.T) { + testCases := []testCase{} + for _, inputDataType := range [][]string{ + {"SomeClass"}, + {"SomeOtherClass", "AndAnotherOne"}, + } { + testCases = append(testCases, testCase{ + name: fmt.Sprintf("%v", inputDataType), + inputDataType: inputDataType, + expectedDataType: "", + expectedIsNested: false, + }) + } + + runTestCases(t, testCases) + }) +} + +func ptDataType(dt DataType) *DataType { + return &dt +} diff --git a/platform/dbops/binaries/weaviate-src/entities/schema/errors.go b/platform/dbops/binaries/weaviate-src/entities/schema/errors.go new file mode 100644 index 0000000000000000000000000000000000000000..cb6c4d4e900df72cc95d8e6176cd81ed1c4c37ce --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/schema/errors.go @@ -0,0 +1,22 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package schema + +import "errors" + +const ( + ErrorNoSuchClass string = "no such class with name '%s' found in the schema. Check your schema files for which classes are available" + ErrorNoSuchProperty string = "no such prop with name '%s' found in class '%s' in the schema. 
Check your schema files for which properties in this class are available" + ErrorNoSuchDatatype string = "given value-DataType does not exist" +) + +var ErrRefToNonexistentClass = errors.New("reference property to nonexistent class") diff --git a/platform/dbops/binaries/weaviate-src/entities/schema/inverted_index_config.go b/platform/dbops/binaries/weaviate-src/entities/schema/inverted_index_config.go new file mode 100644 index 0000000000000000000000000000000000000000..64e5bc2e1fa44a593c29c701e7f00c1bac42a0ee --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/schema/inverted_index_config.go @@ -0,0 +1,70 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package schema + +import ( + "github.com/weaviate/weaviate/entities/models" +) + +type InvertedIndexConfig struct { + BM25 BM25Config + Stopwords models.StopwordConfig + CleanupIntervalSeconds uint64 + IndexTimestamps bool + IndexNullState bool + IndexPropertyLength bool + UsingBlockMaxWAND bool +} + +type BM25Config struct { + K1 float64 + B float64 +} + +func InvertedIndexConfigFromModel(m models.InvertedIndexConfig) InvertedIndexConfig { + i := InvertedIndexConfig{} + + if m.Bm25 != nil { + i.BM25.K1 = float64(m.Bm25.K1) + i.BM25.B = float64(m.Bm25.B) + } + if m.Stopwords != nil { + i.Stopwords = *m.Stopwords + } + i.CleanupIntervalSeconds = uint64(m.CleanupIntervalSeconds) + i.IndexTimestamps = m.IndexTimestamps + i.IndexNullState = m.IndexNullState + i.IndexPropertyLength = m.IndexPropertyLength + i.UsingBlockMaxWAND = m.UsingBlockMaxWAND + + return i +} + +func InvertedIndexConfigToModel(i InvertedIndexConfig) models.InvertedIndexConfig { + m := models.InvertedIndexConfig{} + + m.Bm25 = &models.BM25Config{} + m.Bm25.K1 = float32(i.BM25.K1) + m.Bm25.B = 
float32(i.BM25.B) + + m.Stopwords = &models.StopwordConfig{} + // Force a copy to avoid references + *m.Stopwords = i.Stopwords + + m.CleanupIntervalSeconds = int64(i.CleanupIntervalSeconds) + m.IndexTimestamps = i.IndexTimestamps + m.IndexNullState = i.IndexNullState + m.IndexPropertyLength = i.IndexPropertyLength + m.UsingBlockMaxWAND = i.UsingBlockMaxWAND + + return m +} diff --git a/platform/dbops/binaries/weaviate-src/entities/schema/multi_tenancy.go b/platform/dbops/binaries/weaviate-src/entities/schema/multi_tenancy.go new file mode 100644 index 0000000000000000000000000000000000000000..fe4113a6cced112891f66e418836ea31f51ee143 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/schema/multi_tenancy.go @@ -0,0 +1,54 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package schema + +import "github.com/weaviate/weaviate/entities/models" + +func MultiTenancyEnabled(class *models.Class) bool { + if class == nil { + return false + } + + if class.MultiTenancyConfig != nil { + return class.MultiTenancyConfig.Enabled + } + return false +} + +func AutoTenantCreationEnabled(class *models.Class) bool { + if class == nil { + return false + } + + if class.MultiTenancyConfig != nil { + return class.MultiTenancyConfig.AutoTenantCreation + } + return false +} + +func AutoTenantActivationEnabled(class *models.Class) bool { + if class == nil { + return false + } + + if class.MultiTenancyConfig != nil { + return class.MultiTenancyConfig.AutoTenantActivation + } + return false +} + +func ActivityStatus(status string) string { + if status == "" { + return models.TenantActivityStatusHOT + } + return status +} diff --git a/platform/dbops/binaries/weaviate-src/entities/schema/nested_properties.go 
b/platform/dbops/binaries/weaviate-src/entities/schema/nested_properties.go new file mode 100644 index 0000000000000000000000000000000000000000..6ccd00d09320e1c5e0140ced1f157748baaed9e5 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/schema/nested_properties.go @@ -0,0 +1,83 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package schema + +import "github.com/weaviate/weaviate/entities/models" + +// Merges nestPropsNew with nestPropsOld +// Returns new slice without changing input ones and +// bool indicating whether merged slice is different than the old one +func MergeRecursivelyNestedProperties(nestPropsOld, nestPropsNew []*models.NestedProperty, +) ([]*models.NestedProperty, bool) { + merged := false + nestPropsMerged := make([]*models.NestedProperty, len(nestPropsOld), len(nestPropsOld)+len(nestPropsNew)) + copy(nestPropsMerged, nestPropsOld) + + existingIndexMap := map[string]int{} + for i := range nestPropsMerged { + existingIndexMap[nestPropsMerged[i].Name] = i + } + + for _, nestPropNew := range nestPropsNew { + i, exists := existingIndexMap[nestPropNew.Name] + if !exists { + existingIndexMap[nestPropNew.Name] = len(nestPropsMerged) + nestPropsMerged = append(nestPropsMerged, nestPropNew) + merged = true + } else if _, isNested := AsNested(nestPropsMerged[i].DataType); isNested { + if recurNestProps, recurMerged := MergeRecursivelyNestedProperties( + nestPropsMerged[i].NestedProperties, + nestPropNew.NestedProperties, + ); recurMerged { + nestPropCopy := *nestPropsMerged[i] + nestPropCopy.NestedProperties = recurNestProps + + nestPropsMerged[i] = &nestPropCopy + merged = true + } + } + } + + return nestPropsMerged, merged +} + +// Determines diff between nestPropsNew and nestPropsOld slices +func 
DiffRecursivelyNestedProperties(nestPropsOld, nestPropsNew []*models.NestedProperty, +) []*models.NestedProperty { + nestPropsDiff := make([]*models.NestedProperty, 0, len(nestPropsNew)) + + existingIndexMap := map[string]int{} + for i := range nestPropsOld { + existingIndexMap[nestPropsOld[i].Name] = i + } + + for _, nestPropNew := range nestPropsNew { + i, exists := existingIndexMap[nestPropNew.Name] + if !exists { + existingIndexMap[nestPropNew.Name] = len(nestPropsDiff) + nestPropsDiff = append(nestPropsDiff, nestPropNew) + } else if _, isNested := AsNested(nestPropsOld[i].DataType); isNested { + if recurNestProps := DiffRecursivelyNestedProperties( + nestPropsOld[i].NestedProperties, + nestPropNew.NestedProperties, + ); len(recurNestProps) > 0 { + nestPropCopy := *nestPropsOld[i] + nestPropCopy.NestedProperties = recurNestProps + + existingIndexMap[nestPropCopy.Name] = len(nestPropsDiff) + nestPropsDiff = append(nestPropsDiff, &nestPropCopy) + } + } + } + + return nestPropsDiff +} diff --git a/platform/dbops/binaries/weaviate-src/entities/schema/nested_properties_test.go b/platform/dbops/binaries/weaviate-src/entities/schema/nested_properties_test.go new file mode 100644 index 0000000000000000000000000000000000000000..234a66b2a11ea120fd16a493007e2c4f1445a244 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/schema/nested_properties_test.go @@ -0,0 +1,424 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package schema_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/schema/test_utils" +) + +func Test_MergeRecursivelyNestedProperties(t *testing.T) { + vFalse := false + vTrue := true + + emptyProps := []*models.NestedProperty{} + nestedProps1 := []*models.NestedProperty{ + { + Name: "nested_int", + DataType: schema.DataTypeInt.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + }, + { + Name: "nested_text", + DataType: schema.DataTypeText.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vTrue, + Tokenization: models.PropertyTokenizationWord, + }, + { + Name: "nested_objects", + DataType: schema.DataTypeObjectArray.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + NestedProperties: []*models.NestedProperty{ + { + Name: "nested_bool_lvl2", + DataType: schema.DataTypeBoolean.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + }, + { + Name: "nested_numbers_lvl2", + DataType: schema.DataTypeNumberArray.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + }, + }, + }, + } + nestedProps2 := []*models.NestedProperty{ + { + Name: "nested_number", + DataType: schema.DataTypeNumber.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + }, + { + Name: "nested_text", + DataType: schema.DataTypeText.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vTrue, + Tokenization: models.PropertyTokenizationField, // different setting than (1) + }, + { + Name: "nested_objects", + DataType: schema.DataTypeObjectArray.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + NestedProperties: []*models.NestedProperty{ + { + Name: 
"nested_date_lvl2", + DataType: schema.DataTypeDate.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + }, + { + Name: "nested_numbers_lvl2", + DataType: schema.DataTypeNumberArray.PropString(), + IndexFilterable: &vFalse, // different setting than (1) + IndexSearchable: &vFalse, + Tokenization: "", + }, + }, + }, + } + + mergedProps_1_2 := []*models.NestedProperty{ + { + Name: "nested_int", + DataType: schema.DataTypeInt.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + }, + { + Name: "nested_number", + DataType: schema.DataTypeNumber.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + }, + { + Name: "nested_text", + DataType: schema.DataTypeText.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vTrue, + Tokenization: models.PropertyTokenizationWord, // from (1) + }, + { + Name: "nested_objects", + DataType: schema.DataTypeObjectArray.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + NestedProperties: []*models.NestedProperty{ + { + Name: "nested_bool_lvl2", + DataType: schema.DataTypeBoolean.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + }, + { + Name: "nested_date_lvl2", + DataType: schema.DataTypeDate.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + }, + { + Name: "nested_numbers_lvl2", + DataType: schema.DataTypeNumberArray.PropString(), + IndexFilterable: &vTrue, // from (1) + IndexSearchable: &vFalse, + Tokenization: "", + }, + }, + }, + } + + mergedProps_2_1 := []*models.NestedProperty{ + { + Name: "nested_int", + DataType: schema.DataTypeInt.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + }, + { + Name: "nested_number", + DataType: schema.DataTypeNumber.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + }, + { + Name: 
"nested_text", + DataType: schema.DataTypeText.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vTrue, + Tokenization: models.PropertyTokenizationField, // from (2) + }, + { + Name: "nested_objects", + DataType: schema.DataTypeObjectArray.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + NestedProperties: []*models.NestedProperty{ + { + Name: "nested_bool_lvl2", + DataType: schema.DataTypeBoolean.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + }, + { + Name: "nested_date_lvl2", + DataType: schema.DataTypeDate.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + }, + { + Name: "nested_numbers_lvl2", + DataType: schema.DataTypeNumberArray.PropString(), + IndexFilterable: &vFalse, // from (2) + IndexSearchable: &vFalse, + Tokenization: "", + }, + }, + }, + } + + t.Run("empty + nested", func(t *testing.T) { + nestedProps, merged := schema.MergeRecursivelyNestedProperties(emptyProps, nestedProps1) + + assert.True(t, merged) + assert.Equal(t, nestedProps1, nestedProps) + }) + + t.Run("nested + empty", func(t *testing.T) { + nestedProps, merged := schema.MergeRecursivelyNestedProperties(nestedProps1, emptyProps) + + assert.False(t, merged) + assert.Equal(t, nestedProps1, nestedProps) + }) + + t.Run("2 x nested", func(t *testing.T) { + nestedProps, merged := schema.MergeRecursivelyNestedProperties(nestedProps1, nestedProps1) + + assert.False(t, merged) + assert.Equal(t, nestedProps1, nestedProps) + }) + + t.Run("nested1 + nested2", func(t *testing.T) { + nestedProps, merged := schema.MergeRecursivelyNestedProperties(nestedProps1, nestedProps2) + + assert.True(t, merged) + assert.NotEqual(t, nestedProps1, nestedProps) + assert.NotEqual(t, nestedProps2, nestedProps) + test_utils.AssertNestedPropsMatch(t, mergedProps_1_2, nestedProps) + }) + + t.Run("nested2 + nested1", func(t *testing.T) { + nestedProps, merged := 
schema.MergeRecursivelyNestedProperties(nestedProps2, nestedProps1) + + assert.True(t, merged) + assert.NotEqual(t, nestedProps1, nestedProps) + assert.NotEqual(t, nestedProps2, nestedProps) + test_utils.AssertNestedPropsMatch(t, mergedProps_2_1, nestedProps) + }) +} + +func Test_DiffRecursivelyNestedProperties(t *testing.T) { + vFalse := false + vTrue := true + + emptyProps := []*models.NestedProperty{} + nestedProps1 := []*models.NestedProperty{ + { + Name: "nested_int", + DataType: schema.DataTypeInt.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + }, + { + Name: "nested_text", + DataType: schema.DataTypeText.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vTrue, + Tokenization: models.PropertyTokenizationWord, + }, + { + Name: "nested_objects", + DataType: schema.DataTypeObjectArray.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + NestedProperties: []*models.NestedProperty{ + { + Name: "nested_bool_lvl2", + DataType: schema.DataTypeBoolean.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + }, + { + Name: "nested_numbers_lvl2", + DataType: schema.DataTypeNumberArray.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + }, + }, + }, + } + nestedProps2 := []*models.NestedProperty{ + { + Name: "nested_number", + DataType: schema.DataTypeNumber.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + }, + { + Name: "nested_text", + DataType: schema.DataTypeText.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vTrue, + Tokenization: models.PropertyTokenizationField, // different setting than (1) + }, + { + Name: "nested_objects", + DataType: schema.DataTypeObjectArray.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + NestedProperties: []*models.NestedProperty{ + { + Name: "nested_date_lvl2", + DataType: 
schema.DataTypeDate.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + }, + { + Name: "nested_numbers_lvl2", + DataType: schema.DataTypeNumberArray.PropString(), + IndexFilterable: &vFalse, // different setting than (1) + IndexSearchable: &vFalse, + Tokenization: "", + }, + }, + }, + } + + mergedProps_1_2 := []*models.NestedProperty{ + { + Name: "nested_number", + DataType: schema.DataTypeNumber.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + }, + { + Name: "nested_objects", + DataType: schema.DataTypeObjectArray.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + NestedProperties: []*models.NestedProperty{ + { + Name: "nested_date_lvl2", + DataType: schema.DataTypeDate.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + }, + }, + }, + } + + mergedProps_2_1 := []*models.NestedProperty{ + { + Name: "nested_int", + DataType: schema.DataTypeInt.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + }, + { + Name: "nested_objects", + DataType: schema.DataTypeObjectArray.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + NestedProperties: []*models.NestedProperty{ + { + Name: "nested_bool_lvl2", + DataType: schema.DataTypeBoolean.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + }, + }, + }, + } + + t.Run("empty + nested", func(t *testing.T) { + nestedProps := schema.DiffRecursivelyNestedProperties(emptyProps, nestedProps1) + + assert.Equal(t, nestedProps1, nestedProps) + }) + + t.Run("nested + empty", func(t *testing.T) { + nestedProps := schema.DiffRecursivelyNestedProperties(nestedProps1, emptyProps) + + assert.Empty(t, nestedProps) + }) + + t.Run("2 x nested", func(t *testing.T) { + nestedProps := schema.DiffRecursivelyNestedProperties(nestedProps1, nestedProps1) + + assert.Empty(t, 
nestedProps) + }) + + t.Run("nested1 + nested2", func(t *testing.T) { + nestedProps := schema.DiffRecursivelyNestedProperties(nestedProps1, nestedProps2) + + assert.NotEqual(t, nestedProps1, nestedProps) + assert.NotEqual(t, nestedProps2, nestedProps) + test_utils.AssertNestedPropsMatch(t, mergedProps_1_2, nestedProps) + }) + + t.Run("nested2 + nested1", func(t *testing.T) { + nestedProps := schema.DiffRecursivelyNestedProperties(nestedProps2, nestedProps1) + + assert.NotEqual(t, nestedProps1, nestedProps) + assert.NotEqual(t, nestedProps2, nestedProps) + test_utils.AssertNestedPropsMatch(t, mergedProps_2_1, nestedProps) + }) +} diff --git a/platform/dbops/binaries/weaviate-src/entities/schema/properties.go b/platform/dbops/binaries/weaviate-src/entities/schema/properties.go new file mode 100644 index 0000000000000000000000000000000000000000..4af9d67b09b433c67825b1a70c58dbb766b5b259 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/schema/properties.go @@ -0,0 +1,78 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package schema + +import "github.com/weaviate/weaviate/entities/models" + +// DedupProperties removes from newProps slice properties already present in oldProps slice. 
+// If property of nested type (object/object[]) is present in both slices, +// diff is calculated to contain only those nested properties that are missing from +// old property model +func DedupProperties(oldProps, newProps []*models.Property) []*models.Property { + uniqueProps := make([]*models.Property, 0, len(newProps)) + + oldPropsByName := make(map[string]int, len(oldProps)) + for idx := range oldProps { + oldPropsByName[oldProps[idx].Name] = idx + } + uniquePropsByName := make(map[string]int, len(newProps)) + markedToDiff := make([]string, 0, len(newProps)) + + for _, newProp := range newProps { + propName := LowercaseFirstLetter(newProp.Name) + + uniqueIdx, uniqueExists := uniquePropsByName[propName] + if !uniqueExists { + oldIdx, oldExists := oldPropsByName[propName] + if !oldExists { + uniquePropsByName[propName] = len(uniqueProps) + uniqueProps = append(uniqueProps, newProp) + } else { + oldProp := oldProps[oldIdx] + if _, isNested := AsNested(oldProp.DataType); isNested { + mergedNestedProps, merged := MergeRecursivelyNestedProperties( + oldProp.NestedProperties, newProp.NestedProperties) + if merged { + oldPropCopy := *oldProp + oldPropCopy.NestedProperties = mergedNestedProps + uniquePropsByName[propName] = len(uniqueProps) + uniqueProps = append(uniqueProps, &oldPropCopy) + + markedToDiff = append(markedToDiff, propName) + } + } + } + } else { + uniqueProp := uniqueProps[uniqueIdx] + if _, isNested := AsNested(uniqueProp.DataType); isNested { + mergedNestedProps, merged := MergeRecursivelyNestedProperties( + uniqueProp.NestedProperties, newProp.NestedProperties) + if merged { + uniquePropCopy := *uniqueProp + uniquePropCopy.NestedProperties = mergedNestedProps + uniqueProps[uniqueIdx] = &uniquePropCopy + } + } + } + } + + for _, propName := range markedToDiff { + uniqueIdx := uniquePropsByName[propName] + oldIdx := oldPropsByName[propName] + + diffNestedProps := DiffRecursivelyNestedProperties( + oldProps[oldIdx].NestedProperties, 
uniqueProps[uniqueIdx].NestedProperties) + uniqueProps[uniqueIdx].NestedProperties = diffNestedProps + } + + return uniqueProps +} diff --git a/platform/dbops/binaries/weaviate-src/entities/schema/properties_test.go b/platform/dbops/binaries/weaviate-src/entities/schema/properties_test.go new file mode 100644 index 0000000000000000000000000000000000000000..43d8a68b5d7bc7fd3924872270ff5eadd333c749 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/schema/properties_test.go @@ -0,0 +1,631 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package schema_test + +import ( + "testing" + + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/schema/test_utils" +) + +func Test_DedupProperties(t *testing.T) { + vFalse := false + vTrue := true + + nestedProps1 := []*models.NestedProperty{ + { + Name: "nested_int", + DataType: schema.DataTypeInt.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + }, + { + Name: "nested_text", + DataType: schema.DataTypeText.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vTrue, + Tokenization: models.PropertyTokenizationWord, + }, + { + Name: "nested_objects", + DataType: schema.DataTypeObjectArray.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + NestedProperties: []*models.NestedProperty{ + { + Name: "nested_bool_lvl2", + DataType: schema.DataTypeBoolean.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + }, + { + Name: "nested_numbers_lvl2", + DataType: schema.DataTypeNumberArray.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + }, + 
}, + }, + } + nestedProps2 := []*models.NestedProperty{ + { + Name: "nested_number", + DataType: schema.DataTypeNumber.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + }, + { + Name: "nested_text", + DataType: schema.DataTypeText.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vTrue, + Tokenization: models.PropertyTokenizationField, // different setting than (1) + }, + { + Name: "nested_objects", + DataType: schema.DataTypeObjectArray.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + NestedProperties: []*models.NestedProperty{ + { + Name: "nested_date_lvl2", + DataType: schema.DataTypeDate.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + }, + { + Name: "nested_numbers_lvl2", + DataType: schema.DataTypeNumberArray.PropString(), + IndexFilterable: &vFalse, // different setting than (1) + IndexSearchable: &vFalse, + Tokenization: "", + }, + }, + }, + } + + props1 := []*models.Property{ + { + Name: "text", + DataType: schema.DataTypeText.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vTrue, + Tokenization: models.PropertyTokenizationWord, + }, + { + Name: "number", + DataType: schema.DataTypeNumber.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + }, + { + Name: "both_diff_json1", + DataType: schema.DataTypeObject.PropString(), + IndexFilterable: &vFalse, + IndexSearchable: &vFalse, + Tokenization: "", + NestedProperties: nestedProps1, + }, + { + Name: "both_diff_json2", + DataType: schema.DataTypeObject.PropString(), + IndexFilterable: &vFalse, + IndexSearchable: &vFalse, + Tokenization: "", + NestedProperties: nestedProps2, + }, + { + Name: "both_same_json1", + DataType: schema.DataTypeObject.PropString(), + IndexFilterable: &vFalse, + IndexSearchable: &vFalse, + Tokenization: "", + NestedProperties: nestedProps1, + }, + { + Name: "both_same_json2", + DataType: 
schema.DataTypeObject.PropString(), + IndexFilterable: &vFalse, + IndexSearchable: &vFalse, + Tokenization: "", + NestedProperties: nestedProps2, + }, + { + Name: "one_json1", + DataType: schema.DataTypeObject.PropString(), + IndexFilterable: &vFalse, + IndexSearchable: &vFalse, + Tokenization: "", + NestedProperties: nestedProps1, + }, + { + Name: "one_dup_json1", + DataType: schema.DataTypeObject.PropString(), + IndexFilterable: &vFalse, + IndexSearchable: &vFalse, + Tokenization: "", + NestedProperties: nestedProps1, + }, + { + Name: "one_dup_json1", + DataType: schema.DataTypeObject.PropString(), + IndexFilterable: &vFalse, + IndexSearchable: &vFalse, + Tokenization: "", + NestedProperties: nestedProps2, + }, + } + props2 := []*models.Property{ + { + Name: "text", + DataType: schema.DataTypeText.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: models.PropertyTokenizationWord, + }, + { + Name: "int", + DataType: schema.DataTypeInt.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + }, + { + Name: "bool", + DataType: schema.DataTypeBoolean.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + }, + { + Name: "both_diff_json1", + DataType: schema.DataTypeObject.PropString(), + IndexFilterable: &vFalse, + IndexSearchable: &vFalse, + Tokenization: "", + NestedProperties: nestedProps2, + }, + { + Name: "both_diff_json2", + DataType: schema.DataTypeObject.PropString(), + IndexFilterable: &vFalse, + IndexSearchable: &vFalse, + Tokenization: "", + NestedProperties: nestedProps1, + }, + { + Name: "both_same_json1", + DataType: schema.DataTypeObject.PropString(), + IndexFilterable: &vFalse, + IndexSearchable: &vFalse, + Tokenization: "", + NestedProperties: nestedProps1, + }, + { + Name: "both_same_json2", + DataType: schema.DataTypeObject.PropString(), + IndexFilterable: &vFalse, + IndexSearchable: &vFalse, + Tokenization: "", + NestedProperties: nestedProps2, + 
}, + { + Name: "one_json2", + DataType: schema.DataTypeObject.PropString(), + IndexFilterable: &vFalse, + IndexSearchable: &vFalse, + Tokenization: "", + NestedProperties: nestedProps2, + }, + { + Name: "one_dup_json2", + DataType: schema.DataTypeObject.PropString(), + IndexFilterable: &vFalse, + IndexSearchable: &vFalse, + Tokenization: "", + NestedProperties: nestedProps2, + }, + { + Name: "one_dup_json2", + DataType: schema.DataTypeObject.PropString(), + IndexFilterable: &vFalse, + IndexSearchable: &vFalse, + Tokenization: "", + NestedProperties: nestedProps1, + }, + } + + t.Run("props1 + props2", func(t *testing.T) { + props := schema.DedupProperties(props1, props2) + + test_utils.AssertPropsMatch(t, props, []*models.Property{ + { + Name: "int", + DataType: schema.DataTypeInt.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + }, + { + Name: "bool", + DataType: schema.DataTypeBoolean.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + }, + { + Name: "both_diff_json1", + DataType: schema.DataTypeObject.PropString(), + IndexFilterable: &vFalse, + IndexSearchable: &vFalse, + Tokenization: "", + NestedProperties: []*models.NestedProperty{ + { + Name: "nested_number", + DataType: schema.DataTypeNumber.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + }, + { + Name: "nested_objects", + DataType: schema.DataTypeObjectArray.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + NestedProperties: []*models.NestedProperty{ + { + Name: "nested_date_lvl2", + DataType: schema.DataTypeDate.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + }, + }, + }, + }, + }, + { + Name: "both_diff_json2", + DataType: schema.DataTypeObject.PropString(), + IndexFilterable: &vFalse, + IndexSearchable: &vFalse, + Tokenization: "", + NestedProperties: []*models.NestedProperty{ + { + Name: 
"nested_int", + DataType: schema.DataTypeInt.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + }, + { + Name: "nested_objects", + DataType: schema.DataTypeObjectArray.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + NestedProperties: []*models.NestedProperty{ + { + Name: "nested_bool_lvl2", + DataType: schema.DataTypeBoolean.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + }, + }, + }, + }, + }, + { + Name: "one_json2", + DataType: schema.DataTypeObject.PropString(), + IndexFilterable: &vFalse, + IndexSearchable: &vFalse, + Tokenization: "", + NestedProperties: []*models.NestedProperty{ + { + Name: "nested_number", + DataType: schema.DataTypeNumber.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + }, + { + Name: "nested_text", + DataType: schema.DataTypeText.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vTrue, + Tokenization: models.PropertyTokenizationField, // different setting than (1) + }, + { + Name: "nested_objects", + DataType: schema.DataTypeObjectArray.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + NestedProperties: []*models.NestedProperty{ + { + Name: "nested_date_lvl2", + DataType: schema.DataTypeDate.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + }, + { + Name: "nested_numbers_lvl2", + DataType: schema.DataTypeNumberArray.PropString(), + IndexFilterable: &vFalse, // different setting than (1) + IndexSearchable: &vFalse, + Tokenization: "", + }, + }, + }, + }, + }, + { + Name: "one_dup_json2", + DataType: schema.DataTypeObject.PropString(), + IndexFilterable: &vFalse, + IndexSearchable: &vFalse, + Tokenization: "", + NestedProperties: []*models.NestedProperty{ + { + Name: "nested_int", + DataType: schema.DataTypeInt.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + 
Tokenization: "", + }, + { + Name: "nested_number", + DataType: schema.DataTypeNumber.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + }, + { + Name: "nested_text", + DataType: schema.DataTypeText.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vTrue, + Tokenization: models.PropertyTokenizationField, // from (2) + }, + { + Name: "nested_objects", + DataType: schema.DataTypeObjectArray.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + NestedProperties: []*models.NestedProperty{ + { + Name: "nested_bool_lvl2", + DataType: schema.DataTypeBoolean.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + }, + { + Name: "nested_date_lvl2", + DataType: schema.DataTypeDate.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + }, + { + Name: "nested_numbers_lvl2", + DataType: schema.DataTypeNumberArray.PropString(), + IndexFilterable: &vFalse, // from (2) + IndexSearchable: &vFalse, + Tokenization: "", + }, + }, + }, + }, + }, + }) + }) + + t.Run("props2 + props1", func(t *testing.T) { + props := schema.DedupProperties(props2, props1) + + test_utils.AssertPropsMatch(t, props, []*models.Property{ + { + Name: "number", + DataType: schema.DataTypeNumber.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + }, + { + Name: "both_diff_json1", + DataType: schema.DataTypeObject.PropString(), + IndexFilterable: &vFalse, + IndexSearchable: &vFalse, + Tokenization: "", + NestedProperties: []*models.NestedProperty{ + { + Name: "nested_int", + DataType: schema.DataTypeInt.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + }, + { + Name: "nested_objects", + DataType: schema.DataTypeObjectArray.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + NestedProperties: []*models.NestedProperty{ + { + Name: 
"nested_bool_lvl2", + DataType: schema.DataTypeBoolean.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + }, + }, + }, + }, + }, + { + Name: "both_diff_json2", + DataType: schema.DataTypeObject.PropString(), + IndexFilterable: &vFalse, + IndexSearchable: &vFalse, + Tokenization: "", + NestedProperties: []*models.NestedProperty{ + { + Name: "nested_number", + DataType: schema.DataTypeNumber.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + }, + { + Name: "nested_objects", + DataType: schema.DataTypeObjectArray.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + NestedProperties: []*models.NestedProperty{ + { + Name: "nested_date_lvl2", + DataType: schema.DataTypeDate.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + }, + }, + }, + }, + }, + { + Name: "one_json1", + DataType: schema.DataTypeObject.PropString(), + IndexFilterable: &vFalse, + IndexSearchable: &vFalse, + Tokenization: "", + NestedProperties: []*models.NestedProperty{ + { + Name: "nested_int", + DataType: schema.DataTypeInt.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + }, + { + Name: "nested_text", + DataType: schema.DataTypeText.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vTrue, + Tokenization: models.PropertyTokenizationWord, + }, + { + Name: "nested_objects", + DataType: schema.DataTypeObjectArray.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + NestedProperties: []*models.NestedProperty{ + { + Name: "nested_bool_lvl2", + DataType: schema.DataTypeBoolean.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + }, + { + Name: "nested_numbers_lvl2", + DataType: schema.DataTypeNumberArray.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + }, + }, + }, + }, + }, + { + 
Name: "one_dup_json1", + DataType: schema.DataTypeObject.PropString(), + IndexFilterable: &vFalse, + IndexSearchable: &vFalse, + Tokenization: "", + NestedProperties: []*models.NestedProperty{ + { + Name: "nested_int", + DataType: schema.DataTypeInt.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + }, + { + Name: "nested_number", + DataType: schema.DataTypeNumber.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + }, + { + Name: "nested_text", + DataType: schema.DataTypeText.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vTrue, + Tokenization: models.PropertyTokenizationWord, // from (1) + }, + { + Name: "nested_objects", + DataType: schema.DataTypeObjectArray.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + NestedProperties: []*models.NestedProperty{ + { + Name: "nested_bool_lvl2", + DataType: schema.DataTypeBoolean.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + }, + { + Name: "nested_date_lvl2", + DataType: schema.DataTypeDate.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + Tokenization: "", + }, + { + Name: "nested_numbers_lvl2", + DataType: schema.DataTypeNumberArray.PropString(), + IndexFilterable: &vTrue, // from (1) + IndexSearchable: &vFalse, + Tokenization: "", + }, + }, + }, + }, + }, + }) + }) +} diff --git a/platform/dbops/binaries/weaviate-src/entities/schema/schema.go b/platform/dbops/binaries/weaviate-src/entities/schema/schema.go new file mode 100644 index 0000000000000000000000000000000000000000..0e5f26f769a77756e4f47ba487865cf7841006b8 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/schema/schema.go @@ -0,0 +1,112 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 
Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package schema + +import ( + "strings" + + "github.com/weaviate/weaviate/entities/models" +) + +// Newtype to denote that this string is used as a Class name +type ClassName string + +func (c ClassName) String() string { + return string(c) +} + +// Newtype to denote that this string is used as a Property name +type PropertyName string + +func (p PropertyName) String() string { + return string(p) +} + +type ClassAndProperty struct { + ClassName ClassName + PropertyName PropertyName +} + +// Describes the schema that is used in Weaviate. +type Schema struct { + Objects *models.Schema +} + +func Empty() Schema { + return Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{}, + }, + } +} + +type SchemaWithAliases struct { + Schema + Aliases map[string]string +} + +// Return one of the semantic schema's +func (s *Schema) SemanticSchemaFor() *models.Schema { + return s.Objects +} + +func UppercaseClassName(name string) string { + if len(name) < 1 { + return name + } + + if len(name) == 1 { + return strings.ToUpper(name) + } + + return strings.ToUpper(string(name[0])) + name[1:] +} + +func UppercaseClassesNames(names ...string) []string { + for idx := range names { + names[idx] = UppercaseClassName(names[idx]) + } + + return names +} + +func LowercaseAllPropertyNames(props []*models.Property) []*models.Property { + for i, prop := range props { + props[i].Name = LowercaseFirstLetter(prop.Name) + } + + return props +} + +func LowercaseFirstLetter(name string) string { + if len(name) < 1 { + return name + } + + if len(name) == 1 { + return strings.ToLower(name) + } + + return strings.ToLower(string(name[0])) + name[1:] +} + +func LowercaseFirstLetterOfStrings(in []string) []string { + if len(in) < 1 { + return in + } + out := make([]string, len(in)) + for i, str := range in { + out[i] = LowercaseFirstLetter(str) + } + + return out +} diff --git 
a/platform/dbops/binaries/weaviate-src/entities/schema/validation.go b/platform/dbops/binaries/weaviate-src/entities/schema/validation.go new file mode 100644 index 0000000000000000000000000000000000000000..04d5cee5a3b774aa26890b446bb8ff4ac3d6a4d5 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/schema/validation.go @@ -0,0 +1,187 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package schema + +import ( + "fmt" + "regexp" +) + +var ( + validateClassNameRegex = regexp.MustCompile(`^` + ClassNameRegexCore + `$`) + validateTenantNameRegex = regexp.MustCompile(`^` + ShardNameRegexCore + `$`) + validatePropertyNameRegex = regexp.MustCompile(`^` + PropertyNameRegex + `$`) + validateNestedPropertyNameRegex = regexp.MustCompile(`^` + NestedPropertyNameRegex + `$`) + reservedPropertyNames = []string{"_additional", "_id", "id"} +) + +const ( + // Restricted by max length allowed for dir name (255 chars) + // As dir containing class data is named after class, 255 chars are allowed + classNameMaxLength = 255 + ClassNameRegexCore = `[A-Z][_0-9A-Za-z]{0,254}` + // ClassNameRegexAllowRegex allowed chars in class name including regex patterns, 255 chars are allowed + ClassNameRegexAllowRegex = `^(\*|[A-Z][_0-9A-Za-z\-.*+?^$()|{}\[\]\\]{0,254})$` + // ShardNameRegexCore allowed chars in shard name, 64 chars are allowed + ShardNameRegexCore = `[A-Za-z0-9\-\_]{1,64}` + // ShardNameRegexAllowRegex allowed chars in shard name including regex patterns, 64 chars are allowed + ShardNameRegexAllowRegex = `^[A-Za-z0-9\-_.*+?^$()|{}\[\]\\*]{1,64}$` + // Restricted by max length allowed for dir name (255 chars) + // Property name is used to build dir names of various purposes containing property + // related data. 
Among them might be (depending on the settings): + // - geo.{property_name}.hnsw.commitlog.d + // - property_{property_name}__meta_count + // - property_{property_name}_propertyLength + // Last one seems to add the most additional characters (24) to property name, + // therefore poperty max lentgh should not exceed 255 - 24 = 231 chars. + propertyNameMaxLength = 231 + PropertyNameRegex = `[_A-Za-z][_0-9A-Za-z]{0,230}` + // Nested properties names are not used to build directory names (yet), + // no max length restriction is imposed + NestedPropertyNameRegex = `[_A-Za-z][_0-9A-Za-z]*` + // Target vector names must be GraphQL compliant names no longer then 230 characters + TargetVectorNameMaxLength = 230 + TargetVectorNameRegex = `[_A-Za-z][_0-9A-Za-z]{0,229}` +) + +// ValidateClassName validates that this string is a valid class name (format wise) +func ValidateClassName(name string) (ClassName, error) { + c, err := validateClassOrAliasName(name, false) + if err != nil { + return "", err + } + return ClassName(c), nil +} + +func ValidateAliasName(name string) (string, error) { + return validateClassOrAliasName(name, true) +} + +// ValidateClassNameIncludesRegex validates that this string is a valid class name (format wise) +// can include regex pattern +func ValidateClassNameIncludesRegex(name string) (ClassName, error) { + if len(name) > classNameMaxLength { + return "", fmt.Errorf("'%s' is not a valid class name. Name should not be longer than %d characters", + name, classNameMaxLength) + } + if !regexp.MustCompile(ClassNameRegexAllowRegex).MatchString(name) { + return "", fmt.Errorf("'%s' is not a valid class name", name) + } + return ClassName(name), nil +} + +func validateClassOrAliasName(name string, isAlias bool) (string, error) { + typ := "class" + if isAlias { + typ = "alias" + } + + if len(name) > classNameMaxLength { + return "", fmt.Errorf("'%s' is not a valid %s name. 
Name should not be longer than %d characters", + name, typ, classNameMaxLength) + } + if !validateClassNameRegex.MatchString(name) { + return "", fmt.Errorf("'%s' is not a valid %s name", name, typ) + } + return name, nil +} + +// ValidateTenantName validates that this string is a valid tenant name (format wise) +func ValidateTenantName(name string) error { + if !validateTenantNameRegex.MatchString(name) { + var msg string + if name == "" { + msg = "empty tenant name" + } else { + msg = fmt.Sprintf( + " '%s' is not a valid tenant name. should only contain alphanumeric characters (a-z, A-Z, 0-9), "+ + "underscore (_), and hyphen (-), with a length between 1 and 64 characters", + name, + ) + } + return fmt.Errorf("%s", msg) + } + return nil +} + +// ValidateTenantNameIncludesRegex validates that this string is a valid tenant name (format wise) +// can include regex pattern +func ValidateTenantNameIncludesRegex(name string) error { + if !regexp.MustCompile(ShardNameRegexAllowRegex).MatchString(name) { + var msg string + if name == "" { + msg = "empty tenant name" + } else { + msg = fmt.Sprintf( + " '%s' is not a valid tenant name. should only contain alphanumeric characters (a-z, A-Z, 0-9), "+ + "underscore (_), and hyphen (-), with a length between 1 and 64 characters", + name, + ) + } + return fmt.Errorf("%s", msg) + } + return nil +} + +// ValidatePropertyName validates that this string is a valid property name +func ValidatePropertyName(name string) (PropertyName, error) { + if len(name) > propertyNameMaxLength { + return "", fmt.Errorf("'%s' is not a valid property name. Name should not be longer than %d characters", + name, propertyNameMaxLength) + } + if !validatePropertyNameRegex.MatchString(name) { + return "", fmt.Errorf("'%s' is not a valid property name. 
"+ + "Property names in Weaviate are restricted to valid GraphQL names, "+ + "which must be “/%s/”", name, PropertyNameRegex) + } + return PropertyName(name), nil +} + +// ValidateNestedPropertyName validates that this string is a valid nested property name +func ValidateNestedPropertyName(name, prefix string) error { + if !validateNestedPropertyNameRegex.MatchString(name) { + return fmt.Errorf("'%s' is not a valid nested property name of '%s'. "+ + "NestedProperty names in Weaviate are restricted to valid GraphQL names, "+ + "which must be “/%s/”", name, prefix, NestedPropertyNameRegex) + } + return nil +} + +// ValidateReservedPropertyName validates that a string is not a reserved property name +func ValidateReservedPropertyName(name string) error { + for i := range reservedPropertyNames { + if name == reservedPropertyNames[i] { + return fmt.Errorf("'%s' is a reserved property name", name) + } + } + return nil +} + +// AssertValidClassName assert that this string is a valid class name or +// panics and should therefore most likely not be used +func AssertValidClassName(name string) ClassName { + n, err := ValidateClassName(name) + if err != nil { + panic(fmt.Sprintf("Did not expect to be handled '%s', an invalid class name", name)) + } + return n +} + +// AssertValidPropertyName asserts that this string is a valid property name or +// panics and should therefore most likely never be used. 
+func AssertValidPropertyName(name string) PropertyName { + n, err := ValidatePropertyName(name) + if err != nil { + panic(fmt.Sprintf("Did not expect to be handled '%s', an invalid property name", name)) + } + return n +} diff --git a/platform/dbops/binaries/weaviate-src/entities/schema/validation_test.go b/platform/dbops/binaries/weaviate-src/entities/schema/validation_test.go new file mode 100644 index 0000000000000000000000000000000000000000..248280c7e0fec6c26a788133a4ead63e642cb691 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/schema/validation_test.go @@ -0,0 +1,309 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package schema + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestValidateOKClassName(t *testing.T) { + for _, name := range []string{ + "A", + "FooBar", + "FooBar2", + "Foo_______bar__with_numbers___1234567890_and_2", + "C_123456___foo_bar_2", + "NormalClassNameWithNumber1", + "Normal__Class__Name__With__Number__1", + "CClassName", + "ThisClassNameHasExactly255Characters_MaximumAllowed____________________qwertyuiopasdfghjklzxcvbnm1234567890_qwertyuiopasdfghjklzxcvbnm1234567890_qwertyuiopasdfghjklzxcvbnm1234567890_qwertyuiopasdfghjklzxcvbnm1234567890_qwertyuiopasdfghjklzxcvbnm1234567890", + } { + t.Run(name, func(t *testing.T) { + _, err := ValidateClassName(name) + assert.NoError(t, err) + }) + } +} + +func TestFailValidateBadClassName(t *testing.T) { + for _, name := range []string{ + "", + "Foo Bar", + "foo", + "fooBar", + "_foo", + 
"ThisClassNameHasMoreThan255Characters_MaximumAllowed____________________qwertyuiopasdfghjklzxcvbnm1234567890_qwertyuiopasdfghjklzxcvbnm1234567890_qwertyuiopasdfghjklzxcvbnm1234567890_qwertyuiopasdfghjklzxcvbnm1234567890_qwertyuiopasdfghjklzxcvbnm1234567890", + "_String", "string", + "_DateTime", "dateTime", "datetime", + "_Int", "int", + "_Float", "float", + "_Boolean", "boolean", + "_ID", "id", + "_FieldSet", "fieldSet", "fieldset", + } { + t.Run(name, func(t *testing.T) { + _, err := ValidateClassName(name) + assert.Error(t, err) + }) + } +} + +func TestValidateOKPropertyName(t *testing.T) { + for _, name := range []string{ + "fooBar", + "fooBar2", + "_fooBar2", + "intField", + "hasAction", + "_foo_bar_2", + "______foo_bar_2", + "___123456___foo_bar_2", + "a_very_Long_property_Name__22_with_numbers_9", + "a_very_Long_property_Name__22_with_numbers_9880888800888800008", + "FooBar", + "ThisPropertyNameHasExactly231Characters_MaximumAllowed______________________________qwertyuiopasdfghjklzxcvbnm1234567890_qwertyuiopasdfghjklzxcvbnm1234567890_qwertyuiopasdfghjklzxcvbnm1234567890_qwertyuiopasdfghjklzxcvbnm1234567890", + } { + t.Run(name, func(t *testing.T) { + _, err := ValidatePropertyName(name) + assert.NoError(t, err) + }) + } +} + +func TestFailValidateBadPropertyName(t *testing.T) { + for _, name := range []string{ + "foo Bar", + "a_very_Long_property_Name__22_with-dash_9", + "1_FooBar", + "ThisPropertyNameHasMoreThan231Characters_MaximumAllowed______________________________qwertyuiopasdfghjklzxcvbnm1234567890_qwertyuiopasdfghjklzxcvbnm1234567890_qwertyuiopasdfghjklzxcvbnm1234567890_qwertyuiopasdfghjklzxcvbnm1234567890", + } { + t.Run(name, func(t *testing.T) { + _, err := ValidatePropertyName(name) + assert.Error(t, err) + }) + } +} + +func TestValidateReservedPropertyName(t *testing.T) { + type args struct { + name string + } + tests := []struct { + name string + args args + wantErr bool + }{ + { + name: "Reserved name: _additional", + args: args{ + name: 
"_additional", + }, + wantErr: true, + }, + { + name: "Reserved name: id", + args: args{ + name: "id", + }, + wantErr: true, + }, + { + name: "Reserved name: _id", + args: args{ + name: "_id", + }, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if err := ValidateReservedPropertyName(tt.args.name); (err != nil) != tt.wantErr { + t.Errorf("ValidateReservedPropertyName() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +func TestValidateTenantName(t *testing.T) { + tests := []struct { + name string + input string + expectedErr string + }{ + { + name: "valid tenant name", + input: "ValidTenantName123", + expectedErr: "", + }, + { + name: "valid tenant name with hyphen", + input: "Valid-Tenant-Name", + expectedErr: "", + }, + { + name: "valid tenant name with underscore", + input: "Valid_Tenant_Name", + expectedErr: "", + }, + { + name: "empty tenant name", + input: "", + expectedErr: "empty tenant name", + }, + { + name: "invalid tenant name with space", + input: "Invalid Tenant Name", + expectedErr: " 'Invalid Tenant Name' is not a valid tenant name. should only contain alphanumeric characters (a-z, A-Z, 0-9), underscore (_), and hyphen (-), with a length between 1 and 64 characters", + }, + { + name: "invalid tenant name with special character", + input: "InvalidTenantName!", + expectedErr: " 'InvalidTenantName!' is not a valid tenant name. should only contain alphanumeric characters (a-z, A-Z, 0-9), underscore (_), and hyphen (-), with a length between 1 and 64 characters", + }, + { + name: "tenant name too long", + input: "ThisTenantNameIsWayTooLongAndShouldNotBeValidBecauseItExceedsTheMaximumAllowedLength", + expectedErr: " 'ThisTenantNameIsWayTooLongAndShouldNotBeValidBecauseItExceedsTheMaximumAllowedLength' is not a valid tenant name. 
should only contain alphanumeric characters (a-z, A-Z, 0-9), underscore (_), and hyphen (-), with a length between 1 and 64 characters", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := ValidateTenantName(tt.input) + if tt.expectedErr != "" { + assert.Error(t, err) + assert.Equal(t, tt.expectedErr, err.Error()) + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestValidateClassNameIncludesRegex(t *testing.T) { + tests := []struct { + name string + input string + expectedErr string + }{ + { + name: "valid class name", + input: "ValidClassName", + expectedErr: "", + }, + { + name: "valid class name", + input: "*", + expectedErr: "", + }, + { + name: "valid class name with regex pattern", + input: "ValidClassName.*", + expectedErr: "", + }, + { + name: "invalid class name with special character", + input: "InvalidClassName!", + expectedErr: "not a valid class name", + }, + { + name: "invalid class name with space", + input: "Invalid ClassName", + expectedErr: "not a valid class name", + }, + { + name: "invalid class name with spaces", + input: "InvalidClassName WithSpaces", + expectedErr: "not a valid class name", + }, + { + name: "class name too long", + input: "ThisClassNameHasExactly256Characters_MaximumAllowed____________________qwertyuiopasdfghjklzxcvbnm1234567890_qwertyuiopasdfghjklzxcvbnm1234567890_qwertyuiopasdfghjklzxcvbnm1234567890_qwertyuiopasdfghjklzxcvbnm1234567890_qwertyuiopasdfghjklzxcvbnm1234567890A", + expectedErr: "not a valid class name", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + _, err := ValidateClassNameIncludesRegex(tt.input) + if tt.expectedErr != "" { + assert.Error(t, err) + assert.Contains(t, err.Error(), tt.expectedErr) + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestValidateTenantNameIncludesRegex(t *testing.T) { + tests := []struct { + name string + input string + expectedErr string + }{ + { + name: "valid tenant name", + input: 
"ValidTenantName", + expectedErr: "", + }, + { + name: "valid tenant name", + input: "*", + expectedErr: "", + }, + { + name: "valid tenant name with hyphen", + input: "Valid-Tenant-Name", + expectedErr: "", + }, + { + name: "valid tenant name with underscore", + input: "Valid_Tenant_Name", + expectedErr: "", + }, + { + name: "empty tenant name", + input: "", + expectedErr: "empty tenant name", + }, + { + name: "invalid tenant name with space", + input: "Invalid Tenant Name", + expectedErr: "not a valid tenant name", + }, + { + name: "invalid tenant name with special character", + input: "InvalidTenantName!", + expectedErr: "not a valid tenant name", + }, + { + name: "tenant name too long", + input: "ThisTenantNameIsWayTooLongAndShouldNotBeValidBecauseItExceedsTheMaximumAllowedLength", + expectedErr: "not a valid tenant name", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := ValidateTenantNameIncludesRegex(tt.input) + if tt.expectedErr != "" { + assert.Error(t, err) + assert.Contains(t, err.Error(), tt.expectedErr) + } else { + assert.NoError(t, err) + } + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/entities/search/ref.go b/platform/dbops/binaries/weaviate-src/entities/search/ref.go new file mode 100644 index 0000000000000000000000000000000000000000..d6a63172f300c279d5b795a8adb05b69f3333d16 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/search/ref.go @@ -0,0 +1,20 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package search + +// LocalRef to be filled by the search backend to indicate that the +// particular reference field is a local ref and does not require further +// resolving, as opposed to a NetworkRef. 
+type LocalRef struct { + Class string + Fields map[string]interface{} +} diff --git a/platform/dbops/binaries/weaviate-src/entities/search/result.go b/platform/dbops/binaries/weaviate-src/entities/search/result.go new file mode 100644 index 0000000000000000000000000000000000000000..afab4bed4cd35a196dae19978161ae03d5cbfd70 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/search/result.go @@ -0,0 +1,92 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package search + +import ( + "github.com/go-openapi/strfmt" + "github.com/weaviate/weaviate/entities/models" +) + +// Result contains some info of a concept (kind), but not all. For +// additional info the ID can be used to retrieve the full concept from the +// connector storage +type Result struct { + ID strfmt.UUID + DocID *uint64 + ClassName string + Score float32 + SecondarySortValue float32 + ExplainScore string + Dist float32 + Vector []float32 + Vectors models.Vectors + Beacon string + Certainty float32 + Schema models.PropertySchema + Created int64 + Updated int64 + AdditionalProperties models.AdditionalProperties + VectorWeights map[string]string + IsConsistent bool + Tenant string + // Vectors map[string][]float32 + + // Dimensions in case search was vector-based, 0 otherwise + Dims int +} + +type Results []Result + +func (r Result) Object() *models.Object { + return r.ObjectWithVector(true) +} + +func (r Result) ObjectWithVector(includeVector bool) *models.Object { + schema, ok := r.Schema.(map[string]interface{}) + if ok { + delete(schema, "id") + } + + t := &models.Object{ + Class: r.ClassName, + ID: r.ID, + Properties: schema, + CreationTimeUnix: r.Created, + LastUpdateTimeUnix: r.Updated, + VectorWeights: r.VectorWeights, + Tenant: r.Tenant, + } + + 
if r.AdditionalProperties != nil { + t.Additional = r.AdditionalProperties + } + + if includeVector { + t.Vector = r.Vector + t.Vectors = r.Vectors + } + + return t +} + +func (rs Results) Objects() []*models.Object { + return rs.ObjectsWithVector(true) +} + +func (rs Results) ObjectsWithVector(includeVector bool) []*models.Object { + objects := make([]*models.Object, len(rs)) + for i, res := range rs { + objects[i] = res.ObjectWithVector(includeVector) + } + + return objects +} diff --git a/platform/dbops/binaries/weaviate-src/entities/search/select_property.go b/platform/dbops/binaries/weaviate-src/entities/search/select_property.go new file mode 100644 index 0000000000000000000000000000000000000000..3fc8c34b3fc4c816a87971eb807f5e67a3fab23a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/search/select_property.go @@ -0,0 +1,152 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package search + +import ( + "fmt" + "regexp" + + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/schema" +) + +type SelectProperty struct { + Name string `json:"name"` + + IsPrimitive bool `json:"isPrimitive"` + + IsObject bool `json:"isObject"` + + // Include the __typename in all the Refs below. + IncludeTypeName bool `json:"includeTypeName"` + + // Not a primitive nor nested type? Then select these properties. + Refs []SelectClass `json:"refs"` + + // Nested type? Then select these properties. 
+ Props []SelectProperty `json:"objs"` +} + +type SelectClass struct { + ClassName string `json:"className"` + RefProperties SelectProperties `json:"refProperties"` + AdditionalProperties additional.Properties `json:"additionalProperties"` +} + +// FindSelectClass by specifying the exact class name +func (sp SelectProperty) FindSelectClass(className schema.ClassName) *SelectClass { + for _, selectClass := range sp.Refs { + if selectClass.ClassName == string(className) { + return &selectClass + } + } + + return nil +} + +// FindSelectObject by specifying the exact object name +func (sp SelectProperty) FindSelectProperty(name string) *SelectProperty { + for _, selectProp := range sp.Props { + if selectProp.Name == name { + return &selectProp + } + } + + return nil +} + +// HasPeer returns true if any of the referenced classes are from the specified +// peer +func (sp SelectProperty) HasPeer(peerName string) bool { + r := regexp.MustCompile(fmt.Sprintf("^%s__", peerName)) + for _, selectClass := range sp.Refs { + if r.MatchString(selectClass.ClassName) { + return true + } + } + + return false +} + +type SelectProperties []SelectProperty + +func (sp SelectProperties) HasRefs() bool { + for _, p := range sp { + if len(p.Refs) > 0 { + return true + } + } + return false +} + +func (sp SelectProperties) HasProps() bool { + for _, p := range sp { + if len(p.Props) > 0 { + return true + } + } + return false +} + +func (sp SelectProperties) ShouldResolve(path []string) (bool, error) { + if len(path)%2 != 0 || len(path) == 0 { + return false, fmt.Errorf("used incorrectly: path must have even number of segments in the form of " + + "refProp, className, refProp, className, etc") + } + + // the above gives us the guarantee that path contains at least two elements + property := path[0] + class := schema.ClassName(path[1]) + + for _, p := range sp { + if p.IsPrimitive { + continue + } + + if p.Name != property { + continue + } + + selectClass := p.FindSelectClass(class) + if 
selectClass == nil { + continue + } + + if len(path) > 2 { + // we're not done yet, this one's nested + return selectClass.RefProperties.ShouldResolve(path[2:]) + } + + // we are done and found the path + return true, nil + } + + return false, nil +} + +func (sp SelectProperties) FindProperty(propName string) *SelectProperty { + for _, prop := range sp { + if prop.Name == propName { + return &prop + } + } + + return nil +} + +func (sp SelectProperties) GetPropertyNames() []string { + names := make([]string, len(sp)) + for i := range sp { + names[i] = sp[i].Name + } + return names +} diff --git a/platform/dbops/binaries/weaviate-src/entities/searchparams/retrieval.go b/platform/dbops/binaries/weaviate-src/entities/searchparams/retrieval.go new file mode 100644 index 0000000000000000000000000000000000000000..181fb436508c38d7f0b601c61b30a69f0596e29a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/searchparams/retrieval.go @@ -0,0 +1,158 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package searchparams + +import ( + "fmt" + "strings" + + "github.com/weaviate/weaviate/entities/search" + + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" +) + +type NearVector struct { + Certainty float64 `json:"certainty"` + Distance float64 `json:"distance"` + WithDistance bool `json:"-"` + Vectors []models.Vector `json:"vectors"` + TargetVectors []string `json:"targetVectors"` +} + +type KeywordRanking struct { + Type string `json:"type"` + Properties []string `json:"properties"` + Query string `json:"query"` + AdditionalExplanations bool `json:"additionalExplanations"` + MinimumOrTokensMatch int `json:"minimumOrTokensMatch"` + SearchOperator string `json:"searchOperator"` +} + +// Indicates whether property should be indexed +// Index holds document ids with property of/containing particular value +// and number of its occurrences in that property +// (index created using bucket of StrategyMapCollection) +func HasSearchableIndex(prop *models.Property) bool { + switch dt, _ := schema.AsPrimitive(prop.DataType); dt { + case schema.DataTypeText, schema.DataTypeTextArray: + // by default property has searchable index only for text/text[] props + if prop.IndexSearchable == nil { + return true + } + return *prop.IndexSearchable + default: + return false + } +} + +func PropertyHasSearchableIndex(class *models.Class, tentativePropertyName string) bool { + if class == nil { + return false + } + + propertyName := strings.Split(tentativePropertyName, "^")[0] + p, err := schema.GetPropertyByName(class, propertyName) + if err != nil { + return false + } + return HasSearchableIndex(p) +} + +// GetPropertyByName returns the class by its name +func GetPropertyByName(c *models.Class, propName string) (*models.Property, error) { + for _, prop := range c.Properties { + // Check if the name of the property is the given name, that's the property we need + if prop.Name == 
strings.Split(propName, ".")[0] { + return prop, nil + } + } + return nil, fmt.Errorf("property %v not found %v", propName, c.Class) +} + +func (k *KeywordRanking) ChooseSearchableProperties(class *models.Class) { + var validProperties []string + for _, prop := range k.Properties { + property, err := GetPropertyByName(class, prop) + if err != nil { + continue + } + if HasSearchableIndex(property) { + validProperties = append(validProperties, prop) + } + } + k.Properties = validProperties +} + +type WeightedSearchResult struct { + SearchParams interface{} `json:"searchParams"` + Weight float64 `json:"weight"` + Type string `json:"type"` +} + +type HybridSearch struct { + SubSearches interface{} `json:"subSearches"` + Type string `json:"type"` + Alpha float64 `json:"alpha"` + Query string `json:"query"` + Vector models.Vector `json:"vector"` + Properties []string `json:"properties"` + TargetVectors []string `json:"targetVectors"` + FusionAlgorithm int `json:"fusionalgorithm"` + Distance float32 `json:"distance"` + WithDistance bool `json:"withDistance"` + MinimumOrTokensMatch int `json:"minimumOrTokenMatch"` + SearchOperator string `json:"searchOperator"` + NearTextParams *NearTextParams + NearVectorParams *NearVector +} + +type NearObject struct { + ID string `json:"id"` + Beacon string `json:"beacon"` + Certainty float64 `json:"certainty"` + Distance float64 `json:"distance"` + WithDistance bool `json:"-"` + TargetVectors []string `json:"targetVectors"` +} + +type ObjectMove struct { + ID string + Beacon string +} + +// ExploreMove moves an existing Search Vector closer (or further away from) a specific other search term +type ExploreMove struct { + Values []string + Force float32 + Objects []ObjectMove +} + +type NearTextParams struct { + Values []string + Limit int + MoveTo ExploreMove + MoveAwayFrom ExploreMove + Certainty float64 + Distance float64 + WithDistance bool + Network bool + Autocorrect bool + TargetVectors []string +} + +type GroupBy struct { + 
Property string + Groups int + ObjectsPerGroup int + Properties search.SelectProperties +} diff --git a/platform/dbops/binaries/weaviate-src/entities/sentry/config.go b/platform/dbops/binaries/weaviate-src/entities/sentry/config.go new file mode 100644 index 0000000000000000000000000000000000000000..b3f2831a3737d1028b04e8abfa969538d9373624 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/sentry/config.go @@ -0,0 +1,152 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package sentry + +import ( + "errors" + "fmt" + "os" + "regexp" + "strconv" + "strings" + + "github.com/weaviate/weaviate/entities/config" +) + +// ConfigOpts all map to environment variables. For example: +// - SENTRY_ENABLED=true -> ConfigOpts.Enabled=true +type ConfigOpts struct { + Enabled bool `json:"enabled" yaml:"enabled"` + DSN string `json:"dsn" yaml:"dsn"` + Debug bool `json:"debug" yaml:"debug"` + Tags map[string]string `json:"tags" yaml:"tags"` + Release string `json:"release" yaml:"release"` + Environment string `json:"environment" yaml:"environment"` + ErrorReportingDisabled bool `json:"error_reporting_disabled" yaml:"error_reporting_disabled"` + TracingDisabled bool `json:"tracing_disabled" yaml:"tracing_disabled"` + ProfilingDisabled bool `json:"profiling_disabled" yaml:"profiling_disabled"` + ErrorSampleRate float64 `json:"error_sample_rate" yaml:"error_sample_rate"` + TracesSampleRate float64 `json:"traces_sample_rate" yaml:"traces_sample_rate"` + ProfileSampleRate float64 `json:"profile_sample_rate" yaml:"profile_sample_rate"` + ClusterId string `json:"cluster_id" yaml:"cluster_id"` + ClusterOwner string `json:"cluster_owner" yaml:"cluster_owner"` +} + +// Config Global Singleton that can be accessed from anywhere in the app. 
This +// is required because panic recovery can happen anywhere in the app. +var Config *ConfigOpts + +// InitSentryConfig from environment. Errors if called more than once. +func InitSentryConfig() (*ConfigOpts, error) { + if Config != nil { + return nil, fmt.Errorf("sentry config already initialized") + } else { + Config = &ConfigOpts{} + } + + Config.Enabled = config.Enabled(os.Getenv("SENTRY_ENABLED")) + if !Config.Enabled { + return Config, nil + } + + Config.DSN = os.Getenv("SENTRY_DSN") + if Config.DSN == "" { + return nil, fmt.Errorf("sentry enabled but no DSN provided") + } + + Config.Environment = os.Getenv("SENTRY_ENVIRONMENT") + if Config.Environment == "" { + Config.Environment = "unknown" + } + + Config.ClusterOwner = os.Getenv("SENTRY_CLUSTER_OWNER") + Config.ClusterId = os.Getenv("SENTRY_CLUSTER_ID") + + // Configure error sampling + Config.ErrorReportingDisabled = config.Enabled(os.Getenv("SENTRY_ERROR_REPORTING_DISABLED")) + if Config.ErrorReportingDisabled { + Config.ErrorSampleRate = 0.0 + } else if errorSampleRate, err := strconv.ParseFloat(os.Getenv("SENTRY_ERROR_SAMPLE_RATE"), 64); err == nil && errorSampleRate <= 1.0 && errorSampleRate >= 0.0 { + Config.ErrorSampleRate = errorSampleRate + } else { + // By default we sample all errors + Config.ErrorSampleRate = 1.0 + } + + // Configure tracing & tracing sample rate + Config.TracingDisabled = config.Enabled(os.Getenv("SENTRY_TRACING_DISABLED")) + if Config.TracingDisabled { + Config.TracesSampleRate = 0.0 + } else if tracesSampleRate, err := strconv.ParseFloat(os.Getenv("SENTRY_TRACES_SAMPLE_RATE"), 64); err == nil && tracesSampleRate <= 1.0 && tracesSampleRate >= 0.0 { + Config.TracesSampleRate = tracesSampleRate + } else { + // By default we traces only 10% + Config.TracesSampleRate = 0.1 + } + + // Configure profiling & profile sample rate + Config.ProfilingDisabled = config.Enabled(os.Getenv("SENTRY_PROFILING_DISABLED")) + if Config.ProfilingDisabled { + Config.ProfileSampleRate = 0.0 + } 
else if profileSampleRate, err := strconv.ParseFloat(os.Getenv("SENTRY_PROFILE_SAMPLE_RATE"), 64); err == nil && profileSampleRate <= 1.0 && profileSampleRate >= 0.0 { + Config.ProfileSampleRate = profileSampleRate + } else { + // By default we profile everything that we are tracing + Config.ProfileSampleRate = 1.0 + } + + Config.Debug = config.Enabled(os.Getenv("SENTRY_DEBUG")) + Config.Release = os.Getenv("SENTRY_RELEASE") + + if tags, err := parseTags(); err != nil { + return nil, err + } else { + Config.Tags = tags + } + + return Config, nil +} + +var ( + tagKeyPattern = regexp.MustCompile(`^[a-zA-Z0-9_.:-]{1,32}$`) + tagValuePattern = regexp.MustCompile(`^[^\n]{1,200}$`) +) + +func parseTags() (map[string]string, error) { + tags := make(map[string]string) + for _, env := range os.Environ() { + if !strings.HasPrefix(env, "SENTRY_TAG_") { + continue + } + + parts := strings.SplitN(env[len("SENTRY_TAG_"):], "=", 2) + if len(parts) != 2 { + continue + } + key, value := parts[0], parts[1] + if !tagKeyPattern.MatchString(key) { + return nil, errors.New("invalid tag key: " + key) + } + if !tagValuePattern.MatchString(value) { + return nil, errors.New("invalid tag value for key: " + key) + } + tags[key] = value + } + return tags, nil +} + +func Enabled() bool { + if Config == nil { + return false + } + return Config.Enabled +} diff --git a/platform/dbops/binaries/weaviate-src/entities/sentry/config_test.go b/platform/dbops/binaries/weaviate-src/entities/sentry/config_test.go new file mode 100644 index 0000000000000000000000000000000000000000..42b55b2245ba2f4ab939be98d885685608e35aca --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/sentry/config_test.go @@ -0,0 +1,386 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
//
// CONTACT: hello@weaviate.io
//

package sentry

import (
	"errors"
	"os"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// TestSentryEnabled checks that the usual boolean spellings of SENTRY_ENABLED
// are accepted and that an unset variable means disabled.
func TestSentryEnabled(t *testing.T) {
	factors := []struct {
		name        string
		value       []string
		expected    bool
		expectedErr bool
	}{
		{"Valid: true", []string{"true"}, true, false},
		{"Valid: false", []string{"false"}, false, false},
		{"Valid: 1", []string{"1"}, true, false},
		{"Valid: 0", []string{"0"}, false, false},
		{"Valid: on", []string{"on"}, true, false},
		{"Valid: off", []string{"off"}, false, false},
		{"not given", []string{}, false, false},
	}
	for _, tt := range factors {
		t.Run(tt.name, func(t *testing.T) {
			if len(tt.value) == 1 {
				t.Setenv("SENTRY_ENABLED", tt.value[0])
				t.Setenv("SENTRY_DSN", "http://dsn")
			}
			// reset the package-level singleton so every case re-initializes
			Config = nil
			conf, err := InitSentryConfig()

			if tt.expectedErr {
				require.NotNil(t, err)
			} else {
				require.Nil(t, err)
				require.Equal(t, tt.expected, conf.Enabled)
			}
		})
	}
}

// TestSentryConfig covers the full environment -> ConfigOpts mapping,
// including sample-rate defaults, per-signal disable flags, and fallback
// behavior for out-of-range sample rates.
func TestSentryConfig(t *testing.T) {
	type test struct {
		name           string
		vars           map[string]string
		expectErr      bool
		expectedConfig ConfigOpts
	}

	tests := []test{
		{
			name: "enabled, everything set",
			vars: map[string]string{
				"SENTRY_ENABLED":       "true",
				"SENTRY_DSN":           "http://dsn",
				"SENTRY_DEBUG":         "true",
				"SENTRY_TAG_hello":     "world",
				"SENTRY_CLUSTER_OWNER": "im_the_owner",
				"SENTRY_CLUSTER_ID":    "id123",
			},
			expectErr: false,
			expectedConfig: ConfigOpts{
				Enabled:           true,
				DSN:               "http://dsn",
				Debug:             true,
				Environment:       "unknown",
				ErrorSampleRate:   1.0,
				TracesSampleRate:  0.1,
				ProfileSampleRate: 1.0,
				ClusterId:         "id123",
				ClusterOwner:      "im_the_owner",
				Tags: map[string]string{
					"hello": "world",
				},
			},
		},
		{
			name: "enabled, without optional vars",
			vars: map[string]string{
				"SENTRY_ENABLED": "true",
				"SENTRY_DSN":     "http://dsn",
			},
			expectErr: false,
			expectedConfig: ConfigOpts{
				Enabled:           true,
				DSN:               "http://dsn",
				Debug:             false,
				Environment:       "unknown",
				ErrorSampleRate:   1.0,
				TracesSampleRate:  0.1,
				ProfileSampleRate: 1.0,
				Tags:              map[string]string{},
			},
		},
		{
			name: "enabled, with environment and release",
			vars: map[string]string{
				"SENTRY_ENABLED":     "true",
				"SENTRY_ENVIRONMENT": "prod",
				"SENTRY_RELEASE":     "123.321",
				"SENTRY_DSN":         "http://dsn",
			},
			expectErr: false,
			expectedConfig: ConfigOpts{
				Enabled:           true,
				DSN:               "http://dsn",
				Debug:             false,
				Environment:       "prod",
				Release:           "123.321",
				ErrorSampleRate:   1.0,
				TracesSampleRate:  0.1,
				ProfileSampleRate: 1.0,
				Tags:              map[string]string{},
			},
		},
		{
			name: "enabled, with everything disabled",
			vars: map[string]string{
				"SENTRY_ENABLED":                  "true",
				"SENTRY_DSN":                      "http://dsn",
				"SENTRY_ERROR_REPORTING_DISABLED": "true",
				"SENTRY_PROFILING_DISABLED":       "true",
				"SENTRY_TRACING_DISABLED":         "true",
			},
			expectErr: false,
			expectedConfig: ConfigOpts{
				Enabled:                true,
				DSN:                    "http://dsn",
				Environment:            "unknown",
				Debug:                  false,
				ErrorReportingDisabled: true,
				ProfilingDisabled:      true,
				TracingDisabled:        true,
				ErrorSampleRate:        0.0,
				TracesSampleRate:       0.0,
				ProfileSampleRate:      0.0,
				Tags:                   map[string]string{},
			},
		},
		{
			name: "enabled, with error only disabled",
			vars: map[string]string{
				"SENTRY_ENABLED":                  "true",
				"SENTRY_DSN":                      "http://dsn",
				"SENTRY_ERROR_REPORTING_DISABLED": "true",
			},
			expectErr: false,
			expectedConfig: ConfigOpts{
				Enabled:                true,
				DSN:                    "http://dsn",
				Environment:            "unknown",
				Debug:                  false,
				ErrorReportingDisabled: true,
				ErrorSampleRate:        0.0,
				TracesSampleRate:       0.1,
				ProfileSampleRate:      1.0,
				Tags:                   map[string]string{},
			},
		},
		{
			name: "enabled, with traces only disabled",
			vars: map[string]string{
				"SENTRY_ENABLED":          "true",
				"SENTRY_DSN":              "http://dsn",
				"SENTRY_TRACING_DISABLED": "true",
			},
			expectErr: false,
			expectedConfig: ConfigOpts{
				Enabled:           true,
				DSN:               "http://dsn",
				Environment:       "unknown",
				Debug:             false,
				TracingDisabled:   true,
				ErrorSampleRate:   1.0,
				TracesSampleRate:  0.0,
				ProfileSampleRate: 1.0,
				Tags:              map[string]string{},
			},
		},
		{
			name: "enabled, with profile only disabled",
			vars: map[string]string{
				"SENTRY_ENABLED":            "true",
				"SENTRY_DSN":                "http://dsn",
				"SENTRY_PROFILING_DISABLED": "true",
			},
			expectErr: false,
			expectedConfig: ConfigOpts{
				Enabled:           true,
				DSN:               "http://dsn",
				Environment:       "unknown",
				Debug:             false,
				ProfilingDisabled: true,
				ErrorSampleRate:   1.0,
				TracesSampleRate:  0.1,
				ProfileSampleRate: 0.0,
				Tags:              map[string]string{},
			},
		},
		{
			name: "enabled, with tracing, profiling and sampling",
			vars: map[string]string{
				"SENTRY_ENABLED":             "true",
				"SENTRY_DSN":                 "http://dsn",
				"SENTRY_ERROR_SAMPLE_RATE":   "0.75",
				"SENTRY_TRACES_SAMPLE_RATE":  "0.55",
				"SENTRY_PROFILE_SAMPLE_RATE": "0.55",
			},
			expectErr: false,
			expectedConfig: ConfigOpts{
				Enabled:           true,
				DSN:               "http://dsn",
				Debug:             false,
				TracingDisabled:   false,
				Environment:       "unknown",
				ErrorSampleRate:   0.75,
				TracesSampleRate:  0.55,
				ProfileSampleRate: 0.55,
				Tags:              map[string]string{},
			},
		},
		{
			// rates above 1.0 are ignored and fall back to the defaults
			name: "enabled, with tracing and too high sampling",
			vars: map[string]string{
				"SENTRY_ENABLED":             "true",
				"SENTRY_DSN":                 "http://dsn",
				"SENTRY_ERROR_SAMPLE_RATE":   "1.01",
				"SENTRY_TRACES_SAMPLE_RATE":  "1.01",
				"SENTRY_PROFILE_SAMPLE_RATE": "1.01",
			},
			expectErr: false,
			expectedConfig: ConfigOpts{
				Enabled:           true,
				DSN:               "http://dsn",
				Debug:             false,
				Environment:       "unknown",
				ErrorSampleRate:   1.0,
				TracesSampleRate:  0.1,
				ProfileSampleRate: 1.0,
				Tags:              map[string]string{},
			},
		},
		{
			// negative rates are ignored and fall back to the defaults
			name: "enabled, with tracing and too low sampling",
			vars: map[string]string{
				"SENTRY_ENABLED":             "true",
				"SENTRY_DSN":                 "http://dsn",
				"SENTRY_ERROR_SAMPLE_RATE":   "-1",
				"SENTRY_TRACES_SAMPLE_RATE":  "-1",
				"SENTRY_PROFILE_SAMPLE_RATE": "-1",
			},
			expectErr: false,
			expectedConfig: ConfigOpts{
				Enabled:           true,
				DSN:               "http://dsn",
				Debug:             false,
				Environment:       "unknown",
				ErrorSampleRate:   1.0,
				TracesSampleRate:  0.1,
				ProfileSampleRate: 1.0,
				Tags:              map[string]string{},
			},
		},
		{
			name: "disabled",
			vars: map[string]string{
				"SENTRY_ENABLED": "false",
			},
			expectErr: false,
			expectedConfig: ConfigOpts{
				Enabled: false,
			},
		},
		{
			// enabled without a DSN must fail
			name: "enabled, but required fields not set",
			vars: map[string]string{
				"SENTRY_ENABLED": "true",
			},
			expectErr: true,
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			for key, value := range test.vars {
				t.Setenv(key, value)
			}

			// reset the package-level singleton so every case re-initializes
			Config = nil
			config, err := InitSentryConfig()

			if test.expectErr {
				require.NotNil(t, err)
			} else {
				require.Nil(t, err)
				require.Equal(t, &test.expectedConfig, config)
			}
		})
	}
}

// TestParseSentryTags checks SENTRY_TAG_* parsing and key/value validation.
// On error only the error message is asserted, not the partial tag map (the
// iteration order of os.Environ() makes partial contents non-deterministic).
func TestParseSentryTags(t *testing.T) {
	tests := []struct {
		name        string
		envVars     map[string]string
		expected    map[string]string
		expectedErr error
	}{
		{
			name: "valid tags",
			envVars: map[string]string{
				"SENTRY_TAG_validKey1": "validValue1",
				"SENTRY_TAG_validKey2": "validValue2",
			},
			expected: map[string]string{
				"validKey1": "validValue1",
				"validKey2": "validValue2",
			},
			expectedErr: nil,
		},
		{
			name: "invalid key",
			envVars: map[string]string{
				"SENTRY_TAG_invalidKeyWithMoreThanThirtyTwoCharacters12345": "value",
			},
			expected:    map[string]string{},
			expectedErr: errors.New("invalid tag key: invalidKeyWithMoreThanThirtyTwoCharacters12345"),
		},
		{
			name: "invalid value",
			envVars: map[string]string{
				"SENTRY_TAG_validKey": "value\nwith\nnewlines",
			},
			expected:    map[string]string{},
			expectedErr: errors.New("invalid tag value for key: validKey"),
		},
		{
			name: "mixed valid and invalid",
			envVars: map[string]string{
				"SENTRY_TAG_validKey1":                           "validValue1",
				"SENTRY_TAG_invalidKeyWithMoreThanThirtyTwoChars": "value",
				"SENTRY_TAG_validKey2":                           "validValue2",
				"SENTRY_TAG_validKey3":                           "value",
			},
			expected: map[string]string{
				"validKey1": "validValue1",
			},
			expectedErr: errors.New("invalid tag key: invalidKeyWithMoreThanThirtyTwoChars"),
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			for key, value := range tt.envVars {
				os.Setenv(key, value)
			}

			tags, err := parseTags()
			if tt.expectedErr != nil {
				assert.EqualError(t, err, tt.expectedErr.Error())
			} else {
				require.NoError(t, err)
				assert.Equal(t, tt.expected, tags)
			}

			for key := range tt.envVars {
				os.Unsetenv(key)
			}
		})
	}
}
diff --git a/platform/dbops/binaries/weaviate-src/entities/sentry/errors.go b/platform/dbops/binaries/weaviate-src/entities/sentry/errors.go
new file mode 100644
index 0000000000000000000000000000000000000000..26966d0b5c4c3cb756a93245f97318d3fbca5b24
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/entities/sentry/errors.go
@@ -0,0 +1,32 @@
//                           _       _
//  __      _____  __ ___   ___  __ _| |_ ___
//  \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
//   \ V  V /  __/ (_| |\ V /| | (_| | || __/
//    \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
//
//  Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+// +// CONTACT: hello@weaviate.io +// + +package sentry + +import ( + libsentry "github.com/getsentry/sentry-go" +) + +func Recover(err any) { + if !Enabled() { + return + } + + libsentry.CurrentHub().Recover(err) +} + +func CaptureException(err error) { + if !Enabled() { + return + } + + libsentry.CaptureException(err) +} diff --git a/platform/dbops/binaries/weaviate-src/entities/storagestate/status.go b/platform/dbops/binaries/weaviate-src/entities/storagestate/status.go new file mode 100644 index 0000000000000000000000000000000000000000..9be08fce23700e52c7f0cdaa3da613fdccd3aae6 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/entities/storagestate/status.go @@ -0,0 +1,58 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package storagestate + +import ( + "errors" + "fmt" +) + +const ( + StatusReadOnly Status = "READONLY" + StatusIndexing Status = "INDEXING" + StatusLoading Status = "LOADING" + StatusLazyLoading Status = "LAZY_LOADING" + StatusReady Status = "READY" + StatusShutdown Status = "SHUTDOWN" +) + +var ErrStatusReadOnlyWithReason = func(reason string) error { + return fmt.Errorf("store is read-only due to: %v", reason) +} + +var ( + ErrStatusReadOnly = errors.New("store is read-only") + ErrInvalidStatus = errors.New("invalid storage status") +) + +type Status string + +func (s Status) String() string { + return string(s) +} + +func ValidateStatus(in string) (status Status, err error) { + switch in { + case string(StatusReadOnly): + status = StatusReadOnly + case string(StatusIndexing): + status = StatusIndexing + case string(StatusReady): + status = StatusReady + case string(StatusShutdown): + status = StatusShutdown + default: + err = ErrInvalidStatus + } + + return +} diff --git 
a/platform/dbops/binaries/weaviate-src/entities/storagestate/status_test.go b/platform/dbops/binaries/weaviate-src/entities/storagestate/status_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..e36ba020f396e5c17b0ffbd880b3195450c5f3ed
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/entities/storagestate/status_test.go
@@ -0,0 +1,53 @@
//                           _       _
//  __      _____  __ ___   ___  __ _| |_ ___
//  \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
//   \ V  V /  __/ (_| |\ V /| | (_| | || __/
//    \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
//
//  Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
//
//  CONTACT: hello@weaviate.io
//

package storagestate

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// TestStatusValidation pins exactly which raw strings ValidateStatus accepts;
// note LOADING/LAZY_LOADING are absent from the valid set.
func TestStatusValidation(t *testing.T) {
	t.Run("with invalid status", func(t *testing.T) {
		tests := []string{
			"READ_ONLY",
			"read only",
			"ok",
			"WRITEONLY",
			"INDESKING",
			"",
		}

		for _, test := range tests {
			_, err := ValidateStatus(test)
			require.EqualError(t, ErrInvalidStatus, err.Error())
		}
	})

	t.Run("with valid status", func(t *testing.T) {
		tests := []struct {
			in       string
			expected Status
		}{
			{"READONLY", StatusReadOnly},
			{"READY", StatusReady},
			{"INDEXING", StatusIndexing},
		}

		for _, test := range tests {
			status, err := ValidateStatus(test.in)
			require.Nil(t, err)
			require.Equal(t, test.expected, status)
		}
	})
}
diff --git a/platform/dbops/binaries/weaviate-src/entities/storobj/buffer_pool.go b/platform/dbops/binaries/weaviate-src/entities/storobj/buffer_pool.go
new file mode 100644
index 0000000000000000000000000000000000000000..4248463829fbe1e3d51593d2c317674ccab3df5b
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/entities/storobj/buffer_pool.go
@@ -0,0 +1,50 @@
//                           _       _
//  __      _____  __ ___   ___  __ _| |_ ___
//  \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
//   \ V  V /  __/ (_| |\ V /| | (_| | || __/
//    \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
//
//  Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
//
//  CONTACT: hello@weaviate.io
//

package storobj

import "sync"

// newBufferPool creates a pool whose buffers start with len=0 and
// cap>=initialSize.
func newBufferPool(initialSize int) *bufferPool {
	return &bufferPool{
		pool: sync.Pool{
			New: func() any {
				// initialize with len=0 to make sure we get a consistent result
				// whether it's a new or used buffer. Every buffer will always have
				// len=0 and cap>=initialSize
				return make([]byte, 0, initialSize)
			},
		},
	}
}

// bufferPool recycles byte slices to reduce allocation churn on hot
// serialization paths.
type bufferPool struct {
	pool sync.Pool
}

// Get returns a buffer with len=0 (either brand-new or a recycled one whose
// length was reset in Put).
func (b *bufferPool) Get() []byte {
	buf := b.pool.Get().([]byte)

	return buf
}

// Put returns buf to the pool for reuse; callers must not use buf afterwards.
func (b *bufferPool) Put(buf []byte) {
	// make sure the length is reset before putting it back into the pool. This
	// way all buffers will always have len=0, either because they are brand-new
	// or because they have been reset.
	buf = buf[:0]

	//nolint:staticcheck // I disagree with the linter, this doesn't need to be a
	//pointer. Even if we copy the slicestruct header, the backing array is
	//what's allocation-heavy and that will be reused. The profiles in the PR
	//description prove this.
	b.pool.Put(buf)
}