diff --git a/platform/dbops/binaries/weaviate-src/usecases/classification/classifier_run.go b/platform/dbops/binaries/weaviate-src/usecases/classification/classifier_run.go new file mode 100644 index 0000000000000000000000000000000000000000..c567fcd627ad1182cc15bad2a6af740f265b8e01 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/classification/classifier_run.go @@ -0,0 +1,234 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package classification + +import ( + "context" + "fmt" + "runtime" + "time" + + "github.com/go-openapi/strfmt" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/modulecapabilities" + "github.com/weaviate/weaviate/entities/search" +) + +// the contents of this file deal with anything about a classification run +// which is generic, whereas the individual classify_item fns can be found in +// the respective files such as classifier_run_knn.go + +func (c *Classifier) run(params models.Classification, + filters Filters, +) { + ctx, cancel := contextWithTimeout(30 * time.Minute) + defer cancel() + + go c.monitorClassification(ctx, cancel, params.Class) + + c.logBegin(params, filters) + unclassifiedItems, err := c.vectorRepo.GetUnclassified(ctx, + params.Class, params.ClassifyProperties, params.BasedOnProperties, filters.Source()) + if err != nil { + c.failRunWithError(params, errors.Wrap(err, "retrieve to-be-classifieds")) + return + } + + if len(unclassifiedItems) == 0 { + c.failRunWithError(params, + fmt.Errorf("no classes to be classified - did you run a previous classification already?")) + return + } + c.logItemsFetched(params, 
unclassifiedItems) + + classifyItem, err := c.prepareRun(params, filters, unclassifiedItems) + if err != nil { + c.failRunWithError(params, errors.Wrap(err, "prepare classification")) + return + } + + params, err = c.runItems(ctx, classifyItem, params, filters, unclassifiedItems) + if err != nil { + c.failRunWithError(params, err) + return + } + + c.succeedRun(params) +} + +func (c *Classifier) monitorClassification(ctx context.Context, cancelFn context.CancelFunc, className string) { + ticker := time.NewTicker(100 * time.Millisecond) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + class := c.schemaGetter.ReadOnlyClass(className) + if class == nil { + cancelFn() + return + } + } + } +} + +func (c *Classifier) prepareRun(params models.Classification, filters Filters, + unclassifiedItems []search.Result, +) (ClassifyItemFn, error) { + c.logBeginPreparation(params) + defer c.logFinishPreparation(params) + + if params.Type == "knn" { + return c.classifyItemUsingKNN, nil + } + + if params.Type == "zeroshot" { + return c.classifyItemUsingZeroShot, nil + } + + if c.modulesProvider != nil { + classifyItemFn, err := c.modulesProvider.GetClassificationFn(params.Class, params.Type, + c.getClassifyParams(params, filters, unclassifiedItems)) + if err != nil { + return nil, errors.Wrapf(err, "cannot classify") + } + if classifyItemFn == nil { + return nil, errors.Errorf("cannot classify: empty classifier for %s", params.Type) + } + classification := &moduleClassification{classifyItemFn} + return classification.classifyFn, nil + } + + return nil, errors.Errorf("unsupported type '%s', have no classify item fn for this", params.Type) +} + +func (c *Classifier) getClassifyParams(params models.Classification, + filters Filters, unclassifiedItems []search.Result, +) modulecapabilities.ClassifyParams { + return modulecapabilities.ClassifyParams{ + GetClass: c.schemaGetter.ReadOnlyClass, + Params: params, + Filters: filters, + 
UnclassifiedItems: unclassifiedItems, + VectorRepo: c.vectorClassSearchRepo, + } +} + +// runItems splits the job list into batches that can be worked on parallelly +// depending on the available CPUs +func (c *Classifier) runItems(ctx context.Context, classifyItem ClassifyItemFn, params models.Classification, filters Filters, + items []search.Result, +) (models.Classification, error) { + workerCount := runtime.GOMAXPROCS(0) + if len(items) < workerCount { + workerCount = len(items) + } + + workers := newRunWorkers(workerCount, classifyItem, params, filters, c.vectorRepo, c.logger) + workers.addJobs(items) + res := workers.work(ctx) + + params.Meta.Completed = strfmt.DateTime(time.Now()) + params.Meta.CountSucceeded = res.successCount + params.Meta.CountFailed = res.errorCount + params.Meta.Count = res.successCount + res.errorCount + + return params, res.err +} + +func (c *Classifier) succeedRun(params models.Classification) { + params.Status = models.ClassificationStatusCompleted + ctx, cancel := contextWithTimeout(2 * time.Second) + defer cancel() + err := c.repo.Put(ctx, params) + if err != nil { + c.logExecutionError("store succeeded run", err, params) + } + c.logFinish(params) +} + +func (c *Classifier) failRunWithError(params models.Classification, err error) { + params.Status = models.ClassificationStatusFailed + params.Error = fmt.Sprintf("classification failed: %v", err) + err = c.repo.Put(context.Background(), params) + if err != nil { + c.logExecutionError("store failed run", err, params) + } + c.logFinish(params) +} + +func (c *Classifier) extendItemWithObjectMeta(item *search.Result, + params models.Classification, classified []string, +) { + // don't overwrite existing non-classification meta info + if item.AdditionalProperties == nil { + item.AdditionalProperties = models.AdditionalProperties{} + } + + item.AdditionalProperties["classification"] = additional.Classification{ + ID: params.ID, + Scope: params.ClassifyProperties, + ClassifiedFields: 
classified, + Completed: strfmt.DateTime(time.Now()), + } +} + +func contextWithTimeout(d time.Duration) (context.Context, context.CancelFunc) { + return context.WithTimeout(context.Background(), d) +} + +// Logging helper methods +func (c *Classifier) logBase(params models.Classification, event string) *logrus.Entry { + return c.logger.WithField("action", "classification_run"). + WithField("event", event). + WithField("params", params). + WithField("classification_type", params.Type) +} + +func (c *Classifier) logBegin(params models.Classification, filters Filters) { + c.logBase(params, "classification_begin"). + WithField("filters", filters). + Debug("classification started") +} + +func (c *Classifier) logFinish(params models.Classification) { + c.logBase(params, "classification_finish"). + WithField("status", params.Status). + Debug("classification finished") +} + +func (c *Classifier) logItemsFetched(params models.Classification, items search.Results) { + c.logBase(params, "classification_items_fetched"). + WithField("status", params.Status). + WithField("item_count", len(items)). + Debug("fetched source items") +} + +func (c *Classifier) logBeginPreparation(params models.Classification) { + c.logBase(params, "classification_preparation_begin"). + Debug("begin run preparation") +} + +func (c *Classifier) logFinishPreparation(params models.Classification) { + c.logBase(params, "classification_preparation_finish"). + Debug("finish run preparation") +} + +func (c *Classifier) logExecutionError(event string, err error, params models.Classification) { + c.logBase(params, event). + WithError(err). 
+ Error("classification execution failure") +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/classification/classifier_run_knn.go b/platform/dbops/binaries/weaviate-src/usecases/classification/classifier_run_knn.go new file mode 100644 index 0000000000000000000000000000000000000000..a0421e00b54a8fabf02b23033fa9d0a3b7a48a84 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/classification/classifier_run_knn.go @@ -0,0 +1,62 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package classification + +import ( + "fmt" + "time" + + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/search" +) + +func (c *Classifier) classifyItemUsingKNN(item search.Result, itemIndex int, + params models.Classification, filters Filters, writer Writer, +) error { + ctx, cancel := contextWithTimeout(2 * time.Second) + defer cancel() + + // this type assertion is safe to make, since we have passed the parsing stage + settings := params.Settings.(*ParamsKNN) + + // K is guaranteed to be set by now, no danger in dereferencing the pointer + res, err := c.vectorRepo.AggregateNeighbors(ctx, item.Vector, + item.ClassName, + params.ClassifyProperties, int(*settings.K), filters.TrainingSet()) + if err != nil { + return fmt.Errorf("classify %s/%s: %w", item.ClassName, item.ID, err) + } + + var classified []string + + for _, agg := range res { + meta := agg.Meta() + item.Schema.(map[string]interface{})[agg.Property] = models.MultipleRef{ + &models.SingleRef{ + Beacon: agg.Beacon, + Classification: meta, + }, + } + + // append list of actually classified (can differ from scope!) 
properties, + // so we can build the object meta information + classified = append(classified, agg.Property) + } + + c.extendItemWithObjectMeta(&item, params, classified) + err = writer.Store(item) + if err != nil { + return fmt.Errorf("store %s/%s: %w", item.ClassName, item.ID, err) + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/classification/classifier_run_worker.go b/platform/dbops/binaries/weaviate-src/usecases/classification/classifier_run_worker.go new file mode 100644 index 0000000000000000000000000000000000000000..b43651dbaf320dc090155563ad6b298728a5b0b9 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/classification/classifier_run_worker.go @@ -0,0 +1,158 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package classification + +import ( + "context" + "sync" + "sync/atomic" + + "github.com/sirupsen/logrus" + enterrors "github.com/weaviate/weaviate/entities/errors" + + "github.com/pkg/errors" + "github.com/weaviate/weaviate/entities/errorcompounder" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/search" +) + +type runWorker struct { + jobs []search.Result + successCount *int64 + errorCount *int64 + ec *errorcompounder.SafeErrorCompounder + classify ClassifyItemFn + batchWriter Writer + params models.Classification + filters Filters + id int + workerCount int +} + +func (w *runWorker) addJob(job search.Result) { + w.jobs = append(w.jobs, job) +} + +func (w *runWorker) work(ctx context.Context, wg *sync.WaitGroup) { + defer wg.Done() + + for i, item := range w.jobs { + // check if the whole classification operation has been cancelled + // if yes, then abort the classifier worker + if err := ctx.Err(); err != nil { + 
w.ec.Add(err) + atomic.AddInt64(w.errorCount, 1) + break + } + originalIndex := (i * w.workerCount) + w.id + err := w.classify(item, originalIndex, w.params, w.filters, w.batchWriter) + if err != nil { + w.ec.Add(err) + atomic.AddInt64(w.errorCount, 1) + } else { + atomic.AddInt64(w.successCount, 1) + } + } +} + +func newRunWorker(id int, workerCount int, rw *runWorkers) *runWorker { + return &runWorker{ + successCount: rw.successCount, + errorCount: rw.errorCount, + ec: rw.ec, + params: rw.params, + filters: rw.filters, + classify: rw.classify, + batchWriter: rw.batchWriter, + id: id, + workerCount: workerCount, + } +} + +type runWorkers struct { + workers []*runWorker + successCount *int64 + errorCount *int64 + ec *errorcompounder.SafeErrorCompounder + classify ClassifyItemFn + params models.Classification + filters Filters + batchWriter Writer + logger logrus.FieldLogger +} + +func newRunWorkers(amount int, classifyFn ClassifyItemFn, + params models.Classification, filters Filters, vectorRepo vectorRepo, logger logrus.FieldLogger, +) *runWorkers { + var successCount int64 + var errorCount int64 + + rw := &runWorkers{ + workers: make([]*runWorker, amount), + successCount: &successCount, + errorCount: &errorCount, + ec: &errorcompounder.SafeErrorCompounder{}, + classify: classifyFn, + params: params, + filters: filters, + batchWriter: newBatchWriter(vectorRepo, logger), + logger: logger, + } + + for i := 0; i < amount; i++ { + rw.workers[i] = newRunWorker(i, amount, rw) + } + + return rw +} + +func (ws *runWorkers) addJobs(jobs []search.Result) { + for i, job := range jobs { + ws.workers[i%len(ws.workers)].addJob(job) + } +} + +func (ws *runWorkers) work(ctx context.Context) runWorkerResults { + ws.batchWriter.Start() + + wg := &sync.WaitGroup{} + for _, worker := range ws.workers { + worker := worker + wg.Add(1) + enterrors.GoWrapper(func() { worker.work(ctx, wg) }, ws.logger) + + } + + wg.Wait() + + res := ws.batchWriter.Stop() + + if res.SuccessCount() != 
*ws.successCount || res.ErrorCount() != *ws.errorCount { + ws.ec.Add(errors.New("data save error")) + } + + if res.Err() != nil { + ws.ec.Add(res.Err()) + } + + return runWorkerResults{ + successCount: *ws.successCount, + errorCount: *ws.errorCount, + err: ws.ec.ToError(), + } +} + +type runWorkerResults struct { + successCount int64 + errorCount int64 + err error +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/classification/classifier_run_zeroshot.go b/platform/dbops/binaries/weaviate-src/usecases/classification/classifier_run_zeroshot.go new file mode 100644 index 0000000000000000000000000000000000000000..e712f6831f500c79fa90e03f15279c349dfa0f2f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/classification/classifier_run_zeroshot.go @@ -0,0 +1,75 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package classification + +import ( + "fmt" + "time" + + "github.com/pkg/errors" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema/crossref" + "github.com/weaviate/weaviate/entities/search" +) + +func (c *Classifier) classifyItemUsingZeroShot(item search.Result, itemIndex int, + params models.Classification, filters Filters, writer Writer, +) error { + ctx, cancel := contextWithTimeout(2 * time.Second) + defer cancel() + + properties := params.ClassifyProperties + + class := c.schemaGetter.ReadOnlyClass(item.ClassName) + if class == nil { + return fmt.Errorf("zeroshot: search: could not find class %s in schema", item.ClassName) + } + + classifyProp := []string{} + for _, prop := range properties { + for _, classProp := range class.Properties { + if classProp.Name == prop { + classifyProp = append(classifyProp, classProp.DataType...) 
+ } + } + } + + var classified []string + for _, className := range classifyProp { + for _, prop := range properties { + res, err := c.vectorRepo.ZeroShotSearch(ctx, item.Vector, className, + params.ClassifyProperties, filters.Target()) + if err != nil { + return errors.Wrap(err, "zeroshot: search") + } + + if len(res) > 0 { + cref := crossref.NewLocalhost(res[0].ClassName, res[0].ID) + item.Schema.(map[string]interface{})[prop] = models.MultipleRef{ + &models.SingleRef{ + Beacon: cref.SingleRef().Beacon, + Classification: &models.ReferenceMetaClassification{}, + }, + } + classified = append(classified, prop) + } + } + } + + c.extendItemWithObjectMeta(&item, params, classified) + err := writer.Store(item) + if err != nil { + return errors.Errorf("store %s/%s: %v", item.ClassName, item.ID, err) + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/classification/classifier_test.go b/platform/dbops/binaries/weaviate-src/usecases/classification/classifier_test.go new file mode 100644 index 0000000000000000000000000000000000000000..848104cca5cd5dc0ad24f36f8227a35865fb3e61 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/classification/classifier_test.go @@ -0,0 +1,619 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package classification + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "testing" + "time" + + "github.com/go-openapi/strfmt" + "github.com/sirupsen/logrus" + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/weaviate/weaviate/entities/models" + testhelper "github.com/weaviate/weaviate/test/helper" + "github.com/weaviate/weaviate/usecases/auth/authorization/mocks" +) + +func newNullLogger() *logrus.Logger { + log, _ := test.NewNullLogger() + return log +} + +func Test_Classifier_KNN(t *testing.T) { + t.Run("with invalid data", func(t *testing.T) { + sg := &fakeSchemaGetter{testSchema()} + _, err := New(sg, nil, nil, mocks.NewMockAuthorizer(), newNullLogger(), nil). + Schedule(context.Background(), nil, models.Classification{}) + assert.NotNil(t, err, "should error with invalid user input") + }) + + var id strfmt.UUID + // so we can reuse it for follow up requests, such as checking the status + + t.Run("with valid data", func(t *testing.T) { + sg := &fakeSchemaGetter{testSchema()} + repo := newFakeClassificationRepo() + authorizer := mocks.NewMockAuthorizer() + vectorRepo := newFakeVectorRepoKNN(testDataToBeClassified(), testDataAlreadyClassified()) + classifier := New(sg, repo, vectorRepo, authorizer, newNullLogger(), nil) + + params := models.Classification{ + Class: "Article", + BasedOnProperties: []string{"description"}, + ClassifyProperties: []string{"exactCategory", "mainCategory"}, + Settings: map[string]interface{}{ + "k": json.Number("1"), + }, + } + + t.Run("scheduling a classification", func(t *testing.T) { + class, err := classifier.Schedule(context.Background(), nil, params) + require.Nil(t, err, "should not error") + require.NotNil(t, class) + + assert.Len(t, class.ID, 36, "an id was assigned") + id = class.ID + }) + + t.Run("retrieving the same classification by id", func(t *testing.T) { + class, err := 
classifier.Get(context.Background(), nil, id) + require.Nil(t, err) + require.NotNil(t, class) + assert.Equal(t, id, class.ID) + assert.Equal(t, models.ClassificationStatusRunning, class.Status) + }) + + // TODO: improve by polling instead + time.Sleep(500 * time.Millisecond) + + t.Run("status is now completed", func(t *testing.T) { + class, err := classifier.Get(context.Background(), nil, id) + require.Nil(t, err) + require.NotNil(t, class) + assert.Equal(t, models.ClassificationStatusCompleted, class.Status) + }) + + t.Run("the classifier updated the actions with the classified references", func(t *testing.T) { + vectorRepo.Lock() + require.Len(t, vectorRepo.db, 6) + vectorRepo.Unlock() + + t.Run("food", func(t *testing.T) { + idArticleFoodOne := "06a1e824-889c-4649-97f9-1ed3fa401d8e" + idArticleFoodTwo := "6402e649-b1e0-40ea-b192-a64eab0d5e56" + + checkRef(t, vectorRepo, idArticleFoodOne, "exactCategory", idCategoryFoodAndDrink) + checkRef(t, vectorRepo, idArticleFoodTwo, "mainCategory", idMainCategoryFoodAndDrink) + }) + + t.Run("politics", func(t *testing.T) { + idArticlePoliticsOne := "75ba35af-6a08-40ae-b442-3bec69b355f9" + idArticlePoliticsTwo := "f850439a-d3cd-4f17-8fbf-5a64405645cd" + + checkRef(t, vectorRepo, idArticlePoliticsOne, "exactCategory", idCategoryPolitics) + checkRef(t, vectorRepo, idArticlePoliticsTwo, "mainCategory", idMainCategoryPoliticsAndSociety) + }) + + t.Run("society", func(t *testing.T) { + idArticleSocietyOne := "a2bbcbdc-76e1-477d-9e72-a6d2cfb50109" + idArticleSocietyTwo := "069410c3-4b9e-4f68-8034-32a066cb7997" + + checkRef(t, vectorRepo, idArticleSocietyOne, "exactCategory", idCategorySociety) + checkRef(t, vectorRepo, idArticleSocietyTwo, "mainCategory", idMainCategoryPoliticsAndSociety) + }) + }) + }) + + t.Run("when errors occur during classification", func(t *testing.T) { + sg := &fakeSchemaGetter{testSchema()} + repo := newFakeClassificationRepo() + authorizer := mocks.NewMockAuthorizer() + vectorRepo := 
newFakeVectorRepoKNN(testDataToBeClassified(), testDataAlreadyClassified()) + vectorRepo.errorOnAggregate = errors.New("something went wrong") + classifier := New(sg, repo, vectorRepo, authorizer, newNullLogger(), nil) + + params := models.Classification{ + Class: "Article", + BasedOnProperties: []string{"description"}, + ClassifyProperties: []string{"exactCategory", "mainCategory"}, + Settings: map[string]interface{}{ + "k": json.Number("1"), + }, + } + + t.Run("scheduling a classification", func(t *testing.T) { + class, err := classifier.Schedule(context.Background(), nil, params) + require.Nil(t, err, "should not error") + require.NotNil(t, class) + + assert.Len(t, class.ID, 36, "an id was assigned") + id = class.ID + }) + + waitForStatusToNoLongerBeRunning(t, classifier, id) + + t.Run("status is now failed", func(t *testing.T) { + class, err := classifier.Get(context.Background(), nil, id) + require.Nil(t, err) + require.NotNil(t, class) + assert.Equal(t, models.ClassificationStatusFailed, class.Status) + expectedErrStrings := []string{ + "classification failed: ", + "classify Article/75ba35af-6a08-40ae-b442-3bec69b355f9: something went wrong", + "classify Article/f850439a-d3cd-4f17-8fbf-5a64405645cd: something went wrong", + "classify Article/a2bbcbdc-76e1-477d-9e72-a6d2cfb50109: something went wrong", + "classify Article/069410c3-4b9e-4f68-8034-32a066cb7997: something went wrong", + "classify Article/06a1e824-889c-4649-97f9-1ed3fa401d8e: something went wrong", + "classify Article/6402e649-b1e0-40ea-b192-a64eab0d5e56: something went wrong", + } + + for _, msg := range expectedErrStrings { + assert.Contains(t, class.Error, msg) + } + }) + }) + + t.Run("when there is nothing to be classified", func(t *testing.T) { + sg := &fakeSchemaGetter{testSchema()} + repo := newFakeClassificationRepo() + authorizer := mocks.NewMockAuthorizer() + vectorRepo := newFakeVectorRepoKNN(nil, testDataAlreadyClassified()) + classifier := New(sg, repo, vectorRepo, authorizer, 
newNullLogger(), nil) + + params := models.Classification{ + Class: "Article", + BasedOnProperties: []string{"description"}, + ClassifyProperties: []string{"exactCategory", "mainCategory"}, + Settings: map[string]interface{}{ + "k": json.Number("1"), + }, + } + + t.Run("scheduling a classification", func(t *testing.T) { + class, err := classifier.Schedule(context.Background(), nil, params) + require.Nil(t, err, "should not error") + require.NotNil(t, class) + + assert.Len(t, class.ID, 36, "an id was assigned") + id = class.ID + }) + + waitForStatusToNoLongerBeRunning(t, classifier, id) + + t.Run("status is now failed", func(t *testing.T) { + class, err := classifier.Get(context.Background(), nil, id) + require.Nil(t, err) + require.NotNil(t, class) + assert.Equal(t, models.ClassificationStatusFailed, class.Status) + expectedErr := "classification failed: " + + "no classes to be classified - did you run a previous classification already?" + assert.Equal(t, expectedErr, class.Error) + }) + }) +} + +func Test_Classifier_Custom_Classifier(t *testing.T) { + var id strfmt.UUID + // so we can reuse it for follow up requests, such as checking the status + + t.Run("with unreconginzed custom module classifier name", func(t *testing.T) { + sg := &fakeSchemaGetter{testSchema()} + repo := newFakeClassificationRepo() + authorizer := mocks.NewMockAuthorizer() + + vectorRepo := newFakeVectorRepoContextual(testDataToBeClassified(), testDataPossibleTargets()) + logger, _ := test.NewNullLogger() + + // vectorizer := &fakeVectorizer{words: testDataVectors()} + modulesProvider := NewFakeModulesProvider() + classifier := New(sg, repo, vectorRepo, authorizer, logger, modulesProvider) + + notRecoginzedContextual := "text2vec-contextionary-custom-not-recognized" + params := models.Classification{ + Class: "Article", + BasedOnProperties: []string{"description"}, + ClassifyProperties: []string{"exactCategory", "mainCategory"}, + Type: notRecoginzedContextual, + } + + t.Run("scheduling an 
unrecognized classification", func(t *testing.T) { + class, err := classifier.Schedule(context.Background(), nil, params) + require.Nil(t, err, "should not error") + require.NotNil(t, class) + + assert.Len(t, class.ID, 36, "an id was assigned") + id = class.ID + }) + + t.Run("retrieving the same classification by id", func(t *testing.T) { + class, err := classifier.Get(context.Background(), nil, id) + require.Nil(t, err) + require.NotNil(t, class) + assert.Equal(t, id, class.ID) + }) + + // TODO: improve by polling instead + time.Sleep(500 * time.Millisecond) + + t.Run("status is failed", func(t *testing.T) { + class, err := classifier.Get(context.Background(), nil, id) + require.Nil(t, err) + require.NotNil(t, class) + assert.Equal(t, models.ClassificationStatusFailed, class.Status) + assert.Equal(t, notRecoginzedContextual, class.Type) + assert.Contains(t, class.Error, "classifier "+notRecoginzedContextual+" not found") + }) + }) + + t.Run("with valid data", func(t *testing.T) { + sg := &fakeSchemaGetter{testSchema()} + repo := newFakeClassificationRepo() + authorizer := mocks.NewMockAuthorizer() + + vectorRepo := newFakeVectorRepoContextual(testDataToBeClassified(), testDataPossibleTargets()) + logger, _ := test.NewNullLogger() + + modulesProvider := NewFakeModulesProvider() + classifier := New(sg, repo, vectorRepo, authorizer, logger, modulesProvider) + + contextual := "text2vec-contextionary-custom-contextual" + params := models.Classification{ + Class: "Article", + BasedOnProperties: []string{"description"}, + ClassifyProperties: []string{"exactCategory", "mainCategory"}, + Type: contextual, + } + + t.Run("scheduling a classification", func(t *testing.T) { + class, err := classifier.Schedule(context.Background(), nil, params) + require.Nil(t, err, "should not error") + require.NotNil(t, class) + + assert.Len(t, class.ID, 36, "an id was assigned") + id = class.ID + }) + + t.Run("retrieving the same classification by id", func(t *testing.T) { + class, err := 
classifier.Get(context.Background(), nil, id) + require.Nil(t, err) + require.NotNil(t, class) + assert.Equal(t, id, class.ID) + }) + + // TODO: improve by polling instead + time.Sleep(500 * time.Millisecond) + + t.Run("status is now completed", func(t *testing.T) { + class, err := classifier.Get(context.Background(), nil, id) + require.Nil(t, err) + require.NotNil(t, class) + assert.Equal(t, models.ClassificationStatusCompleted, class.Status) + }) + + t.Run("the classifier updated the actions with the classified references", func(t *testing.T) { + vectorRepo.Lock() + require.Len(t, vectorRepo.db, 6) + vectorRepo.Unlock() + + t.Run("food", func(t *testing.T) { + idArticleFoodOne := "06a1e824-889c-4649-97f9-1ed3fa401d8e" + idArticleFoodTwo := "6402e649-b1e0-40ea-b192-a64eab0d5e56" + + checkRef(t, vectorRepo, idArticleFoodOne, "exactCategory", idCategoryFoodAndDrink) + checkRef(t, vectorRepo, idArticleFoodTwo, "mainCategory", idMainCategoryFoodAndDrink) + }) + + t.Run("politics", func(t *testing.T) { + idArticlePoliticsOne := "75ba35af-6a08-40ae-b442-3bec69b355f9" + idArticlePoliticsTwo := "f850439a-d3cd-4f17-8fbf-5a64405645cd" + + checkRef(t, vectorRepo, idArticlePoliticsOne, "exactCategory", idCategoryPolitics) + checkRef(t, vectorRepo, idArticlePoliticsTwo, "mainCategory", idMainCategoryPoliticsAndSociety) + }) + + t.Run("society", func(t *testing.T) { + idArticleSocietyOne := "a2bbcbdc-76e1-477d-9e72-a6d2cfb50109" + idArticleSocietyTwo := "069410c3-4b9e-4f68-8034-32a066cb7997" + + checkRef(t, vectorRepo, idArticleSocietyOne, "exactCategory", idCategorySociety) + checkRef(t, vectorRepo, idArticleSocietyTwo, "mainCategory", idMainCategoryPoliticsAndSociety) + }) + }) + }) + + t.Run("when errors occur during classification", func(t *testing.T) { + sg := &fakeSchemaGetter{testSchema()} + repo := newFakeClassificationRepo() + authorizer := mocks.NewMockAuthorizer() + vectorRepo := newFakeVectorRepoKNN(testDataToBeClassified(), testDataAlreadyClassified()) + 
vectorRepo.errorOnAggregate = errors.New("something went wrong") + logger, _ := test.NewNullLogger() + classifier := New(sg, repo, vectorRepo, authorizer, logger, nil) + + params := models.Classification{ + Class: "Article", + BasedOnProperties: []string{"description"}, + ClassifyProperties: []string{"exactCategory", "mainCategory"}, + Settings: map[string]interface{}{ + "k": json.Number("1"), + }, + } + + t.Run("scheduling a classification", func(t *testing.T) { + class, err := classifier.Schedule(context.Background(), nil, params) + require.Nil(t, err, "should not error") + require.NotNil(t, class) + + assert.Len(t, class.ID, 36, "an id was assigned") + id = class.ID + }) + + waitForStatusToNoLongerBeRunning(t, classifier, id) + + t.Run("status is now failed", func(t *testing.T) { + class, err := classifier.Get(context.Background(), nil, id) + require.Nil(t, err) + require.NotNil(t, class) + assert.Equal(t, models.ClassificationStatusFailed, class.Status) + expectedErrStrings := []string{ + "classification failed: ", + "classify Article/75ba35af-6a08-40ae-b442-3bec69b355f9: something went wrong", + "classify Article/f850439a-d3cd-4f17-8fbf-5a64405645cd: something went wrong", + "classify Article/a2bbcbdc-76e1-477d-9e72-a6d2cfb50109: something went wrong", + "classify Article/069410c3-4b9e-4f68-8034-32a066cb7997: something went wrong", + "classify Article/06a1e824-889c-4649-97f9-1ed3fa401d8e: something went wrong", + "classify Article/6402e649-b1e0-40ea-b192-a64eab0d5e56: something went wrong", + } + for _, msg := range expectedErrStrings { + assert.Contains(t, class.Error, msg) + } + }) + }) + + t.Run("when there is nothing to be classified", func(t *testing.T) { + sg := &fakeSchemaGetter{testSchema()} + repo := newFakeClassificationRepo() + authorizer := mocks.NewMockAuthorizer() + vectorRepo := newFakeVectorRepoKNN(nil, testDataAlreadyClassified()) + logger, _ := test.NewNullLogger() + classifier := New(sg, repo, vectorRepo, authorizer, logger, nil) + + params 
:= models.Classification{ + Class: "Article", + BasedOnProperties: []string{"description"}, + ClassifyProperties: []string{"exactCategory", "mainCategory"}, + Settings: map[string]interface{}{ + "k": json.Number("1"), + }, + } + + t.Run("scheduling a classification", func(t *testing.T) { + class, err := classifier.Schedule(context.Background(), nil, params) + require.Nil(t, err, "should not error") + require.NotNil(t, class) + + assert.Len(t, class.ID, 36, "an id was assigned") + id = class.ID + }) + + waitForStatusToNoLongerBeRunning(t, classifier, id) + + t.Run("status is now failed", func(t *testing.T) { + class, err := classifier.Get(context.Background(), nil, id) + require.Nil(t, err) + require.NotNil(t, class) + assert.Equal(t, models.ClassificationStatusFailed, class.Status) + expectedErr := "classification failed: " + + "no classes to be classified - did you run a previous classification already?" + assert.Equal(t, expectedErr, class.Error) + }) + }) +} + +func Test_Classifier_WhereFilterValidation(t *testing.T) { + t.Run("when invalid whereFilters are received", func(t *testing.T) { + sg := &fakeSchemaGetter{testSchema()} + repo := newFakeClassificationRepo() + authorizer := mocks.NewMockAuthorizer() + vectorRepo := newFakeVectorRepoKNN(testDataToBeClassified(), testDataAlreadyClassified()) + classifier := New(sg, repo, vectorRepo, authorizer, newNullLogger(), nil) + + t.Run("with only one of the where filters being set", func(t *testing.T) { + whereFilter := &models.WhereFilter{ + Path: []string{"id"}, + Operator: "Like", + ValueText: ptString("*"), + } + testData := []struct { + name string + classificationType string + classificationFilters *models.ClassificationFilters + }{ + { + name: "Contextual only source where filter set", + classificationType: TypeContextual, + classificationFilters: &models.ClassificationFilters{ + SourceWhere: whereFilter, + }, + }, + { + name: "Contextual only target where filter set", + classificationType: TypeContextual, + 
classificationFilters: &models.ClassificationFilters{ + TargetWhere: whereFilter, + }, + }, + { + name: "ZeroShot only source where filter set", + classificationType: TypeZeroShot, + classificationFilters: &models.ClassificationFilters{ + SourceWhere: whereFilter, + }, + }, + { + name: "ZeroShot only target where filter set", + classificationType: TypeZeroShot, + classificationFilters: &models.ClassificationFilters{ + TargetWhere: whereFilter, + }, + }, + { + name: "KNN only source where filter set", + classificationType: TypeKNN, + classificationFilters: &models.ClassificationFilters{ + SourceWhere: whereFilter, + }, + }, + { + name: "KNN only training set where filter set", + classificationType: TypeKNN, + classificationFilters: &models.ClassificationFilters{ + TrainingSetWhere: whereFilter, + }, + }, + } + for _, td := range testData { + t.Run(td.name, func(t *testing.T) { + params := models.Classification{ + Class: "Article", + BasedOnProperties: []string{"description"}, + ClassifyProperties: []string{"exactCategory", "mainCategory"}, + Settings: map[string]interface{}{ + "k": json.Number("1"), + }, + Type: td.classificationType, + Filters: td.classificationFilters, + } + class, err := classifier.Schedule(context.Background(), nil, params) + assert.Nil(t, err) + assert.NotNil(t, class) + + assert.Len(t, class.ID, 36, "an id was assigned") + waitForStatusToNoLongerBeRunning(t, classifier, class.ID) + }) + } + }) + }) + + t.Run("[deprecated string] when valueString whereFilters are received", func(t *testing.T) { + sg := &fakeSchemaGetter{testSchema()} + repo := newFakeClassificationRepo() + authorizer := mocks.NewMockAuthorizer() + vectorRepo := newFakeVectorRepoKNN(testDataToBeClassified(), testDataAlreadyClassified()) + classifier := New(sg, repo, vectorRepo, authorizer, newNullLogger(), nil) + + validFilter := &models.WhereFilter{ + Path: []string{"description"}, + Operator: "Equal", + ValueText: ptString("valueText is valid"), + } + deprecatedFilter := 
&models.WhereFilter{ + Path: []string{"description"}, + Operator: "Equal", + ValueString: ptString("valueString is accepted"), + } + + t.Run("with deprecated sourceFilter", func(t *testing.T) { + params := models.Classification{ + Class: "Article", + BasedOnProperties: []string{"description"}, + ClassifyProperties: []string{"exactCategory", "mainCategory"}, + Settings: map[string]interface{}{ + "k": json.Number("1"), + }, + Filters: &models.ClassificationFilters{ + SourceWhere: deprecatedFilter, + }, + Type: TypeContextual, + } + + _, err := classifier.Schedule(context.Background(), nil, params) + assert.Nil(t, err) + }) + + t.Run("with deprecated targetFilter", func(t *testing.T) { + params := models.Classification{ + Class: "Article", + BasedOnProperties: []string{"description"}, + ClassifyProperties: []string{"exactCategory", "mainCategory"}, + Settings: map[string]interface{}{ + "k": json.Number("1"), + }, + Filters: &models.ClassificationFilters{ + SourceWhere: validFilter, + TargetWhere: deprecatedFilter, + }, + Type: TypeContextual, + } + + _, err := classifier.Schedule(context.Background(), nil, params) + assert.Nil(t, err) + }) + + t.Run("with deprecated trainingFilter", func(t *testing.T) { + params := models.Classification{ + Class: "Article", + BasedOnProperties: []string{"description"}, + ClassifyProperties: []string{"exactCategory", "mainCategory"}, + Settings: map[string]interface{}{ + "k": json.Number("1"), + }, + Filters: &models.ClassificationFilters{ + SourceWhere: validFilter, + TrainingSetWhere: deprecatedFilter, + }, + Type: TypeKNN, + } + + _, err := classifier.Schedule(context.Background(), nil, params) + assert.Nil(t, err) + }) + }) +} + +type genericFakeRepo interface { + get(strfmt.UUID) (*models.Object, bool) +} + +func checkRef(t *testing.T, repo genericFakeRepo, source, propName, target string) { + object, ok := repo.get(strfmt.UUID(source)) + require.True(t, ok, "object must be present") + + schema, ok := 
object.Properties.(map[string]interface{}) + require.True(t, ok, "schema must be map") + + prop, ok := schema[propName] + require.True(t, ok, "ref prop must be present") + + refs, ok := prop.(models.MultipleRef) + require.True(t, ok, "ref prop must be models.MultipleRef") + require.Len(t, refs, 1, "refs must have len 1") + + assert.Equal(t, fmt.Sprintf("weaviate://localhost/%s", target), refs[0].Beacon.String(), "beacon must match") +} + +func waitForStatusToNoLongerBeRunning(t *testing.T, classifier *Classifier, id strfmt.UUID) { + testhelper.AssertEventuallyEqualWithFrequencyAndTimeout(t, true, func() interface{} { + class, err := classifier.Get(context.Background(), nil, id) + require.Nil(t, err) + require.NotNil(t, class) + + return class.Status != models.ClassificationStatusRunning + }, 100*time.Millisecond, 20*time.Second, "wait until status in no longer running") +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/classification/classifier_vector_repo.go b/platform/dbops/binaries/weaviate-src/usecases/classification/classifier_vector_repo.go new file mode 100644 index 0000000000000000000000000000000000000000..8b7bdaf8d6645515fcceb9696460ea14f9acd6d4 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/classification/classifier_vector_repo.go @@ -0,0 +1,50 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package classification + +import ( + "context" + + "github.com/weaviate/weaviate/entities/dto" + "github.com/weaviate/weaviate/entities/modulecapabilities" + "github.com/weaviate/weaviate/entities/search" +) + +type vectorClassSearchRepo struct { + vectorRepo vectorRepo +} + +func newVectorClassSearchRepo(vectorRepo vectorRepo) *vectorClassSearchRepo { + return &vectorClassSearchRepo{vectorRepo} +} + +func (r *vectorClassSearchRepo) VectorClassSearch(ctx context.Context, + params modulecapabilities.VectorClassSearchParams, +) ([]search.Result, error) { + return r.vectorRepo.VectorSearch(ctx, dto.GetParams{ + Filters: params.Filters, + Pagination: params.Pagination, + ClassName: params.ClassName, + Properties: r.getProperties(params.Properties), + }, nil, nil) +} + +func (r *vectorClassSearchRepo) getProperties(properties []string) search.SelectProperties { + if len(properties) > 0 { + props := search.SelectProperties{} + for i := range properties { + props = append(props, search.SelectProperty{Name: properties[i]}) + } + return props + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/classification/fakes_for_test.go b/platform/dbops/binaries/weaviate-src/usecases/classification/fakes_for_test.go new file mode 100644 index 0000000000000000000000000000000000000000..d849c5c547c92f0b20af1500cee926ad33277bad --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/classification/fakes_for_test.go @@ -0,0 +1,477 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package classification + +import ( + "context" + "fmt" + "math" + "sort" + "sync" + "time" + + "github.com/go-openapi/strfmt" + "github.com/pkg/errors" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/dto" + libfilters "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/modulecapabilities" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/search" + "github.com/weaviate/weaviate/usecases/objects" + "github.com/weaviate/weaviate/usecases/sharding" +) + +type fakeSchemaGetter struct { + schema schema.Schema +} + +func (f *fakeSchemaGetter) GetSchemaSkipAuth() schema.Schema { + return f.schema +} + +func (f *fakeSchemaGetter) ReadOnlyClass(class string) *models.Class { + return f.schema.GetClass(class) +} + +func (f *fakeSchemaGetter) ResolveAlias(string) string { + return "" +} + +func (f *fakeSchemaGetter) GetAliasesForClass(string) []*models.Alias { + return nil +} + +func (f *fakeSchemaGetter) CopyShardingState(class string) *sharding.State { + panic("not implemented") +} + +func (f *fakeSchemaGetter) ShardOwner(class, shard string) (string, error) { + return shard, nil +} + +func (f *fakeSchemaGetter) ShardReplicas(class, shard string) ([]string, error) { + return []string{shard}, nil +} + +func (f *fakeSchemaGetter) TenantsShards(_ context.Context, class string, tenants ...string) (map[string]string, error) { + res := map[string]string{} + for _, t := range tenants { + res[t] = models.TenantActivityStatusHOT + } + return res, nil +} + +func (f *fakeSchemaGetter) OptimisticTenantStatus(_ context.Context, class string, tenant string) (map[string]string, error) { + res := map[string]string{} + res[tenant] = models.TenantActivityStatusHOT + return res, nil +} + +func (f *fakeSchemaGetter) ShardFromUUID(class string, uuid []byte) string { return string(uuid) 
} + +func (f *fakeSchemaGetter) Nodes() []string { + panic("not implemented") +} + +func (f *fakeSchemaGetter) NodeName() string { + panic("not implemented") +} + +func (f *fakeSchemaGetter) ClusterHealthScore() int { + panic("not implemented") +} + +func (f *fakeSchemaGetter) ResolveParentNodes(string, string, +) (map[string]string, error) { + panic("not implemented") +} + +func (f *fakeSchemaGetter) Statistics() map[string]any { + panic("not implemented") +} + +type fakeClassificationRepo struct { + sync.Mutex + db map[strfmt.UUID]models.Classification +} + +func newFakeClassificationRepo() *fakeClassificationRepo { + return &fakeClassificationRepo{ + db: map[strfmt.UUID]models.Classification{}, + } +} + +func (f *fakeClassificationRepo) Put(ctx context.Context, class models.Classification) error { + f.Lock() + defer f.Unlock() + + f.db[class.ID] = class + return nil +} + +func (f *fakeClassificationRepo) Get(ctx context.Context, id strfmt.UUID) (*models.Classification, error) { + f.Lock() + defer f.Unlock() + + class, ok := f.db[id] + if !ok { + return nil, nil + } + + return &class, nil +} + +func newFakeVectorRepoKNN(unclassified, classified search.Results) *fakeVectorRepoKNN { + return &fakeVectorRepoKNN{ + unclassified: unclassified, + classified: classified, + db: map[strfmt.UUID]*models.Object{}, + } +} + +// read requests are specified through unclassified and classified, +// write requests (Put[Kind]) are stored in the db map +type fakeVectorRepoKNN struct { + sync.Mutex + unclassified []search.Result + classified []search.Result + db map[strfmt.UUID]*models.Object + errorOnAggregate error + batchStorageDelay time.Duration +} + +func (f *fakeVectorRepoKNN) GetUnclassified(ctx context.Context, + class string, properties []string, propsToReturn []string, + filter *libfilters.LocalFilter, +) ([]search.Result, error) { + f.Lock() + defer f.Unlock() + return f.unclassified, nil +} + +func (f *fakeVectorRepoKNN) AggregateNeighbors(ctx context.Context, vector 
[]float32, + class string, properties []string, k int, + filter *libfilters.LocalFilter, +) ([]NeighborRef, error) { + f.Lock() + defer f.Unlock() + + // simulate that this takes some time + time.Sleep(1 * time.Millisecond) + + if k != 1 { + return nil, fmt.Errorf("fake vector repo only supports k=1") + } + + results := f.classified + sort.SliceStable(results, func(i, j int) bool { + simI, err := cosineSim(results[i].Vector, vector) + if err != nil { + panic(err.Error()) + } + + simJ, err := cosineSim(results[j].Vector, vector) + if err != nil { + panic(err.Error()) + } + return simI > simJ + }) + + var out []NeighborRef + schema := results[0].Schema.(map[string]interface{}) + for _, propName := range properties { + prop, ok := schema[propName] + if !ok { + return nil, fmt.Errorf("missing prop %s", propName) + } + + refs := prop.(models.MultipleRef) + if len(refs) != 1 { + return nil, fmt.Errorf("wrong length %d", len(refs)) + } + + out = append(out, NeighborRef{ + Beacon: refs[0].Beacon, + WinningCount: 1, + OverallCount: 1, + LosingCount: 1, + Property: propName, + }) + } + + return out, f.errorOnAggregate +} + +func (f *fakeVectorRepoKNN) ZeroShotSearch(ctx context.Context, vector []float32, + class string, properties []string, + filter *libfilters.LocalFilter, +) ([]search.Result, error) { + return []search.Result{}, nil +} + +func (f *fakeVectorRepoKNN) VectorSearch(ctx context.Context, + params dto.GetParams, targetVectors []string, searchVectors []models.Vector, +) ([]search.Result, error) { + f.Lock() + defer f.Unlock() + return nil, fmt.Errorf("vector class search not implemented in fake") +} + +func (f *fakeVectorRepoKNN) BatchPutObjects(ctx context.Context, objects objects.BatchObjects, repl *additional.ReplicationProperties, schemaVersion uint64) (objects.BatchObjects, error) { + f.Lock() + defer f.Unlock() + + if f.batchStorageDelay > 0 { + time.Sleep(f.batchStorageDelay) + } + + for _, batchObject := range objects { + f.db[batchObject.Object.ID] = 
batchObject.Object + } + return objects, nil +} + +func (f *fakeVectorRepoKNN) get(id strfmt.UUID) (*models.Object, bool) { + f.Lock() + defer f.Unlock() + t, ok := f.db[id] + return t, ok +} + +func newFakeVectorRepoContextual(unclassified, targets search.Results) *fakeVectorRepoContextual { + return &fakeVectorRepoContextual{ + unclassified: unclassified, + targets: targets, + db: map[strfmt.UUID]*models.Object{}, + } +} + +// read requests are specified through unclassified and classified, +// write requests (Put[Kind]) are stored in the db map +type fakeVectorRepoContextual struct { + sync.Mutex + unclassified []search.Result + targets []search.Result + db map[strfmt.UUID]*models.Object + errorOnAggregate error +} + +func (f *fakeVectorRepoContextual) get(id strfmt.UUID) (*models.Object, bool) { + f.Lock() + defer f.Unlock() + t, ok := f.db[id] + return t, ok +} + +func (f *fakeVectorRepoContextual) GetUnclassified(ctx context.Context, + class string, properties []string, propsToReturn []string, + filter *libfilters.LocalFilter, +) ([]search.Result, error) { + return f.unclassified, nil +} + +func (f *fakeVectorRepoContextual) AggregateNeighbors(ctx context.Context, vector []float32, + class string, properties []string, k int, + filter *libfilters.LocalFilter, +) ([]NeighborRef, error) { + panic("not implemented") +} + +func (f *fakeVectorRepoContextual) ZeroShotSearch(ctx context.Context, vector []float32, + class string, properties []string, + filter *libfilters.LocalFilter, +) ([]search.Result, error) { + panic("not implemented") +} + +func (f *fakeVectorRepoContextual) BatchPutObjects(ctx context.Context, objects objects.BatchObjects, repl *additional.ReplicationProperties, schemaVersion uint64) (objects.BatchObjects, error) { + f.Lock() + defer f.Unlock() + for _, batchObject := range objects { + f.db[batchObject.Object.ID] = batchObject.Object + } + return objects, nil +} + +func (f *fakeVectorRepoContextual) VectorSearch(ctx context.Context, + params 
dto.GetParams, targetVectors []string, searchVectors []models.Vector, +) ([]search.Result, error) { + if searchVectors[0] == nil { + filteredTargets := matchClassName(f.targets, params.ClassName) + return filteredTargets, nil + } + + switch searchVector := searchVectors[0].(type) { + case []float32: + // simulate that this takes some time + time.Sleep(5 * time.Millisecond) + + filteredTargets := matchClassName(f.targets, params.ClassName) + results := filteredTargets + sort.SliceStable(results, func(i, j int) bool { + simI, err := cosineSim(results[i].Vector, searchVector) + if err != nil { + panic(err.Error()) + } + + simJ, err := cosineSim(results[j].Vector, searchVector) + if err != nil { + panic(err.Error()) + } + return simI > simJ + }) + + if len(results) == 0 { + return nil, f.errorOnAggregate + } + + out := []search.Result{ + results[0], + } + + return out, f.errorOnAggregate + default: + return nil, fmt.Errorf("unsupported search vector type: %T", searchVectors[0]) + } +} + +func cosineSim(a, b []float32) (float32, error) { + if len(a) != len(b) { + return 0, fmt.Errorf("vectors have different dimensions") + } + + var ( + sumProduct float64 + sumASquare float64 + sumBSquare float64 + ) + + for i := range a { + sumProduct += float64(a[i] * b[i]) + sumASquare += float64(a[i] * a[i]) + sumBSquare += float64(b[i] * b[i]) + } + + return float32(sumProduct / (math.Sqrt(sumASquare) * math.Sqrt(sumBSquare))), nil +} + +func matchClassName(in []search.Result, className string) []search.Result { + var out []search.Result + for _, item := range in { + if item.ClassName == className { + out = append(out, item) + } + } + + return out +} + +type fakeModuleClassifyFn struct { + fakeExactCategoryMappings map[string]string + fakeMainCategoryMappings map[string]string +} + +func NewFakeModuleClassifyFn() *fakeModuleClassifyFn { + return &fakeModuleClassifyFn{ + fakeExactCategoryMappings: map[string]string{ + "75ba35af-6a08-40ae-b442-3bec69b355f9": 
"1b204f16-7da6-44fd-bbd2-8cc4a7414bc3", + "a2bbcbdc-76e1-477d-9e72-a6d2cfb50109": "ec500f39-1dc9-4580-9bd1-55a8ea8e37a2", + "069410c3-4b9e-4f68-8034-32a066cb7997": "ec500f39-1dc9-4580-9bd1-55a8ea8e37a2", + "06a1e824-889c-4649-97f9-1ed3fa401d8e": "027b708a-31ca-43ea-9001-88bec864c79c", + }, + fakeMainCategoryMappings: map[string]string{ + "6402e649-b1e0-40ea-b192-a64eab0d5e56": "5a3d909a-4f0d-4168-8f5c-cd3074d1e79a", + "f850439a-d3cd-4f17-8fbf-5a64405645cd": "39c6abe3-4bbe-4c4e-9e60-ca5e99ec6b4e", + "069410c3-4b9e-4f68-8034-32a066cb7997": "39c6abe3-4bbe-4c4e-9e60-ca5e99ec6b4e", + }, + } +} + +func (c *fakeModuleClassifyFn) classifyFn(item search.Result, itemIndex int, + params models.Classification, filters modulecapabilities.Filters, writer modulecapabilities.Writer, +) error { + var classified []string + + classifiedProp := c.fakeClassification(&item, "exactCategory", c.fakeExactCategoryMappings) + if len(classifiedProp) > 0 { + classified = append(classified, classifiedProp) + } + + classifiedProp = c.fakeClassification(&item, "mainCategory", c.fakeMainCategoryMappings) + if len(classifiedProp) > 0 { + classified = append(classified, classifiedProp) + } + + c.extendItemWithObjectMeta(&item, params, classified) + + err := writer.Store(item) + if err != nil { + return fmt.Errorf("store %s/%s: %w", item.ClassName, item.ID, err) + } + return nil +} + +func (c *fakeModuleClassifyFn) fakeClassification(item *search.Result, propName string, + fakes map[string]string, +) string { + if target, ok := fakes[item.ID.String()]; ok { + beacon := "weaviate://localhost/" + target + item.Schema.(map[string]interface{})[propName] = models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI(beacon), + Classification: nil, + }, + } + return propName + } + return "" +} + +func (c *fakeModuleClassifyFn) extendItemWithObjectMeta(item *search.Result, + params models.Classification, classified []string, +) { + if item.AdditionalProperties == nil { + item.AdditionalProperties = 
models.AdditionalProperties{} + } + + item.AdditionalProperties["classification"] = additional.Classification{ + ID: params.ID, + Scope: params.ClassifyProperties, + ClassifiedFields: classified, + Completed: strfmt.DateTime(time.Now()), + } +} + +type fakeModulesProvider struct { + fakeModuleClassifyFn *fakeModuleClassifyFn +} + +func NewFakeModulesProvider() *fakeModulesProvider { + return &fakeModulesProvider{NewFakeModuleClassifyFn()} +} + +func (m *fakeModulesProvider) ParseClassifierSettings(name string, + params *models.Classification, +) error { + return nil +} + +func (m *fakeModulesProvider) GetClassificationFn(className, name string, + params modulecapabilities.ClassifyParams, +) (modulecapabilities.ClassifyItemFn, error) { + if name == "text2vec-contextionary-custom-contextual" { + return m.fakeModuleClassifyFn.classifyFn, nil + } + return nil, errors.Errorf("classifier %s not found", name) +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/classification/ref_meta.go b/platform/dbops/binaries/weaviate-src/usecases/classification/ref_meta.go new file mode 100644 index 0000000000000000000000000000000000000000..3fe4c4d937035705acb656d47254389bf87d3a0f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/classification/ref_meta.go @@ -0,0 +1,55 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package classification + +import "github.com/weaviate/weaviate/entities/models" + +// NeighborRefDistances include various distances about the winning and losing +// groups (knn) +type NeighborRefDistances struct { + ClosestOverallDistance float32 + + // Winning + ClosestWinningDistance float32 + MeanWinningDistance float32 + + // Losing (optional) + MeanLosingDistance *float32 + ClosestLosingDistance *float32 +} + +func (r NeighborRef) Meta() *models.ReferenceMetaClassification { + out := &models.ReferenceMetaClassification{ + OverallCount: int64(r.OverallCount), + WinningCount: int64(r.WinningCount), + LosingCount: int64(r.LosingCount), + ClosestOverallDistance: float64(r.Distances.ClosestOverallDistance), + WinningDistance: float64(r.Distances.MeanWinningDistance), // deprecated, remove in 0.23.0 + MeanWinningDistance: float64(r.Distances.MeanWinningDistance), + ClosestWinningDistance: float64(r.Distances.ClosestWinningDistance), + } + + if r.Distances.MeanLosingDistance != nil { + out.MeanLosingDistance = ptFloat64(float64(*r.Distances.MeanLosingDistance)) + out.LosingDistance = ptFloat64(float64(*r.Distances.MeanLosingDistance)) // deprecated + } + + if r.Distances.ClosestLosingDistance != nil { + out.ClosestLosingDistance = ptFloat64(float64(*r.Distances.ClosestLosingDistance)) + } + + return out +} + +func ptFloat64(in float64) *float64 { + return &in +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/classification/ref_meta_test.go b/platform/dbops/binaries/weaviate-src/usecases/classification/ref_meta_test.go new file mode 100644 index 0000000000000000000000000000000000000000..a5999e959acd2e24aca05878fda9703558af7bca --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/classification/ref_meta_test.go @@ -0,0 +1,98 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ 
|_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package classification + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/weaviate/weaviate/entities/models" +) + +func Test_RefMeta(t *testing.T) { + t.Run("without a losing group", func(t *testing.T) { + source := NeighborRef{ + WinningCount: 3, + OverallCount: 3, + LosingCount: 0, + Distances: NeighborRefDistances{ + ClosestWinningDistance: 0.1, + ClosestOverallDistance: 0.1, + MeanWinningDistance: 0.2, + }, + } + + expected := &models.ReferenceMetaClassification{ + ClosestWinningDistance: 0.1, + ClosestOverallDistance: 0.1, + MeanWinningDistance: 0.2, + WinningDistance: 0.2, // deprecated, must be removed in 0.23.0 + OverallCount: 3, + WinningCount: 3, + LosingCount: 0, + } + + actual := source.Meta() + assert.InDelta(t, expected.ClosestWinningDistance, actual.ClosestWinningDistance, 0.001) + assert.InDelta(t, expected.ClosestOverallDistance, actual.ClosestOverallDistance, 0.001) + assert.InDelta(t, expected.MeanWinningDistance, actual.MeanWinningDistance, 0.001) + assert.InDelta(t, expected.WinningDistance, actual.WinningDistance, 0.001) + assert.Equal(t, expected.OverallCount, actual.OverallCount) + assert.Equal(t, expected.WinningCount, actual.WinningCount) + assert.Equal(t, expected.LosingCount, actual.LosingCount) + }) + + t.Run("with a losing group", func(t *testing.T) { + source := NeighborRef{ + WinningCount: 3, + OverallCount: 5, + LosingCount: 2, + Distances: NeighborRefDistances{ + ClosestWinningDistance: 0.1, + ClosestOverallDistance: 0.1, + MeanWinningDistance: 0.2, + ClosestLosingDistance: ptFloat32(0.15), + MeanLosingDistance: ptFloat32(0.25), + }, + } + + expected := &models.ReferenceMetaClassification{ + ClosestOverallDistance: 0.1, + ClosestWinningDistance: 0.1, + MeanWinningDistance: 0.2, + WinningDistance: 0.2, // deprecated, must be removed in 0.23.0 + ClosestLosingDistance: 
ptFloat64(0.15), + MeanLosingDistance: ptFloat64(0.25), + LosingDistance: ptFloat64(0.25), // deprecated, must be removed in 0.23.0 + OverallCount: 5, + WinningCount: 3, + LosingCount: 2, + } + + actual := source.Meta() + assert.InDelta(t, expected.ClosestOverallDistance, actual.ClosestOverallDistance, 0.001) + assert.InDelta(t, expected.ClosestWinningDistance, actual.ClosestWinningDistance, 0.001) + assert.InDelta(t, expected.MeanWinningDistance, actual.MeanWinningDistance, 0.001) + assert.InDelta(t, expected.WinningDistance, actual.WinningDistance, 0.001) + assert.InDelta(t, *expected.ClosestLosingDistance, *actual.ClosestLosingDistance, 0.001) + assert.InDelta(t, *expected.MeanLosingDistance, *actual.MeanLosingDistance, 0.001) + assert.InDelta(t, *expected.LosingDistance, *actual.LosingDistance, 0.001) + assert.Equal(t, expected.OverallCount, actual.OverallCount) + assert.Equal(t, expected.OverallCount, actual.OverallCount) + assert.Equal(t, expected.WinningCount, actual.WinningCount) + assert.Equal(t, expected.LosingCount, actual.LosingCount) + }) +} + +func ptFloat32(in float32) *float32 { + return &in +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/classification/schema_for_test.go b/platform/dbops/binaries/weaviate-src/usecases/classification/schema_for_test.go new file mode 100644 index 0000000000000000000000000000000000000000..66c3560d8235cafa12338102d103da44165acc34 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/classification/schema_for_test.go @@ -0,0 +1,219 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package classification + +import ( + "fmt" + + "github.com/go-openapi/strfmt" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/search" +) + +func testSchema() schema.Schema { + return schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{ + { + Class: "ExactCategory", + }, + { + Class: "MainCategory", + }, + { + Class: "Article", + Properties: []*models.Property{ + { + Name: "description", + DataType: []string{string(schema.DataTypeText)}, + }, + { + Name: "name", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + { + Name: "exactCategory", + DataType: []string{"ExactCategory"}, + }, + { + Name: "mainCategory", + DataType: []string{"MainCategory"}, + }, + { + Name: "categories", + DataType: []string{"ExactCategory"}, + }, + { + Name: "anyCategory", + DataType: []string{"MainCategory", "ExactCategory"}, + }, + { + Name: "words", + DataType: schema.DataTypeInt.PropString(), + }, + }, + }, + }, + }, + } +} + +// vector position close to [1,0,0] means -> politics, [0,1,0] means -> society, [0, 0, 1] -> food&drink +func testDataToBeClassified() search.Results { + return search.Results{ + search.Result{ + ID: "75ba35af-6a08-40ae-b442-3bec69b355f9", + ClassName: "Article", + Vector: []float32{0.78, 0, 0}, + Schema: map[string]interface{}{ + "description": "Barack Obama is a former US president", + }, + }, + search.Result{ + ID: "f850439a-d3cd-4f17-8fbf-5a64405645cd", + ClassName: "Article", + Vector: []float32{0.90, 0, 0}, + Schema: map[string]interface{}{ + "description": "Michelle Obama is Barack Obamas wife", + }, + }, + search.Result{ + ID: "a2bbcbdc-76e1-477d-9e72-a6d2cfb50109", + ClassName: "Article", + Vector: []float32{0, 0.78, 0}, + Schema: map[string]interface{}{ + "description": "Johnny Depp is an actor", + }, + }, + search.Result{ + ID: 
"069410c3-4b9e-4f68-8034-32a066cb7997", + ClassName: "Article", + Vector: []float32{0, 0.90, 0}, + Schema: map[string]interface{}{ + "description": "Brad Pitt starred in a Quentin Tarantino movie", + }, + }, + search.Result{ + ID: "06a1e824-889c-4649-97f9-1ed3fa401d8e", + ClassName: "Article", + Vector: []float32{0, 0, 0.78}, + Schema: map[string]interface{}{ + "description": "Ice Cream often contains a lot of sugar", + }, + }, + search.Result{ + ID: "6402e649-b1e0-40ea-b192-a64eab0d5e56", + ClassName: "Article", + Vector: []float32{0, 0, 0.90}, + Schema: map[string]interface{}{ + "description": "French Fries are more common in Belgium and the US than in France", + }, + }, + } +} + +const ( + idMainCategoryPoliticsAndSociety = "39c6abe3-4bbe-4c4e-9e60-ca5e99ec6b4e" + idMainCategoryFoodAndDrink = "5a3d909a-4f0d-4168-8f5c-cd3074d1e79a" + idCategoryPolitics = "1b204f16-7da6-44fd-bbd2-8cc4a7414bc3" + idCategorySociety = "ec500f39-1dc9-4580-9bd1-55a8ea8e37a2" + idCategoryFoodAndDrink = "027b708a-31ca-43ea-9001-88bec864c79c" +) + +// only used for contextual type classification +func testDataPossibleTargets() search.Results { + return search.Results{ + search.Result{ + ID: idMainCategoryPoliticsAndSociety, + ClassName: "MainCategory", + Vector: []float32{1.01, 1.01, 0}, + Schema: map[string]interface{}{ + "name": "Politics and Society", + }, + }, + search.Result{ + ID: idMainCategoryFoodAndDrink, + ClassName: "MainCategory", + Vector: []float32{0, 0, 0.99}, + Schema: map[string]interface{}{ + "name": "Food and Drinks", + }, + }, + search.Result{ + ID: idCategoryPolitics, + ClassName: "ExactCategory", + Vector: []float32{0.99, 0, 0}, + Schema: map[string]interface{}{ + "name": "Politics", + }, + }, + search.Result{ + ID: idCategorySociety, + ClassName: "ExactCategory", + Vector: []float32{0, 0.90, 0}, + Schema: map[string]interface{}{ + "name": "Society", + }, + }, + search.Result{ + ID: idCategoryFoodAndDrink, + ClassName: "ExactCategory", + Vector: []float32{0, 0, 
0.99}, + Schema: map[string]interface{}{ + "name": "Food and Drink", + }, + }, + } +} + +func beaconRef(target string) *models.SingleRef { + beacon := fmt.Sprintf("weaviate://localhost/%s", target) + return &models.SingleRef{Beacon: strfmt.URI(beacon)} +} + +// only used for knn-type +func testDataAlreadyClassified() search.Results { + return search.Results{ + search.Result{ + ID: "8aeecd06-55a0-462c-9853-81b31a284d80", + ClassName: "Article", + Vector: []float32{1, 0, 0}, + Schema: map[string]interface{}{ + "description": "This article talks about politics", + "exactCategory": models.MultipleRef{beaconRef(idCategoryPolitics)}, + "mainCategory": models.MultipleRef{beaconRef(idMainCategoryPoliticsAndSociety)}, + }, + }, + search.Result{ + ID: "9f4c1847-2567-4de7-8861-34cf47a071ae", + ClassName: "Article", + Vector: []float32{0, 1, 0}, + Schema: map[string]interface{}{ + "description": "This articles talks about society", + "exactCategory": models.MultipleRef{beaconRef(idCategorySociety)}, + "mainCategory": models.MultipleRef{beaconRef(idMainCategoryPoliticsAndSociety)}, + }, + }, + search.Result{ + ID: "926416ec-8fb1-4e40-ab8c-37b226b3d68e", + ClassName: "Article", + Vector: []float32{0, 0, 1}, + Schema: map[string]interface{}{ + "description": "This article talks about food", + "exactCategory": models.MultipleRef{beaconRef(idCategoryFoodAndDrink)}, + "mainCategory": models.MultipleRef{beaconRef(idMainCategoryFoodAndDrink)}, + }, + }, + } +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/classification/transactions.go b/platform/dbops/binaries/weaviate-src/usecases/classification/transactions.go new file mode 100644 index 0000000000000000000000000000000000000000..78f033c3308e55ddc7981010394946bebd64b092 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/classification/transactions.go @@ -0,0 +1,48 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// 
\_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package classification + +import ( + "encoding/json" + + "github.com/pkg/errors" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/cluster" +) + +const TransactionPut cluster.TransactionType = "put_single" + +type TransactionPutPayload struct { + Classification models.Classification `json:"classification"` +} + +func UnmarshalTransaction(txType cluster.TransactionType, + payload json.RawMessage, +) (interface{}, error) { + switch txType { + case TransactionPut: + return unmarshalPut(payload) + + default: + return nil, errors.Errorf("unrecognized schema transaction type %q", txType) + + } +} + +func unmarshalPut(payload json.RawMessage) (interface{}, error) { + var pl TransactionPutPayload + if err := json.Unmarshal(payload, &pl); err != nil { + return nil, err + } + + return pl, nil +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/classification/validation.go b/platform/dbops/binaries/weaviate-src/usecases/classification/validation.go new file mode 100644 index 0000000000000000000000000000000000000000..044dcae3b1169fee66b93b9d972e8c2f8f2c856a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/classification/validation.go @@ -0,0 +1,198 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package classification + +import ( + "fmt" + + "github.com/weaviate/weaviate/entities/errorcompounder" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" +) + +const ( + TypeKNN = "knn" + TypeContextual = "text2vec-contextionary-contextual" + TypeZeroShot = "zeroshot" +) + +type Validator struct { + authorizedGetClass func(string) (*models.Class, error) + errors *errorcompounder.SafeErrorCompounder + subject models.Classification +} + +func NewValidator(authorizedGetClass func(string) (*models.Class, error), subject models.Classification) *Validator { + return &Validator{ + authorizedGetClass: authorizedGetClass, + errors: &errorcompounder.SafeErrorCompounder{}, + subject: subject, + } +} + +func (v *Validator) Do() error { + v.validate() + + err := v.errors.First() + if err != nil { + return fmt.Errorf("invalid classification: %w", err) + } + + return nil +} + +func (v *Validator) validate() { + if v.subject.Class == "" { + v.errors.Add(fmt.Errorf("class must be set")) + return + } + + class, err := v.authorizedGetClass(v.subject.Class) + if err != nil { + v.errors.Add(err) + return + } + if class == nil { + v.errors.Addf("class '%s' not found in schema", v.subject.Class) + return + } + + v.contextualTypeFeasibility() + v.knnTypeFeasibility() + v.basedOnProperties(class) + v.classifyProperties(class) +} + +func (v *Validator) contextualTypeFeasibility() { + if !v.typeText2vecContextionaryContextual() { + return + } + + if v.subject.Filters != nil && v.subject.Filters.TrainingSetWhere != nil { + v.errors.Addf("type is 'text2vec-contextionary-contextual', but 'trainingSetWhere' filter is set, for 'text2vec-contextionary-contextual' there is no training data, instead limit possible target data directly through setting 'targetWhere'") + } +} + +func (v *Validator) knnTypeFeasibility() { + if !v.typeKNN() { + return + } + + if v.subject.Filters != nil && 
v.subject.Filters.TargetWhere != nil { + v.errors.Addf("type is 'knn', but 'targetWhere' filter is set, for 'knn' you cannot limit target data directly, instead limit training data through setting 'trainingSetWhere'") + } +} + +func (v *Validator) basedOnProperties(class *models.Class) { + if len(v.subject.BasedOnProperties) == 0 { + v.errors.Addf("basedOnProperties must have at least one property") + return + } + + if len(v.subject.BasedOnProperties) > 1 { + v.errors.Addf("only a single property in basedOnProperties supported at the moment, got %v", + v.subject.BasedOnProperties) + return + } + + for _, prop := range v.subject.BasedOnProperties { + v.basedOnProperty(class, prop) + } +} + +func (v *Validator) basedOnProperty(class *models.Class, propName string) { + prop, ok := v.propertyByName(class, propName) + if !ok { + v.errors.Addf("basedOnProperties: property '%s' does not exist", propName) + return + } + + dt, err := schema.FindPropertyDataTypeWithRefsAndAuth(v.authorizedGetClass, prop.DataType, false, "") + if err != nil { + v.errors.Addf("basedOnProperties: %v", err) + return + } + + if !dt.IsPrimitive() { + v.errors.Addf("basedOnProperties: property '%s' must be of type 'text'", propName) + return + } + + if dt.AsPrimitive() != schema.DataTypeText { + v.errors.Addf("basedOnProperties: property '%s' must be of type 'text'", propName) + return + } +} + +func (v *Validator) classifyProperties(class *models.Class) { + if len(v.subject.ClassifyProperties) == 0 { + v.errors.Addf("classifyProperties must have at least one property") + return + } + + for _, prop := range v.subject.ClassifyProperties { + v.classifyProperty(class, prop) + } +} + +func (v *Validator) classifyProperty(class *models.Class, propName string) { + prop, ok := v.propertyByName(class, propName) + if !ok { + v.errors.Addf("classifyProperties: property '%s' does not exist", propName) + return + } + + dt, err := schema.FindPropertyDataTypeWithRefsAndAuth(v.authorizedGetClass, prop.DataType, 
false, "") + if err != nil { + v.errors.Addf("classifyProperties: %w", err) + return + } + + if !dt.IsReference() { + v.errors.Addf("classifyProperties: property '%s' must be of reference type (cref)", propName) + return + } + + if v.typeText2vecContextionaryContextual() { + if len(dt.Classes()) > 1 { + v.errors.Addf("classifyProperties: property '%s'"+ + " has more than one target class, classification of type 'text2vec-contextionary-contextual' requires exactly one target class", propName) + return + } + } +} + +func (v *Validator) propertyByName(class *models.Class, propName string) (*models.Property, bool) { + for _, prop := range class.Properties { + if prop.Name == propName { + return prop, true + } + } + + return nil, false +} + +func (v *Validator) typeText2vecContextionaryContextual() bool { + if v.subject.Type == "" { + return false + } + + return v.subject.Type == TypeContextual +} + +func (v *Validator) typeKNN() bool { + if v.subject.Type == "" { + return true + } + + return v.subject.Type == TypeKNN +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/classification/validation_test.go b/platform/dbops/binaries/weaviate-src/usecases/classification/validation_test.go new file mode 100644 index 0000000000000000000000000000000000000000..7dc2c9be463332877a4f0dda7a49765a7335acb9 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/classification/validation_test.go @@ -0,0 +1,191 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package classification + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/weaviate/weaviate/entities/models" +) + +func Test_ValidateUserInput(t *testing.T) { + type testcase struct { + name string + input models.Classification + expectedError error + } + + // knn or general + tests := []testcase{ + { + name: "missing class", + input: models.Classification{ + BasedOnProperties: []string{"description"}, + ClassifyProperties: []string{"exactCategory"}, + }, + expectedError: fmt.Errorf("invalid classification: class must be set"), + }, + + { + name: "missing basedOnProperty (nil)", + input: models.Classification{ + Class: "Article", + BasedOnProperties: nil, + ClassifyProperties: []string{"exactCategory"}, + }, + expectedError: fmt.Errorf("invalid classification: basedOnProperties must have at least one property"), + }, + { + name: "missing basedOnProperty (len=0)", + input: models.Classification{ + Class: "Article", + BasedOnProperties: []string{}, + ClassifyProperties: []string{"exactCategory"}, + }, + expectedError: fmt.Errorf("invalid classification: basedOnProperties must have at least one property"), + }, + + { + name: "more than one basedOnProperty", + input: models.Classification{ + Class: "Article", + BasedOnProperties: []string{"description", "name"}, + ClassifyProperties: []string{"exactCategory"}, + }, + expectedError: fmt.Errorf("invalid classification: only a single property in basedOnProperties " + + "supported at the moment, got [description name]"), + }, + + { + name: "basedOnProperty does not exist", + input: models.Classification{ + Class: "Article", + BasedOnProperties: []string{"doesNotExist"}, + ClassifyProperties: []string{"exactCategory"}, + }, + expectedError: fmt.Errorf("invalid classification: basedOnProperties: property 'doesNotExist' does not exist"), + }, + + { + name: "basedOnProperty is not of type text", + input: models.Classification{ + Class: "Article", + 
BasedOnProperties: []string{"words"}, + ClassifyProperties: []string{"exactCategory"}, + }, + expectedError: fmt.Errorf("invalid classification: basedOnProperties: property 'words' must be of type 'text'"), + }, + + { + name: "missing classifyProperties (nil)", + input: models.Classification{ + Class: "Article", + BasedOnProperties: []string{"description"}, + ClassifyProperties: nil, + }, + expectedError: fmt.Errorf("invalid classification: classifyProperties must have at least one property"), + }, + + { + name: "missing classifyProperties (len=0)", + input: models.Classification{ + Class: "Article", + BasedOnProperties: []string{"description"}, + ClassifyProperties: []string{}, + }, + expectedError: fmt.Errorf("invalid classification: classifyProperties must have at least one property"), + }, + + { + name: "classifyProperties does not exist", + input: models.Classification{ + Class: "Article", + BasedOnProperties: []string{"description"}, + ClassifyProperties: []string{"doesNotExist"}, + }, + expectedError: fmt.Errorf("invalid classification: classifyProperties: property 'doesNotExist' does not exist"), + }, + + { + name: "classifyProperties is not of reference type", + input: models.Classification{ + Class: "Article", + BasedOnProperties: []string{"description"}, + ClassifyProperties: []string{"name"}, + }, + expectedError: fmt.Errorf("invalid classification: classifyProperties: property 'name' must be of reference type (cref)"), + }, + + { + name: "multiple missing fields (aborts early as we can't validate properties if class is not set)", + input: models.Classification{}, + expectedError: fmt.Errorf("invalid classification: class must be set"), + }, + + // specific for knn + { + name: "targetWhere is set", + input: models.Classification{ + Class: "Article", + BasedOnProperties: []string{"description"}, + ClassifyProperties: []string{"exactCategory"}, + Filters: &models.ClassificationFilters{ + TargetWhere: &models.WhereFilter{Operator: "Equal", Path: 
[]string{"foo"}, ValueText: ptString("bar")}, + }, + Type: "knn", + }, + expectedError: fmt.Errorf("invalid classification: type is 'knn', but 'targetWhere' filter is set, for 'knn' you cannot limit target data directly, instead limit training data through setting 'trainingSetWhere'"), + }, + + // specific for text2vec-contextionary-contextual + { + name: "classifyProperty has more than one target class", + input: models.Classification{ + Class: "Article", + BasedOnProperties: []string{"description"}, + ClassifyProperties: []string{"anyCategory"}, + Type: "text2vec-contextionary-contextual", + }, + expectedError: fmt.Errorf("invalid classification: classifyProperties: property 'anyCategory' has more than one target class, classification of type 'text2vec-contextionary-contextual' requires exactly one target class"), + }, + + { + name: "trainingSetWhere is set", + input: models.Classification{ + Class: "Article", + BasedOnProperties: []string{"description"}, + ClassifyProperties: []string{"exactCategory"}, + Filters: &models.ClassificationFilters{ + TrainingSetWhere: &models.WhereFilter{Operator: "Equal", Path: []string{"foo"}, ValueText: ptString("bar")}, + }, + Type: "text2vec-contextionary-contextual", + }, + expectedError: fmt.Errorf("invalid classification: type is 'text2vec-contextionary-contextual', but 'trainingSetWhere' filter is set, for 'text2vec-contextionary-contextual' there is no training data, instead limit possible target data directly through setting 'targetWhere'"), + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + fsg := &fakeSchemaGetter{testSchema()} + validator := NewValidator(func(name string) (*models.Class, error) { return fsg.ReadOnlyClass(name), nil }, test.input) + err := validator.Do() + assert.ErrorAs(t, err, &test.expectedError) + }) + } +} + +func ptString(in string) *string { + return &in +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/classification/writer.go 
b/platform/dbops/binaries/weaviate-src/usecases/classification/writer.go new file mode 100644 index 0000000000000000000000000000000000000000..91fcf73a9d12dea6ef14062dd301e4ff9526bb60 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/classification/writer.go @@ -0,0 +1,141 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package classification + +import ( + "sync" + "time" + + "github.com/sirupsen/logrus" + enterrors "github.com/weaviate/weaviate/entities/errors" + + "github.com/weaviate/weaviate/entities/errorcompounder" + "github.com/weaviate/weaviate/entities/search" + "github.com/weaviate/weaviate/usecases/objects" +) + +type batchWriterResults struct { + successCount int64 + errorCount int64 + err error +} + +func (w batchWriterResults) SuccessCount() int64 { + return w.successCount +} + +func (w batchWriterResults) ErrorCount() int64 { + return w.errorCount +} + +func (w batchWriterResults) Err() error { + return w.err +} + +type batchWriter struct { + mutex sync.RWMutex + vectorRepo vectorRepo + batchItemsCount int + batchIndex int + batchObjects objects.BatchObjects + saveObjectItems chan objects.BatchObjects + errorCount int64 + ec *errorcompounder.SafeErrorCompounder + cancel chan struct{} + batchThreshold int + logger logrus.FieldLogger +} + +func newBatchWriter(vectorRepo vectorRepo, logger logrus.FieldLogger) Writer { + return &batchWriter{ + vectorRepo: vectorRepo, + batchItemsCount: 0, + batchObjects: objects.BatchObjects{}, + saveObjectItems: make(chan objects.BatchObjects), + errorCount: 0, + ec: &errorcompounder.SafeErrorCompounder{}, + cancel: make(chan struct{}), + batchThreshold: 100, + logger: logger, + } +} + +// Store puts an item to batch list +func (r *batchWriter) Store(item 
search.Result) error { + r.mutex.Lock() + defer r.mutex.Unlock() + return r.storeObject(item) +} + +// Start starts the batch save goroutine +func (r *batchWriter) Start() { + enterrors.GoWrapper(func() { r.batchSave() }, r.logger) +} + +// Stop stops the batch save goroutine and saves the last items +func (r *batchWriter) Stop() WriterResults { + r.cancel <- struct{}{} + r.saveObjects(r.batchObjects) + return batchWriterResults{int64(r.batchItemsCount) - r.errorCount, r.errorCount, r.ec.ToError()} +} + +func (r *batchWriter) storeObject(item search.Result) error { + batchObject := objects.BatchObject{ + UUID: item.ID, + Object: item.Object(), + OriginalIndex: r.batchIndex, + } + r.batchItemsCount++ + r.batchIndex++ + r.batchObjects = append(r.batchObjects, batchObject) + if len(r.batchObjects) >= r.batchThreshold { + r.saveObjectItems <- r.batchObjects + r.batchObjects = objects.BatchObjects{} + r.batchIndex = 0 + } + return nil +} + +// This goroutine is created in order to make possible the batch save operation to be run in background +// and not to block the Store(item) operation invocation which is being done by the worker threads +func (r *batchWriter) batchSave() { + for { + select { + case <-r.cancel: + return + case items := <-r.saveObjectItems: + r.saveObjects(items) + } + } +} + +func (r *batchWriter) saveObjects(items objects.BatchObjects) { + // we need to allow quite some time as this is now a batch, no longer just a + // single item and we don't have any control over what other load is + // currently going on, such as imports. TODO: should this be + // user-configurable? 
+ ctx, cancel := contextWithTimeout(30 * time.Second) + defer cancel() + + if len(items) > 0 { + saved, err := r.vectorRepo.BatchPutObjects(ctx, items, nil, 0) + if err != nil { + r.ec.Add(err) + } + for i := range saved { + if saved[i].Err != nil { + r.ec.Add(saved[i].Err) + r.errorCount++ + } + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/classification/writer_test.go b/platform/dbops/binaries/weaviate-src/usecases/classification/writer_test.go new file mode 100644 index 0000000000000000000000000000000000000000..6c2f5c3d04aafe634b1687778083475b6597e5f1 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/classification/writer_test.go @@ -0,0 +1,111 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package classification + +import ( + "fmt" + "testing" + + "github.com/sirupsen/logrus/hooks/test" + + "github.com/go-openapi/strfmt" + "github.com/stretchr/testify/assert" + "github.com/weaviate/weaviate/entities/search" +) + +var logger, _ = test.NewNullLogger() + +func testParallelBatchWrite(batchWriter Writer, items search.Results, resultChannel chan<- WriterResults) { + batchWriter.Start() + for _, item := range items { + batchWriter.Store(item) + } + res := batchWriter.Stop() + resultChannel <- res +} + +func generateSearchResultsToSave(size int) search.Results { + items := make(search.Results, 0) + for i := 0; i < size; i++ { + res := search.Result{ + ID: strfmt.UUID(fmt.Sprintf("75ba35af-6a08-40ae-b442-3bec69b35%03d", i)), + ClassName: "Article", + Vector: []float32{0.78, 0, 0}, + Schema: map[string]interface{}{ + "description": "Barack Obama is a former US president", + }, + } + items = append(items, res) + } + return items +} + +func TestWriter_SimpleWrite(t *testing.T) { + // given + 
searchResultsToBeSaved := testDataToBeClassified() + vectorRepo := newFakeVectorRepoKNN(searchResultsToBeSaved, testDataAlreadyClassified()) + batchWriter := newBatchWriter(vectorRepo, logger) + // when + batchWriter.Start() + for _, item := range searchResultsToBeSaved { + batchWriter.Store(item) + } + res := batchWriter.Stop() + // then + assert.Equal(t, int64(len(searchResultsToBeSaved)), res.SuccessCount()) + assert.Equal(t, int64(0), res.ErrorCount()) + assert.Equal(t, nil, res.Err()) +} + +func TestWriter_LoadWrites(t *testing.T) { + // given + searchResultsCount := 640 + searchResultsToBeSaved := generateSearchResultsToSave(searchResultsCount) + vectorRepo := newFakeVectorRepoKNN(searchResultsToBeSaved, testDataAlreadyClassified()) + batchWriter := newBatchWriter(vectorRepo, logger) + // when + batchWriter.Start() + for _, item := range searchResultsToBeSaved { + batchWriter.Store(item) + } + res := batchWriter.Stop() + // then + assert.Equal(t, int64(searchResultsCount), res.SuccessCount()) + assert.Equal(t, int64(0), res.ErrorCount()) + assert.Equal(t, nil, res.Err()) +} + +func TestWriter_ParallelLoadWrites(t *testing.T) { + // given + searchResultsToBeSavedCount1 := 600 + searchResultsToBeSavedCount2 := 440 + searchResultsToBeSaved1 := generateSearchResultsToSave(searchResultsToBeSavedCount1) + searchResultsToBeSaved2 := generateSearchResultsToSave(searchResultsToBeSavedCount2) + vectorRepo1 := newFakeVectorRepoKNN(searchResultsToBeSaved1, testDataAlreadyClassified()) + batchWriter1 := newBatchWriter(vectorRepo1, logger) + resChannel1 := make(chan WriterResults) + vectorRepo2 := newFakeVectorRepoKNN(searchResultsToBeSaved2, testDataAlreadyClassified()) + batchWriter2 := newBatchWriter(vectorRepo2, logger) + resChannel2 := make(chan WriterResults) + // when + go testParallelBatchWrite(batchWriter1, searchResultsToBeSaved1, resChannel1) + go testParallelBatchWrite(batchWriter2, searchResultsToBeSaved2, resChannel2) + res1 := <-resChannel1 + res2 := 
<-resChannel2 + // then + assert.Equal(t, int64(searchResultsToBeSavedCount1), res1.SuccessCount()) + assert.Equal(t, int64(0), res1.ErrorCount()) + assert.Equal(t, nil, res1.Err()) + assert.Equal(t, int64(searchResultsToBeSavedCount2), res2.SuccessCount()) + assert.Equal(t, int64(0), res2.ErrorCount()) + assert.Equal(t, nil, res2.Err()) +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/cluster/delegate.go b/platform/dbops/binaries/weaviate-src/usecases/cluster/delegate.go new file mode 100644 index 0000000000000000000000000000000000000000..bf49082c230694191016d450d7e7e3f58f8cbfa4 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/cluster/delegate.go @@ -0,0 +1,320 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package cluster + +import ( + "bytes" + "encoding/binary" + "encoding/json" + "fmt" + "math/rand" + "sort" + "sync" + "time" + + enterrors "github.com/weaviate/weaviate/entities/errors" + + "github.com/hashicorp/memberlist" + "github.com/sirupsen/logrus" +) + +// _OpCode represents the type of supported operation +type _OpCode uint8 + +const ( + // _ProtoVersion internal protocol version for exchanging messages + _ProtoVersion uint8 = 1 + // _OpCodeDisk operation code for getting disk space + _OpCodeDisk _OpCode = 1 + // _ProtoTTL used to decide when to update the cache + _ProtoTTL = time.Second * 8 +) + +// spaceMsg is used to notify other nodes about current disk usage +type spaceMsg struct { + header + DiskUsage + NodeLen uint8 // = len(Node) is required to marshal Node + Node string // node space +} + +// header of an operation +type header struct { + // OpCode operation code + OpCode _OpCode + // ProtoVersion protocol we will speak + ProtoVersion uint8 +} + +// DiskUsage contains total 
and available space in B +type DiskUsage struct { + // Total disk space + Total uint64 + // Total available space + Available uint64 +} + +// NodeInfo disk space +type NodeInfo struct { + DiskUsage + LastTimeMilli int64 // last update time in milliseconds +} + +func (d *spaceMsg) marshal() (data []byte, err error) { + buf := bytes.NewBuffer(make([]byte, 0, 24+len(d.Node))) + if err := binary.Write(buf, binary.BigEndian, d.header); err != nil { + return nil, err + } + if err := binary.Write(buf, binary.BigEndian, d.DiskUsage); err != nil { + return nil, err + } + // code node name starting by its length + if err := buf.WriteByte(d.NodeLen); err != nil { + return nil, err + } + _, err = buf.Write([]byte(d.Node)) + return buf.Bytes(), err +} + +func (d *spaceMsg) unmarshal(data []byte) (err error) { + rd := bytes.NewReader(data) + if err = binary.Read(rd, binary.BigEndian, &d.header); err != nil { + return + } + if err = binary.Read(rd, binary.BigEndian, &d.DiskUsage); err != nil { + return + } + + // decode node name start by its length + if d.NodeLen, err = rd.ReadByte(); err != nil { + return + } + begin := len(data) - rd.Len() + end := begin + int(d.NodeLen) + // make sure this version is backward compatible + if _ProtoVersion <= 1 && begin+int(d.NodeLen) != len(data) { + begin-- // since previous version doesn't encode the length + end = len(data) + d.NodeLen = uint8(end - begin) + } + d.Node = string(data[begin:end]) + return nil +} + +// delegate implements the memberList delegate interface +type delegate struct { + Name string + dataPath string + log logrus.FieldLogger + sync.Mutex + Cache map[string]NodeInfo + + mutex sync.Mutex + hostInfo NodeInfo + + metadata NodeMetadata +} + +type NodeMetadata struct { + RestPort int `json:"rest_port"` + GrpcPort int `json:"grpc_port"` +} + +func (d *delegate) setOwnSpace(x DiskUsage) { + d.mutex.Lock() + d.hostInfo = NodeInfo{DiskUsage: x, LastTimeMilli: time.Now().UnixMilli()} + d.mutex.Unlock() +} + +func (d *delegate) 
ownInfo() NodeInfo { + d.mutex.Lock() + defer d.mutex.Unlock() + return d.hostInfo +} + +// init must be called first to initialize the cache +func (d *delegate) init(diskSpace func(path string) (DiskUsage, error)) error { + d.Cache = make(map[string]NodeInfo, 32) + if diskSpace == nil { + return fmt.Errorf("function calculating disk space cannot be empty") + } + lastTime := time.Now() + minUpdatePeriod := time.Second + _ProtoTTL/3 + space, err := diskSpace(d.dataPath) + if err != nil { + lastTime = lastTime.Add(-minUpdatePeriod) + d.log.WithError(err).Error("calculate disk space") + } + + d.setOwnSpace(space) + d.set(d.Name, NodeInfo{space, lastTime.UnixMilli()}) // cache + + // delegate remains alive throughout the entire program. + enterrors.GoWrapper(func() { d.updater(_ProtoTTL, minUpdatePeriod, diskSpace) }, d.log) + return nil +} + +// NodeMeta is used to retrieve meta-data about the current node +// when broadcasting an alive message. It's length is limited to +// the given byte size. This metadata is available in the Node structure. +func (d *delegate) NodeMeta(limit int) (meta []byte) { + data, err := json.Marshal(d.metadata) + if err != nil { + return nil + } + if len(data) > limit { + return nil + } + return data +} + +// LocalState is used for a TCP Push/Pull. This is sent to +// the remote side in addition to the membership information. Any +// data can be sent here. See MergeRemoteState as well. The `join` +// boolean indicates this is for a join instead of a push/pull. +func (d *delegate) LocalState(join bool) []byte { + var ( + info = d.ownInfo() + err error + ) + + d.set(d.Name, info) // cache new value + + x := spaceMsg{ + header{ + OpCode: _OpCodeDisk, + ProtoVersion: _ProtoVersion, + }, + info.DiskUsage, + uint8(len(d.Name)), + d.Name, + } + bytes, err := x.marshal() + if err != nil { + d.log.WithField("action", "delegate.local_state.marshal").WithError(err). 
+ Error("failed to marshal local state") + return nil + } + return bytes +} + +// MergeRemoteState is invoked after a TCP Push/Pull. This is the +// state received from the remote side and is the result of the +// remote side's LocalState call. The 'join' +// boolean indicates this is for a join instead of a push/pull. +func (d *delegate) MergeRemoteState(data []byte, join bool) { + // Does operation match _OpCodeDisk + if _OpCode(data[0]) != _OpCodeDisk { + return + } + var x spaceMsg + if err := x.unmarshal(data); err != nil || x.Node == "" { + d.log.WithFields(logrus.Fields{ + "action": "delegate.merge_remote.unmarshal", + "data": string(data), + }).WithError(err).Error("failed to unmarshal remote state") + return + } + info := NodeInfo{x.DiskUsage, time.Now().UnixMilli()} + d.set(x.Node, info) +} + +func (d *delegate) NotifyMsg(data []byte) {} + +func (d *delegate) GetBroadcasts(overhead, limit int) [][]byte { + return nil +} + +// get returns info about about a specific node in the cluster +func (d *delegate) get(node string) (NodeInfo, bool) { + d.Lock() + defer d.Unlock() + x, ok := d.Cache[node] + return x, ok +} + +func (d *delegate) set(node string, x NodeInfo) { + d.Lock() + defer d.Unlock() + d.Cache[node] = x +} + +// delete key from the cache +func (d *delegate) delete(node string) { + d.Lock() + defer d.Unlock() + delete(d.Cache, node) +} + +// sortCandidates by the amount of free space in descending order +// +// Two nodes are considered equivalent if the difference between their +// free spaces is less than 32MB. +// The free space is just an rough estimate of the actual amount. +// The Lower bound 32MB helps to mitigate the risk of selecting same set of nodes +// when selections happens concurrently on different initiator nodes. 
+func (d *delegate) sortCandidates(names []string) []string { + rand.Shuffle(len(names), func(i, j int) { names[i], names[j] = names[j], names[i] }) + + d.Lock() + defer d.Unlock() + m := d.Cache + sort.Slice(names, func(i, j int) bool { + return (m[names[j]].Available >> 25) < (m[names[i]].Available >> 25) + }) + + return names +} + +// updater a function which updates node information periodically +func (d *delegate) updater(period, minPeriod time.Duration, du func(path string) (DiskUsage, error)) { + t := time.NewTicker(period) + defer t.Stop() + curTime := time.Now() + for range t.C { + if time.Since(curTime) < minPeriod { // too short + continue // wait for next cycle to avoid overwhelming the disk + } + space, err := du(d.dataPath) + if err != nil { + d.log.WithField("action", "delegate.local_state.disk_usage").WithError(err). + Error("disk space updater failed") + } else { + d.setOwnSpace(space) + } + curTime = time.Now() + } +} + +// events implement memberlist.EventDelegate interface +// EventDelegate is a simpler delegate that is used only to receive +// notifications about members joining and leaving. The methods in this +// delegate may be called by multiple goroutines, but never concurrently. +// This allows you to reason about ordering. +type events struct { + d *delegate +} + +// NotifyJoin is invoked when a node is detected to have joined. +// The Node argument must not be modified. +func (e events) NotifyJoin(*memberlist.Node) {} + +// NotifyLeave is invoked when a node is detected to have left. +// The Node argument must not be modified. +func (e events) NotifyLeave(node *memberlist.Node) { + e.d.delete(node.Name) +} + +// NotifyUpdate is invoked when a node is detected to have +// updated, usually involving the meta data. The Node argument +// must not be modified. 
+func (e events) NotifyUpdate(*memberlist.Node) {} diff --git a/platform/dbops/binaries/weaviate-src/usecases/cluster/delegate_test.go b/platform/dbops/binaries/weaviate-src/usecases/cluster/delegate_test.go new file mode 100644 index 0000000000000000000000000000000000000000..4318114948fa751c95e2223645072e185446e2f8 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/cluster/delegate_test.go @@ -0,0 +1,313 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package cluster + +import ( + "fmt" + "testing" + "time" + + "github.com/hashicorp/memberlist" + "github.com/pkg/errors" + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" +) + +func TestDiskSpaceMarshal(t *testing.T) { + for _, name := range []string{"", "host-12:1", "2", "00", "-jhd"} { + want := spaceMsg{ + header{ + ProtoVersion: uint8(1), + OpCode: _OpCode(2), + }, + DiskUsage{ + Total: 256, + Available: 3, + }, + uint8(len(name)), + name, + } + bytes, err := want.marshal() + assert.Nil(t, err) + got := spaceMsg{} + err = got.unmarshal(bytes) + assert.Nil(t, err) + assert.Equal(t, want, got) + } + + // simulate old version + x := spaceMsg{ + header{ + ProtoVersion: uint8(1), + OpCode: _OpCode(2), + }, + DiskUsage{ + Total: 256, + Available: 3, + }, + uint8('0'), + "123", + } + bytes, err := x.marshal() + want := x + want.NodeLen = 4 + want.Node = "0123" + assert.Nil(t, err) + got := spaceMsg{} + err = got.unmarshal(bytes) + assert.Nil(t, err) + assert.Equal(t, want, got) +} + +func TestDelegateGetSet(t *testing.T) { + logger, _ := test.NewNullLogger() + now := time.Now().UnixMilli() - 1 + st := State{ + delegate: delegate{ + Name: "ABC", + dataPath: ".", + log: logger, + Cache: make(map[string]NodeInfo, 32), + }, + } + 
st.delegate.NotifyMsg(nil) + st.delegate.GetBroadcasts(0, 0) + st.delegate.NodeMeta(0) + spaces := make([]spaceMsg, 32) + for i := range spaces { + node := fmt.Sprintf("N-%d", i+1) + spaces[i] = spaceMsg{ + header: header{ + OpCode: _OpCodeDisk, + ProtoVersion: _ProtoVersion + 2, + }, + DiskUsage: DiskUsage{ + uint64(i + 1), + uint64(i), + }, + Node: node, + NodeLen: uint8(len(node)), + } + } + + done := make(chan struct{}) + go func() { + for _, x := range spaces { + bytes, _ := x.marshal() + st.delegate.MergeRemoteState(bytes, false) + } + done <- struct{}{} + }() + + _, ok := st.delegate.get("X") + assert.False(t, ok) + + for _, x := range spaces { + space, ok := st.NodeInfo(x.Node) + if ok { + assert.Equal(t, x.DiskUsage, space.DiskUsage) + } + } + <-done + for _, x := range spaces { + info, ok := st.NodeInfo(x.Node) + assert.Greater(t, info.LastTimeMilli, now) + want := NodeInfo{x.DiskUsage, info.LastTimeMilli} + assert.Equal(t, want, info) + assert.True(t, ok) + st.delegate.delete(x.Node) + + } + assert.Empty(t, st.delegate.Cache) + st.delegate.init(diskSpace) + assert.Equal(t, 1, len(st.delegate.Cache)) + + st.delegate.MergeRemoteState(st.delegate.LocalState(false), false) + space, ok := st.NodeInfo(st.delegate.Name) + assert.True(t, ok) + assert.Greater(t, space.Total, space.Available) +} + +func TestDelegateMergeRemoteState(t *testing.T) { + logger, _ := test.NewNullLogger() + var ( + node = "N1" + d = delegate{ + Name: node, + dataPath: ".", + log: logger, + Cache: make(map[string]NodeInfo, 32), + } + x = spaceMsg{ + header{ + OpCode: _OpCodeDisk, + ProtoVersion: _ProtoVersion, + }, + DiskUsage{2, 1}, + uint8(len(node)), + node, + } + ) + // valid operation payload + bytes, err := x.marshal() + assert.Nil(t, err) + d.MergeRemoteState(bytes, false) + _, ok := d.get(node) + assert.True(t, ok) + + node = "N2" + // invalid payload => expect marshalling error + d.MergeRemoteState(bytes[:4], false) + assert.Nil(t, err) + _, ok = d.get(node) + assert.False(t, 
ok) + + // valid payload but operation is not supported + node = "N2" + x.header.OpCode = _OpCodeDisk + 2 + bytes, err = x.marshal() + d.MergeRemoteState(bytes, false) + assert.Nil(t, err) + _, ok = d.get(node) + assert.False(t, ok) +} + +func TestDelegateSort(t *testing.T) { + now := time.Now().UnixMilli() + GB := uint64(1) << 30 + delegate := delegate{ + Name: "ABC", + dataPath: ".", + Cache: make(map[string]NodeInfo, 32), + } + + delegate.set("N1", NodeInfo{DiskUsage{Available: GB}, now}) + delegate.set("N2", NodeInfo{DiskUsage{Available: 3 * GB}, now}) + delegate.set("N3", NodeInfo{DiskUsage{Available: 2 * GB}, now}) + delegate.set("N4", NodeInfo{DiskUsage{Available: 4 * GB}, now}) + got := delegate.sortCandidates([]string{"N1", "N0", "N2", "N4", "N3"}) + assert.Equal(t, []string{"N4", "N2", "N3", "N1", "N0"}, got) + + delegate.set("N1", NodeInfo{DiskUsage{Available: GB - 10}, now}) + // insert equivalent nodes "N2" and "N3" + delegate.set("N2", NodeInfo{DiskUsage{Available: GB + 128}, now}) + delegate.set("N3", NodeInfo{DiskUsage{Available: GB + 512}, now}) + // one block more + delegate.set("N4", NodeInfo{DiskUsage{Available: GB + 1<<25}, now}) + got = delegate.sortCandidates([]string{"N1", "N0", "N2", "N3", "N4"}) + if got[1] == "N2" { + assert.Equal(t, []string{"N4", "N2", "N3", "N1", "N0"}, got) + } else { + assert.Equal(t, []string{"N4", "N3", "N2", "N1", "N0"}, got) + } +} + +func TestDelegateCleanUp(t *testing.T) { + st := State{ + delegate: delegate{ + Name: "N0", + dataPath: ".", + }, + } + diskSpace := func(path string) (DiskUsage, error) { + return DiskUsage{100, 50}, nil + } + st.delegate.init(diskSpace) + _, ok := st.delegate.get("N0") + assert.True(t, ok, "N0 must exist") + st.delegate.set("N1", NodeInfo{LastTimeMilli: 1}) + st.delegate.set("N2", NodeInfo{LastTimeMilli: 2}) + handler := events{&st.delegate} + handler.NotifyJoin(nil) + handler.NotifyUpdate(nil) + handler.NotifyLeave(&memberlist.Node{Name: "N0"}) + 
handler.NotifyLeave(&memberlist.Node{Name: "N1"}) + handler.NotifyLeave(&memberlist.Node{Name: "N2"}) + assert.Empty(t, st.delegate.Cache) +} + +func TestDelegateLocalState(t *testing.T) { + now := time.Now().UnixMilli() - 1 + errAny := errors.New("any error") + logger, _ := test.NewNullLogger() + + t.Run("FirstError", func(t *testing.T) { + d := delegate{ + Name: "N0", + dataPath: ".", + log: logger, + Cache: map[string]NodeInfo{}, + } + du := func(path string) (DiskUsage, error) { return DiskUsage{}, errAny } + d.init(du) + + // error reading disk space + d.LocalState(true) + assert.Len(t, d.Cache, 1) + }) + + t.Run("Success", func(t *testing.T) { + d := delegate{ + Name: "N0", + dataPath: ".", + log: logger, + Cache: map[string]NodeInfo{}, + } + du := func(path string) (DiskUsage, error) { return DiskUsage{5, 1}, nil } + d.init(du) + // successful case + d.LocalState(true) + got, ok := d.get("N0") + assert.True(t, ok) + assert.Greater(t, got.LastTimeMilli, now) + assert.Equal(t, DiskUsage{5, 1}, got.DiskUsage) + }) +} + +func TestDelegateUpdater(t *testing.T) { + logger, _ := test.NewNullLogger() + now := time.Now().UnixMilli() - 1 + + d := delegate{ + Name: "N0", + dataPath: ".", + log: logger, + Cache: map[string]NodeInfo{}, + } + err := d.init(nil) + assert.NotNil(t, err) + doneCh := make(chan bool) + nCalls := uint64(0) + du := func(path string) (DiskUsage, error) { + nCalls++ + if nCalls == 1 || nCalls == 3 { + return DiskUsage{2 * nCalls, nCalls}, nil + } + if nCalls == 2 { + return DiskUsage{}, fmt.Errorf("any") + } + if nCalls == 4 { + close(doneCh) + } + return DiskUsage{}, fmt.Errorf("any") + } + go d.updater(time.Millisecond, 5*time.Millisecond, du) + + <-doneCh + + // error reading disk space + d.LocalState(true) + got, ok := d.get("N0") + assert.True(t, ok) + assert.Greater(t, got.LastTimeMilli, now) + assert.Equal(t, DiskUsage{3 * 2, 3}, got.DiskUsage) +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/cluster/disk_use_unix.go 
b/platform/dbops/binaries/weaviate-src/usecases/cluster/disk_use_unix.go
new file mode 100644
index 0000000000000000000000000000000000000000..0686c3c8f39cf7b0caf00a54eac7556ea87abeab
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/cluster/disk_use_unix.go
@@ -0,0 +1,31 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+//go:build !windows
+
+package cluster
+
+import (
+	"syscall"
+)
+
+// diskSpace reports the disk usage of the filesystem containing path.
+// Total is the filesystem size in bytes. Available is computed from
+// Bavail (blocks available to unprivileged users), not Bfree, matching
+// what `df` reports.
+func diskSpace(path string) (DiskUsage, error) {
+	var stat syscall.Statfs_t
+	if err := syscall.Statfs(path, &stat); err != nil {
+		return DiskUsage{}, err
+	}
+	blockSize := uint64(stat.Bsize)
+	return DiskUsage{
+		Total:     stat.Blocks * blockSize,
+		Available: stat.Bavail * blockSize,
+	}, nil
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/cluster/disk_use_windows.go b/platform/dbops/binaries/weaviate-src/usecases/cluster/disk_use_windows.go
new file mode 100644
index 0000000000000000000000000000000000000000..96b7212306bb92bf844e082b1e2b1a5a25a28f41
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/cluster/disk_use_windows.go
@@ -0,0 +1,38 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+// +// CONTACT: hello@weaviate.io +// + +//go:build windows + +package cluster + +import ( + "golang.org/x/sys/windows" +) + +// diskSpace return the disk space usage +func diskSpace(path string) (DiskUsage, error) { + var freeBytesAvailableToCaller, totalBytes, totalFreeBytes uint64 + + err := windows.GetDiskFreeSpaceEx( + windows.StringToUTF16Ptr(path), + &freeBytesAvailableToCaller, + &totalBytes, + &totalFreeBytes, + ) + if err != nil { + return DiskUsage{}, err + } + + return DiskUsage{ + Total: totalBytes, + Available: freeBytesAvailableToCaller, + }, nil +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/cluster/ideal_node_list.go b/platform/dbops/binaries/weaviate-src/usecases/cluster/ideal_node_list.go new file mode 100644 index 0000000000000000000000000000000000000000..ff58430eaebfdd63d2e04b5484dbc2f9c948acbf --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/cluster/ideal_node_list.go @@ -0,0 +1,97 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package cluster + +import ( + "fmt" + "sort" + "strings" + "sync" + "time" + + "github.com/sirupsen/logrus" + enterrors "github.com/weaviate/weaviate/entities/errors" +) + +type IdealClusterState struct { + memberNames []string + currentState MemberLister + lock sync.Mutex +} + +func NewIdealClusterState(s MemberLister, logger logrus.FieldLogger) *IdealClusterState { + ics := &IdealClusterState{currentState: s} + enterrors.GoWrapper(func() { ics.startPolling() }, logger) + return ics +} + +// Validate returns an error if the actual state does not match the assumed +// ideal state, e.g. because a node has died, or left unexpectedly. 
+func (ics *IdealClusterState) Validate() error {
+	ics.lock.Lock()
+	defer ics.lock.Unlock()
+
+	// Build a set of the nodes memberlist currently sees.
+	actual := map[string]struct{}{}
+	for _, name := range ics.currentState.AllNames() {
+		actual[name] = struct{}{}
+	}
+
+	// Any node we have ever observed but which is gone now is "missing".
+	var missing []string
+	for _, name := range ics.memberNames {
+		if _, ok := actual[name]; !ok {
+			missing = append(missing, name)
+		}
+	}
+
+	if len(missing) > 0 {
+		return fmt.Errorf("node(s) %s unhealthy or unavailable",
+			strings.Join(missing, ", "))
+	}
+
+	return nil
+}
+
+// Members returns the names of all members observed so far. It returns a
+// copy: the internal slice is appended to and re-sorted by the polling
+// goroutine (extendList), so handing out an alias would let callers read
+// it concurrently with those mutations, outside the lock.
+func (ics *IdealClusterState) Members() []string {
+	ics.lock.Lock()
+	defer ics.lock.Unlock()
+
+	out := make([]string, len(ics.memberNames))
+	copy(out, ics.memberNames)
+	return out
+}
+
+// startPolling extends the member list with newly seen nodes once per
+// second. NOTE(review): there is no stop signal, so this goroutine runs
+// for the process lifetime; the deferred Stop only fires if the loop is
+// ever given an exit path.
+func (ics *IdealClusterState) startPolling() {
+	t := time.NewTicker(1 * time.Second)
+	defer t.Stop()
+	for {
+		<-t.C
+		current := ics.currentState.AllNames()
+		ics.extendList(current)
+	}
+}
+
+// extendList adds any names from current that have not been seen before
+// and keeps memberNames sorted. Names are never removed: the "ideal"
+// state is the union of everything ever observed.
+func (ics *IdealClusterState) extendList(current []string) {
+	ics.lock.Lock()
+	defer ics.lock.Unlock()
+
+	var unknown []string
+	known := map[string]struct{}{}
+	for _, name := range ics.memberNames {
+		known[name] = struct{}{}
+	}
+
+	for _, name := range current {
+		if _, ok := known[name]; !ok {
+			unknown = append(unknown, name)
+		}
+	}
+
+	ics.memberNames = append(ics.memberNames, unknown...)
+	sort.Strings(ics.memberNames)
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/cluster/iterator.go b/platform/dbops/binaries/weaviate-src/usecases/cluster/iterator.go
new file mode 100644
index 0000000000000000000000000000000000000000..a36fc4b016b987fe343eeb5a37ebe00808ffce32
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/cluster/iterator.go
@@ -0,0 +1,73 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+// +// CONTACT: hello@weaviate.io +// + +package cluster + +import ( + "fmt" + "math/rand" +) + +type NodeIterationStrategy int + +const ( + StartRandom NodeIterationStrategy = iota + StartAfter +) + +type NodeIterator struct { + hostnames []string + state int +} + +type HostnameSource interface { + AllNames() []string +} + +func NewNodeIterator(nodeNames []string, + strategy NodeIterationStrategy, +) (*NodeIterator, error) { + if strategy != StartRandom && strategy != StartAfter { + return nil, fmt.Errorf("unsupported strategy: %v", strategy) + } + + startState := 0 + if strategy == StartRandom { + startState = rand.Intn(len(nodeNames)) + } + + return &NodeIterator{ + hostnames: nodeNames, + state: startState, + }, nil +} + +func (n *NodeIterator) SetStartNode(startNode string) { + for i, node := range n.hostnames { + if node == startNode { + n.state = i + 1 + if n.state == len(n.hostnames) { + n.state = 0 + } + break + } + } +} + +func (n *NodeIterator) Next() string { + curr := n.hostnames[n.state] + n.state++ + if n.state == len(n.hostnames) { + n.state = 0 + } + + return curr +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/cluster/iterator_test.go b/platform/dbops/binaries/weaviate-src/usecases/cluster/iterator_test.go new file mode 100644 index 0000000000000000000000000000000000000000..d87d41c0fa4c4ffad3e70797d892bcf82b1b46cb --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/cluster/iterator_test.go @@ -0,0 +1,55 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package cluster + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNodeIteration(t *testing.T) { + source := []string{"node1", "node2", "node3", "node4"} + it, err := NewNodeIterator(source, StartRandom) + require.Nil(t, err) + + found := map[string]int{} + + for i := 0; i < 20; i++ { + host := it.Next() + found[host]++ + } + + // each host must be contained 5 times + assert.Equal(t, found["node1"], 5) + assert.Equal(t, found["node2"], 5) + assert.Equal(t, found["node3"], 5) + assert.Equal(t, found["node4"], 5) +} + +func TestNodeIterationStartAfter(t *testing.T) { + source := []string{"node1", "node2", "node3", "node4"} + it, err := NewNodeIterator(source, StartAfter) + it.SetStartNode("node2") + require.Nil(t, err) + + iterations := 3 + found := make([]string, iterations) + for i := 0; i < iterations; i++ { + host := it.Next() + found[i] = host + } + + expected := []string{"node3", "node4", "node1"} + assert.Equal(t, expected, found) +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/cluster/log_workaround.go b/platform/dbops/binaries/weaviate-src/usecases/cluster/log_workaround.go new file mode 100644 index 0000000000000000000000000000000000000000..bd9999522e9b2ff304f1de3e6759c2b89b1ae896 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/cluster/log_workaround.go @@ -0,0 +1,53 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package cluster + +import ( + "regexp" + + "github.com/sirupsen/logrus" +) + +type logParser struct { + logrus logrus.FieldLogger + regexp *regexp.Regexp +} + +func newLogParser(logrus logrus.FieldLogger) *logParser { + return &logParser{ + logrus: logrus, + regexp: regexp.MustCompile(`(.*)\[(DEBUG|ERR|ERROR|INFO|WARNING|WARN)](.*)`), + } +} + +func (l *logParser) Write(in []byte) (int, error) { + res := l.regexp.FindSubmatch(in) + if len(res) != 4 { + // unable to parse log message + l.logrus.WithField("in", in).Warn("unable to parse memberlist log message") + } + + switch string(res[2]) { + case "ERR", "ERROR": + l.logrus.Error(string(res[3])) + case "WARN", "WARNING": + l.logrus.Warn(string(res[3])) + case "DEBUG": + l.logrus.Debug(string(res[3])) + case "INFO": + l.logrus.Info(string(res[3])) + default: + l.logrus.WithField("in", in).Warn("unable to parse memberlist log level from message") + } + + return len(in), nil +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/cluster/mock_node_selector.go b/platform/dbops/binaries/weaviate-src/usecases/cluster/mock_node_selector.go new file mode 100644 index 0000000000000000000000000000000000000000..9e726c98a05af3f082b895b8b51d1346d1614ff2 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/cluster/mock_node_selector.go @@ -0,0 +1,435 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by mockery v2.53.2. DO NOT EDIT. 
+ +package cluster + +import mock "github.com/stretchr/testify/mock" + +// MockNodeSelector is an autogenerated mock type for the NodeSelector type +type MockNodeSelector struct { + mock.Mock +} + +type MockNodeSelector_Expecter struct { + mock *mock.Mock +} + +func (_m *MockNodeSelector) EXPECT() *MockNodeSelector_Expecter { + return &MockNodeSelector_Expecter{mock: &_m.Mock} +} + +// AllHostnames provides a mock function with no fields +func (_m *MockNodeSelector) AllHostnames() []string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for AllHostnames") + } + + var r0 []string + if rf, ok := ret.Get(0).(func() []string); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]string) + } + } + + return r0 +} + +// MockNodeSelector_AllHostnames_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AllHostnames' +type MockNodeSelector_AllHostnames_Call struct { + *mock.Call +} + +// AllHostnames is a helper method to define mock.On call +func (_e *MockNodeSelector_Expecter) AllHostnames() *MockNodeSelector_AllHostnames_Call { + return &MockNodeSelector_AllHostnames_Call{Call: _e.mock.On("AllHostnames")} +} + +func (_c *MockNodeSelector_AllHostnames_Call) Run(run func()) *MockNodeSelector_AllHostnames_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockNodeSelector_AllHostnames_Call) Return(_a0 []string) *MockNodeSelector_AllHostnames_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockNodeSelector_AllHostnames_Call) RunAndReturn(run func() []string) *MockNodeSelector_AllHostnames_Call { + _c.Call.Return(run) + return _c +} + +// LocalName provides a mock function with no fields +func (_m *MockNodeSelector) LocalName() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for LocalName") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = 
ret.Get(0).(string) + } + + return r0 +} + +// MockNodeSelector_LocalName_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LocalName' +type MockNodeSelector_LocalName_Call struct { + *mock.Call +} + +// LocalName is a helper method to define mock.On call +func (_e *MockNodeSelector_Expecter) LocalName() *MockNodeSelector_LocalName_Call { + return &MockNodeSelector_LocalName_Call{Call: _e.mock.On("LocalName")} +} + +func (_c *MockNodeSelector_LocalName_Call) Run(run func()) *MockNodeSelector_LocalName_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockNodeSelector_LocalName_Call) Return(_a0 string) *MockNodeSelector_LocalName_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockNodeSelector_LocalName_Call) RunAndReturn(run func() string) *MockNodeSelector_LocalName_Call { + _c.Call.Return(run) + return _c +} + +// NodeAddress provides a mock function with given fields: id +func (_m *MockNodeSelector) NodeAddress(id string) string { + ret := _m.Called(id) + + if len(ret) == 0 { + panic("no return value specified for NodeAddress") + } + + var r0 string + if rf, ok := ret.Get(0).(func(string) string); ok { + r0 = rf(id) + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// MockNodeSelector_NodeAddress_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'NodeAddress' +type MockNodeSelector_NodeAddress_Call struct { + *mock.Call +} + +// NodeAddress is a helper method to define mock.On call +// - id string +func (_e *MockNodeSelector_Expecter) NodeAddress(id interface{}) *MockNodeSelector_NodeAddress_Call { + return &MockNodeSelector_NodeAddress_Call{Call: _e.mock.On("NodeAddress", id)} +} + +func (_c *MockNodeSelector_NodeAddress_Call) Run(run func(id string)) *MockNodeSelector_NodeAddress_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string)) + }) + return _c +} + +func (_c *MockNodeSelector_NodeAddress_Call) 
Return(_a0 string) *MockNodeSelector_NodeAddress_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockNodeSelector_NodeAddress_Call) RunAndReturn(run func(string) string) *MockNodeSelector_NodeAddress_Call { + _c.Call.Return(run) + return _c +} + +// NodeGRPCPort provides a mock function with given fields: id +func (_m *MockNodeSelector) NodeGRPCPort(id string) (int, error) { + ret := _m.Called(id) + + if len(ret) == 0 { + panic("no return value specified for NodeGRPCPort") + } + + var r0 int + var r1 error + if rf, ok := ret.Get(0).(func(string) (int, error)); ok { + return rf(id) + } + if rf, ok := ret.Get(0).(func(string) int); ok { + r0 = rf(id) + } else { + r0 = ret.Get(0).(int) + } + + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockNodeSelector_NodeGRPCPort_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'NodeGRPCPort' +type MockNodeSelector_NodeGRPCPort_Call struct { + *mock.Call +} + +// NodeGRPCPort is a helper method to define mock.On call +// - id string +func (_e *MockNodeSelector_Expecter) NodeGRPCPort(id interface{}) *MockNodeSelector_NodeGRPCPort_Call { + return &MockNodeSelector_NodeGRPCPort_Call{Call: _e.mock.On("NodeGRPCPort", id)} +} + +func (_c *MockNodeSelector_NodeGRPCPort_Call) Run(run func(id string)) *MockNodeSelector_NodeGRPCPort_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string)) + }) + return _c +} + +func (_c *MockNodeSelector_NodeGRPCPort_Call) Return(_a0 int, _a1 error) *MockNodeSelector_NodeGRPCPort_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockNodeSelector_NodeGRPCPort_Call) RunAndReturn(run func(string) (int, error)) *MockNodeSelector_NodeGRPCPort_Call { + _c.Call.Return(run) + return _c +} + +// NodeHostname provides a mock function with given fields: name +func (_m *MockNodeSelector) NodeHostname(name string) (string, bool) { + ret := _m.Called(name) + + if 
len(ret) == 0 { + panic("no return value specified for NodeHostname") + } + + var r0 string + var r1 bool + if rf, ok := ret.Get(0).(func(string) (string, bool)); ok { + return rf(name) + } + if rf, ok := ret.Get(0).(func(string) string); ok { + r0 = rf(name) + } else { + r0 = ret.Get(0).(string) + } + + if rf, ok := ret.Get(1).(func(string) bool); ok { + r1 = rf(name) + } else { + r1 = ret.Get(1).(bool) + } + + return r0, r1 +} + +// MockNodeSelector_NodeHostname_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'NodeHostname' +type MockNodeSelector_NodeHostname_Call struct { + *mock.Call +} + +// NodeHostname is a helper method to define mock.On call +// - name string +func (_e *MockNodeSelector_Expecter) NodeHostname(name interface{}) *MockNodeSelector_NodeHostname_Call { + return &MockNodeSelector_NodeHostname_Call{Call: _e.mock.On("NodeHostname", name)} +} + +func (_c *MockNodeSelector_NodeHostname_Call) Run(run func(name string)) *MockNodeSelector_NodeHostname_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string)) + }) + return _c +} + +func (_c *MockNodeSelector_NodeHostname_Call) Return(_a0 string, _a1 bool) *MockNodeSelector_NodeHostname_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockNodeSelector_NodeHostname_Call) RunAndReturn(run func(string) (string, bool)) *MockNodeSelector_NodeHostname_Call { + _c.Call.Return(run) + return _c +} + +// NonStorageNodes provides a mock function with no fields +func (_m *MockNodeSelector) NonStorageNodes() []string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for NonStorageNodes") + } + + var r0 []string + if rf, ok := ret.Get(0).(func() []string); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]string) + } + } + + return r0 +} + +// MockNodeSelector_NonStorageNodes_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'NonStorageNodes' +type 
MockNodeSelector_NonStorageNodes_Call struct { + *mock.Call +} + +// NonStorageNodes is a helper method to define mock.On call +func (_e *MockNodeSelector_Expecter) NonStorageNodes() *MockNodeSelector_NonStorageNodes_Call { + return &MockNodeSelector_NonStorageNodes_Call{Call: _e.mock.On("NonStorageNodes")} +} + +func (_c *MockNodeSelector_NonStorageNodes_Call) Run(run func()) *MockNodeSelector_NonStorageNodes_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockNodeSelector_NonStorageNodes_Call) Return(_a0 []string) *MockNodeSelector_NonStorageNodes_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockNodeSelector_NonStorageNodes_Call) RunAndReturn(run func() []string) *MockNodeSelector_NonStorageNodes_Call { + _c.Call.Return(run) + return _c +} + +// SortCandidates provides a mock function with given fields: nodes +func (_m *MockNodeSelector) SortCandidates(nodes []string) []string { + ret := _m.Called(nodes) + + if len(ret) == 0 { + panic("no return value specified for SortCandidates") + } + + var r0 []string + if rf, ok := ret.Get(0).(func([]string) []string); ok { + r0 = rf(nodes) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]string) + } + } + + return r0 +} + +// MockNodeSelector_SortCandidates_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SortCandidates' +type MockNodeSelector_SortCandidates_Call struct { + *mock.Call +} + +// SortCandidates is a helper method to define mock.On call +// - nodes []string +func (_e *MockNodeSelector_Expecter) SortCandidates(nodes interface{}) *MockNodeSelector_SortCandidates_Call { + return &MockNodeSelector_SortCandidates_Call{Call: _e.mock.On("SortCandidates", nodes)} +} + +func (_c *MockNodeSelector_SortCandidates_Call) Run(run func(nodes []string)) *MockNodeSelector_SortCandidates_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].([]string)) + }) + return _c +} + +func (_c 
*MockNodeSelector_SortCandidates_Call) Return(_a0 []string) *MockNodeSelector_SortCandidates_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockNodeSelector_SortCandidates_Call) RunAndReturn(run func([]string) []string) *MockNodeSelector_SortCandidates_Call { + _c.Call.Return(run) + return _c +} + +// StorageCandidates provides a mock function with no fields +func (_m *MockNodeSelector) StorageCandidates() []string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for StorageCandidates") + } + + var r0 []string + if rf, ok := ret.Get(0).(func() []string); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]string) + } + } + + return r0 +} + +// MockNodeSelector_StorageCandidates_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'StorageCandidates' +type MockNodeSelector_StorageCandidates_Call struct { + *mock.Call +} + +// StorageCandidates is a helper method to define mock.On call +func (_e *MockNodeSelector_Expecter) StorageCandidates() *MockNodeSelector_StorageCandidates_Call { + return &MockNodeSelector_StorageCandidates_Call{Call: _e.mock.On("StorageCandidates")} +} + +func (_c *MockNodeSelector_StorageCandidates_Call) Run(run func()) *MockNodeSelector_StorageCandidates_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockNodeSelector_StorageCandidates_Call) Return(_a0 []string) *MockNodeSelector_StorageCandidates_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockNodeSelector_StorageCandidates_Call) RunAndReturn(run func() []string) *MockNodeSelector_StorageCandidates_Call { + _c.Call.Return(run) + return _c +} + +// NewMockNodeSelector creates a new instance of MockNodeSelector. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewMockNodeSelector(t interface { + mock.TestingT + Cleanup(func()) +}) *MockNodeSelector { + mock := &MockNodeSelector{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/cluster/state.go b/platform/dbops/binaries/weaviate-src/usecases/cluster/state.go new file mode 100644 index 0000000000000000000000000000000000000000..7a7e2f109eba767759b03bc75c46d26fb5816978 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/cluster/state.go @@ -0,0 +1,460 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package cluster + +import ( + "encoding/json" + "fmt" + "net" + "slices" + "strings" + "sync" + + "github.com/hashicorp/memberlist" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// NodeSelector is an interface to select a portion of the available nodes in memberlist +type NodeSelector interface { + // NodeAddress resolves node id into an ip address without the port. + NodeAddress(id string) string + // NodeGRPCPort returns the gRPC port for a specific node id. 
	NodeGRPCPort(id string) (int, error)
	// StorageCandidates returns list of storage nodes (names)
	// sorted by the free amount of disk space in descending orders
	StorageCandidates() []string
	// NonStorageNodes return nodes from member list which
	// they are configured not to be voter only
	NonStorageNodes() []string
	// SortCandidates Sort passed nodes names by the
	// free amount of disk space in descending order
	SortCandidates(nodes []string) []string
	// LocalName() return local node name
	LocalName() string
	// NodeHostname return hosts address for a specific node name
	NodeHostname(name string) (string, bool)
	AllHostnames() []string
}

// State is this node's view of the gossip-based cluster membership. It wraps
// a hashicorp/memberlist instance plus the static cluster configuration.
type State struct {
	config        Config
	localGrpcPort int

	// that lock to serialize access to memberlist
	listLock sync.RWMutex
	list     *memberlist.Memberlist
	// nonStorageNodes is the set of node names configured to never hold data;
	// used by storageNodes() to filter candidates.
	nonStorageNodes map[string]struct{}
	delegate        delegate
	// maintenanceNodesLock guards config.MaintenanceNodes, which can be
	// mutated at runtime via setMaintenanceModeForNode.
	maintenanceNodesLock sync.RWMutex
}

// Config is the static (file/env supplied) gossip configuration for a node.
type Config struct {
	Hostname                string     `json:"hostname" yaml:"hostname"`
	GossipBindPort          int        `json:"gossipBindPort" yaml:"gossipBindPort"`
	DataBindPort            int        `json:"dataBindPort" yaml:"dataBindPort"`
	Join                    string     `json:"join" yaml:"join"`
	IgnoreStartupSchemaSync bool       `json:"ignoreStartupSchemaSync" yaml:"ignoreStartupSchemaSync"`
	SkipSchemaSyncRepair    bool       `json:"skipSchemaSyncRepair" yaml:"skipSchemaSyncRepair"`
	AuthConfig              AuthConfig `json:"auth" yaml:"auth"`
	AdvertiseAddr           string     `json:"advertiseAddr" yaml:"advertiseAddr"`
	AdvertisePort           int        `json:"advertisePort" yaml:"advertisePort"`
	// FastFailureDetection mostly for testing purpose, it will make memberlist sensitive and detect
	// failures (down nodes) faster.
	FastFailureDetection bool `json:"fastFailureDetection" yaml:"fastFailureDetection"`
	// LocalHost flag enables running a multi-node setup with the same localhost and different ports
	Localhost bool `json:"localhost" yaml:"localhost"`
	// MaintenanceNodes is experimental. You should not use this directly, but should use the
	// public methods on the State struct. This is a list of nodes (by Hostname) that are in
	// maintenance mode (eg return a 418 for all data requests). We use a list here instead of a
	// bool because it allows us to set the same config/env vars on all nodes to put a subset of
	// them in maintenance mode. In addition, we may want to have the cluster nodes not in
	// maintenance mode be aware of which nodes are in maintenance mode in the future.
	MaintenanceNodes []string `json:"maintenanceNodes" yaml:"maintenanceNodes"`
	// RaftBootstrapExpect is used to detect split-brain scenarios and attempt to rejoin the cluster
	// TODO-RAFT-DB-63 : shall be removed once NodeAddress() is moved under raft cluster package
	RaftBootstrapExpect int
}

// AuthConfig holds authentication settings for inter-node communication.
type AuthConfig struct {
	BasicAuth BasicAuth `json:"basic" yaml:"basic"`
}

// BasicAuth is a username/password pair for basic auth between nodes.
type BasicAuth struct {
	Username string `json:"username" yaml:"username"`
	Password string `json:"password" yaml:"password"`
}

// Enabled reports whether basic auth is configured, i.e. either field is set.
func (ba BasicAuth) Enabled() bool {
	return ba.Username != "" || ba.Password != ""
}

// Init creates the memberlist instance for this node and, if a join address is
// configured and resolvable, attempts to join the existing cluster. It returns
// an error if the memberlist cannot be created or an explicitly requested join
// fails; an unresolvable join address is only a warning (first node of a new
// cluster).
func Init(userConfig Config, grpcPort, raftBootstrapExpect int, dataPath string, nonStorageNodes map[string]struct{}, logger logrus.FieldLogger) (_ *State, err error) {
	userConfig.RaftBootstrapExpect = raftBootstrapExpect
	cfg := memberlist.DefaultLANConfig()
	cfg.LogOutput = newLogParser(logger)
	cfg.Name = userConfig.Hostname
	state := State{
		config:          userConfig,
		localGrpcPort:   grpcPort,
		nonStorageNodes: nonStorageNodes,
		delegate: delegate{
			Name:     cfg.Name,
			dataPath: dataPath,
			log:      logger,
			// metadata is gossiped to peers so they can derive our REST/gRPC
			// ports (see dataPort/grpcPort).
			metadata: NodeMetadata{
				RestPort: userConfig.DataBindPort,
				GrpcPort: grpcPort,
			},
		},
	}

	// NOTE(review): a delegate init failure is only logged, not returned —
	// presumably best-effort by design (peers then fall back to default
	// ports); confirm. Also the action string looks like a typo for
	// "delegate_init", but it is runtime log output, so left untouched here.
	if err := state.delegate.init(diskSpace); err != nil {
		logger.WithField("action", "init_state.delete_init").WithError(err).
			Error("delegate init failed")
	}
	cfg.Delegate = &state.delegate
	cfg.Events = events{&state.delegate}
	if userConfig.GossipBindPort != 0 {
		cfg.BindPort = userConfig.GossipBindPort
	}

	if userConfig.AdvertiseAddr != "" {
		cfg.AdvertiseAddr = userConfig.AdvertiseAddr
	}

	if userConfig.AdvertisePort != 0 {
		cfg.AdvertisePort = userConfig.AdvertisePort
	}

	if userConfig.FastFailureDetection {
		// lowest suspicion multiplier => fastest dead-node detection
		cfg.SuspicionMult = 1
	}

	if state.list, err = memberlist.Create(cfg); err != nil {
		logger.WithFields(logrus.Fields{
			"action":    "memberlist_init",
			"hostname":  userConfig.Hostname,
			"bind_port": userConfig.GossipBindPort,
		}).WithError(err).Error("memberlist not created")
		return nil, errors.Wrap(err, "create member list")
	}
	var joinAddr []string
	if userConfig.Join != "" {
		joinAddr = strings.Split(userConfig.Join, ",")
	}

	if len(joinAddr) > 0 {
		// only probe DNS for the first join target; if it does not resolve we
		// assume this is the first node of a fresh cluster and merely warn
		_, err := net.LookupIP(strings.Split(joinAddr[0], ":")[0])
		if err != nil {
			// NOTE(review): the concatenated warn message is missing a space
			// between "fine" and "if"; runtime string, left unchanged here.
			logger.WithFields(logrus.Fields{
				"action":          "cluster_attempt_join",
				"remote_hostname": joinAddr[0],
			}).WithError(err).Warn(
				"specified hostname to join cluster cannot be resolved. This is fine" +
					"if this is the first node of a new cluster, but problematic otherwise.")
		} else {
			_, err := state.list.Join(joinAddr)
			if err != nil {
				logger.WithFields(logrus.Fields{
					"action":          "memberlist_init",
					"remote_hostname": joinAddr,
				}).WithError(err).Error("memberlist join not successful")
				return nil, errors.Wrap(err, "join cluster")
			}
		}
	}

	return &state, nil
}

// Hostnames returns "addr:dataPort" for all live members except the local
// node. Use AllHostnames to also include the local node.
+func (s *State) Hostnames() []string { + s.listLock.RLock() + defer s.listLock.RUnlock() + + mem := s.list.Members() + out := make([]string, len(mem)) + + i := 0 + for _, m := range mem { + if m.Name == s.list.LocalNode().Name { + continue + } + + out[i] = fmt.Sprintf("%s:%d", m.Addr.String(), s.dataPort(m)) + i++ + } + + return out[:i] +} + +func nodeMetadata(m *memberlist.Node) (NodeMetadata, error) { + if len(m.Meta) == 0 { + return NodeMetadata{}, errors.New("no metadata available") + } + + var meta NodeMetadata + if err := json.Unmarshal(m.Meta, &meta); err != nil { + return NodeMetadata{}, errors.Wrap(err, "unmarshal node metadata") + } + + return meta, nil +} + +func (s *State) dataPort(m *memberlist.Node) int { + meta, err := nodeMetadata(m) + if err != nil { + s.delegate.log.WithFields(logrus.Fields{ + "action": "data_port_fallback", + "node": m.Name, + }).WithError(err).Debug("unable to get node metadata, falling back to default data port") + + return int(m.Port) + 1 // the convention that it's 1 higher than the gossip port + } + + return meta.RestPort +} + +func (s *State) grpcPort(m *memberlist.Node) int { + meta, err := nodeMetadata(m) + if err != nil { + s.delegate.log.WithFields(logrus.Fields{ + "action": "grpc_port_fallback", + "node": m.Name, + }).WithError(err).Debug("unable to get node metadata, falling back to default gRPC port") + + return s.localGrpcPort // fallback to default gRPC port + } + + return meta.GrpcPort +} + +// AllHostnames for live members, including self. +func (s *State) AllHostnames() []string { + s.listLock.RLock() + defer s.listLock.RUnlock() + + if s.list == nil { + return []string{} + } + + mem := s.list.Members() + out := make([]string, len(mem)) + + for i, m := range mem { + out[i] = fmt.Sprintf("%s:%d", m.Addr.String(), s.dataPort(m)) + } + + return out +} + +// All node names (not their hostnames!) for live members, including self. 
func (s *State) AllNames() []string {
	s.listLock.RLock()
	defer s.listLock.RUnlock()

	mem := s.list.Members()
	out := make([]string, len(mem))

	for i, m := range mem {
		out[i] = m.Name
	}

	return out
}

// storageNodes returns all live node names except the configured non-storage
// nodes. When no non-storage nodes are configured this is simply AllNames.
func (s *State) storageNodes() []string {
	if len(s.nonStorageNodes) == 0 {
		return s.AllNames()
	}

	s.listLock.RLock()
	defer s.listLock.RUnlock()

	members := s.list.Members()
	out := make([]string, len(members))
	n := 0
	for _, m := range members {
		name := m.Name
		if _, ok := s.nonStorageNodes[name]; !ok {
			out[n] = m.Name
			n++
		}
	}

	return out[:n]
}

// StorageCandidates returns list of storage nodes (names)
// sorted by the free amount of disk space in descending order
func (s *State) StorageCandidates() []string {
	return s.delegate.sortCandidates(s.storageNodes())
}

// NonStorageNodes return nodes from member list which
// they are configured not to be voter only
func (s *State) NonStorageNodes() []string {
	nonStorage := []string{}
	for name := range s.nonStorageNodes {
		nonStorage = append(nonStorage, name)
	}

	return nonStorage
}

// SortCandidates Sort passed nodes names by the
// free amount of disk space in descending order
func (s *State) SortCandidates(nodes []string) []string {
	return s.delegate.sortCandidates(nodes)
}

// NodeCount returns the number of live members, including self.
+func (s *State) NodeCount() int { + s.listLock.RLock() + defer s.listLock.RUnlock() + + return s.list.NumMembers() +} + +// LocalName() return local node name +func (s *State) LocalName() string { + s.listLock.RLock() + defer s.listLock.RUnlock() + + return s.list.LocalNode().Name +} + +func (s *State) ClusterHealthScore() int { + s.listLock.RLock() + defer s.listLock.RUnlock() + + return s.list.GetHealthScore() +} + +func (s *State) NodeHostname(nodeName string) (string, bool) { + s.listLock.RLock() + defer s.listLock.RUnlock() + + for _, mem := range s.list.Members() { + if mem.Name == nodeName { + return fmt.Sprintf("%s:%d", mem.Addr.String(), s.dataPort(mem)), true + } + } + + return "", false +} + +// NodeAddress is used to resolve the node name into an ip address without the port +// TODO-RAFT-DB-63 : shall be replaced by Members() which returns members in the list +func (s *State) NodeAddress(id string) string { + s.listLock.RLock() + defer s.listLock.RUnlock() + + // network interruption detection which can cause a single node to be isolated from the cluster (split brain) + nodeCount := s.list.NumMembers() + var joinAddr []string + if s.config.Join != "" { + joinAddr = strings.Split(s.config.Join, ",") + } + if nodeCount == 1 && len(joinAddr) > 0 && s.config.RaftBootstrapExpect > 1 { + s.delegate.log.WithFields(logrus.Fields{ + "action": "memberlist_rejoin", + "node_count": nodeCount, + }).Warn("detected single node split-brain, attempting to rejoin memberlist cluster") + // Only attempt rejoin if we're supposed to be part of a larger cluster + _, err := s.list.Join(joinAddr) + if err != nil { + s.delegate.log.WithFields(logrus.Fields{ + "action": "memberlist_rejoin", + "remote_hostname": joinAddr, + }).WithError(err).Error("memberlist rejoin not successful") + } else { + s.delegate.log.WithFields(logrus.Fields{ + "action": "memberlist_rejoin", + "node_count": s.list.NumMembers(), + }).Info("Successfully rejoined the memberlist cluster") + } + } + + for _, 
mem := range s.list.Members() { + if mem.Name == id { + return mem.Addr.String() + } + } + return "" +} + +func (s *State) NodeGRPCPort(nodeID string) (int, error) { + for _, mem := range s.list.Members() { + if mem.Name == nodeID { + return s.grpcPort(mem), nil + } + } + return 0, fmt.Errorf("node not found: %s", nodeID) +} + +func (s *State) SchemaSyncIgnored() bool { + return s.config.IgnoreStartupSchemaSync +} + +func (s *State) SkipSchemaRepair() bool { + return s.config.SkipSchemaSyncRepair +} + +func (s *State) NodeInfo(node string) (NodeInfo, bool) { + return s.delegate.get(node) +} + +// MaintenanceModeEnabledForLocalhost is experimental, may be removed/changed. It returns true if this node is in +// maintenance mode (which means it should return an error for all data requests). +func (s *State) MaintenanceModeEnabledForLocalhost() bool { + return s.nodeInMaintenanceMode(s.config.Hostname) +} + +// SetMaintenanceModeForLocalhost is experimental, may be removed/changed. Enables/disables maintenance +// mode for this node. +func (s *State) SetMaintenanceModeForLocalhost(enabled bool) { + s.setMaintenanceModeForNode(s.config.Hostname, enabled) +} + +func (s *State) setMaintenanceModeForNode(node string, enabled bool) { + s.maintenanceNodesLock.Lock() + defer s.maintenanceNodesLock.Unlock() + + if s.config.MaintenanceNodes == nil { + s.config.MaintenanceNodes = []string{} + } + if !enabled { + // we're disabling maintenance mode, remove the node from the list + for i, enabledNode := range s.config.MaintenanceNodes { + if enabledNode == node { + s.config.MaintenanceNodes = append(s.config.MaintenanceNodes[:i], s.config.MaintenanceNodes[i+1:]...) 
+ } + } + return + } + if !slices.Contains(s.config.MaintenanceNodes, node) { + // we're enabling maintenance mode, add the node to the list + s.config.MaintenanceNodes = append(s.config.MaintenanceNodes, node) + return + } +} + +func (s *State) nodeInMaintenanceMode(node string) bool { + s.maintenanceNodesLock.RLock() + defer s.maintenanceNodesLock.RUnlock() + + return slices.Contains(s.config.MaintenanceNodes, node) +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/cluster/transactions_broadcast.go b/platform/dbops/binaries/weaviate-src/usecases/cluster/transactions_broadcast.go new file mode 100644 index 0000000000000000000000000000000000000000..06ff59e56b4b073cfb79ec815f69cbb7746b4fc6 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/cluster/transactions_broadcast.go @@ -0,0 +1,184 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package cluster + +import ( + "context" + "fmt" + "time" + + "github.com/sirupsen/logrus" + enterrors "github.com/weaviate/weaviate/entities/errors" + + "github.com/pkg/errors" +) + +type TxBroadcaster struct { + state MemberLister + client Client + consensusFn ConsensusFn + ideal *IdealClusterState + logger logrus.FieldLogger +} + +// The Broadcaster is the link between the the current node and all other nodes +// during a tx operation. This makes it a natural place to inject a consensus +// function for read transactions. How consensus is reached is completely opaque +// to the broadcaster and can be controlled through custom business logic. 
+type ConsensusFn func(ctx context.Context, + in []*Transaction) (*Transaction, error) + +type Client interface { + OpenTransaction(ctx context.Context, host string, tx *Transaction) error + AbortTransaction(ctx context.Context, host string, tx *Transaction) error + CommitTransaction(ctx context.Context, host string, tx *Transaction) error +} + +type MemberLister interface { + AllNames() []string + Hostnames() []string +} + +func NewTxBroadcaster(state MemberLister, client Client, logger logrus.FieldLogger) *TxBroadcaster { + ideal := NewIdealClusterState(state, logger) + return &TxBroadcaster{ + state: state, + client: client, + ideal: ideal, + logger: logger, + } +} + +func (t *TxBroadcaster) SetConsensusFunction(fn ConsensusFn) { + t.consensusFn = fn +} + +func (t *TxBroadcaster) BroadcastTransaction(rootCtx context.Context, tx *Transaction) error { + if !tx.TolerateNodeFailures { + if err := t.ideal.Validate(); err != nil { + return fmt.Errorf("tx does not tolerate node failures: %w", err) + } + } + + hosts := t.state.Hostnames() + resTx := make([]*Transaction, len(hosts)) + eg := enterrors.NewErrorGroupWrapper(t.logger) + for i, host := range hosts { + i := i // https://golang.org/doc/faq#closures_and_goroutines + host := host // https://golang.org/doc/faq#closures_and_goroutines + + eg.Go(func() error { + // make sure we don't block forever if the caller passes in an unlimited + // context. If another node does not respond within the timeout, consider + // the tx open attempt failed. 
+ ctx, cancel := context.WithTimeout(rootCtx, 30*time.Second) + defer cancel() + + t.logger.WithFields(logrus.Fields{ + "action": "broadcast_transaction", + "duration": 30 * time.Second, + }).Debug("context.WithTimeout") + + // the client call can mutate the tx, so we need to work with copies to + // prevent a race and to be able to keep all individual results, so they + // can be passed to the consensus fn + resTx[i] = copyTx(tx) + if err := t.client.OpenTransaction(ctx, host, resTx[i]); err != nil { + return errors.Wrapf(err, "host %q", host) + } + + return nil + }, host) + } + + err := eg.Wait() + if err != nil { + return err + } + + if t.consensusFn != nil { + merged, err := t.consensusFn(rootCtx, resTx) + if err != nil { + return fmt.Errorf("try to reach consenus: %w", err) + } + + if merged != nil { + tx.Payload = merged.Payload + } + } + + return nil +} + +func (t *TxBroadcaster) BroadcastAbortTransaction(rootCtx context.Context, tx *Transaction) error { + eg := enterrors.NewErrorGroupWrapper(t.logger) + for _, host := range t.state.Hostnames() { + host := host // https://golang.org/doc/faq#closures_and_goroutines + eg.Go(func() error { + // make sure we don't block forever if the caller passes in an unlimited + // context. If another node does not respond within the timeout, consider + // the tx abort attempt failed. 
+ ctx, cancel := context.WithTimeout(rootCtx, 30*time.Second) + defer cancel() + + t.logger.WithFields(logrus.Fields{ + "action": "broadcast_abort_transaction", + "duration": 30 * time.Second, + }).Debug("context.WithTimeout") + + if err := t.client.AbortTransaction(ctx, host, tx); err != nil { + return errors.Wrapf(err, "host %q", host) + } + + return nil + }, host) + } + + return eg.Wait() +} + +func (t *TxBroadcaster) BroadcastCommitTransaction(rootCtx context.Context, tx *Transaction) error { + if !tx.TolerateNodeFailures { + if err := t.ideal.Validate(); err != nil { + return fmt.Errorf("tx does not tolerate node failures: %w", err) + } + } + eg := enterrors.NewErrorGroupWrapper(t.logger) + for _, host := range t.state.Hostnames() { + // make sure we don't block forever if the caller passes in an unlimited + // context. If another node does not respond within the timeout, consider + // the tx commit attempt failed. + ctx, cancel := context.WithTimeout(rootCtx, 30*time.Second) + defer cancel() + + t.logger.WithFields(logrus.Fields{ + "action": "broadcast_commit_transaction", + "duration": 30 * time.Second, + }).Debug("context.WithTimeout") + + host := host // https://golang.org/doc/faq#closures_and_goroutines + eg.Go(func() error { + if err := t.client.CommitTransaction(ctx, host, tx); err != nil { + return errors.Wrapf(err, "host %q", host) + } + + return nil + }, host) + } + + return eg.Wait() +} + +func copyTx(in *Transaction) *Transaction { + out := *in + return &out +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/cluster/transactions_broadcast_test.go b/platform/dbops/binaries/weaviate-src/usecases/cluster/transactions_broadcast_test.go new file mode 100644 index 0000000000000000000000000000000000000000..3a936879e0d2083f5e25b45eedccafde24c204ba --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/cluster/transactions_broadcast_test.go @@ -0,0 +1,228 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ 
 _` | __/ _ \
//  \ V  V /  __/ (_| |\ V /| | (_| | ||  __/
//   \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
//
//  Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
//
//  CONTACT: hello@weaviate.io
//

package cluster

import (
	"context"
	"fmt"
	"strings"
	"sync"
	"testing"
	"time"

	"github.com/sirupsen/logrus/hooks/test"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// shared null logger so test output stays quiet
var logger, _ = test.NewNullLogger()

// happy path: an open is broadcast to every host exactly once
func TestBroadcastOpenTransaction(t *testing.T) {
	client := &fakeClient{}
	state := &fakeState{hosts: []string{"host1", "host2", "host3"}}

	bc := NewTxBroadcaster(state, client, logger)

	tx := &Transaction{ID: "foo"}

	err := bc.BroadcastTransaction(context.Background(), tx)
	require.Nil(t, err)

	assert.ElementsMatch(t, []string{"host1", "host2", "host3"}, client.openCalled)
}

// verifies that the consensus fn receives every host's individual response
// and that its merged payload is written back into the original tx
func TestBroadcastOpenTransactionWithReturnPayload(t *testing.T) {
	client := &fakeClient{}
	state := &fakeState{hosts: []string{"host1", "host2", "host3"}}

	bc := NewTxBroadcaster(state, client, logger)
	bc.SetConsensusFunction(func(ctx context.Context,
		in []*Transaction,
	) (*Transaction, error) {
		// instead of actually reaching a consensus this test mock simply merges
		// all the individual results. For testing purposes this is even better
		// because now we can be sure that every element was considered.
		merged := ""
		for _, tx := range in {
			if len(merged) > 0 {
				merged += ","
			}
			merged += tx.Payload.(string)
		}

		return &Transaction{
			Payload: merged,
		}, nil
	})

	tx := &Transaction{ID: "foo"}

	err := bc.BroadcastTransaction(context.Background(), tx)
	require.Nil(t, err)

	assert.ElementsMatch(t, []string{"host1", "host2", "host3"}, client.openCalled)

	results := strings.Split(tx.Payload.(string), ",")
	assert.ElementsMatch(t, []string{
		"hello_from_host1",
		"hello_from_host2",
		"hello_from_host3",
	}, results)
}

// a failure-intolerant tx must be rejected outright once the live member set
// has shrunk below the ideal cluster state
func TestBroadcastOpenTransactionAfterNodeHasDied(t *testing.T) {
	client := &fakeClient{}
	state := &fakeState{hosts: []string{"host1", "host2", "host3"}}
	bc := NewTxBroadcaster(state, client, logger)

	waitUntilIdealStateHasReached(t, bc, 3, 4*time.Second)

	// host2 is dead
	state.updateHosts([]string{"host1", "host3"})

	tx := &Transaction{ID: "foo"}

	err := bc.BroadcastTransaction(context.Background(), tx)
	require.NotNil(t, err)
	assert.Contains(t, err.Error(), "host2")

	// no node should have received an open
	assert.ElementsMatch(t, []string{}, client.openCalled)
}

// waitUntilIdealStateHasReached polls until the broadcaster's ideal cluster
// state contains exactly goal members, failing the test after max.
func waitUntilIdealStateHasReached(t *testing.T, bc *TxBroadcaster, goal int,
	max time.Duration,
) {
	ctx, cancel := context.WithTimeout(context.Background(), max)
	defer cancel()

	interval := time.NewTicker(250 * time.Millisecond)
	defer interval.Stop()

	for {
		select {
		case <-ctx.Done():
			t.Error(fmt.Errorf("waiting to reach state goal %d: %w", goal, ctx.Err()))
			return
		case <-interval.C:
			if len(bc.ideal.Members()) == goal {
				return
			}
		}
	}
}

// happy path: an abort is broadcast to every host exactly once
func TestBroadcastAbortTransaction(t *testing.T) {
	client := &fakeClient{}
	state := &fakeState{hosts: []string{"host1", "host2", "host3"}}

	bc := NewTxBroadcaster(state, client, logger)

	tx := &Transaction{ID: "foo"}

	err := bc.BroadcastAbortTransaction(context.Background(), tx)
	require.Nil(t, err)

	assert.ElementsMatch(t, []string{"host1", "host2", "host3"}, client.abortCalled)
}

// happy path: a commit is broadcast to every host exactly once
func TestBroadcastCommitTransaction(t *testing.T) {
	client := &fakeClient{}
	state := &fakeState{hosts: []string{"host1", "host2", "host3"}}

	bc := NewTxBroadcaster(state, client, logger)

	tx := &Transaction{ID: "foo"}

	err := bc.BroadcastCommitTransaction(context.Background(), tx)
	require.Nil(t, err)

	assert.ElementsMatch(t, []string{"host1", "host2", "host3"}, client.commitCalled)
}

// a failure-intolerant commit must be rejected once a member has died
func TestBroadcastCommitTransactionAfterNodeHasDied(t *testing.T) {
	client := &fakeClient{}
	state := &fakeState{hosts: []string{"host1", "host2", "host3"}}
	bc := NewTxBroadcaster(state, client, logger)

	waitUntilIdealStateHasReached(t, bc, 3, 4*time.Second)

	state.updateHosts([]string{"host1", "host3"})

	tx := &Transaction{ID: "foo"}

	err := bc.BroadcastCommitTransaction(context.Background(), tx)
	require.NotNil(t, err)
	assert.Contains(t, err.Error(), "host2")

	// no node should have received the commit
	assert.ElementsMatch(t, []string{}, client.commitCalled)
}

// fakeState is a mutex-guarded MemberLister test double.
type fakeState struct {
	hosts []string
	sync.Mutex
}

func (f *fakeState) updateHosts(newHosts []string) {
	f.Lock()
	defer f.Unlock()

	f.hosts = newHosts
}

func (f *fakeState) Hostnames() []string {
	f.Lock()
	defer f.Unlock()

	return f.hosts
}

func (f *fakeState) AllNames() []string {
	f.Lock()
	defer f.Unlock()

	return f.hosts
}

// fakeClient records which hosts received which tx RPCs; mutex-guarded
// because the broadcaster calls it from concurrent goroutines.
type fakeClient struct {
	sync.Mutex
	openCalled   []string
	abortCalled  []string
	commitCalled []string
}

func (f *fakeClient) OpenTransaction(ctx context.Context, host string, tx *Transaction) error {
	f.Lock()
	defer f.Unlock()

	f.openCalled = append(f.openCalled, host)
	tx.Payload = "hello_from_" + host
	return nil
}

func (f *fakeClient) AbortTransaction(ctx context.Context, host string, tx *Transaction) error {
	f.Lock()
	defer f.Unlock()

	f.abortCalled = append(f.abortCalled, host)
	return nil
}

func (f *fakeClient) CommitTransaction(ctx context.Context, host string, tx *Transaction) error {
	f.Lock()
	defer f.Unlock()

	f.commitCalled = append(f.commitCalled, host)
	return nil
}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/cluster/transactions_read.go b/platform/dbops/binaries/weaviate-src/usecases/cluster/transactions_read.go
new file mode 100644
index 0000000000000000000000000000000000000000..1e6cd6ba19779dd9a4af7c1255f04a9015317cdd
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/cluster/transactions_read.go
@@ -0,0 +1,69 @@
//                           _       _
// __      _____  __ ___   ___  __ _| |_ ___
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
//  \ V  V /  __/ (_| |\ V /| | (_| | ||  __/
//   \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
//
//  Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
//
//  CONTACT: hello@weaviate.io
//

package cluster

import (
	"context"
	"time"

	"github.com/pkg/errors"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/sirupsen/logrus"
	"github.com/weaviate/weaviate/usecases/monitoring"
)

// CloseReadTransaction commits (and on failure aborts) a read-only
// transaction on all peers and always clears the local transaction state.
func (c *TxManager) CloseReadTransaction(ctx context.Context,
	tx *Transaction,
) error {
	c.Lock()
	if c.currentTransaction == nil || c.currentTransaction.ID != tx.ID {
		c.Unlock()
		return ErrInvalidTransaction
	}

	// NOTE(review): the lock is released before the broadcast below and
	// re-taken in the deferred cleanup — presumably to avoid holding it across
	// network calls; a competing tx could slip in between. Confirm intended.
	c.Unlock()
	c.slowLog.Update("close_read_started")

	// now that we know we are dealing with a valid transaction: no matter the
	// outcome, after this call, we should not have a local transaction anymore
	defer func() {
		c.Lock()
		c.currentTransaction = nil
		monitoring.GetMetrics().SchemaTxClosed.With(prometheus.Labels{
			"ownership": "coordinator",
			"status":    "close_read",
		}).Inc()
		took := time.Since(c.currentTransactionBegin)
		monitoring.GetMetrics().SchemaTxDuration.With(prometheus.Labels{
			"ownership": "coordinator",
			"status":    "close_read",
		}).Observe(took.Seconds())
		c.slowLog.Close("closed_read")
		c.Unlock()
	}()

	if err := c.remote.BroadcastCommitTransaction(ctx,
tx); err != nil {
		// we could not open the transaction on every node, therefore we need to
		// abort it everywhere.

		if err := c.remote.BroadcastAbortTransaction(ctx, tx); err != nil {
			c.logger.WithFields(logrus.Fields{
				"action": "broadcast_abort_read_transaction",
				"id":     tx.ID,
			}).WithError(err).Error("broadcast tx (read-only) abort failed")
		}

		return errors.Wrap(err, "broadcast commit read transaction")
	}

	return nil
}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/cluster/transactions_slowlog.go b/platform/dbops/binaries/weaviate-src/usecases/cluster/transactions_slowlog.go
new file mode 100644
index 0000000000000000000000000000000000000000..d8f4d6ae435086b6e820bd74ddae378f0a8a9bc9
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/cluster/transactions_slowlog.go
@@ -0,0 +1,161 @@
//                           _       _
// __      _____  __ ___   ___  __ _| |_ ___
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
//  \ V  V /  __/ (_| |\ V /| | (_| | ||  __/
//   \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
//
//  Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
//
//  CONTACT: hello@weaviate.io
//

package cluster

import (
	"os"
	"strconv"
	"sync"
	"time"

	"github.com/sirupsen/logrus"
	"github.com/weaviate/weaviate/entities/errors"
)

// newTxSlowLog builds a slow-transaction logger; both thresholds default to
// fixed values and can be overridden (in whole seconds) via the
// TX_SLOW_LOG_AGE_THRESHOLD_SECONDS and TX_SLOW_LOG_CHANGE_THRESHOLD_SECONDS
// env vars. Unparsable values silently keep the defaults.
func newTxSlowLog(logger logrus.FieldLogger) *txSlowLog {
	ageThreshold := 5 * time.Second
	changeThreshold := 1 * time.Second

	if age := os.Getenv("TX_SLOW_LOG_AGE_THRESHOLD_SECONDS"); age != "" {
		ageParsed, err := strconv.Atoi(age)
		if err == nil {
			ageThreshold = time.Duration(ageParsed) * time.Second
		}
	}

	if change := os.Getenv("TX_SLOW_LOG_CHANGE_THRESHOLD_SECONDS"); change != "" {
		changeParsed, err := strconv.Atoi(change)
		if err == nil {
			changeThreshold = time.Duration(changeParsed) * time.Second
		}
	}

	return &txSlowLog{
		logger:          logger,
		ageThreshold:    ageThreshold,
		changeThreshold: changeThreshold,
	}
}

// txSlowLog is meant as a temporary debugging tool for the v1 schema. When the
// v2 schema is ready, this can be thrown away.
// It tracks at most one transaction at a time; all fields are guarded by the
// embedded mutex.
type txSlowLog struct {
	sync.Mutex
	logger logrus.FieldLogger

	// tx-specific
	id           string
	status       string
	begin        time.Time
	lastChange   time.Time
	writable     bool
	coordinating bool
	txPresent    bool
	logged       bool

	// config
	ageThreshold    time.Duration
	changeThreshold time.Duration
}

// Start records the beginning of a new transaction, resetting all per-tx state.
func (txsl *txSlowLog) Start(id string, coordinating bool,
	writable bool,
) {
	txsl.Lock()
	defer txsl.Unlock()

	txsl.id = id
	txsl.status = "opened"
	now := time.Now()
	txsl.begin = now
	txsl.lastChange = now
	txsl.coordinating = coordinating
	txsl.writable = writable
	txsl.txPresent = true
	txsl.logged = false
}

// Update records a status change of the in-flight transaction.
func (txsl *txSlowLog) Update(status string) {
	txsl.Lock()
	defer txsl.Unlock()

	txsl.status = status
	txsl.lastChange = time.Now()
}

// Close marks the transaction finished and emits a final log line if it was
// slow overall or had already been reported as slow.
func (txsl *txSlowLog) Close(status string) {
	txsl.Lock()
	defer txsl.Unlock()

	txsl.status = status
	txsl.lastChange = time.Now()

	// there are two situations where we need to log the end of the transaction:
	//
	// 1. if it is slower than the age threshold
	//
	// 2. if we have logged it before (e.g. because it was in a specific state
	//    longer than expected)

	if txsl.lastChange.Sub(txsl.begin) >= txsl.ageThreshold || txsl.logged {
		txsl.logger.WithFields(logrus.Fields{
			"action":         "transaction_slow_log",
			"event":          "tx_closed",
			"status":         txsl.status,
			"total_duration": txsl.lastChange.Sub(txsl.begin),
			"tx_id":          txsl.id,
			"coordinating":   txsl.coordinating,
			"writable":       txsl.writable,
		}).Infof("slow transaction completed")
	}

	// reset for next usage
	txsl.txPresent = false
}

// StartWatching spawns the background goroutine that periodically reports
// in-flight slow transactions.
// NOTE(review): time.Tick leaks its ticker and the loop has no shutdown
// signal, so this goroutine runs for the process lifetime — presumably
// acceptable for this self-described temporary debugging tool; confirm.
func (txsl *txSlowLog) StartWatching() {
	t := time.Tick(500 * time.Millisecond)
	errors.GoWrapper(func() {
		for {
			<-t
			txsl.log()
		}
	}, txsl.logger)
}

// log emits a progress line for the current transaction when it is older than
// ageThreshold or stuck in one state longer than changeThreshold.
func (txsl *txSlowLog) log() {
	txsl.Lock()
	defer txsl.Unlock()

	if !txsl.txPresent {
		return
	}

	now := time.Now()
	age := now.Sub(txsl.begin)
	changed := now.Sub(txsl.lastChange)

	if age >= txsl.ageThreshold || changed >= txsl.changeThreshold {
		txsl.logger.WithFields(logrus.Fields{
			"action":            "transaction_slow_log",
			"event":             "tx_in_progress",
			"status":            txsl.status,
			"total_duration":    age,
			"since_last_change": changed,
			"tx_id":             txsl.id,
			"coordinating":      txsl.coordinating,
			"writable":          txsl.writable,
		}).Infof("slow transaction in progress")

		// remember that this tx was reported, so Close also logs its end
		txsl.logged = true
	}
}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/cluster/transactions_test.go b/platform/dbops/binaries/weaviate-src/usecases/cluster/transactions_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..0dd0ea2c34dbe24be90dace7e4aa15551e55e987
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/cluster/transactions_test.go
@@ -0,0 +1,570 @@
//                           _       _
// __      _____  __ ___   ___  __ _| |_ ___
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
//  \ V  V /  __/ (_| |\ V /| | (_| | ||  __/
//   \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
//
//  Copyright © 2016 - 2025 Weaviate B.V.
All rights reserved.
//
//  CONTACT: hello@weaviate.io
//

package cluster

import (
	"context"
	"fmt"
	"sync"
	"testing"
	"time"

	"github.com/sirupsen/logrus/hooks/test"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// begin + commit on a single node succeeds
func TestSuccessfulOutgoingWriteTransaction(t *testing.T) {
	payload := "my-payload"
	trType := TransactionType("my-type")
	ctx := context.Background()

	man := newTestTxManager()

	tx, err := man.BeginTransaction(ctx, trType, payload, 0)
	require.Nil(t, err)

	err = man.CommitWriteTransaction(ctx, tx)
	require.Nil(t, err)
}

// a second concurrent begin is rejected; the first tx stays committable
func TestTryingToOpenTwoTransactions(t *testing.T) {
	payload := "my-payload"
	trType := TransactionType("my-type")
	ctx := context.Background()

	man := newTestTxManager()

	tx1, err := man.BeginTransaction(ctx, trType, payload, 0)
	require.Nil(t, err)

	tx2, err := man.BeginTransaction(ctx, trType, payload, 0)
	assert.Nil(t, tx2)
	require.NotNil(t, err)
	assert.Equal(t, "concurrent transaction", err.Error())

	err = man.CommitWriteTransaction(ctx, tx1)
	assert.Nil(t, err, "original transaction can still be committed")
}

// committing a tx with an unknown ID fails without disturbing the open tx
func TestTryingToCommitInvalidTransaction(t *testing.T) {
	payload := "my-payload"
	trType := TransactionType("my-type")
	ctx := context.Background()

	man := newTestTxManager()

	tx1, err := man.BeginTransaction(ctx, trType, payload, 0)
	require.Nil(t, err)

	invalidTx := &Transaction{ID: "invalid"}

	err = man.CommitWriteTransaction(ctx, invalidTx)
	require.NotNil(t, err)
	assert.Equal(t, "invalid transaction", err.Error())

	err = man.CommitWriteTransaction(ctx, tx1)
	assert.Nil(t, err, "original transaction can still be committed")
}

// a tx whose TTL expired cannot be committed, and a new tx can be opened
func TestTryingToCommitTransactionPastTTL(t *testing.T) {
	payload := "my-payload"
	trType := TransactionType("my-type")
	ctx := context.Background()

	man := newTestTxManager()

	tx1, err := man.BeginTransaction(ctx, trType, payload, time.Microsecond)
	require.Nil(t, err)

	expiredTx := &Transaction{ID: tx1.ID}

	// give the cancel handler some time to run
	time.Sleep(50 * time.Millisecond)

	err = man.CommitWriteTransaction(ctx, expiredTx)
	require.NotNil(t, err)
	assert.Contains(t, err.Error(), "transaction TTL")

	// make sure it is possible to open future transactions
	_, err = man.BeginTransaction(context.Background(), trType, payload, 0)
	require.Nil(t, err)
}

// same as above, but for a tx that arrived from a remote coordinator
func TestTryingToCommitIncomingTransactionPastTTL(t *testing.T) {
	payload := "my-payload"
	trType := TransactionType("my-type")
	ctx := context.Background()

	man := newTestTxManager()

	dl := time.Now().Add(1 * time.Microsecond)

	tx := &Transaction{
		ID:       "123456",
		Type:     trType,
		Payload:  payload,
		Deadline: dl,
	}

	man.IncomingBeginTransaction(context.Background(), tx)

	// give the cancel handler some time to run
	time.Sleep(50 * time.Millisecond)

	err := man.IncomingCommitTransaction(ctx, tx)
	require.NotNil(t, err)
	assert.Contains(t, err.Error(), "transaction TTL")

	// make sure it is possible to open future transactions
	_, err = man.BeginTransaction(context.Background(), trType, payload, 0)
	require.Nil(t, err)
}

// an expired tx must not block new ones, and referring to it yields a TTL error
func TestLettingATransactionExpire(t *testing.T) {
	payload := "my-payload"
	trType := TransactionType("my-type")
	ctx := context.Background()

	man := newTestTxManager()

	tx1, err := man.BeginTransaction(ctx, trType, payload, time.Microsecond)
	require.Nil(t, err)

	// give the cancel handler some time to run
	time.Sleep(50 * time.Millisecond)

	// try to open a new one
	_, err = man.BeginTransaction(context.Background(), trType, payload, 0)
	require.Nil(t, err)

	// since the old one expired, we now expect a TTL error instead of a
	// concurrent tx error when trying to refer to the old one
	err = man.CommitWriteTransaction(context.Background(), tx1)
	require.NotNil(t, err)
	assert.Contains(t, err.Error(), "transaction TTL")
}

// when a remote rejects the open, the coordinator must abort the tx everywhere
func TestRemoteDoesntAllowOpeningTransaction(t *testing.T) {
	payload := "my-payload"
	trType := TransactionType("my-type")
	ctx := context.Background()
	broadcaster := &fakeBroadcaster{
		openErr: ErrConcurrentTransaction,
	}

	man := newTestTxManagerWithRemote(broadcaster)

	tx1, err := man.BeginTransaction(ctx, trType, payload, 0)
	require.Nil(t, tx1)
	require.NotNil(t, err)
	assert.Contains(t, err.Error(), "open transaction")

	assert.Len(t, broadcaster.abortCalledId, 36, "a valid uuid was aborted")
}

// a failing abort after a rejected open is logged, not returned
func TestRemoteDoesntAllowOpeningTransactionAbortFails(t *testing.T) {
	payload := "my-payload"
	trType := TransactionType("my-type")
	ctx := context.Background()
	broadcaster := &fakeBroadcaster{
		openErr:  ErrConcurrentTransaction,
		abortErr: fmt.Errorf("cannot abort"),
	}

	man, hook := newTestTxManagerWithRemoteLoggerHook(broadcaster)

	tx1, err := man.BeginTransaction(ctx, trType, payload, 0)
	require.Nil(t, tx1)
	require.NotNil(t, err)
	assert.Contains(t, err.Error(), "open transaction")

	assert.Len(t, broadcaster.abortCalledId, 36, "a valid uuid was aborted")

	require.Len(t, hook.Entries, 1)
	assert.Equal(t, "broadcast tx abort failed", hook.Entries[0].Message)
}

// fakeBroadcaster is a Remote test double with scriptable per-call errors;
// it records the ID of any aborted tx.
type fakeBroadcaster struct {
	openErr       error
	commitErr     error
	abortErr      error
	abortCalledId string
}

func (f *fakeBroadcaster) BroadcastTransaction(ctx context.Context,
	tx *Transaction,
) error {
	return f.openErr
}

func (f *fakeBroadcaster) BroadcastAbortTransaction(ctx context.Context,
	tx *Transaction,
) error {
	f.abortCalledId = tx.ID
	return f.abortErr
}

func (f *fakeBroadcaster) BroadcastCommitTransaction(ctx context.Context,
	tx *Transaction,
) error {
	return f.commitErr
}

// a coordinator's committed payload must arrive at the remote participant
func TestSuccessfulDistributedWriteTransaction(t *testing.T) {
	ctx := context.Background()

	var remoteState interface{}
	remote := newTestTxManager()
	remote.SetCommitFn(func(ctx context.Context, tx *Transaction) error {
		remoteState = tx.Payload
		return nil
	})
	local := NewTxManager(&wrapTxManagerAsBroadcaster{remote},
		&fakeTxPersistence{}, remote.logger)
	local.StartAcceptIncoming()

	payload := "my-payload"
	trType := TransactionType("my-type")

	tx, err := local.BeginTransaction(ctx, trType, payload, 0)
	require.Nil(t, err)

	err = local.CommitWriteTransaction(ctx, tx)
	require.Nil(t, err)

	assert.Equal(t, "my-payload", remoteState)
}

// based on https://github.com/weaviate/weaviate/issues/4637
func TestDistributedWriteTransactionWithRemoteCommitFailure(t *testing.T) {
	ctx := context.Background()

	var remoteState interface{}
	remote := newTestTxManager()
	remoteShoudError := true
	remote.SetCommitFn(func(ctx context.Context, tx *Transaction) error {
		if remoteShoudError {
			return fmt.Errorf("could not commit")
		}

		remoteState = tx.Payload
		return nil
	})
	local := NewTxManager(&wrapTxManagerAsBroadcaster{remote},
		&fakeTxPersistence{}, remote.logger)
	local.StartAcceptIncoming()

	payload := "my-payload"
	trType := TransactionType("my-type")

	tx, err := local.BeginTransaction(ctx, trType, payload, 0)
	require.Nil(t, err)

	err = local.CommitWriteTransaction(ctx, tx)
	// expected that the commit fails if a remote node can't commit
	assert.NotNil(t, err)

	remoteShoudError = false

	// now try again and assert that everything works fine Prior to
	// https://github.com/weaviate/weaviate/issues/4637 we would now get
	// concurrent tx errors

	payload = "my-updated-payload"
	newTx, err := local.BeginTransaction(ctx, trType, payload, 0)
	require.Nil(t, err)

	err = local.CommitWriteTransaction(ctx, newTx)
	require.Nil(t, err)

	assert.Equal(t, "my-updated-payload", remoteState)
}

func TestConcurrentDistributedTransaction(t *testing.T) {
	ctx := context.Background()

	var remoteState interface{}
	remote := newTestTxManager()
	remote.SetCommitFn(func(ctx context.Context, tx *Transaction) error {
		remoteState = tx.Payload
return nil + }) + local := NewTxManager(&wrapTxManagerAsBroadcaster{remote}, + &fakeTxPersistence{}, remote.logger) + + payload := "my-payload" + trType := TransactionType("my-type") + + // open a transaction on the remote to simulate a concurrent transaction. + // Since it uses the fakeBroadcaster it does not tell anyone about it, this + // way we can be sure that the reason for failure is actually a concurrent + // transaction on the remote side, not on the local side. Compare this to a + // situation where broadcasting was bi-directional: Then this transaction + // would have been opened successfully and already be replicated to the + // "local" tx manager. So the next call on "local" would also fail, but for + // the wrong reason: It would fail because another transaction is already in + // place. We, however want to simulate a situation where due to network + // delays, etc. both sides try to open a transaction more or less in + // parallel. + _, err := remote.BeginTransaction(ctx, trType, "wrong payload", 0) + require.Nil(t, err) + + tx, err := local.BeginTransaction(ctx, trType, payload, 0) + require.Nil(t, tx) + require.NotNil(t, err) + assert.Contains(t, err.Error(), "concurrent transaction") + + assert.Equal(t, nil, remoteState, "remote state should not have been updated") +} + +// This test simulates three nodes trying to open a tx at basically the same +// time with the simulated network being so slow that other nodes will try to +// open their own transactions before they receive the incoming tx. This is a +// situation where everyone thinks they were the first to open the tx and there +// is no clear winner. All attempts must fail! 
+func TestConcurrentOpenAttemptsOnSlowNetwork(t *testing.T) { + ctx := context.Background() + + broadcaster := &slowMultiBroadcaster{delay: 100 * time.Millisecond} + node1 := newTestTxManagerWithRemote(broadcaster) + node2 := newTestTxManagerWithRemote(broadcaster) + node3 := newTestTxManagerWithRemote(broadcaster) + + broadcaster.nodes = []*TxManager{node1, node2, node3} + + trType := TransactionType("my-type") + + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + defer wg.Done() + _, err := node1.BeginTransaction(ctx, trType, "payload-from-node-1", 0) + assert.NotNil(t, err, "open tx 1 must fail") + }() + + wg.Add(1) + go func() { + defer wg.Done() + _, err := node2.BeginTransaction(ctx, trType, "payload-from-node-2", 0) + assert.NotNil(t, err, "open tx 2 must fail") + }() + + wg.Add(1) + go func() { + defer wg.Done() + _, err := node3.BeginTransaction(ctx, trType, "payload-from-node-3", 0) + assert.NotNil(t, err, "open tx 3 must fail") + }() + + wg.Wait() +} + +type wrapTxManagerAsBroadcaster struct { + txManager *TxManager +} + +func (w *wrapTxManagerAsBroadcaster) BroadcastTransaction(ctx context.Context, + tx *Transaction, +) error { + _, err := w.txManager.IncomingBeginTransaction(ctx, tx) + return err +} + +func (w *wrapTxManagerAsBroadcaster) BroadcastAbortTransaction(ctx context.Context, + tx *Transaction, +) error { + w.txManager.IncomingAbortTransaction(ctx, tx) + return nil +} + +func (w *wrapTxManagerAsBroadcaster) BroadcastCommitTransaction(ctx context.Context, + tx *Transaction, +) error { + return w.txManager.IncomingCommitTransaction(ctx, tx) +} + +type slowMultiBroadcaster struct { + delay time.Duration + nodes []*TxManager +} + +func (b *slowMultiBroadcaster) BroadcastTransaction(ctx context.Context, + tx *Transaction, +) error { + time.Sleep(b.delay) + for _, node := range b.nodes { + if _, err := node.IncomingBeginTransaction(ctx, tx); err != nil { + return err + } + } + return nil +} + +func (b *slowMultiBroadcaster) 
BroadcastAbortTransaction(ctx context.Context, + tx *Transaction, +) error { + time.Sleep(b.delay) + for _, node := range b.nodes { + node.IncomingAbortTransaction(ctx, tx) + } + + return nil +} + +func (b *slowMultiBroadcaster) BroadcastCommitTransaction(ctx context.Context, + tx *Transaction, +) error { + time.Sleep(b.delay) + for _, node := range b.nodes { + if err := node.IncomingCommitTransaction(ctx, tx); err != nil { + return err + } + } + + return nil +} + +func TestSuccessfulDistributedReadTransaction(t *testing.T) { + ctx := context.Background() + payload := "my-payload" + + remote := newTestTxManager() + remote.SetResponseFn(func(ctx context.Context, tx *Transaction) ([]byte, error) { + tx.Payload = payload + return nil, nil + }) + local := NewTxManager(&wrapTxManagerAsBroadcaster{remote}, + &fakeTxPersistence{}, remote.logger) + // TODO local.SetConsensusFn + + trType := TransactionType("my-read-tx") + + tx, err := local.BeginTransaction(ctx, trType, nil, 0) + require.Nil(t, err) + + local.CloseReadTransaction(ctx, tx) + + assert.Equal(t, "my-payload", tx.Payload) +} + +func TestSuccessfulDistributedTransactionSetAllowUnready(t *testing.T) { + ctx := context.Background() + payload := "my-payload" + + types := []TransactionType{"type0", "type1"} + remote := newTestTxManagerAllowUnready(types) + remote.SetResponseFn(func(ctx context.Context, tx *Transaction) ([]byte, error) { + tx.Payload = payload + return nil, nil + }) + local := NewTxManager(&wrapTxManagerAsBroadcaster{remote}, + &fakeTxPersistence{}, remote.logger) + local.SetAllowUnready(types) + + trType := TransactionType("my-read-tx") + + tx, err := local.BeginTransaction(ctx, trType, nil, 0) + require.Nil(t, err) + + local.CloseReadTransaction(ctx, tx) + + assert.ElementsMatch(t, types, remote.allowUnready) + assert.ElementsMatch(t, types, local.allowUnready) + assert.Equal(t, "my-payload", tx.Payload) +} + +func TestTxWithDeadline(t *testing.T) { + t.Run("expired", func(t *testing.T) { + payload 
:= "my-payload" + trType := TransactionType("my-type") + + ctx := context.Background() + + man := newTestTxManager() + + tx, err := man.BeginTransaction(ctx, trType, payload, 1*time.Nanosecond) + require.Nil(t, err) + + ctx, cancel := context.WithDeadline(context.Background(), tx.Deadline) + defer cancel() + + assert.NotNil(t, ctx.Err()) + }) + + t.Run("still valid", func(t *testing.T) { + payload := "my-payload" + trType := TransactionType("my-type") + + ctx := context.Background() + + man := newTestTxManager() + + tx, err := man.BeginTransaction(ctx, trType, payload, 10*time.Second) + require.Nil(t, err) + + ctx, cancel := context.WithDeadline(context.Background(), tx.Deadline) + defer cancel() + + assert.Nil(t, ctx.Err()) + }) +} + +func newTestTxManager() *TxManager { + logger, _ := test.NewNullLogger() + m := NewTxManager(&fakeBroadcaster{}, &fakeTxPersistence{}, logger) + m.StartAcceptIncoming() + return m +} + +func newTestTxManagerWithRemote(remote Remote) *TxManager { + logger, _ := test.NewNullLogger() + m := NewTxManager(remote, &fakeTxPersistence{}, logger) + m.StartAcceptIncoming() + return m +} + +func newTestTxManagerWithRemoteLoggerHook(remote Remote) (*TxManager, *test.Hook) { + logger, hook := test.NewNullLogger() + m := NewTxManager(remote, &fakeTxPersistence{}, logger) + m.StartAcceptIncoming() + return m, hook +} + +func newTestTxManagerAllowUnready(types []TransactionType) *TxManager { + logger, _ := test.NewNullLogger() + m := NewTxManager(&fakeBroadcaster{}, &fakeTxPersistence{}, logger) + m.SetAllowUnready(types) + m.StartAcceptIncoming() + return m +} + +// does nothing as these do not involve crashes +type fakeTxPersistence struct{} + +func (f *fakeTxPersistence) StoreTx(ctx context.Context, + tx *Transaction, +) error { + return nil +} + +func (f *fakeTxPersistence) DeleteTx(ctx context.Context, + txID string, +) error { + return nil +} + +func (f *fakeTxPersistence) IterateAll(ctx context.Context, + cb func(tx *Transaction), +) error { 
+ return nil +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/cluster/transactions_write.go b/platform/dbops/binaries/weaviate-src/usecases/cluster/transactions_write.go new file mode 100644 index 0000000000000000000000000000000000000000..384f657a74fcba2e9a0e252037dda75af89cae93 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/cluster/transactions_write.go @@ -0,0 +1,660 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package cluster + +import ( + "context" + "fmt" + "slices" + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" + enterrors "github.com/weaviate/weaviate/entities/errors" + "github.com/weaviate/weaviate/usecases/monitoring" + + "github.com/google/uuid" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +type TransactionType string + +var ( + ErrConcurrentTransaction = errors.New("concurrent transaction") + ErrInvalidTransaction = errors.New("invalid transaction") + ErrExpiredTransaction = errors.New("transaction TTL expired") + ErrNotReady = errors.New("server is not ready: either starting up or shutting down") +) + +type Remote interface { + BroadcastTransaction(ctx context.Context, tx *Transaction) error + BroadcastAbortTransaction(ctx context.Context, tx *Transaction) error + BroadcastCommitTransaction(ctx context.Context, tx *Transaction) error +} + +type ( + CommitFn func(ctx context.Context, tx *Transaction) error + ResponseFn func(ctx context.Context, tx *Transaction) ([]byte, error) +) + +type TxManager struct { + sync.Mutex + logger logrus.FieldLogger + + currentTransaction *Transaction + currentTransactionContext context.Context + currentTransactionBegin time.Time + clearTransaction func() + + // any time we start working on a commit, we need 
to add to this WaitGroup. + // It will block shutdown until the commit has completed to make sure that we + // can't accidentally shutdown while a tx is committing. + ongoingCommits sync.WaitGroup + + // when a shutdown signal has been received, we will no longer accept any new + // tx's or commits + acceptIncoming bool + + // read transactions that need to run at start can still be served, they have + // no side-effects on the node that accepts them. + // + // If we disallowed them completely, then two unready nodes would be in a + // deadlock as they each require information from the other(s) who can't + // answer because they're not ready. + allowUnready []TransactionType + + remote Remote + commitFn CommitFn + responseFn ResponseFn + + // keep the ids of expired transactions around. This way, we can return a + // nicer error message to the user. Instead of just an "invalid transaction" + // which no longer exists, they will get an explicit error message mentioning + // the timeout. + expiredTxIDs []string + + persistence Persistence + + slowLog *txSlowLog +} + +func newDummyCommitResponseFn() func(ctx context.Context, tx *Transaction) error { + return func(ctx context.Context, tx *Transaction) error { + return nil + } +} + +func newDummyResponseFn() func(ctx context.Context, tx *Transaction) ([]byte, error) { + return func(ctx context.Context, tx *Transaction) ([]byte, error) { + return nil, nil + } +} + +func NewTxManager(remote Remote, persistence Persistence, + logger logrus.FieldLogger, +) *TxManager { + txm := &TxManager{ + remote: remote, + + // by setting dummy fns that do nothing on default it is possible to run + // the tx manager with only one set of functions. For example, if the + // specific Tx is only ever used for broadcasting writes, there is no need + // to set a responseFn. However, if the fn was nil, we'd panic. Thus a + // dummy function is a reasonable default - and much cleaner than a + // nil-check on every call. 
+ commitFn: newDummyCommitResponseFn(), + responseFn: newDummyResponseFn(), + logger: logger, + persistence: persistence, + + // ready to serve incoming requests + acceptIncoming: false, + slowLog: newTxSlowLog(logger), + } + + txm.slowLog.StartWatching() + return txm +} + +func (c *TxManager) StartAcceptIncoming() { + c.Lock() + defer c.Unlock() + + c.acceptIncoming = true +} + +func (c *TxManager) SetAllowUnready(types []TransactionType) { + c.Lock() + defer c.Unlock() + + c.allowUnready = types +} + +// HaveDanglingTxs is a way to check if there are any uncommitted transactions +// in the durable storage. This can be used to make decisions about whether a +// failed schema check can be temporarily ignored - with the assumption that +// applying the dangling txs will fix the issue. +func (c *TxManager) HaveDanglingTxs(ctx context.Context, + allowedTypes []TransactionType, +) (found bool) { + c.persistence.IterateAll(context.Background(), func(tx *Transaction) { + if !slices.Contains(allowedTypes, tx.Type) { + return + } + found = true + }) + + return +} + +// TryResumeDanglingTxs loops over the existing transactions and applies them. +// It only does so if the transaction type is explicitly listed as allowed. +// This is because - at the time of creating this - we were not sure if all +// transaction commit functions are idempotent. If one would not be, then +// reapplying a tx or tx commit could potentially be dangerous, as we don't +// know if it was already applied prior to the node death. +// +// For example, think of a "add property 'foo'" tx, that does nothing but +// append the property to the schema. If this ran twice, we might now end up +// with two duplicate properties with the name 'foo' which could in turn create +// other problems. To make sure all txs are resumable (which is what we want +// because that's the only way to avoid schema issues), we need to make sure +// that every single tx is idempotent, then add them to the allow list. 
+// +// One other limitation is that this method currently does nothing to check if +// a tx was really committed or not. In an ideal world, the node would contact +// the other nodes and ask. However, this simpler implementation does not do +// this check. Instead [HaveDanglingTxs] is used in combination with the schema +// check. If the schema is not out of sync in the first place, no txs will be +// applied. This does not cover all edge cases, but it seems to work for now. +// This should be improved in the future. +func (c *TxManager) TryResumeDanglingTxs(ctx context.Context, + allowedTypes []TransactionType, +) (applied bool, err error) { + c.persistence.IterateAll(context.Background(), func(tx *Transaction) { + if !slices.Contains(allowedTypes, tx.Type) { + c.logger.WithFields(logrus.Fields{ + "action": "resume_transaction", + "transaction_id": tx.ID, + "transaction_type": tx.Type, + }).Warnf("dangling transaction %q of type %q is not known to be resumable - skipping", + tx.ID, tx.Type) + + return + } + if err = c.commitFn(ctx, tx); err != nil { + return + } + + applied = true + c.logger.WithFields(logrus.Fields{ + "action": "resume_transaction", + "transaction_id": tx.ID, + "transaction_type": tx.Type, + }).Infof("successfully resumed dangling transaction %q of type %q", tx.ID, tx.Type) + }) + + return +} + +func (c *TxManager) resetTxExpiry(ttl time.Duration, id string) { + cancel := func() {} + ctx := context.Background() + if ttl == 0 { + c.currentTransactionContext = context.Background() + } else { + ctx, cancel = context.WithTimeout(ctx, ttl) + c.logger.WithFields(logrus.Fields{ + "action": "reset_tx_expiry", + "duration": ttl, + }).Debug("context.WithTimeout") + c.currentTransactionContext = ctx + } + + // to prevent a goroutine leak for the new routine we're spawning here, + // register a way to terminate it in case the explicit cancel is called + // before the context's done channel fires. 
+ clearCancelListener := make(chan struct{}, 1) + + c.clearTransaction = func() { + c.currentTransaction = nil + c.currentTransactionContext = nil + c.clearTransaction = func() {} + + clearCancelListener <- struct{}{} + close(clearCancelListener) + } + + f := func() { + ctxDone := ctx.Done() + select { + case <-clearCancelListener: + cancel() + return + case <-ctxDone: + c.Lock() + defer c.Unlock() + c.expiredTxIDs = append(c.expiredTxIDs, id) + + if c.currentTransaction == nil { + // tx is already cleaned up, for example from a successful commit. Nothing to do for us + return + } + + if c.currentTransaction.ID != id { + // tx was already cleaned up, then a new tx was started. Any action from + // us would be destructive, as we'd accidentally destroy a perfectly valid + // tx + return + } + + c.clearTransaction() + monitoring.GetMetrics().SchemaTxClosed.With(prometheus.Labels{ + "ownership": "n/a", + "status": "expire", + }).Inc() + took := time.Since(c.currentTransactionBegin) + monitoring.GetMetrics().SchemaTxDuration.With(prometheus.Labels{ + "ownership": "n/a", + "status": "expire", + }).Observe(took.Seconds()) + c.slowLog.Close("expired") + } + } + enterrors.GoWrapper(f, c.logger) +} + +// expired is a helper to return a more meaningful error message to the user. +// Instead of just telling the user that an ID does not exist, this tracks that +// it once existed, but has been cleared because it expired. 
+// +// This method is not thread-safe as the assumption is that it is called from a +// thread-safe environment where a lock would already be held +func (c *TxManager) expired(id string) bool { + for _, expired := range c.expiredTxIDs { + if expired == id { + return true + } + } + + return false +} + +// SetCommitFn sets a function that is used in Write Transactions, you can +// read from the transaction payload and use that state to alter your local +// state +func (c *TxManager) SetCommitFn(fn CommitFn) { + c.commitFn = fn +} + +// SetResponseFn sets a function that is used in Read Transactions. The +// function sets the local state (by writing it into the Tx Payload). It can +// then be sent to other nodes. Consensus is not part of the ResponseFn. The +// coordinator - who initiated the Tx - is responsible for coming up with +// consensus. Deciding on Consensus requires insights into business logic, as +// from the TX's perspective payloads are opaque. +func (c *TxManager) SetResponseFn(fn ResponseFn) { + c.responseFn = fn +} + +// Begin a Transaction with the specified type and payload. Transactions expire +// after the specified TTL. For a transaction that does not ever expire, pass +// in a ttl of 0. When choosing TTLs keep in mind that clocks might be slightly +// skewed in the cluster, therefore set your TTL for desiredTTL + +// toleratedClockSkew +// +// Regular transactions cannot be opened if the cluster is not considered +// healthy. +func (c *TxManager) BeginTransaction(ctx context.Context, trType TransactionType, + payload interface{}, ttl time.Duration, +) (*Transaction, error) { + return c.beginTransaction(ctx, trType, payload, ttl, false) +} + +// Begin a Transaction that does not require the whole cluster to be healthy. +// This can be used for example in bootstrapping situations when not all nodes +// are present yet, or in disaster recovery situations when a node needs to run +// a transaction in order to re-join a cluster. 
+func (c *TxManager) BeginTransactionTolerateNodeFailures(ctx context.Context, trType TransactionType, + payload interface{}, ttl time.Duration, +) (*Transaction, error) { + return c.beginTransaction(ctx, trType, payload, ttl, true) +} + +func (c *TxManager) beginTransaction(ctx context.Context, trType TransactionType, + payload interface{}, ttl time.Duration, tolerateNodeFailures bool, +) (*Transaction, error) { + c.Lock() + + if c.currentTransaction != nil { + c.Unlock() + return nil, ErrConcurrentTransaction + } + + tx := &Transaction{ + Type: trType, + ID: uuid.New().String(), + Payload: payload, + TolerateNodeFailures: tolerateNodeFailures, + } + if ttl > 0 { + tx.Deadline = time.Now().Add(ttl) + } else { + // UnixTime == 0 represents unlimited + tx.Deadline = time.UnixMilli(0) + } + c.currentTransaction = tx + c.currentTransactionBegin = time.Now() + c.slowLog.Start(tx.ID, true, !tolerateNodeFailures) + c.Unlock() + + monitoring.GetMetrics().SchemaTxOpened.With(prometheus.Labels{ + "ownership": "coordinator", + }).Inc() + + c.resetTxExpiry(ttl, c.currentTransaction.ID) + + if err := c.remote.BroadcastTransaction(ctx, tx); err != nil { + // we could not open the transaction on every node, therefore we need to + // abort it everywhere. + + if err := c.remote.BroadcastAbortTransaction(ctx, tx); err != nil { + c.logger.WithFields(logrus.Fields{ + "action": "broadcast_abort_transaction", + // before https://github.com/weaviate/weaviate/issues/2625 the next + // line would read + // + // "id": c.currentTransaction.ID + // + // which had the potential for races. The tx itself is immutable and + // therefore always thread-safe. However, the association between the tx + // manager and the current tx is mutable, therefore the + // c.currentTransaction pointer could be nil (nil pointer panic) or + // point to another tx (incorrect log). 
+ "id": tx.ID, + }).WithError(err).Errorf("broadcast tx abort failed") + } + + c.Lock() + c.clearTransaction() + monitoring.GetMetrics().SchemaTxClosed.With(prometheus.Labels{ + "ownership": "coordinator", + "status": "abort", + }).Inc() + took := time.Since(c.currentTransactionBegin) + monitoring.GetMetrics().SchemaTxDuration.With(prometheus.Labels{ + "ownership": "coordinator", + "status": "abort", + }).Observe(took.Seconds()) + c.slowLog.Close("abort_on_open") + c.Unlock() + + return nil, errors.Wrap(err, "broadcast open transaction") + } + + c.Lock() + defer c.Unlock() + c.slowLog.Update("begin_tx_completed") + return c.currentTransaction, nil +} + +func (c *TxManager) CommitWriteTransaction(ctx context.Context, + tx *Transaction, +) error { + c.Lock() + + if !c.acceptIncoming { + c.Unlock() + return ErrNotReady + } + + if c.currentTransaction == nil || c.currentTransaction.ID != tx.ID { + expired := c.expired(tx.ID) + c.Unlock() + if expired { + return ErrExpiredTransaction + } + return ErrInvalidTransaction + } + + c.Unlock() + c.slowLog.Update("commit_started") + + // now that we know we are dealing with a valid transaction: no matter the + // outcome, after this call, we should not have a local transaction anymore + defer func() { + c.Lock() + c.clearTransaction() + monitoring.GetMetrics().SchemaTxClosed.With(prometheus.Labels{ + "ownership": "coordinator", + "status": "commit", + }).Inc() + took := time.Since(c.currentTransactionBegin) + monitoring.GetMetrics().SchemaTxDuration.With(prometheus.Labels{ + "ownership": "coordinator", + "status": "commit", + }).Observe(took.Seconds()) + c.slowLog.Close("committed") + c.Unlock() + }() + + if err := c.remote.BroadcastCommitTransaction(ctx, tx); err != nil { + // the broadcast failed, but we can't do anything about it. If we would + // broadcast an "abort" now (as a previous version did) we'd likely run + // into an inconsistency down the line. 
Network requests have variable + // time, so there's a chance some nodes would see the abort before the + // commit and vice-versa. Given enough nodes, we would end up with an + // inconsistent state. + // + // A failed commit means the node that didn't receive the commit needs to + // figure out itself how to get back to the correct state (e.g. by + // recovering from a persisted tx), don't jeopardize all the other nodes as + // a result! + c.logger.WithFields(logrus.Fields{ + "action": "broadcast_commit_transaction", + "id": tx.ID, + }).WithError(err).Error("broadcast tx commit failed") + return errors.Wrap(err, "broadcast commit transaction") + } + + return nil +} + +func (c *TxManager) IncomingBeginTransaction(ctx context.Context, + tx *Transaction, +) ([]byte, error) { + c.Lock() + defer c.Unlock() + + if !c.acceptIncoming && !slices.Contains(c.allowUnready, tx.Type) { + return nil, ErrNotReady + } + + if c.currentTransaction != nil && c.currentTransaction.ID != tx.ID { + return nil, ErrConcurrentTransaction + } + + writable := !slices.Contains(c.allowUnready, tx.Type) + c.slowLog.Start(tx.ID, false, writable) + + if err := c.persistence.StoreTx(ctx, tx); err != nil { + return nil, fmt.Errorf("make tx durable: %w", err) + } + + c.currentTransaction = tx + c.currentTransactionBegin = time.Now() + data, err := c.responseFn(ctx, tx) + if err != nil { + return nil, err + } + + monitoring.GetMetrics().SchemaTxOpened.With(prometheus.Labels{ + "ownership": "participant", + }).Inc() + + var ttl time.Duration + if tx.Deadline.UnixMilli() != 0 { + ttl = time.Until(tx.Deadline) + } + c.resetTxExpiry(ttl, tx.ID) + + c.slowLog.Update("incoming_begin_tx_completed") + + return data, nil +} + +func (c *TxManager) IncomingAbortTransaction(ctx context.Context, + tx *Transaction, +) { + c.Lock() + defer c.Unlock() + + if c.currentTransaction == nil || c.currentTransaction.ID != tx.ID { + // don't do anything + return + } + + c.currentTransaction = nil + 
monitoring.GetMetrics().SchemaTxClosed.With(prometheus.Labels{ + "ownership": "participant", + "status": "abort", + }).Inc() + took := time.Since(c.currentTransactionBegin) + monitoring.GetMetrics().SchemaTxDuration.With(prometheus.Labels{ + "ownership": "participant", + "status": "abort", + }).Observe(took.Seconds()) + c.slowLog.Close("abort_request_received") + + if err := c.persistence.DeleteTx(ctx, tx.ID); err != nil { + c.logger.WithError(err).Error("abort tx") + } +} + +func (c *TxManager) IncomingCommitTransaction(ctx context.Context, + tx *Transaction, +) error { + c.ongoingCommits.Add(1) + defer c.ongoingCommits.Done() + + // requires locking because it accesses c.currentTransaction + txCopy, err := c.incomingCommitTxValidate(ctx, tx) + if err != nil { + return err + } + + c.slowLog.Update("commit_request_received") + + // cleanup requires locking because it accesses c.currentTransaction + defer c.incomingTxCommitCleanup(ctx, tx) + + // commit cannot use locking because of risk of deadlock, see comment inside method + if err := c.incomingTxCommitApplyCommitFn(ctx, txCopy); err != nil { + return err + } + + return nil +} + +func (c *TxManager) incomingCommitTxValidate( + ctx context.Context, tx *Transaction, +) (*Transaction, error) { + c.Lock() + defer c.Unlock() + + if !c.acceptIncoming { + return nil, ErrNotReady + } + + if c.currentTransaction == nil || c.currentTransaction.ID != tx.ID { + expired := c.expired(tx.ID) + if expired { + return nil, ErrExpiredTransaction + } + return nil, ErrInvalidTransaction + } + + txCopy := *c.currentTransaction + return &txCopy, nil +} + +func (c *TxManager) incomingTxCommitApplyCommitFn( + ctx context.Context, tx *Transaction, +) error { + // Important: Do not hold the c.Lock() while applying the commitFn. The + // c.Lock() is only meant to make access to c.currentTransaction thread-safe. + // If we would hold it during apply, there is a risk for a deadlock because + // apply will likely lock the schema Manager. 
The schema Manager itself + // however, might be waiting for the TxManager in case of concurrent + // requests. + // See https://github.com/weaviate/weaviate/issues/4312 for steps on how to + // reproduce + // + // use transaction from cache, not passed in for two reason: a. protect + // against the transaction being manipulated after being created, b. allow + // an "empty" transaction that only contains the id for less network overhead + // (we don't need to pass the payload around anymore, after it's successfully + // opened - every node has a copy of the payload now) + return c.commitFn(ctx, tx) +} + +func (c *TxManager) incomingTxCommitCleanup( + ctx context.Context, tx *Transaction, +) { + c.Lock() + defer c.Unlock() + c.currentTransaction = nil + + monitoring.GetMetrics().SchemaTxClosed.With(prometheus.Labels{ + "ownership": "participant", + "status": "commit", + }).Inc() + took := time.Since(c.currentTransactionBegin) + monitoring.GetMetrics().SchemaTxDuration.With(prometheus.Labels{ + "ownership": "participant", + "status": "commit", + }).Observe(took.Seconds()) + c.slowLog.Close("committed") + + if err := c.persistence.DeleteTx(ctx, tx.ID); err != nil { + c.logger.WithError(err).WithFields(logrus.Fields{ + "action": "incoming_tx_commit_cleanup", + }).Error("close tx on disk") + } +} + +func (c *TxManager) Shutdown() { + c.Lock() + c.acceptIncoming = false + c.Unlock() + + c.ongoingCommits.Wait() +} + +type Transaction struct { + ID string + Type TransactionType + Payload interface{} + Deadline time.Time + + // If TolerateNodeFailures is false (the default) a transaction cannot be + // opened or committed if a node is confirmed dead. If a node is only + // suspected dead, the TxManager will try, but abort unless all nodes ACK. 
+ TolerateNodeFailures bool +} + +type Persistence interface { + StoreTx(ctx context.Context, tx *Transaction) error + DeleteTx(ctx context.Context, txID string) error + IterateAll(ctx context.Context, cb func(tx *Transaction)) error +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/config/authentication.go b/platform/dbops/binaries/weaviate-src/usecases/config/authentication.go new file mode 100644 index 0000000000000000000000000000000000000000..97a12c2984542cb1949e774b3cf5ba2a21dbcc1b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/config/authentication.go @@ -0,0 +1,83 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package config + +import ( + "fmt" + + "github.com/weaviate/weaviate/usecases/config/runtime" +) + +// Authentication configuration +type Authentication struct { + OIDC OIDC `json:"oidc" yaml:"oidc"` + AnonymousAccess AnonymousAccess `json:"anonymous_access" yaml:"anonymous_access"` + APIKey StaticAPIKey // don't change name to not break yaml files + DBUsers DbUsers `json:"db_users" yaml:"db_users"` +} + +// DefaultAuthentication is the default authentication scheme when no authentication is provided +var DefaultAuthentication = Authentication{ + AnonymousAccess: AnonymousAccess{ + Enabled: true, + }, +} + +// Validate the Authentication configuration. This only validates at a general +// level. 
Validation specific to the individual auth methods should happen +// inside their respective packages +func (a Authentication) Validate() error { + if !a.AnyAuthMethodSelected() { + return fmt.Errorf("no authentication scheme configured, you must select at least one") + } + + return nil +} + +func (a Authentication) AnyAuthMethodSelected() bool { + return a.AnonymousAccess.Enabled || a.OIDC.Enabled || a.APIKey.Enabled || a.DBUsers.Enabled +} + +func (a Authentication) AnyApiKeyAvailable() bool { + return a.APIKey.Enabled || a.DBUsers.Enabled +} + +// AnonymousAccess considers users without any auth information as +// authenticated as "anonymous" rather than denying their request immediately. +// Note that enabling anonymous access ONLY affects Authentication, not +// Authorization. +type AnonymousAccess struct { + Enabled bool `json:"enabled" yaml:"enabled"` +} + +// OIDC configures the OIDC middleware +type OIDC struct { + Enabled bool `json:"enabled" yaml:"enabled"` + Issuer *runtime.DynamicValue[string] `json:"issuer" yaml:"issuer"` + ClientID *runtime.DynamicValue[string] `json:"client_id" yaml:"client_id"` + SkipClientIDCheck *runtime.DynamicValue[bool] `yaml:"skip_client_id_check" json:"skip_client_id_check"` + UsernameClaim *runtime.DynamicValue[string] `yaml:"username_claim" json:"username_claim"` + GroupsClaim *runtime.DynamicValue[string] `yaml:"groups_claim" json:"groups_claim"` + Scopes *runtime.DynamicValue[[]string] `yaml:"scopes" json:"scopes"` + Certificate *runtime.DynamicValue[string] `yaml:"certificate" json:"certificate"` + JWKSUrl *runtime.DynamicValue[string] `yaml:"jwks_url" json:"jwks_url"` +} + +type StaticAPIKey struct { + Enabled bool `json:"enabled" yaml:"enabled"` + Users []string `json:"users" yaml:"users"` + AllowedKeys []string `json:"allowed_keys" yaml:"allowed_keys"` +} + +type DbUsers struct { + Enabled bool `json:"enabled" yaml:"enabled"` +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/config/authentication_test.go 
b/platform/dbops/binaries/weaviate-src/usecases/config/authentication_test.go new file mode 100644 index 0000000000000000000000000000000000000000..42a1457abf3d0eb5febaa5a86cddaec01f843e2f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/config/authentication_test.go @@ -0,0 +1,98 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package config + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestConfig_Authentication(t *testing.T) { + t.Run("no auth selected", func(t *testing.T) { + auth := Authentication{} + expected := fmt.Errorf("no authentication scheme configured, you must select at least one") + + err := auth.Validate() + + assert.Equal(t, expected, err) + }) + + t.Run("only anonymous selected", func(t *testing.T) { + auth := Authentication{ + AnonymousAccess: AnonymousAccess{ + Enabled: true, + }, + } + + err := auth.Validate() + + assert.Nil(t, err, "should not error") + }) + + t.Run("only oidc selected", func(t *testing.T) { + auth := Authentication{ + OIDC: OIDC{ + Enabled: true, + }, + } + + err := auth.Validate() + + assert.Nil(t, err, "should not error") + }) + + t.Run("oidc and anonymous enabled together", func(t *testing.T) { + // this might seem counter-intuitive at first, but this makes a lot of + // sense when you consider the authorization strategies: for example we + // could allow reads for everyone, but only explicitly authenticated users + // may write + auth := Authentication{ + OIDC: OIDC{ + Enabled: true, + }, + AnonymousAccess: AnonymousAccess{ + Enabled: true, + }, + } + + err := auth.Validate() + + assert.Nil(t, err, "should not error") + }) +} + +func TestDbUserAuth(t *testing.T) { + tests := []struct { + 
name string + staticEnabled bool + dbEnabled bool + expected bool + }{ + {"none enabled", false, false, false}, + {"both enabled", true, true, true}, + {"only static", true, false, true}, + {"only db", false, true, true}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + auth := Authentication{ + APIKey: StaticAPIKey{Enabled: test.staticEnabled}, DBUsers: DbUsers{Enabled: test.dbEnabled}, + } + + require.Equal(t, auth.AnyApiKeyAvailable(), test.expected) + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/config/authorization.go b/platform/dbops/binaries/weaviate-src/usecases/config/authorization.go new file mode 100644 index 0000000000000000000000000000000000000000..dcdae78f8e0235ddffb2e5cf1ec3a43245ee79ec --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/config/authorization.go @@ -0,0 +1,48 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package config + +import ( + "fmt" + + "github.com/weaviate/weaviate/usecases/auth/authorization/adminlist" + "github.com/weaviate/weaviate/usecases/auth/authorization/rbac/rbacconf" +) + +// Authorization configuration +type Authorization struct { + AdminList adminlist.Config `json:"admin_list" yaml:"admin_list"` + Rbac rbacconf.Config `json:"rbac" yaml:"rbac"` +} + +// Validate the Authorization configuration. This only validates at a general +// level. 
Validation specific to the individual auth methods should happen +// inside their respective packages +func (a Authorization) Validate() error { + if a.AdminList.Enabled && a.Rbac.Enabled { + return fmt.Errorf("cannot enable adminlist and rbac at the same time") + } + + if a.AdminList.Enabled { + if err := a.AdminList.Validate(); err != nil { + return fmt.Errorf("authorization adminlist: %w", err) + } + } + + if a.Rbac.Enabled { + if err := a.Rbac.Validate(); err != nil { + return fmt.Errorf("authorization rbac: %w", err) + } + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/config/authorization_test.go b/platform/dbops/binaries/weaviate-src/usecases/config/authorization_test.go new file mode 100644 index 0000000000000000000000000000000000000000..8b937adb0b224a180b6635fa8540d3bc64f1a35a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/config/authorization_test.go @@ -0,0 +1,64 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package config + +import ( + "testing" + + "github.com/weaviate/weaviate/usecases/auth/authorization/rbac/rbacconf" + + "github.com/stretchr/testify/assert" + "github.com/weaviate/weaviate/usecases/auth/authorization/adminlist" +) + +func Test_Validation(t *testing.T) { + configs := []struct { + name string + config Authorization + wantErr bool + }{ + { + name: "Only adminlist", + config: Authorization{AdminList: adminlist.Config{Enabled: true}}, + wantErr: false, + }, + { + name: "Only rbac", + config: Authorization{Rbac: rbacconf.Config{Enabled: true, RootUsers: []string{"1"}}}, + wantErr: false, + }, + { + name: "Only adminlist - wrong config", + config: Authorization{AdminList: adminlist.Config{Enabled: true, Users: []string{"1"}, ReadOnlyUsers: []string{"1"}}}, + wantErr: true, + }, + { + name: "both adminlist and rbac", + config: Authorization{ + AdminList: adminlist.Config{Enabled: true}, + Rbac: rbacconf.Config{Enabled: true, RootUsers: []string{"1"}}, + }, + wantErr: true, + }, + } + + for _, tt := range configs { + t.Run(tt.name, func(t *testing.T) { + err := tt.config.Validate() + if tt.wantErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/config/auto_schema_test.go b/platform/dbops/binaries/weaviate-src/usecases/config/auto_schema_test.go new file mode 100644 index 0000000000000000000000000000000000000000..f3e9429ae8ebc3319c89d2b3658cf471f66e129f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/config/auto_schema_test.go @@ -0,0 +1,65 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package config + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/weaviate/weaviate/entities/schema" +) + +func TestConfig_AutoSchema(t *testing.T) { + t.Run("invalid DefaultNumber", func(t *testing.T) { + auth := AutoSchema{ + DefaultNumber: "float", + DefaultString: schema.DataTypeText.String(), + DefaultDate: "date", + } + expected := fmt.Errorf("autoSchema.defaultNumber must be either 'int' or 'number") + err := auth.Validate() + assert.Equal(t, expected, err) + }) + + t.Run("invalid DefaultString", func(t *testing.T) { + auth := AutoSchema{ + DefaultNumber: "int", + DefaultString: "body", + DefaultDate: "date", + } + expected := fmt.Errorf("autoSchema.defaultString must be either 'string' or 'text") + err := auth.Validate() + assert.Equal(t, expected, err) + }) + + t.Run("invalid DefaultDate", func(t *testing.T) { + auth := AutoSchema{ + DefaultNumber: "int", + DefaultString: schema.DataTypeText.String(), + DefaultDate: "int", + } + expected := fmt.Errorf("autoSchema.defaultDate must be either 'date' or 'string' or 'text") + err := auth.Validate() + assert.Equal(t, expected, err) + }) + + t.Run("all valid AutoSchema configurations", func(t *testing.T) { + auth := AutoSchema{ + DefaultNumber: "int", + DefaultString: schema.DataTypeText.String(), + DefaultDate: "date", + } + err := auth.Validate() + assert.Nil(t, err, "should not error") + }) +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/config/config_handler.go b/platform/dbops/binaries/weaviate-src/usecases/config/config_handler.go new file mode 100644 index 0000000000000000000000000000000000000000..7c4a6f9464a36b952eebc4f3cf90c359dc825324 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/config/config_handler.go @@ -0,0 +1,752 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ 
|_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package config + +import ( + "encoding/json" + "fmt" + "math" + "os" + "regexp" + "strings" + "time" + + "github.com/go-openapi/swag" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "gopkg.in/yaml.v3" + + "github.com/weaviate/weaviate/deprecations" + entcfg "github.com/weaviate/weaviate/entities/config" + "github.com/weaviate/weaviate/entities/replication" + "github.com/weaviate/weaviate/entities/schema" + entsentry "github.com/weaviate/weaviate/entities/sentry" + "github.com/weaviate/weaviate/entities/vectorindex/common" + "github.com/weaviate/weaviate/usecases/cluster" + "github.com/weaviate/weaviate/usecases/config/runtime" + usagetypes "github.com/weaviate/weaviate/usecases/modulecomponents/usage/types" + "github.com/weaviate/weaviate/usecases/monitoring" +) + +// ServerVersion is deprecated. Use `build.Version`. It's there for backward compatiblility. +// ServerVersion is set when the misc handlers are setup. +// When misc handlers are setup, the entire swagger spec +// is already being parsed for the server version. This is +// a good time for us to set ServerVersion, so that the +// spec only needs to be parsed once. 
+var ServerVersion string + +// DefaultConfigFile is the default file when no config file is provided +const DefaultConfigFile string = "./weaviate.conf.json" + +// DefaultCleanupIntervalSeconds can be overwritten on a per-class basis +const DefaultCleanupIntervalSeconds = int64(60) + +const ( + // These BM25 tuning params can be overwritten on a per-class basis + DefaultBM25k1 = float32(1.2) + DefaultBM25b = float32(0.75) +) + +var DefaultUsingBlockMaxWAND = os.Getenv("USE_INVERTED_SEARCHABLE") == "" || entcfg.Enabled(os.Getenv("USE_INVERTED_SEARCHABLE")) + +const ( + DefaultMaxImportGoroutinesFactor = float64(1.5) + + DefaultDiskUseWarningPercentage = uint64(80) + DefaultDiskUseReadonlyPercentage = uint64(90) + DefaultMemUseWarningPercentage = uint64(80) + // TODO: off by default for now, to make sure + // the measurement is reliable. once + // confirmed, we can set this to 90 + DefaultMemUseReadonlyPercentage = uint64(0) +) + +// Flags are input options +type Flags struct { + ConfigFile string `long:"config-file" description:"path to config file (default: ./weaviate.conf.json)"` + + RaftPort int `long:"raft-port" description:"the port used by Raft for inter-node communication"` + RaftInternalRPCPort int `long:"raft-internal-rpc-port" description:"the port used for internal RPCs within the cluster"` + RaftRPCMessageMaxSize int `long:"raft-rpc-message-max-size" description:"maximum internal raft grpc message size in bytes, defaults to 1073741824"` + RaftJoin []string `long:"raft-join" description:"a comma-separated list of server addresses to join on startup. Each element needs to be in the form NODE_NAME[:NODE_PORT]. 
If NODE_PORT is not present, raft-internal-rpc-port default value will be used instead"` + RaftBootstrapTimeout int `long:"raft-bootstrap-timeout" description:"the duration for which the raft bootstrap procedure will wait for each node in raft-join to be reachable"` + RaftBootstrapExpect int `long:"raft-bootstrap-expect" description:"specifies the number of server nodes to wait for before bootstrapping the cluster"` + RaftHeartbeatTimeout int `long:"raft-heartbeat-timeout" description:"raft heartbeat timeout"` + RaftElectionTimeout int `long:"raft-election-timeout" description:"raft election timeout"` + RaftSnapshotThreshold int `long:"raft-snap-threshold" description:"number of outstanding log entries before performing a snapshot"` + RaftSnapshotInterval int `long:"raft-snap-interval" description:"controls how often raft checks if it should perform a snapshot"` + RaftMetadataOnlyVoters bool `long:"raft-metadata-only-voters" description:"configures the voters to store metadata exclusively, without storing any other data"` + + RuntimeOverridesEnabled bool `long:"runtime-overrides.enabled" description:"enable runtime overrides config"` + RuntimeOverridesPath string `long:"runtime-overrides.path" description:"path to runtime overrides config"` + RuntimeOverridesLoadInterval time.Duration `long:"runtime-overrides.load-interval" description:"load interval for runtime overrides config"` +} + +type SchemaHandlerConfig struct { + MaximumAllowedCollectionsCount *runtime.DynamicValue[int] `json:"maximum_allowed_collections_count" yaml:"maximum_allowed_collections_count"` +} + +type RuntimeOverrides struct { + Enabled bool `json:"enabled"` + Path string `json:"path" yaml:"path"` + LoadInterval time.Duration `json:"load_interval" yaml:"load_interval"` +} + +// Config outline of the config file +type Config struct { + Name string `json:"name" yaml:"name"` + Debug bool `json:"debug" yaml:"debug"` + QueryDefaults QueryDefaults `json:"query_defaults" yaml:"query_defaults"` + 
QueryMaximumResults int64 `json:"query_maximum_results" yaml:"query_maximum_results"` + QueryHybridMaximumResults int64 `json:"query_hybrid_maximum_results" yaml:"query_hybrid_maximum_results"` + QueryNestedCrossReferenceLimit int64 `json:"query_nested_cross_reference_limit" yaml:"query_nested_cross_reference_limit"` + QueryCrossReferenceDepthLimit int `json:"query_cross_reference_depth_limit" yaml:"query_cross_reference_depth_limit"` + Contextionary Contextionary `json:"contextionary" yaml:"contextionary"` + Authentication Authentication `json:"authentication" yaml:"authentication"` + Authorization Authorization `json:"authorization" yaml:"authorization"` + Origin string `json:"origin" yaml:"origin"` + Persistence Persistence `json:"persistence" yaml:"persistence"` + DefaultVectorizerModule string `json:"default_vectorizer_module" yaml:"default_vectorizer_module"` + DefaultVectorDistanceMetric string `json:"default_vector_distance_metric" yaml:"default_vector_distance_metric"` + EnableModules string `json:"enable_modules" yaml:"enable_modules"` + EnableApiBasedModules bool `json:"api_based_modules_disabled" yaml:"api_based_modules_disabled"` + ModulesPath string `json:"modules_path" yaml:"modules_path"` + ModuleHttpClientTimeout time.Duration `json:"modules_client_timeout" yaml:"modules_client_timeout"` + AutoSchema AutoSchema `json:"auto_schema" yaml:"auto_schema"` + Cluster cluster.Config `json:"cluster" yaml:"cluster"` + Replication replication.GlobalConfig `json:"replication" yaml:"replication"` + Monitoring monitoring.Config `json:"monitoring" yaml:"monitoring"` + GRPC GRPC `json:"grpc" yaml:"grpc"` + Profiling Profiling `json:"profiling" yaml:"profiling"` + ResourceUsage ResourceUsage `json:"resource_usage" yaml:"resource_usage"` + MaxImportGoroutinesFactor float64 `json:"max_import_goroutine_factor" yaml:"max_import_goroutine_factor"` + MaximumConcurrentGetRequests int `json:"maximum_concurrent_get_requests" yaml:"maximum_concurrent_get_requests"` + 
MaximumConcurrentShardLoads int `json:"maximum_concurrent_shard_loads" yaml:"maximum_concurrent_shard_loads"` + TrackVectorDimensions bool `json:"track_vector_dimensions" yaml:"track_vector_dimensions"` + TrackVectorDimensionsInterval time.Duration `json:"track_vector_dimensions_interval" yaml:"track_vector_dimensions_interval"` + ReindexVectorDimensionsAtStartup bool `json:"reindex_vector_dimensions_at_startup" yaml:"reindex_vector_dimensions_at_startup"` + DisableLazyLoadShards bool `json:"disable_lazy_load_shards" yaml:"disable_lazy_load_shards"` + ForceFullReplicasSearch bool `json:"force_full_replicas_search" yaml:"force_full_replicas_search"` + TransferInactivityTimeout time.Duration `json:"transfer_inactivity_timeout" yaml:"transfer_inactivity_timeout"` + RecountPropertiesAtStartup bool `json:"recount_properties_at_startup" yaml:"recount_properties_at_startup"` + ReindexSetToRoaringsetAtStartup bool `json:"reindex_set_to_roaringset_at_startup" yaml:"reindex_set_to_roaringset_at_startup"` + ReindexerGoroutinesFactor float64 `json:"reindexer_goroutines_factor" yaml:"reindexer_goroutines_factor"` + ReindexMapToBlockmaxAtStartup bool `json:"reindex_map_to_blockmax_at_startup" yaml:"reindex_map_to_blockmax_at_startup"` + ReindexMapToBlockmaxConfig MapToBlockamaxConfig `json:"reindex_map_to_blockmax_config" yaml:"reindex_map_to_blockmax_config"` + IndexMissingTextFilterableAtStartup bool `json:"index_missing_text_filterable_at_startup" yaml:"index_missing_text_filterable_at_startup"` + DisableGraphQL bool `json:"disable_graphql" yaml:"disable_graphql"` + AvoidMmap bool `json:"avoid_mmap" yaml:"avoid_mmap"` + CORS CORS `json:"cors" yaml:"cors"` + DisableTelemetry bool `json:"disable_telemetry" yaml:"disable_telemetry"` + HNSWStartupWaitForVectorCache bool `json:"hnsw_startup_wait_for_vector_cache" yaml:"hnsw_startup_wait_for_vector_cache"` + HNSWVisitedListPoolMaxSize int `json:"hnsw_visited_list_pool_max_size" yaml:"hnsw_visited_list_pool_max_size"` + 
HNSWFlatSearchConcurrency int `json:"hnsw_flat_search_concurrency" yaml:"hnsw_flat_search_concurrency"` + HNSWAcornFilterRatio float64 `json:"hnsw_acorn_filter_ratio" yaml:"hnsw_acorn_filter_ratio"` + Sentry *entsentry.ConfigOpts `json:"sentry" yaml:"sentry"` + MetadataServer MetadataServer `json:"metadata_server" yaml:"metadata_server"` + SchemaHandlerConfig SchemaHandlerConfig `json:"schema" yaml:"schema"` + DistributedTasks DistributedTasksConfig `json:"distributed_tasks" yaml:"distributed_tasks"` + ReplicationEngineMaxWorkers int `json:"replication_engine_max_workers" yaml:"replication_engine_max_workers"` + ReplicationEngineFileCopyWorkers int `json:"replication_engine_file_copy_workers" yaml:"replication_engine_file_copy_workers"` + // Raft Specific configuration + // TODO-RAFT: Do we want to be able to specify these with config file as well ? + Raft Raft + + // map[className][]propertyName + ReindexIndexesAtStartup map[string][]string `json:"reindex_indexes_at_startup" yaml:"reindex_indexes_at_startup"` + + RuntimeOverrides RuntimeOverrides `json:"runtime_overrides" yaml:"runtime_overrides"` + + ReplicaMovementDisabled bool `json:"replica_movement_disabled" yaml:"replica_movement_disabled"` + ReplicaMovementMinimumAsyncWait *runtime.DynamicValue[time.Duration] `json:"REPLICA_MOVEMENT_MINIMUM_ASYNC_WAIT" yaml:"REPLICA_MOVEMENT_MINIMUM_ASYNC_WAIT"` + + // TenantActivityReadLogLevel is 'debug' by default as every single READ + // interaction with a tenant leads to a log line. However, this may + // temporarily be desired, e.g. for analysis or debugging purposes. In this + // case the log level can be elevated, e.g. to 'info'. This is overall less + // noisy than changing the global log level, but still allows to see all + // tenant read activity. 
+ TenantActivityReadLogLevel *runtime.DynamicValue[string] `json:"tenant_activity_read_log_level" yaml:"tenant_activity_read_log_level"` + // TenantActivityWriteLogLevel is 'debug' by default as every single WRITE + // interaction with a tenant leads to a log line. However, this may + // temporarily be desired, e.g. for analysis or debugging purposes. In this + // case the log level can be elevated, e.g. to 'info'. This is overall less + // noisy than changing the global log level, but still allows to see all + // tenant write activity. + TenantActivityWriteLogLevel *runtime.DynamicValue[string] `json:"tenant_activity_write_log_level" yaml:"tenant_activity_write_log_level"` + + // RevectorizeCheck is an optimization where Weaviate checks if a vector can + // be reused from a previous version of the object, for example because the + // only change was an update of a property that is excluded from + // vectorization. This check is on by default (backward-compatibility). + // + // However, this check comes at a cost, it means that every single insert + // will turn into a read-before-write pattern, even if the inserted object is + // new. That is because the logic first needs to check if the object even + // exists. In cases where write throughput matters and the overwhelming + // majority of inserts are new, unique objects, it might be advisable to turn + // this feature off using the provided flag. 
+ RevectorizeCheckDisabled *runtime.DynamicValue[bool] `json:"revectorize_check_disabled" yaml:"revectorize_check_disabled"` + + QuerySlowLogEnabled *runtime.DynamicValue[bool] `json:"query_slow_log_enabled" yaml:"query_slow_log_enabled"` + QuerySlowLogThreshold *runtime.DynamicValue[time.Duration] `json:"query_slow_log_threshold" yaml:"query_slow_log_threshold"` + + // New classes will be created with the default quantization + DefaultQuantization *runtime.DynamicValue[string] `json:"default_quantization" yaml:"default_quantization"` + + QueryBitmapBufsMaxMemory int `json:"query_bitmap_bufs_max_memory" yaml:"query_bitmap_bufs_max_memory"` + QueryBitmapBufsMaxBufSize int `json:"query_bitmap_bufs_max_buf_size" yaml:"query_bitmap_bufs_max_buf_size"` + + // InvertedSorterDisabled forces the "objects bucket" strategy and doesn't + // not consider inverted sorting, even when the query planner thinks this is + // the better option. + // + // Most users should never set this flag, it exists for two reasons: + // - For benchmarking reasons, this flag can be used to evaluate the + // (positive) impact of the inverted sorter. + // - As a safety net to revert to the old behavior in case there is a bug + // in the inverted indexer despite the very extensive testing. + // + // This flat may be removed in the future. 
+ InvertedSorterDisabled *runtime.DynamicValue[bool] `json:"inverted_sorter_disabled" yaml:"inverted_sorter_disabled"` + + // Usage configuration for the usage module + Usage usagetypes.UsageConfig `json:"usage" yaml:"usage"` + + // The minimum timeout for the server to wait before it returns an error + MinimumInternalTimeout time.Duration `json:"minimum_internal_timeout" yaml:"minimum_internal_timeout"` +} + +type MapToBlockamaxConfig struct { + SwapBuckets bool `json:"swap_buckets" yaml:"swap_buckets"` + UnswapBuckets bool `json:"unswap_buckets" yaml:"unswap_buckets"` + TidyBuckets bool `json:"tidy_buckets" yaml:"tidy_buckets"` + ReloadShards bool `json:"reload_shards" yaml:"reload_shards"` + Rollback bool `json:"rollback" yaml:"rollback"` + ConditionalStart bool `json:"conditional_start" yaml:"conditional_start"` + ProcessingDurationSeconds int `json:"processing_duration_seconds" yaml:"processing_duration_seconds"` + PauseDurationSeconds int `json:"pause_duration_seconds" yaml:"pause_duration_seconds"` + PerObjectDelayMilliseconds int `json:"per_object_delay_milliseconds" yaml:"per_object_delay_milliseconds"` + Selected []CollectionPropsTenants `json:"selected" yaml:"selected"` +} + +type CollectionPropsTenants struct { + Collection string `json:"collection" yaml:"collection"` + Props []string `json:"props" yaml:"props"` + Tenants []string `json:"tenants" yaml:"tenants"` +} + +// Validate the configuration +func (c *Config) Validate() error { + if err := c.Authentication.Validate(); err != nil { + return configErr(err) + } + + if err := c.Authorization.Validate(); err != nil { + return configErr(err) + } + + if c.Authentication.AnonymousAccess.Enabled && c.Authorization.Rbac.Enabled { + return fmt.Errorf("cannot enable anonymous access and rbac authorization") + } + + if err := c.Persistence.Validate(); err != nil { + return configErr(err) + } + + if err := c.AutoSchema.Validate(); err != nil { + return configErr(err) + } + + if err := 
c.ResourceUsage.Validate(); err != nil { + return configErr(err) + } + + if err := c.Raft.Validate(); err != nil { + return configErr(err) + } + + return nil +} + +// ValidateModules validates the non-nested parameters. Nested objects must provide their own +// validation methods +func (c *Config) ValidateModules(modProv moduleProvider) error { + if err := c.validateDefaultVectorizerModule(modProv); err != nil { + return errors.Wrap(err, "default vectorizer module") + } + + if err := c.validateDefaultVectorDistanceMetric(); err != nil { + return errors.Wrap(err, "default vector distance metric") + } + + return nil +} + +func (c *Config) validateDefaultVectorizerModule(modProv moduleProvider) error { + if c.DefaultVectorizerModule == VectorizerModuleNone { + return nil + } + + return modProv.ValidateVectorizer(c.DefaultVectorizerModule) +} + +type moduleProvider interface { + ValidateVectorizer(moduleName string) error +} + +func (c *Config) validateDefaultVectorDistanceMetric() error { + switch c.DefaultVectorDistanceMetric { + case "", common.DistanceCosine, common.DistanceDot, common.DistanceL2Squared, common.DistanceManhattan, common.DistanceHamming: + return nil + default: + return fmt.Errorf("must be one of [\"cosine\", \"dot\", \"l2-squared\", \"manhattan\",\"hamming\"]") + } +} + +type AutoSchema struct { + Enabled *runtime.DynamicValue[bool] `json:"enabled" yaml:"enabled"` + DefaultString string `json:"defaultString" yaml:"defaultString"` + DefaultNumber string `json:"defaultNumber" yaml:"defaultNumber"` + DefaultDate string `json:"defaultDate" yaml:"defaultDate"` +} + +func (a AutoSchema) Validate() error { + if a.DefaultNumber != "int" && a.DefaultNumber != "number" { + return fmt.Errorf("autoSchema.defaultNumber must be either 'int' or 'number") + } + if a.DefaultString != schema.DataTypeText.String() && + a.DefaultString != schema.DataTypeString.String() { + return fmt.Errorf("autoSchema.defaultString must be either 'string' or 'text") + } + if 
a.DefaultDate != "date" && + a.DefaultDate != schema.DataTypeText.String() && + a.DefaultDate != schema.DataTypeString.String() { + return fmt.Errorf("autoSchema.defaultDate must be either 'date' or 'string' or 'text") + } + + return nil +} + +// QueryDefaults for optional parameters +type QueryDefaults struct { + Limit int64 `json:"limit" yaml:"limit"` + LimitGraphQL int64 `json:"limitGraphQL" yaml:"limitGraphQL"` +} + +// DefaultQueryDefaultsLimit is the default query limit when no limit is provided +const ( + DefaultQueryDefaultsLimit int64 = 10 + DefaultQueryDefaultsLimitGraphQL int64 = 100 +) + +type Contextionary struct { + URL string `json:"url" yaml:"url"` +} + +// Support independent TLS credentials for gRPC +type GRPC struct { + Port int `json:"port" yaml:"port"` + CertFile string `json:"certFile" yaml:"certFile"` + KeyFile string `json:"keyFile" yaml:"keyFile"` + MaxMsgSize int `json:"maxMsgSize" yaml:"maxMsgSize"` +} + +type Profiling struct { + BlockProfileRate int `json:"blockProfileRate" yaml:"blockProfileRate"` + MutexProfileFraction int `json:"mutexProfileFraction" yaml:"mutexProfileFraction"` + Disabled bool `json:"disabled" yaml:"disabled"` + Port int `json:"port" yaml:"port"` +} + +type DistributedTasksConfig struct { + Enabled bool `json:"enabled" yaml:"enabled"` + CompletedTaskTTL time.Duration `json:"completedTaskTTL" yaml:"completedTaskTTL"` + SchedulerTickInterval time.Duration `json:"schedulerTickInterval" yaml:"schedulerTickInterval"` +} + +type Persistence struct { + DataPath string `json:"dataPath" yaml:"dataPath"` + MemtablesFlushDirtyAfter int `json:"flushDirtyMemtablesAfter" yaml:"flushDirtyMemtablesAfter"` + MemtablesMaxSizeMB int `json:"memtablesMaxSizeMB" yaml:"memtablesMaxSizeMB"` + MemtablesMinActiveDurationSeconds int `json:"memtablesMinActiveDurationSeconds" yaml:"memtablesMinActiveDurationSeconds"` + MemtablesMaxActiveDurationSeconds int `json:"memtablesMaxActiveDurationSeconds" yaml:"memtablesMaxActiveDurationSeconds"` + 
LSMMaxSegmentSize int64 `json:"lsmMaxSegmentSize" yaml:"lsmMaxSegmentSize"` + LSMSegmentsCleanupIntervalSeconds int `json:"lsmSegmentsCleanupIntervalSeconds" yaml:"lsmSegmentsCleanupIntervalSeconds"` + LSMSeparateObjectsCompactions bool `json:"lsmSeparateObjectsCompactions" yaml:"lsmSeparateObjectsCompactions"` + LSMEnableSegmentsChecksumValidation bool `json:"lsmEnableSegmentsChecksumValidation" yaml:"lsmEnableSegmentsChecksumValidation"` + LSMCycleManagerRoutinesFactor int `json:"lsmCycleManagerRoutinesFactor" yaml:"lsmCycleManagerRoutinesFactor"` + IndexRangeableInMemory bool `json:"indexRangeableInMemory" yaml:"indexRangeableInMemory"` + MinMMapSize int64 `json:"minMMapSize" yaml:"minMMapSize"` + LazySegmentsDisabled bool `json:"lazySegmentsDisabled" yaml:"lazySegmentsDisabled"` + SegmentInfoIntoFileNameEnabled bool `json:"segmentFileInfoEnabled" yaml:"segmentFileInfoEnabled"` + WriteMetadataFilesEnabled bool `json:"writeMetadataFilesEnabled" yaml:"writeMetadataFilesEnabled"` + MaxReuseWalSize int64 `json:"MaxReuseWalSize" yaml:"MaxReuseWalSize"` + HNSWMaxLogSize int64 `json:"hnswMaxLogSize" yaml:"hnswMaxLogSize"` + HNSWDisableSnapshots bool `json:"hnswDisableSnapshots" yaml:"hnswDisableSnapshots"` + HNSWSnapshotIntervalSeconds int `json:"hnswSnapshotIntervalSeconds" yaml:"hnswSnapshotIntervalSeconds"` + HNSWSnapshotOnStartup bool `json:"hnswSnapshotOnStartup" yaml:"hnswSnapshotOnStartup"` + HNSWSnapshotMinDeltaCommitlogsNumber int `json:"hnswSnapshotMinDeltaCommitlogsNumber" yaml:"hnswSnapshotMinDeltaCommitlogsNumber"` + HNSWSnapshotMinDeltaCommitlogsSizePercentage int `json:"hnswSnapshotMinDeltaCommitlogsSizePercentage" yaml:"hnswSnapshotMinDeltaCommitlogsSizePercentage"` +} + +// DefaultPersistenceDataPath is the default location for data directory when no location is provided +const DefaultPersistenceDataPath string = "./data" + +// DefaultPersistenceLSMMaxSegmentSize is effectively unlimited for backward +// compatibility. 
TODO: consider changing this in a future release and make +// some noise about it. This is technically a breaking change. +const DefaultPersistenceLSMMaxSegmentSize = math.MaxInt64 + +// DefaultPersistenceLSMSegmentsCleanupIntervalSeconds = 0 for backward compatibility. +// value = 0 means cleanup is turned off. +const DefaultPersistenceLSMSegmentsCleanupIntervalSeconds = 0 + +// DefaultPersistenceLSMCycleManagerRoutinesFactor - determines how many goroutines +// are started for cyclemanager (factor * NUMCPU) +const DefaultPersistenceLSMCycleManagerRoutinesFactor = 2 + +const DefaultPersistenceHNSWMaxLogSize = 500 * 1024 * 1024 // 500MB for backward compatibility + +const ( + // minimal interval for new hnws snapshot to be created after last one + DefaultHNSWSnapshotIntervalSeconds = 6 * 3600 // 6h + DefaultHNSWSnapshotDisabled = true + DefaultHNSWSnapshotOnStartup = true + DefaultHNSWSnapshotMinDeltaCommitlogsNumber = 1 + DefaultHNSWSnapshotMinDeltaCommitlogsSizePercentage = 5 // 5% +) + +const ( + DefaultReindexerGoroutinesFactor = 0.5 + + DefaultMapToBlockmaxProcessingDurationSeconds = 3 * 60 + DefaultMapToBlockmaxPauseDurationSeconds = 60 + DefaultMapToBlockmaxPerObjectDelayMilliseconds = 0 +) + +// MetadataServer is experimental. +type MetadataServer struct { + // When enabled startup will include a "metadata server" + // for separation of storage/compute Weaviate. 
+ Enabled bool `json:"enabled" yaml:"enabled"` + GrpcListenAddress string `json:"grpc_listen_address" yaml:"grpc_listen_address"` + DataEventsChannelCapacity int `json:"data_events_channel_capacity" yaml:"data_events_channel_capacity"` +} + +const ( + DefaultMetadataServerGrpcListenAddress = ":9050" + DefaultMetadataServerDataEventsChannelCapacity = 100 +) + +const DefaultHNSWVisitedListPoolSize = -1 // unlimited for backward compatibility + +const DefaultHNSWFlatSearchConcurrency = 1 // 1 for backward compatibility + +const ( + DefaultPersistenceMinMMapSize = 8192 // 8kb by default + DefaultPersistenceMaxReuseWalSize = 4096 // 4kb by default +) + +func (p Persistence) Validate() error { + if p.DataPath == "" { + return fmt.Errorf("persistence.dataPath must be set") + } + + return nil +} + +type DiskUse struct { + WarningPercentage uint64 `json:"warning_percentage" yaml:"warning_percentage"` + ReadOnlyPercentage uint64 `json:"readonly_percentage" yaml:"readonly_percentage"` +} + +func (d DiskUse) Validate() error { + if d.WarningPercentage > 100 { + return fmt.Errorf("disk_use.read_only_percentage must be between 0 and 100") + } + + if d.ReadOnlyPercentage > 100 { + return fmt.Errorf("disk_use.read_only_percentage must be between 0 and 100") + } + + return nil +} + +type MemUse struct { + WarningPercentage uint64 `json:"warning_percentage" yaml:"warning_percentage"` + ReadOnlyPercentage uint64 `json:"readonly_percentage" yaml:"readonly_percentage"` +} + +func (m MemUse) Validate() error { + if m.WarningPercentage > 100 { + return fmt.Errorf("mem_use.read_only_percentage must be between 0 and 100") + } + + if m.ReadOnlyPercentage > 100 { + return fmt.Errorf("mem_use.read_only_percentage must be between 0 and 100") + } + + return nil +} + +type ResourceUsage struct { + DiskUse DiskUse + MemUse MemUse +} + +type CORS struct { + AllowOrigin string `json:"allow_origin" yaml:"allow_origin"` + AllowMethods string `json:"allow_methods" yaml:"allow_methods"` + AllowHeaders 
string `json:"allow_headers" yaml:"allow_headers"` +} + +const ( + DefaultCORSAllowOrigin = "*" + DefaultCORSAllowMethods = "*" + DefaultCORSAllowHeaders = "Content-Type, Authorization, Batch, X-Openai-Api-Key, X-Openai-Organization, X-Openai-Baseurl, X-Anyscale-Baseurl, X-Anyscale-Api-Key, X-Cohere-Api-Key, X-Cohere-Baseurl, X-Huggingface-Api-Key, X-Azure-Api-Key, X-Azure-Deployment-Id, X-Azure-Resource-Name, X-Azure-Concurrency, X-Azure-Block-Size, X-Google-Api-Key, X-Google-Vertex-Api-Key, X-Google-Studio-Api-Key, X-Goog-Api-Key, X-Goog-Vertex-Api-Key, X-Goog-Studio-Api-Key, X-Palm-Api-Key, X-Jinaai-Api-Key, X-Aws-Access-Key, X-Aws-Secret-Key, X-Voyageai-Baseurl, X-Voyageai-Api-Key, X-Mistral-Baseurl, X-Mistral-Api-Key, X-Anthropic-Baseurl, X-Anthropic-Api-Key, X-Databricks-Endpoint, X-Databricks-Token, X-Databricks-User-Agent, X-Friendli-Token, X-Friendli-Baseurl, X-Weaviate-Api-Key, X-Weaviate-Cluster-Url, X-Nvidia-Api-Key, X-Nvidia-Baseurl" +) + +func (r ResourceUsage) Validate() error { + if err := r.DiskUse.Validate(); err != nil { + return err + } + + if err := r.MemUse.Validate(); err != nil { + return err + } + + return nil +} + +type Raft struct { + Port int + InternalRPCPort int + RPCMessageMaxSize int + Join []string + + SnapshotInterval time.Duration + SnapshotThreshold uint64 + TrailingLogs uint64 + + HeartbeatTimeout time.Duration + ElectionTimeout time.Duration + LeaderLeaseTimeout time.Duration + TimeoutsMultiplier int + ConsistencyWaitTimeout time.Duration + + BootstrapTimeout time.Duration + BootstrapExpect int + MetadataOnlyVoters bool + + EnableOneNodeRecovery bool + ForceOneNodeRecovery bool +} + +func (r *Raft) Validate() error { + if r.Port == 0 { + return fmt.Errorf("raft.port must be greater than 0") + } + + if r.InternalRPCPort == 0 { + return fmt.Errorf("raft.intra_rpc_port must be greater than 0") + } + + uniqueMap := make(map[string]struct{}, len(r.Join)) + updatedJoinList := make([]string, len(r.Join)) + for i, nodeNameAndPort := 
range r.Join {
		// Check that the format is correct. In case only node name is present we append the default raft port
		nodeNameAndPortSplitted := strings.Split(nodeNameAndPort, ":")
		// fixed: strings.Split always returns at least one element, so the old
		// `len(...) == 0` guard was unreachable and an empty node name (e.g.
		// ":8301" or "") silently passed through as "". Check the name itself.
		if nodeNameAndPortSplitted[0] == "" {
			return fmt.Errorf("raft.join element %s has no node name", nodeNameAndPort)
		} else if len(nodeNameAndPortSplitted) < 2 {
			// If user only specify a node name and no port, use the default raft port
			nodeNameAndPortSplitted = append(nodeNameAndPortSplitted, fmt.Sprintf("%d", DefaultRaftPort))
		} else if len(nodeNameAndPortSplitted) > 2 {
			return fmt.Errorf("raft.join element %s has unexpected amount of element", nodeNameAndPort)
		}

		// Check that the node name is unique
		nodeName := nodeNameAndPortSplitted[0]
		if _, ok := uniqueMap[nodeName]; ok {
			return fmt.Errorf("raft.join contains the value %s multiple times. Joined nodes must have a unique id", nodeName)
		} else {
			uniqueMap[nodeName] = struct{}{}
		}

		// TODO-RAFT START
		// Validate host and port

		updatedJoinList[i] = strings.Join(nodeNameAndPortSplitted, ":")
	}
	r.Join = updatedJoinList

	if r.BootstrapExpect == 0 {
		return fmt.Errorf("raft.bootstrap_expect must be greater than 0")
	}

	if r.BootstrapExpect > len(r.Join) {
		return fmt.Errorf("raft.bootstrap.expect must be less than or equal to the length of raft.join")
	}

	if r.SnapshotInterval <= 0 {
		return fmt.Errorf("raft.bootstrap.snapshot_interval must be more than 0")
	}

	if r.SnapshotThreshold <= 0 {
		return fmt.Errorf("raft.bootstrap.snapshot_threshold must be more than 0")
	}

	if r.ConsistencyWaitTimeout <= 0 {
		return fmt.Errorf("raft.bootstrap.consistency_wait_timeout must be more than 0")
	}

	return nil
}

// GetConfigOptionGroup creates an option group for swagger
func GetConfigOptionGroup() *swag.CommandLineOptionsGroup {
	commandLineOptionsGroup := swag.CommandLineOptionsGroup{
		ShortDescription: "Connector, raft & MQTT config",
		LongDescription:  "",
		Options:
&Flags{}, + } + + return &commandLineOptionsGroup +} + +// WeaviateConfig represents the used schema's +type WeaviateConfig struct { + Config Config + Hostname string + Scheme string +} + +// GetHostAddress from config locations +func (f *WeaviateConfig) GetHostAddress() string { + return fmt.Sprintf("%s://%s", f.Scheme, f.Hostname) +} + +// LoadConfig from config locations. The load order for configuration values if the following +// 1. Config file +// 2. Environment variables +// 3. Command line flags +// If a config option is specified multiple times in different locations, the latest one will be used in this order. +func (f *WeaviateConfig) LoadConfig(flags *swag.CommandLineOptionsGroup, logger logrus.FieldLogger) error { + // Get command line flags + configFileName := flags.Options.(*Flags).ConfigFile + // Set default if not given + if configFileName == "" { + configFileName = DefaultConfigFile + } + + // Read config file + file, err := os.ReadFile(configFileName) + _ = err // explicitly ignore + + // Load config from config file if present + if len(file) > 0 { + logger.WithField("action", "config_load").WithField("config_file_path", configFileName). + Info("Usage of the weaviate.conf.json file is deprecated and will be removed in the future. 
Please use environment variables.") + config, err := f.parseConfigFile(file, configFileName) + if err != nil { + return configErr(err) + } + f.Config = config + + deprecations.Log(logger, "config-files") + } + + // Load config from env + if err := FromEnv(&f.Config); err != nil { + return configErr(err) + } + + // Load config from flags + f.fromFlags(flags.Options.(*Flags)) + + return f.Config.Validate() +} + +func (f *WeaviateConfig) parseConfigFile(file []byte, name string) (Config, error) { + var config Config + + m := regexp.MustCompile(`.*\.(\w+)$`).FindStringSubmatch(name) + if len(m) < 2 { + return config, fmt.Errorf("config file does not have a file ending, got '%s'", name) + } + + switch m[1] { + case "json": + err := json.Unmarshal(file, &config) + if err != nil { + return config, fmt.Errorf("error unmarshalling the json config file: %w", err) + } + case "yaml": + err := yaml.Unmarshal(file, &config) + if err != nil { + return config, fmt.Errorf("error unmarshalling the yaml config file: %w", err) + } + default: + return config, fmt.Errorf("unsupported config file extension '%s', use .yaml or .json", m[1]) + } + + return config, nil +} + +// fromFlags parses values from flags given as parameter and overrides values in the config +func (f *WeaviateConfig) fromFlags(flags *Flags) { + if flags.RaftPort > 0 { + f.Config.Raft.Port = flags.RaftPort + } + if flags.RaftInternalRPCPort > 0 { + f.Config.Raft.InternalRPCPort = flags.RaftInternalRPCPort + } + if flags.RaftRPCMessageMaxSize > 0 { + f.Config.Raft.RPCMessageMaxSize = flags.RaftRPCMessageMaxSize + } + if flags.RaftJoin != nil { + f.Config.Raft.Join = flags.RaftJoin + } + if flags.RaftBootstrapTimeout > 0 { + f.Config.Raft.BootstrapTimeout = time.Second * time.Duration(flags.RaftBootstrapTimeout) + } + if flags.RaftBootstrapExpect > 0 { + f.Config.Raft.BootstrapExpect = flags.RaftBootstrapExpect + } + if flags.RaftSnapshotInterval > 0 { + f.Config.Raft.SnapshotInterval = time.Second * 
time.Duration(flags.RaftSnapshotInterval) + } + if flags.RaftSnapshotThreshold > 0 { + f.Config.Raft.SnapshotThreshold = uint64(flags.RaftSnapshotThreshold) + } + if flags.RaftMetadataOnlyVoters { + f.Config.Raft.MetadataOnlyVoters = true + } + + if flags.RuntimeOverridesEnabled { + f.Config.RuntimeOverrides.Enabled = flags.RuntimeOverridesEnabled + } + + if flags.RuntimeOverridesPath != "" { + f.Config.RuntimeOverrides.Path = flags.RuntimeOverridesPath + } + + if flags.RuntimeOverridesLoadInterval > 0 { + f.Config.RuntimeOverrides.LoadInterval = flags.RuntimeOverridesLoadInterval + } +} + +func configErr(err error) error { + return fmt.Errorf("invalid config: %w", err) +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/config/config_handler_test.go b/platform/dbops/binaries/weaviate-src/usecases/config/config_handler_test.go new file mode 100644 index 0000000000000000000000000000000000000000..dbafe6e68a893e9f1bba0b35b2f3e8445211366b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/config/config_handler_test.go @@ -0,0 +1,294 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package config + +import ( + "fmt" + "os" + "testing" + + "github.com/weaviate/weaviate/usecases/auth/authorization/rbac/rbacconf" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestConfigModules(t *testing.T) { + t.Run("invalid DefaultVectorDistanceMetric", func(t *testing.T) { + moduleProvider := &fakeModuleProvider{ + valid: []string{"text2vec-contextionary"}, + } + config := Config{ + DefaultVectorizerModule: "text2vec-contextionary", + DefaultVectorDistanceMetric: "euclidean", + } + err := config.ValidateModules(moduleProvider) + assert.EqualError( + t, + err, + "default vector distance metric: must be one of [\"cosine\", \"dot\", \"l2-squared\", \"manhattan\",\"hamming\"]", + ) + }) + + t.Run("invalid DefaultVectorizerModule", func(t *testing.T) { + moduleProvider := &fakeModuleProvider{ + valid: []string{"text2vec-contextionary"}, + } + config := Config{ + DefaultVectorizerModule: "contextionary", + DefaultVectorDistanceMetric: "cosine", + } + err := config.ValidateModules(moduleProvider) + assert.EqualError( + t, + err, + "default vectorizer module: invalid vectorizer \"contextionary\"", + ) + }) + + t.Run("all valid configurations", func(t *testing.T) { + moduleProvider := &fakeModuleProvider{ + valid: []string{"text2vec-contextionary"}, + } + config := Config{ + DefaultVectorizerModule: "text2vec-contextionary", + DefaultVectorDistanceMetric: "l2-squared", + } + err := config.ValidateModules(moduleProvider) + assert.Nil(t, err, "should not error") + }) + + t.Run("without DefaultVectorDistanceMetric", func(t *testing.T) { + moduleProvider := &fakeModuleProvider{ + valid: []string{"text2vec-contextionary"}, + } + config := Config{ + DefaultVectorizerModule: "text2vec-contextionary", + } + err := config.ValidateModules(moduleProvider) + assert.Nil(t, err, "should not error") + }) + + t.Run("with none DefaultVectorizerModule", func(t *testing.T) { + moduleProvider := 
&fakeModuleProvider{ + valid: []string{"text2vec-contextionary"}, + } + config := Config{ + DefaultVectorizerModule: "none", + } + err := config.ValidateModules(moduleProvider) + assert.Nil(t, err, "should not error") + }) + + t.Run("parse config.yaml file", func(t *testing.T) { + configFileName := "config.yaml" + configYaml := `authentication: + apikey: + enabled: true + allowed_keys: + - api-key-1 + users: + - readonly@weaviate.io` + + filepath := fmt.Sprintf("%s/%s", t.TempDir(), configFileName) + f, err := os.Create(filepath) + require.Nil(t, err) + defer f.Close() + _, err2 := f.WriteString(configYaml) + require.Nil(t, err2) + + file, err := os.ReadFile(filepath) + require.Nil(t, err) + weaviateConfig := &WeaviateConfig{} + config, err := weaviateConfig.parseConfigFile(file, configFileName) + require.Nil(t, err) + + assert.True(t, config.Authentication.APIKey.Enabled) + assert.ElementsMatch(t, []string{"api-key-1"}, config.Authentication.APIKey.AllowedKeys) + assert.ElementsMatch(t, []string{"readonly@weaviate.io"}, config.Authentication.APIKey.Users) + }) +} + +func TestConfigParsing(t *testing.T) { + t.Run("parse config.yaml with oidc config - yaml", func(t *testing.T) { + configFileName := "config.yaml" + configYaml := `authentication: + oidc: + enabled: true + issuer: http://localhost:9090/auth/realms/weaviate + username_claim: preferred_username + groups_claim: groups + client_id: demo + skip_client_id_check: false + scopes: ['email', 'openid'] + certificate: "valid-certificate" +` + + filepath := fmt.Sprintf("%s/%s", t.TempDir(), configFileName) + f, err := os.Create(filepath) + require.Nil(t, err) + defer f.Close() + _, err2 := f.WriteString(configYaml) + require.Nil(t, err2) + + file, err := os.ReadFile(filepath) + require.Nil(t, err) + weaviateConfig := &WeaviateConfig{} + config, err := weaviateConfig.parseConfigFile(file, configFileName) + require.Nil(t, err) + + assert.True(t, config.Authentication.OIDC.Enabled) + assert.Equal(t, 
"http://localhost:9090/auth/realms/weaviate", config.Authentication.OIDC.Issuer.Get()) + assert.Equal(t, "preferred_username", config.Authentication.OIDC.UsernameClaim.Get()) + assert.Equal(t, "groups", config.Authentication.OIDC.GroupsClaim.Get()) + assert.Equal(t, "demo", config.Authentication.OIDC.ClientID.Get()) + assert.False(t, config.Authentication.OIDC.SkipClientIDCheck.Get()) + assert.ElementsMatch(t, []string{"email", "openid"}, config.Authentication.OIDC.Scopes.Get()) + assert.Equal(t, "valid-certificate", config.Authentication.OIDC.Certificate.Get()) + }) + + t.Run("parse config.yaml with oidc config - json", func(t *testing.T) { + configFileName := "config.json" + configYaml := `{ + "authentication": { + "oidc": { + "enabled": true, + "issuer": "http://localhost:9090/auth/realms/weaviate", + "username_claim": "preferred_username", + "groups_claim": "groups", + "client_id": "demo", + "skip_client_id_check": false, + "scopes": ["email", "openid"], + "certificate": "valid-certificate" + } + } +} +` + + filepath := fmt.Sprintf("%s/%s", t.TempDir(), configFileName) + f, err := os.Create(filepath) + require.Nil(t, err) + defer f.Close() + _, err2 := f.WriteString(configYaml) + require.Nil(t, err2) + + file, err := os.ReadFile(filepath) + require.Nil(t, err) + weaviateConfig := &WeaviateConfig{} + config, err := weaviateConfig.parseConfigFile(file, configFileName) + require.Nil(t, err) + + assert.True(t, config.Authentication.OIDC.Enabled) + assert.Equal(t, "http://localhost:9090/auth/realms/weaviate", config.Authentication.OIDC.Issuer.Get()) + assert.Equal(t, "preferred_username", config.Authentication.OIDC.UsernameClaim.Get()) + assert.Equal(t, "groups", config.Authentication.OIDC.GroupsClaim.Get()) + assert.Equal(t, "demo", config.Authentication.OIDC.ClientID.Get()) + assert.False(t, config.Authentication.OIDC.SkipClientIDCheck.Get()) + assert.ElementsMatch(t, []string{"email", "openid"}, config.Authentication.OIDC.Scopes.Get()) + assert.Equal(t, 
"valid-certificate", config.Authentication.OIDC.Certificate.Get()) + }) + + t.Run("parse config.yaml file with admin_list and read_only_users", func(t *testing.T) { + configFileName := "config.yaml" + configYaml := `authorization: + admin_list: + enabled: true + users: + - userA + read_only_users: + - userA@read.only + - userB@read.only` + + filepath := fmt.Sprintf("%s/%s", t.TempDir(), configFileName) + f, err := os.Create(filepath) + require.Nil(t, err) + defer f.Close() + _, err2 := f.WriteString(configYaml) + require.Nil(t, err2) + + file, err := os.ReadFile(filepath) + require.Nil(t, err) + weaviateConfig := &WeaviateConfig{} + config, err := weaviateConfig.parseConfigFile(file, configFileName) + require.Nil(t, err) + + assert.True(t, config.Authorization.AdminList.Enabled) + assert.ElementsMatch(t, []string{"userA"}, config.Authorization.AdminList.Users) + assert.ElementsMatch(t, []string{"userA@read.only", "userB@read.only"}, config.Authorization.AdminList.ReadOnlyUsers) + }) + + t.Run("parse config.yaml file multiple keys and users", func(t *testing.T) { + configFileName := "config.yaml" + configYaml := `authentication: + apikey: + enabled: true + allowed_keys: + - api-key-1 + - api-key-2 + - api-key-3 + users: + - user1@weaviate.io + - user2@weaviate.io` + + filepath := fmt.Sprintf("%s/%s", t.TempDir(), configFileName) + f, err := os.Create(filepath) + require.Nil(t, err) + defer f.Close() + _, err2 := f.WriteString(configYaml) + require.Nil(t, err2) + + file, err := os.ReadFile(filepath) + require.Nil(t, err) + weaviateConfig := &WeaviateConfig{} + config, err := weaviateConfig.parseConfigFile(file, configFileName) + require.Nil(t, err) + + assert.True(t, config.Authentication.APIKey.Enabled) + assert.ElementsMatch(t, []string{"api-key-1", "api-key-2", "api-key-3"}, config.Authentication.APIKey.AllowedKeys) + assert.ElementsMatch(t, []string{"user1@weaviate.io", "user2@weaviate.io"}, config.Authentication.APIKey.Users) + }) +} + +func 
TestConfigValidation(t *testing.T) { + tests := []struct { + name string + config *Config + expected bool + }{ + { + name: "invalid combination of rbac and anon access", + config: &Config{ + Authentication: Authentication{AnonymousAccess: AnonymousAccess{Enabled: true}}, + Authorization: Authorization{Rbac: rbacconf.Config{Enabled: true}}, + }, + expected: true, + }, + { + name: "valid combination of anon access and no authorization", + config: &Config{ + Authentication: Authentication{AnonymousAccess: AnonymousAccess{Enabled: true}}, + }, + expected: true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := test.config.Validate() + if test.expected { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/config/environment.go b/platform/dbops/binaries/weaviate-src/usecases/config/environment.go new file mode 100644 index 0000000000000000000000000000000000000000..7b2915d4de4c8a5a037ac83d4e9b4b15778a0f97 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/config/environment.go @@ -0,0 +1,1629 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package config + +import ( + "fmt" + "math" + "os" + "regexp" + "slices" + "strconv" + "strings" + "time" + + dbhelpers "github.com/weaviate/weaviate/adapters/repos/db/helpers" + entcfg "github.com/weaviate/weaviate/entities/config" + "github.com/weaviate/weaviate/entities/errorcompounder" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/sentry" + "github.com/weaviate/weaviate/usecases/cluster" + "github.com/weaviate/weaviate/usecases/config/runtime" +) + +const ( + DefaultRaftPort = 8300 + DefaultRaftInternalPort = 8301 + DefaultRaftGRPCMaxSize = 1024 * 1024 * 1024 + // DefaultRaftBootstrapTimeout is the time raft will wait to bootstrap or rejoin the cluster on a restart. We set it + // to 600 because if we're loading a large DB we need to wait for it to load before being able to join the cluster + // on a single node cluster. + DefaultRaftBootstrapTimeout = 600 + DefaultRaftBootstrapExpect = 1 + DefaultRaftDir = "raft" + DefaultHNSWAcornFilterRatio = 0.4 + + DefaultRuntimeOverridesLoadInterval = 2 * time.Minute + + DefaultDistributedTasksSchedulerTickInterval = time.Minute + DefaultDistributedTasksCompletedTaskTTL = 5 * 24 * time.Hour + + DefaultReplicationEngineMaxWorkers = 10 + DefaultReplicaMovementMinimumAsyncWait = 60 * time.Second + DefaultReplicationEngineFileCopyWorkers = 10 + + DefaultTransferInactivityTimeout = 5 * time.Minute + + DefaultTrackVectorDimensionsInterval = 5 * time.Minute +) + +// FromEnv takes a *Config as it will respect initial config that has been +// provided by other means (e.g. a config file) and will only extend those that +// are set +func FromEnv(config *Config) error { + if entcfg.Enabled(os.Getenv("PROMETHEUS_MONITORING_ENABLED")) { + config.Monitoring.Enabled = true + config.Monitoring.Tool = "prometheus" + config.Monitoring.Port = 2112 + config.Monitoring.MetricsNamespace = "" // to support backward compabitlity. 
Metric names won't have prefix by default. + + if entcfg.Enabled(os.Getenv("PROMETHEUS_MONITORING_GROUP_CLASSES")) || + entcfg.Enabled(os.Getenv("PROMETHEUS_MONITORING_GROUP")) { + // The variable was renamed with v1.20. Prior to v1.20 the recommended + // way to do MT was using classes. This lead to a lot of metrics which + // could be grouped with this variable. With v1.20 we introduced native + // multi-tenancy. Now all you need is a single class, but you would + // still get one set of metrics per shard. To prevent this, you still + // want to group. The new name reflects that it's just about grouping, + // not about classes or shards. + config.Monitoring.Group = true + } + + if val := strings.TrimSpace(os.Getenv("PROMETHEUS_MONITORING_METRIC_NAMESPACE")); val != "" { + config.Monitoring.MetricsNamespace = val + } + + if entcfg.Enabled(os.Getenv("PROMETHEUS_MONITOR_CRITICAL_BUCKETS_ONLY")) { + config.Monitoring.MonitorCriticalBucketsOnly = true + } + } + + if entcfg.Enabled(os.Getenv("TRACK_VECTOR_DIMENSIONS")) { + config.TrackVectorDimensions = true + } + + timeout := 30 * time.Second + opt := os.Getenv("MINIMUM_INTERNAL_TIMEOUT") + if opt != "" { + if parsed, err := time.ParseDuration(opt); err == nil { + timeout = parsed + } else { + return fmt.Errorf("parse MINIMUM_INTERNAL_TIMEOUT as duration: %w", err) + } + } + + config.MinimumInternalTimeout = timeout + + if v := os.Getenv("TRACK_VECTOR_DIMENSIONS_INTERVAL"); v != "" { + interval, err := time.ParseDuration(v) + if err != nil { + return fmt.Errorf("parse TRACK_VECTOR_DIMENSIONS_INTERVAL as duration: %w", err) + } + config.TrackVectorDimensionsInterval = interval + } else { + config.TrackVectorDimensionsInterval = DefaultTrackVectorDimensionsInterval + } + + if entcfg.Enabled(os.Getenv("REINDEX_VECTOR_DIMENSIONS_AT_STARTUP")) { + config.ReindexVectorDimensionsAtStartup = true + } + + if entcfg.Enabled(os.Getenv("DISABLE_LAZY_LOAD_SHARDS")) { + config.DisableLazyLoadShards = true + } + + if 
entcfg.Enabled(os.Getenv("FORCE_FULL_REPLICAS_SEARCH")) { + config.ForceFullReplicasSearch = true + } + + if v := os.Getenv("TRANSFER_INACTIVITY_TIMEOUT"); v != "" { + timeout, err := time.ParseDuration(v) + if err != nil { + return fmt.Errorf("parse TRANSFER_INACTIVITY_TIMEOUT as duration: %w", err) + } + config.TransferInactivityTimeout = timeout + } else { + config.TransferInactivityTimeout = DefaultTransferInactivityTimeout + } + + // Recount all property lengths at startup to support accurate BM25 scoring + if entcfg.Enabled(os.Getenv("RECOUNT_PROPERTIES_AT_STARTUP")) { + config.RecountPropertiesAtStartup = true + } + + if entcfg.Enabled(os.Getenv("REINDEX_SET_TO_ROARINGSET_AT_STARTUP")) { + config.ReindexSetToRoaringsetAtStartup = true + } + + if entcfg.Enabled(os.Getenv("INDEX_MISSING_TEXT_FILTERABLE_AT_STARTUP")) { + config.IndexMissingTextFilterableAtStartup = true + } + + cptParser := newCollectionPropsTenantsParser() + + // variable expects string in format: + // "Class1:property11,property12;Class2:property21,property22" + if v := os.Getenv("REINDEX_INDEXES_AT_STARTUP"); v != "" { + cpts, err := cptParser.parse(v) + if err != nil { + return fmt.Errorf("parse REINDEX_INDEXES_AT_STARTUP as class with props: %w", err) + } + + asClassesWithProps := make(map[string][]string, len(cpts)) + for _, cpt := range cpts { + asClassesWithProps[cpt.Collection] = cpt.Props + } + config.ReindexIndexesAtStartup = asClassesWithProps + } + + if v := os.Getenv("PROMETHEUS_MONITORING_PORT"); v != "" { + asInt, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("parse PROMETHEUS_MONITORING_PORT as int: %w", err) + } + + config.Monitoring.Port = asInt + } + + if v := os.Getenv("GO_PROFILING_PORT"); v != "" { + asInt, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("parse GO_PROFILING_PORT as int: %w", err) + } + + config.Profiling.Port = asInt + } + + if entcfg.Enabled(os.Getenv("AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED")) { + 
config.Authentication.AnonymousAccess.Enabled = true + } + + if entcfg.Enabled(os.Getenv("AUTHENTICATION_OIDC_ENABLED")) { + config.Authentication.OIDC.Enabled = true + var ( + skipClientCheck bool + issuer string + clientID string + scopes []string + userClaim string + groupsClaim string + certificate string + jwksUrl string + ) + + if entcfg.Enabled(os.Getenv("AUTHENTICATION_OIDC_SKIP_CLIENT_ID_CHECK")) { + skipClientCheck = true + } + + if v := os.Getenv("AUTHENTICATION_OIDC_ISSUER"); v != "" { + issuer = v + } + + if v := os.Getenv("AUTHENTICATION_OIDC_CLIENT_ID"); v != "" { + clientID = v + } + + if v := os.Getenv("AUTHENTICATION_OIDC_SCOPES"); v != "" { + scopes = strings.Split(v, ",") + } + + if v := os.Getenv("AUTHENTICATION_OIDC_USERNAME_CLAIM"); v != "" { + userClaim = v + } + + if v := os.Getenv("AUTHENTICATION_OIDC_GROUPS_CLAIM"); v != "" { + groupsClaim = v + } + + if v := os.Getenv("AUTHENTICATION_OIDC_CERTIFICATE"); v != "" { + certificate = v + } + + if v := os.Getenv("AUTHENTICATION_OIDC_JWKS_URL"); v != "" { + jwksUrl = v + } + + config.Authentication.OIDC.SkipClientIDCheck = runtime.NewDynamicValue(skipClientCheck) + config.Authentication.OIDC.Issuer = runtime.NewDynamicValue(issuer) + config.Authentication.OIDC.ClientID = runtime.NewDynamicValue(clientID) + config.Authentication.OIDC.Scopes = runtime.NewDynamicValue(scopes) + config.Authentication.OIDC.UsernameClaim = runtime.NewDynamicValue(userClaim) + config.Authentication.OIDC.GroupsClaim = runtime.NewDynamicValue(groupsClaim) + config.Authentication.OIDC.Certificate = runtime.NewDynamicValue(certificate) + config.Authentication.OIDC.JWKSUrl = runtime.NewDynamicValue(jwksUrl) + } + + if entcfg.Enabled(os.Getenv("AUTHENTICATION_DB_USERS_ENABLED")) { + config.Authentication.DBUsers.Enabled = true + } + + if entcfg.Enabled(os.Getenv("AUTHENTICATION_APIKEY_ENABLED")) { + config.Authentication.APIKey.Enabled = true + + if rawKeys, ok := os.LookupEnv("AUTHENTICATION_APIKEY_ALLOWED_KEYS"); ok { + 
keys := strings.Split(rawKeys, ",") + config.Authentication.APIKey.AllowedKeys = keys + } + + if rawUsers, ok := os.LookupEnv("AUTHENTICATION_APIKEY_USERS"); ok { + users := strings.Split(rawUsers, ",") + config.Authentication.APIKey.Users = users + } + + } + + if entcfg.Enabled(os.Getenv("AUTHORIZATION_ADMINLIST_ENABLED")) { + config.Authorization.AdminList.Enabled = true + + usersString, ok := os.LookupEnv("AUTHORIZATION_ADMINLIST_USERS") + if ok { + config.Authorization.AdminList.Users = strings.Split(usersString, ",") + } + + roUsersString, ok := os.LookupEnv("AUTHORIZATION_ADMINLIST_READONLY_USERS") + if ok { + config.Authorization.AdminList.ReadOnlyUsers = strings.Split(roUsersString, ",") + } + + groupsString, ok := os.LookupEnv("AUTHORIZATION_ADMINLIST_GROUPS") + if ok { + config.Authorization.AdminList.Groups = strings.Split(groupsString, ",") + } + + roGroupsString, ok := os.LookupEnv("AUTHORIZATION_ADMINLIST_READONLY_GROUPS") + if ok { + config.Authorization.AdminList.ReadOnlyGroups = strings.Split(roGroupsString, ",") + } + } + + if entcfg.Enabled(os.Getenv("AUTHORIZATION_ENABLE_RBAC")) || entcfg.Enabled(os.Getenv("AUTHORIZATION_RBAC_ENABLED")) { + config.Authorization.Rbac.Enabled = true + + if entcfg.Enabled(os.Getenv("AUTHORIZATION_RBAC_IP_IN_AUDIT_LOG_DISABLED")) { + config.Authorization.Rbac.IpInAuditDisabled = true + } + + adminsString, ok := os.LookupEnv("AUTHORIZATION_RBAC_ROOT_USERS") + if ok { + config.Authorization.Rbac.RootUsers = strings.Split(adminsString, ",") + } else { + adminsString, ok := os.LookupEnv("AUTHORIZATION_ADMIN_USERS") + if ok { + config.Authorization.Rbac.RootUsers = strings.Split(adminsString, ",") + } + } + + groupString, ok := os.LookupEnv("AUTHORIZATION_RBAC_ROOT_GROUPS") + if ok { + config.Authorization.Rbac.RootGroups = strings.Split(groupString, ",") + } + + viewerGroupString, ok := os.LookupEnv("AUTHORIZATION_RBAC_READONLY_GROUPS") + if ok { + config.Authorization.Rbac.ReadOnlyGroups = 
strings.Split(viewerGroupString, ",") + } else { + // delete this after 1.30.11 + 1.31.3 is the minimum version in WCD + viewerGroupString, ok := os.LookupEnv("EXPERIMENTAL_AUTHORIZATION_RBAC_READONLY_ROOT_GROUPS") + if ok { + config.Authorization.Rbac.ReadOnlyGroups = strings.Split(viewerGroupString, ",") + } + } + + readOnlyUsersString, ok := os.LookupEnv("EXPERIMENTAL_AUTHORIZATION_RBAC_READONLY_USERS") + if ok { + config.Authorization.Rbac.ViewerUsers = strings.Split(readOnlyUsersString, ",") + } + + adminUsersString, ok := os.LookupEnv("EXPERIMENTAL_AUTHORIZATION_RBAC_ADMIN_USERS") + if ok { + config.Authorization.Rbac.AdminUsers = strings.Split(adminUsersString, ",") + } + } + + config.Profiling.Disabled = entcfg.Enabled(os.Getenv("GO_PROFILING_DISABLE")) + + if !config.Authentication.AnyAuthMethodSelected() { + config.Authentication = DefaultAuthentication + } + + if os.Getenv("PERSISTENCE_LSM_ACCESS_STRATEGY") == "pread" { + config.AvoidMmap = true + } + + if v := os.Getenv("PERSISTENCE_LSM_MAX_SEGMENT_SIZE"); v != "" { + parsed, err := parseResourceString(v) + if err != nil { + return fmt.Errorf("parse PERSISTENCE_LSM_MAX_SEGMENT_SIZE: %w", err) + } + + config.Persistence.LSMMaxSegmentSize = parsed + } else { + config.Persistence.LSMMaxSegmentSize = DefaultPersistenceLSMMaxSegmentSize + } + + if err := parseNonNegativeInt( + "PERSISTENCE_LSM_SEGMENTS_CLEANUP_INTERVAL_HOURS", + func(hours int) { config.Persistence.LSMSegmentsCleanupIntervalSeconds = hours * 3600 }, + DefaultPersistenceLSMSegmentsCleanupIntervalSeconds, + ); err != nil { + return err + } + + if entcfg.Enabled(os.Getenv("PERSISTENCE_LSM_SEPARATE_OBJECTS_COMPACTIONS")) { + config.Persistence.LSMSeparateObjectsCompactions = true + } + + if entcfg.Enabled(os.Getenv("PERSISTENCE_LSM_ENABLE_SEGMENTS_CHECKSUM_VALIDATION")) { + config.Persistence.LSMEnableSegmentsChecksumValidation = true + } + + if v := os.Getenv("PERSISTENCE_MIN_MMAP_SIZE"); v != "" { + parsed, err := parseResourceString(v) + if 
err != nil { + return fmt.Errorf("parse PERSISTENCE_MIN_MMAP_SIZE: %w", err) + } + + config.Persistence.MinMMapSize = parsed + } else { + config.Persistence.MinMMapSize = DefaultPersistenceMinMMapSize + } + + if entcfg.Enabled(os.Getenv("PERSISTENCE_LAZY_SEGMENTS_DISABLED")) { + config.Persistence.LazySegmentsDisabled = true + } + + if entcfg.Enabled(os.Getenv("PERSISTENCE_SEGMENT_INFO_FROM_FILE_DISABLED")) { + config.Persistence.SegmentInfoIntoFileNameEnabled = false + } else { + config.Persistence.SegmentInfoIntoFileNameEnabled = true + } + + if entcfg.Enabled(os.Getenv("PERSISTENCE_WRITE_METADATA_FILES_DISABLED")) { + config.Persistence.WriteMetadataFilesEnabled = false + } else { + config.Persistence.WriteMetadataFilesEnabled = true + } + + if v := os.Getenv("PERSISTENCE_MAX_REUSE_WAL_SIZE"); v != "" { + parsed, err := parseResourceString(v) + if err != nil { + return fmt.Errorf("parse PERSISTENCE_MAX_REUSE_WAL_SIZE: %w", err) + } + + config.Persistence.MaxReuseWalSize = parsed + } else { + config.Persistence.MaxReuseWalSize = DefaultPersistenceMaxReuseWalSize + } + + if err := parseInt( + "PERSISTENCE_LSM_CYCLEMANAGER_ROUTINES_FACTOR", + func(factor int) { config.Persistence.LSMCycleManagerRoutinesFactor = factor }, + DefaultPersistenceLSMCycleManagerRoutinesFactor, + ); err != nil { + return err + } + + if v := os.Getenv("PERSISTENCE_HNSW_MAX_LOG_SIZE"); v != "" { + parsed, err := parseResourceString(v) + if err != nil { + return fmt.Errorf("parse PERSISTENCE_HNSW_MAX_LOG_SIZE: %w", err) + } + + config.Persistence.HNSWMaxLogSize = parsed + } else { + config.Persistence.HNSWMaxLogSize = DefaultPersistenceHNSWMaxLogSize + } + + // ---- HNSW snapshots ---- + config.Persistence.HNSWDisableSnapshots = DefaultHNSWSnapshotDisabled + if v := os.Getenv("PERSISTENCE_HNSW_DISABLE_SNAPSHOTS"); v != "" { + config.Persistence.HNSWDisableSnapshots = entcfg.Enabled(v) + } + + if err := parseNonNegativeInt( + "PERSISTENCE_HNSW_SNAPSHOT_INTERVAL_SECONDS", + func(seconds int) { 
config.Persistence.HNSWSnapshotIntervalSeconds = seconds }, + DefaultHNSWSnapshotIntervalSeconds, + ); err != nil { + return err + } + + config.Persistence.HNSWSnapshotOnStartup = DefaultHNSWSnapshotOnStartup + if v := os.Getenv("PERSISTENCE_HNSW_SNAPSHOT_ON_STARTUP"); v != "" { + config.Persistence.HNSWSnapshotOnStartup = entcfg.Enabled(v) + } + + if err := parsePositiveInt( + "PERSISTENCE_HNSW_SNAPSHOT_MIN_DELTA_COMMITLOGS_NUMBER", + func(number int) { config.Persistence.HNSWSnapshotMinDeltaCommitlogsNumber = number }, + DefaultHNSWSnapshotMinDeltaCommitlogsNumber, + ); err != nil { + return err + } + + if err := parseNonNegativeInt( + "PERSISTENCE_HNSW_SNAPSHOT_MIN_DELTA_COMMITLOGS_SIZE_PERCENTAGE", + func(percentage int) { config.Persistence.HNSWSnapshotMinDeltaCommitlogsSizePercentage = percentage }, + DefaultHNSWSnapshotMinDeltaCommitlogsSizePercentage, + ); err != nil { + return err + } + // ---- HNSW snapshots ---- + + defaultQuantization := "" + if v := os.Getenv("DEFAULT_QUANTIZATION"); v != "" { + defaultQuantization = strings.ToLower(v) + } + config.DefaultQuantization = runtime.NewDynamicValue(defaultQuantization) + + if entcfg.Enabled(os.Getenv("INDEX_RANGEABLE_IN_MEMORY")) { + config.Persistence.IndexRangeableInMemory = true + } + + if err := parseInt( + "HNSW_VISITED_LIST_POOL_MAX_SIZE", + func(size int) { config.HNSWVisitedListPoolMaxSize = size }, + DefaultHNSWVisitedListPoolSize, + ); err != nil { + return err + } + + if err := parseNonNegativeInt( + "HNSW_FLAT_SEARCH_CONCURRENCY", + func(val int) { config.HNSWFlatSearchConcurrency = val }, + DefaultHNSWFlatSearchConcurrency, + ); err != nil { + return err + } + + if err := parsePercentage( + "HNSW_ACORN_FILTER_RATIO", + func(val float64) { config.HNSWAcornFilterRatio = val }, + DefaultHNSWAcornFilterRatio, + ); err != nil { + return err + } + + clusterCfg, err := parseClusterConfig() + if err != nil { + return err + } + config.Cluster = clusterCfg + + if v := os.Getenv("PERSISTENCE_DATA_PATH"); 
v != "" { + config.Persistence.DataPath = v + } else { + if config.Persistence.DataPath == "" { + config.Persistence.DataPath = DefaultPersistenceDataPath + } + } + + parsePositiveFloat("REINDEXER_GOROUTINES_FACTOR", + func(val float64) { config.ReindexerGoroutinesFactor = val }, + DefaultReindexerGoroutinesFactor) + + if enabledForHost("REINDEX_MAP_TO_BLOCKMAX_AT_STARTUP", clusterCfg.Hostname) { + config.ReindexMapToBlockmaxAtStartup = true + if enabledForHost("REINDEX_MAP_TO_BLOCKMAX_SWAP_BUCKETS", clusterCfg.Hostname) { + config.ReindexMapToBlockmaxConfig.SwapBuckets = true + } + if enabledForHost("REINDEX_MAP_TO_BLOCKMAX_UNSWAP_BUCKETS", clusterCfg.Hostname) { + config.ReindexMapToBlockmaxConfig.UnswapBuckets = true + } + if enabledForHost("REINDEX_MAP_TO_BLOCKMAX_TIDY_BUCKETS", clusterCfg.Hostname) { + config.ReindexMapToBlockmaxConfig.TidyBuckets = true + } + if enabledForHost("REINDEX_MAP_TO_BLOCKMAX_RELOAD_SHARDS", clusterCfg.Hostname) { + config.ReindexMapToBlockmaxConfig.ReloadShards = true + } + if enabledForHost("REINDEX_MAP_TO_BLOCKMAX_ROLLBACK", clusterCfg.Hostname) { + config.ReindexMapToBlockmaxConfig.Rollback = true + } + if enabledForHost("REINDEX_MAP_TO_BLOCKMAX_CONDITIONAL_START", clusterCfg.Hostname) { + config.ReindexMapToBlockmaxConfig.ConditionalStart = true + } + parsePositiveInt("REINDEX_MAP_TO_BLOCKMAX_PROCESSING_DURATION_SECONDS", + func(val int) { config.ReindexMapToBlockmaxConfig.ProcessingDurationSeconds = val }, + DefaultMapToBlockmaxProcessingDurationSeconds) + parsePositiveInt("REINDEX_MAP_TO_BLOCKMAX_PAUSE_DURATION_SECONDS", + func(val int) { config.ReindexMapToBlockmaxConfig.PauseDurationSeconds = val }, + DefaultMapToBlockmaxPauseDurationSeconds) + parsePositiveInt("REINDEX_MAP_TO_BLOCKMAX_PER_OBJECT_DELAY_MILLISECONDS", + func(val int) { config.ReindexMapToBlockmaxConfig.PerObjectDelayMilliseconds = val }, + DefaultMapToBlockmaxPerObjectDelayMilliseconds) + + cptSelected, err := 
cptParser.parse(os.Getenv("REINDEX_MAP_TO_BLOCKMAX_SELECT")) + if err != nil { + return err + } + config.ReindexMapToBlockmaxConfig.Selected = cptSelected + } + + if err := config.parseMemtableConfig(); err != nil { + return err + } + + if err := config.parseCORSConfig(); err != nil { + return err + } + + if v := os.Getenv("ORIGIN"); v != "" { + config.Origin = v + } + + if v := os.Getenv("CONTEXTIONARY_URL"); v != "" { + config.Contextionary.URL = v + } + + if v := os.Getenv("QUERY_DEFAULTS_LIMIT"); v != "" { + asInt, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("parse QUERY_DEFAULTS_LIMIT as int: %w", err) + } + + config.QueryDefaults.Limit = int64(asInt) + } else { + if config.QueryDefaults.Limit == 0 { + config.QueryDefaults.Limit = DefaultQueryDefaultsLimit + } + } + + if v := os.Getenv("QUERY_DEFAULTS_LIMIT_GRAPHQL"); v != "" { + asInt, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("parse QUERY_DEFAULTS_LIMIT_GRAPHQL as int: %w", err) + } + + config.QueryDefaults.LimitGraphQL = int64(asInt) + } else { + if config.QueryDefaults.LimitGraphQL == 0 { + config.QueryDefaults.LimitGraphQL = DefaultQueryDefaultsLimitGraphQL + } + } + + if v := os.Getenv("QUERY_MAXIMUM_RESULTS"); v != "" { + asInt, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("parse QUERY_MAXIMUM_RESULTS as int: %w", err) + } + + config.QueryMaximumResults = int64(asInt) + } else { + config.QueryMaximumResults = DefaultQueryMaximumResults + } + + if v := os.Getenv("QUERY_HYBRID_MAXIMUM_RESULTS"); v != "" { + asInt, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("parse QUERY_HYBRID_MAXIMUM_RESULTS as int: %w", err) + } + config.QueryHybridMaximumResults = int64(asInt) + } else { + config.QueryHybridMaximumResults = DefaultQueryHybridMaximumResults + } + + if v := os.Getenv("QUERY_NESTED_CROSS_REFERENCE_LIMIT"); v != "" { + limit, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("parse QUERY_NESTED_CROSS_REFERENCE_LIMIT 
as int: %w", err) + } else if limit <= 0 { + limit = math.MaxInt + } + config.QueryNestedCrossReferenceLimit = limit + } else { + config.QueryNestedCrossReferenceLimit = DefaultQueryNestedCrossReferenceLimit + } + + if err := parsePositiveInt( + "QUERY_CROSS_REFERENCE_DEPTH_LIMIT", + func(val int) { config.QueryCrossReferenceDepthLimit = val }, + DefaultQueryCrossReferenceDepthLimit, + ); err != nil { + return err + } + + if v := os.Getenv("MAX_IMPORT_GOROUTINES_FACTOR"); v != "" { + asFloat, err := strconv.ParseFloat(v, 64) + if err != nil { + return fmt.Errorf("parse MAX_IMPORT_GOROUTINES_FACTOR as float: %w", err) + } else if asFloat <= 0 { + return fmt.Errorf("negative MAX_IMPORT_GOROUTINES_FACTOR factor") + } + + config.MaxImportGoroutinesFactor = asFloat + } else { + config.MaxImportGoroutinesFactor = DefaultMaxImportGoroutinesFactor + } + + if v := os.Getenv("DEFAULT_VECTORIZER_MODULE"); v != "" { + config.DefaultVectorizerModule = v + } else { + // env not set, this could either mean, we already have a value from a file + // or we explicitly want to set the value to "none" + if config.DefaultVectorizerModule == "" { + config.DefaultVectorizerModule = VectorizerModuleNone + } + } + + if v := os.Getenv("MODULES_CLIENT_TIMEOUT"); v != "" { + timeout, err := time.ParseDuration(v) + if err != nil { + return fmt.Errorf("parse MODULES_CLIENT_TIMEOUT as time.Duration: %w", err) + } + config.ModuleHttpClientTimeout = timeout + } else { + config.ModuleHttpClientTimeout = 50 * time.Second + } + + if v := os.Getenv("DEFAULT_VECTOR_DISTANCE_METRIC"); v != "" { + config.DefaultVectorDistanceMetric = v + } + + if v := os.Getenv("ENABLE_MODULES"); v != "" { + config.EnableModules = v + } + + if entcfg.Enabled(os.Getenv("API_BASED_MODULES_DISABLED")) { + config.EnableApiBasedModules = false + } else { + config.EnableApiBasedModules = true + } + + autoSchemaEnabled := true + if v := os.Getenv("AUTOSCHEMA_ENABLED"); v != "" { + autoSchemaEnabled = !(strings.ToLower(v) == 
"false") + } + config.AutoSchema.Enabled = runtime.NewDynamicValue(autoSchemaEnabled) + + config.AutoSchema.DefaultString = schema.DataTypeText.String() + if v := os.Getenv("AUTOSCHEMA_DEFAULT_STRING"); v != "" { + config.AutoSchema.DefaultString = v + } + config.AutoSchema.DefaultNumber = "number" + if v := os.Getenv("AUTOSCHEMA_DEFAULT_NUMBER"); v != "" { + config.AutoSchema.DefaultNumber = v + } + config.AutoSchema.DefaultDate = "date" + if v := os.Getenv("AUTOSCHEMA_DEFAULT_DATE"); v != "" { + config.AutoSchema.DefaultDate = v + } + + tenantActivityReadLogLevel := "debug" + if v := os.Getenv("TENANT_ACTIVITY_READ_LOG_LEVEL"); v != "" { + tenantActivityReadLogLevel = v + } + config.TenantActivityReadLogLevel = runtime.NewDynamicValue(tenantActivityReadLogLevel) + + tenantActivityWriteLogLevel := "debug" + if v := os.Getenv("TENANT_ACTIVITY_WRITE_LOG_LEVEL"); v != "" { + tenantActivityWriteLogLevel = v + } + config.TenantActivityWriteLogLevel = runtime.NewDynamicValue(tenantActivityWriteLogLevel) + + ru, err := parseResourceUsageEnvVars() + if err != nil { + return err + } + config.ResourceUsage = ru + + if v := os.Getenv("GO_BLOCK_PROFILE_RATE"); v != "" { + asInt, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("parse GO_BLOCK_PROFILE_RATE as int: %w", err) + } + + config.Profiling.BlockProfileRate = asInt + } + + if v := os.Getenv("GO_MUTEX_PROFILE_FRACTION"); v != "" { + asInt, err := strconv.Atoi(v) + if err != nil { + return fmt.Errorf("parse GO_MUTEX_PROFILE_FRACTION as int: %w", err) + } + + config.Profiling.MutexProfileFraction = asInt + } + + if v := os.Getenv("MAXIMUM_CONCURRENT_GET_REQUESTS"); v != "" { + asInt, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("parse MAXIMUM_CONCURRENT_GET_REQUESTS as int: %w", err) + } + config.MaximumConcurrentGetRequests = int(asInt) + } else { + config.MaximumConcurrentGetRequests = DefaultMaxConcurrentGetRequests + } + + if err = parsePositiveInt( + 
"MAXIMUM_CONCURRENT_SHARD_LOADS", + func(val int) { config.MaximumConcurrentShardLoads = val }, + DefaultMaxConcurrentShardLoads, + ); err != nil { + return err + } + + if err := parsePositiveInt( + "GRPC_MAX_MESSAGE_SIZE", + func(val int) { config.GRPC.MaxMsgSize = val }, + DefaultGRPCMaxMsgSize, + ); err != nil { + return err + } + if err := parsePositiveInt( + "GRPC_PORT", + func(val int) { config.GRPC.Port = val }, + DefaultGRPCPort, + ); err != nil { + return err + } + config.GRPC.CertFile = "" + if v := os.Getenv("GRPC_CERT_FILE"); v != "" { + config.GRPC.CertFile = v + } + config.GRPC.KeyFile = "" + if v := os.Getenv("GRPC_KEY_FILE"); v != "" { + config.GRPC.KeyFile = v + } + + config.DisableGraphQL = entcfg.Enabled(os.Getenv("DISABLE_GRAPHQL")) + + if config.Raft, err = parseRAFTConfig(config.Cluster.Hostname); err != nil { + return fmt.Errorf("parse raft config: %w", err) + } + + if err := parsePositiveInt( + "REPLICATION_MINIMUM_FACTOR", + func(val int) { config.Replication.MinimumFactor = val }, + DefaultMinimumReplicationFactor, + ); err != nil { + return err + } + + config.Replication.AsyncReplicationDisabled = runtime.NewDynamicValue(entcfg.Enabled(os.Getenv("ASYNC_REPLICATION_DISABLED"))) + + if v := os.Getenv("REPLICATION_FORCE_DELETION_STRATEGY"); v != "" { + config.Replication.DeletionStrategy = v + } + + config.DisableTelemetry = false + if entcfg.Enabled(os.Getenv("DISABLE_TELEMETRY")) { + config.DisableTelemetry = true + } + + if entcfg.Enabled(os.Getenv("HNSW_STARTUP_WAIT_FOR_VECTOR_CACHE")) { + config.HNSWStartupWaitForVectorCache = true + } + + if err := parseInt( + "MAXIMUM_ALLOWED_COLLECTIONS_COUNT", + func(val int) { + config.SchemaHandlerConfig.MaximumAllowedCollectionsCount = runtime.NewDynamicValue(val) + }, + DefaultMaximumAllowedCollectionsCount, + ); err != nil { + return err + } + + // explicitly reset sentry config + sentry.Config = nil + config.Sentry, err = sentry.InitSentryConfig() + if err != nil { + return fmt.Errorf("parse 
sentry config from env: %w", err) + } + + config.MetadataServer.Enabled = false + if entcfg.Enabled(os.Getenv("EXPERIMENTAL_METADATA_SERVER_ENABLED")) { + config.MetadataServer.Enabled = true + } + config.MetadataServer.GrpcListenAddress = DefaultMetadataServerGrpcListenAddress + if v := os.Getenv("EXPERIMENTAL_METADATA_SERVER_GRPC_LISTEN_ADDRESS"); v != "" { + config.MetadataServer.GrpcListenAddress = v + } + if err := parsePositiveInt( + "EXPERIMENTAL_METADATA_SERVER_DATA_EVENTS_CHANNEL_CAPACITY", + func(val int) { config.MetadataServer.DataEventsChannelCapacity = val }, + DefaultMetadataServerDataEventsChannelCapacity, + ); err != nil { + return err + } + + config.RuntimeOverrides.Enabled = entcfg.Enabled(os.Getenv("RUNTIME_OVERRIDES_ENABLED")) + + if v := os.Getenv("RUNTIME_OVERRIDES_PATH"); v != "" { + config.RuntimeOverrides.Path = v + } + + config.RuntimeOverrides.LoadInterval = DefaultRuntimeOverridesLoadInterval + if v := os.Getenv("RUNTIME_OVERRIDES_LOAD_INTERVAL"); v != "" { + interval, err := time.ParseDuration(v) + if err != nil { + return fmt.Errorf("parse RUNTIME_OVERRIDES_LOAD_INTERVAL as time.Duration: %w", err) + } + config.RuntimeOverrides.LoadInterval = interval + } + + if err = parsePositiveInt( + "DISTRIBUTED_TASKS_SCHEDULER_TICK_INTERVAL_SECONDS", + func(val int) { config.DistributedTasks.SchedulerTickInterval = time.Duration(val) * time.Second }, + int(DefaultDistributedTasksSchedulerTickInterval.Seconds()), + ); err != nil { + return err + } + + if err = parsePositiveInt( + "DISTRIBUTED_TASKS_COMPLETED_TASK_TTL_HOURS", + func(val int) { config.DistributedTasks.CompletedTaskTTL = time.Duration(val) * time.Hour }, + int(DefaultDistributedTasksCompletedTaskTTL.Hours()), + ); err != nil { + return err + } + + if v := os.Getenv("DISTRIBUTED_TASKS_ENABLED"); v != "" { + config.DistributedTasks.Enabled = entcfg.Enabled(v) + } + + if v := os.Getenv("REPLICA_MOVEMENT_DISABLED"); v != "" { + config.ReplicaMovementDisabled = entcfg.Enabled(v) + } + + 
if v := os.Getenv("REPLICA_MOVEMENT_MINIMUM_ASYNC_WAIT"); v != "" { + duration, err := time.ParseDuration(v) + if err != nil { + return fmt.Errorf("parse REPLICA_MOVEMENT_MINIMUM_ASYNC_WAIT as time.Duration: %w", err) + } + if duration < 0 { + return fmt.Errorf("REPLICA_MOVEMENT_MINIMUM_ASYNC_WAIT must be a positive duration") + } + config.ReplicaMovementMinimumAsyncWait = runtime.NewDynamicValue(duration) + } else { + config.ReplicaMovementMinimumAsyncWait = runtime.NewDynamicValue(DefaultReplicaMovementMinimumAsyncWait) + } + revoctorizeCheckDisabled := false + if v := os.Getenv("REVECTORIZE_CHECK_DISABLED"); v != "" { + revoctorizeCheckDisabled = !(strings.ToLower(v) == "false") + } + config.RevectorizeCheckDisabled = runtime.NewDynamicValue(revoctorizeCheckDisabled) + + querySlowLogEnabled := entcfg.Enabled(os.Getenv("QUERY_SLOW_LOG_ENABLED")) + config.QuerySlowLogEnabled = runtime.NewDynamicValue(querySlowLogEnabled) + + querySlowLogThreshold := dbhelpers.DefaultSlowLogThreshold + if v := os.Getenv("QUERY_SLOW_LOG_THRESHOLD"); v != "" { + threshold, err := time.ParseDuration(v) + if err != nil { + return fmt.Errorf("parse QUERY_SLOW_LOG_THRESHOLD as time.Duration: %w", err) + } + querySlowLogThreshold = threshold + } + config.QuerySlowLogThreshold = runtime.NewDynamicValue(querySlowLogThreshold) + + envName := "QUERY_BITMAP_BUFS_MAX_MEMORY" + config.QueryBitmapBufsMaxMemory = DefaultQueryBitmapBufsMaxMemory + if v := os.Getenv(envName); v != "" { + bytes, err := parseResourceString(v) + if err != nil { + return fmt.Errorf("%s: %w", envName, err) + } + config.QueryBitmapBufsMaxMemory = int(bytes) + } + + envName = "QUERY_BITMAP_BUFS_MAX_BUF_SIZE" + config.QueryBitmapBufsMaxBufSize = DefaultQueryBitmapBufsMaxBufSize + if v := os.Getenv(envName); v != "" { + bytes, err := parseResourceString(v) + if err != nil { + return fmt.Errorf("%s: %w", envName, err) + } + config.QueryBitmapBufsMaxBufSize = int(bytes) + } + + invertedSorterDisabled := false + if v := 
os.Getenv("INVERTED_SORTER_DISABLED"); v != "" { + invertedSorterDisabled = !(strings.ToLower(v) == "false") + } + config.InvertedSorterDisabled = runtime.NewDynamicValue(invertedSorterDisabled) + + return nil +} + +func parseRAFTConfig(hostname string) (Raft, error) { + // flag.IntVar() + cfg := Raft{ + MetadataOnlyVoters: entcfg.Enabled(os.Getenv("RAFT_METADATA_ONLY_VOTERS")), + } + + if err := parsePositiveInt( + "RAFT_PORT", + func(val int) { cfg.Port = val }, + DefaultRaftPort, + ); err != nil { + return cfg, err + } + + if err := parsePositiveInt( + "RAFT_INTERNAL_RPC_PORT", + func(val int) { cfg.InternalRPCPort = val }, + DefaultRaftInternalPort, + ); err != nil { + return cfg, err + } + + if err := parsePositiveInt( + "RAFT_GRPC_MESSAGE_MAX_SIZE", + func(val int) { cfg.RPCMessageMaxSize = val }, + DefaultRaftGRPCMaxSize, + ); err != nil { + return cfg, err + } + + parseStringList( + "RAFT_JOIN", + func(val []string) { cfg.Join = val }, + // Default RAFT_JOIN must be the configured node name and the configured raft port. This allows us to have a one-node raft cluster + // able to bootstrap itself if the user doesn't pass any raft parameter. 
+ []string{fmt.Sprintf("%s:%d", hostname, cfg.InternalRPCPort)}, + ) + if err := parsePositiveInt( + "RAFT_BOOTSTRAP_TIMEOUT", + func(val int) { cfg.BootstrapTimeout = time.Second * time.Duration(val) }, + DefaultRaftBootstrapTimeout, + ); err != nil { + return cfg, err + } + + if err := parsePositiveInt( + "RAFT_BOOTSTRAP_EXPECT", + func(val int) { cfg.BootstrapExpect = val }, + DefaultRaftBootstrapExpect, + ); err != nil { + return cfg, err + } + + if err := parsePositiveInt( + "RAFT_HEARTBEAT_TIMEOUT", + func(val int) { cfg.HeartbeatTimeout = time.Second * time.Duration(val) }, + 1, // raft default + ); err != nil { + return cfg, err + } + + if err := parsePositiveInt( + "RAFT_ELECTION_TIMEOUT", + func(val int) { cfg.ElectionTimeout = time.Second * time.Duration(val) }, + 1, // raft default + ); err != nil { + return cfg, err + } + + if err := parsePositiveFloat( + "RAFT_LEADER_LEASE_TIMEOUT", + func(val float64) { cfg.LeaderLeaseTimeout = time.Second * time.Duration(val) }, + 0.5, // raft default + ); err != nil { + return cfg, err + } + + if err := parsePositiveInt( + "RAFT_TIMEOUTS_MULTIPLIER", + func(val int) { cfg.TimeoutsMultiplier = val }, + 1, // raft default + ); err != nil { + return cfg, err + } + + if err := parsePositiveInt( + "RAFT_SNAPSHOT_INTERVAL", + func(val int) { cfg.SnapshotInterval = time.Second * time.Duration(val) }, + 120, // raft default + ); err != nil { + return cfg, err + } + + if err := parsePositiveInt( + "RAFT_SNAPSHOT_THRESHOLD", + func(val int) { cfg.SnapshotThreshold = uint64(val) }, + 8192, // raft default + ); err != nil { + return cfg, err + } + + if err := parsePositiveInt( + "RAFT_TRAILING_LOGS", + func(val int) { cfg.TrailingLogs = uint64(val) }, + 10240, // raft default + ); err != nil { + return cfg, err + } + + if err := parsePositiveInt( + "RAFT_CONSISTENCY_WAIT_TIMEOUT", + func(val int) { cfg.ConsistencyWaitTimeout = time.Second * time.Duration(val) }, + 10, + ); err != nil { + return cfg, err + } + + 
cfg.EnableOneNodeRecovery = entcfg.Enabled(os.Getenv("RAFT_ENABLE_ONE_NODE_RECOVERY")) + cfg.ForceOneNodeRecovery = entcfg.Enabled(os.Getenv("RAFT_FORCE_ONE_NODE_RECOVERY")) + + return cfg, nil +} + +func (c *Config) parseCORSConfig() error { + if v := os.Getenv("CORS_ALLOW_ORIGIN"); v != "" { + c.CORS.AllowOrigin = v + } else { + c.CORS.AllowOrigin = DefaultCORSAllowOrigin + } + + if v := os.Getenv("CORS_ALLOW_METHODS"); v != "" { + c.CORS.AllowMethods = v + } else { + c.CORS.AllowMethods = DefaultCORSAllowMethods + } + + if v := os.Getenv("CORS_ALLOW_HEADERS"); v != "" { + c.CORS.AllowHeaders = v + } else { + c.CORS.AllowHeaders = DefaultCORSAllowHeaders + } + + return nil +} + +func (c *Config) parseMemtableConfig() error { + // first parse old idle name for flush value + if err := parsePositiveInt( + "PERSISTENCE_FLUSH_IDLE_MEMTABLES_AFTER", + func(val int) { c.Persistence.MemtablesFlushDirtyAfter = val }, + DefaultPersistenceMemtablesFlushDirtyAfter, + ); err != nil { + return err + } + // then parse with new idle name and use previous value in case it's not set + if err := parsePositiveInt( + "PERSISTENCE_MEMTABLES_FLUSH_IDLE_AFTER_SECONDS", + func(val int) { c.Persistence.MemtablesFlushDirtyAfter = val }, + c.Persistence.MemtablesFlushDirtyAfter, + ); err != nil { + return err + } + // then parse with dirty name and use idle value as fallback + if err := parsePositiveInt( + "PERSISTENCE_MEMTABLES_FLUSH_DIRTY_AFTER_SECONDS", + func(val int) { c.Persistence.MemtablesFlushDirtyAfter = val }, + c.Persistence.MemtablesFlushDirtyAfter, + ); err != nil { + return err + } + + if err := parsePositiveInt( + "PERSISTENCE_MEMTABLES_MAX_SIZE_MB", + func(val int) { c.Persistence.MemtablesMaxSizeMB = val }, + DefaultPersistenceMemtablesMaxSize, + ); err != nil { + return err + } + + if err := parsePositiveInt( + "PERSISTENCE_MEMTABLES_MIN_ACTIVE_DURATION_SECONDS", + func(val int) { c.Persistence.MemtablesMinActiveDurationSeconds = val }, + 
DefaultPersistenceMemtablesMinDuration,
	); err != nil {
		return err
	}

	if err := parsePositiveInt(
		"PERSISTENCE_MEMTABLES_MAX_ACTIVE_DURATION_SECONDS",
		func(val int) { c.Persistence.MemtablesMaxActiveDurationSeconds = val },
		DefaultPersistenceMemtablesMaxDuration,
	); err != nil {
		return err
	}

	if err := parsePositiveInt(
		"REPLICATION_ENGINE_MAX_WORKERS",
		func(val int) { c.ReplicationEngineMaxWorkers = val },
		DefaultReplicationEngineMaxWorkers,
	); err != nil {
		return err
	}

	if err := parsePositiveInt(
		"REPLICATION_ENGINE_FILE_COPY_WORKERS",
		func(val int) { c.ReplicationEngineFileCopyWorkers = val },
		DefaultReplicationEngineFileCopyWorkers,
	); err != nil {
		return err
	}

	return nil
}

// parsePercentage parses envName as a float64 and requires it to be a ratio
// in [0, 1]. The default is used (without validation) when the variable is
// unset or empty.
func parsePercentage(envName string, cb func(val float64), defaultValue float64) error {
	return parseFloat64(envName, defaultValue, func(val float64) error {
		if val < 0 || val > 1 {
			return fmt.Errorf("%s must be between 0 and 1", envName)
		}
		return nil
	}, cb)
}

// parseFloat64 parses envName as a float64, runs verify on the parsed value,
// and on success invokes cb with either the parsed or the default value.
// Note: verify is only applied to values read from the environment, never to
// defaultValue, so callers must pass a default that already satisfies verify.
func parseFloat64(envName string, defaultValue float64, verify func(val float64) error, cb func(val float64)) error {
	var err error
	asFloat := defaultValue

	if v := os.Getenv(envName); v != "" {
		asFloat, err = strconv.ParseFloat(v, 64)
		if err != nil {
			return fmt.Errorf("parse %s as float64: %w", envName, err)
		}
		if err = verify(asFloat); err != nil {
			return err
		}
	}

	cb(asFloat)
	return nil
}

// parseInt parses envName as an int with no range restriction.
func parseInt(envName string, cb func(val int), defaultValue int) error {
	return parseIntVerify(envName, defaultValue, cb, func(val int, envName string) error { return nil })
}

// parsePositiveInt parses envName as an int and requires it to be > 0.
func parsePositiveInt(envName string, cb func(val int), defaultValue int) error {
	return parseIntVerify(envName, defaultValue, cb, func(val int, envName string) error {
		if val <= 0 {
			return fmt.Errorf("%s must be an integer greater than 0. Got: %v", envName, val)
		}
		return nil
	})
}

// parseNonNegativeInt parses envName as an int and requires it to be >= 0.
func parseNonNegativeInt(envName string, cb func(val int), defaultValue int) error {
	return parseIntVerify(envName, defaultValue, cb, func(val int, envName string) error {
		if val < 0 {
			return fmt.Errorf("%s must be an integer greater than or equal 0. Got %v", envName, val)
		}
		return nil
	})
}

// parseIntVerify is the shared implementation behind the int parsers: it reads
// envName, parses it with strconv.Atoi, validates via verify, and always
// invokes cb exactly once — with the parsed value when the variable is set, or
// with defaultValue otherwise. As with parseFloat64, defaultValue bypasses
// verify.
func parseIntVerify(envName string, defaultValue int, cb func(val int), verify func(val int, envName string) error) error {
	var err error
	asInt := defaultValue

	if v := os.Getenv(envName); v != "" {
		asInt, err = strconv.Atoi(v)
		if err != nil {
			return fmt.Errorf("parse %s as int: %w", envName, err)
		}
		if err = verify(asInt, envName); err != nil {
			return err
		}
	}

	cb(asInt)
	return nil
}

// func parseFloat(envName string, cb func(val float64), defaultValue float64) error {
// 	return parseFloatVerify(envName, defaultValue, cb, func(val float64) error { return nil })
// }

// parsePositiveFloat parses envName as a float and requires it to be > 0.
func parsePositiveFloat(envName string, cb func(val float64), defaultValue float64) error {
	return parseFloatVerify(envName, defaultValue, cb, func(val float64) error {
		if val <= 0 {
			return fmt.Errorf("%s must be a float greater than 0. Got: %v", envName, val)
		}
		return nil
	})
}

// func parseNonNegativeFloat(envName string, cb func(val float64), defaultValue float64) error {
// 	return parseFloatVerify(envName, defaultValue, cb, func(val float64) error {
// 		if val < 0 {
// 			return fmt.Errorf("%s must be a float greater than or equal 0. Got %v", envName, val)
// 		}
// 		return nil
// 	})
// }

// parseFloatVerify mirrors parseIntVerify for float64 values. Note it differs
// from parseFloat64 only in error wording ("as float" vs "as float64") and
// parameter order — presumably a historical duplication; verify before merging.
func parseFloatVerify(envName string, defaultValue float64, cb func(val float64), verify func(val float64) error) error {
	var err error
	asFloat := defaultValue

	if v := os.Getenv(envName); v != "" {
		asFloat, err = strconv.ParseFloat(v, 64)
		if err != nil {
			return fmt.Errorf("parse %s as float: %w", envName, err)
		}
		if err = verify(asFloat); err != nil {
			return err
		}
	}

	cb(asFloat)
	return nil
}

// Query-related defaults.
const (
	DefaultQueryMaximumResults       = int64(10000)
	DefaultQueryHybridMaximumResults = int64(100)
	// DefaultQueryNestedCrossReferenceLimit describes the max number of nested crossrefs returned for a query
	DefaultQueryNestedCrossReferenceLimit = int64(100000)
	// DefaultQueryCrossReferenceDepthLimit describes the max depth of nested crossrefs in a query
	DefaultQueryCrossReferenceDepthLimit = 5

	DefaultQueryBitmapBufsMaxBufSize = 1 << 25 // 32MB
	DefaultQueryBitmapBufsMaxMemory  = 1 << 27 // 128MB (2x 32MB, 2x 16MB, 2x 8MB, 2x 4MB, 4x 2MB)
)

// Persistence, request-limit, gRPC, and replication defaults.
const (
	DefaultPersistenceMemtablesFlushDirtyAfter = 60
	DefaultPersistenceMemtablesMaxSize         = 200
	DefaultPersistenceMemtablesMinDuration     = 15
	DefaultPersistenceMemtablesMaxDuration     = 45
	DefaultMaxConcurrentGetRequests            = 0
	DefaultMaxConcurrentShardLoads             = 500
	DefaultGRPCPort                            = 50051
	DefaultGRPCMaxMsgSize                      = 104858000 // 100 * 1024 * 1024 + 400
	DefaultMinimumReplicationFactor            = 1
	DefaultMaximumAllowedCollectionsCount      = -1 // unlimited
)

const VectorizerModuleNone = "none"

// DefaultGossipBindPort uses the hashicorp/memberlist default
// port value assigned with the use of DefaultLocalConfig
const DefaultGossipBindPort = 7946

// TODO: This should be retrieved dynamically from all installed modules
const VectorizerModuleText2VecContextionary = "text2vec-contextionary"

// parseStringList splits varName on commas when set, otherwise passes
// defaultValue to cb. cb is always invoked exactly once.
func parseStringList(varName string, cb func(val []string), defaultValue []string) {
	if v := os.Getenv(varName); v != "" {
cb(strings.Split(v, ",")) + } else { + cb(defaultValue) + } +} + +func parseResourceUsageEnvVars() (ResourceUsage, error) { + ru := ResourceUsage{} + + if v := os.Getenv("DISK_USE_WARNING_PERCENTAGE"); v != "" { + asUint, err := strconv.ParseUint(v, 10, 64) + if err != nil { + return ru, fmt.Errorf("parse DISK_USE_WARNING_PERCENTAGE as uint: %w", err) + } + ru.DiskUse.WarningPercentage = asUint + } else { + ru.DiskUse.WarningPercentage = DefaultDiskUseWarningPercentage + } + + if v := os.Getenv("DISK_USE_READONLY_PERCENTAGE"); v != "" { + asUint, err := strconv.ParseUint(v, 10, 64) + if err != nil { + return ru, fmt.Errorf("parse DISK_USE_READONLY_PERCENTAGE as uint: %w", err) + } + ru.DiskUse.ReadOnlyPercentage = asUint + } else { + ru.DiskUse.ReadOnlyPercentage = DefaultDiskUseReadonlyPercentage + } + + if v := os.Getenv("MEMORY_WARNING_PERCENTAGE"); v != "" { + asUint, err := strconv.ParseUint(v, 10, 64) + if err != nil { + return ru, fmt.Errorf("parse MEMORY_WARNING_PERCENTAGE as uint: %w", err) + } + ru.MemUse.WarningPercentage = asUint + } else { + ru.MemUse.WarningPercentage = DefaultMemUseWarningPercentage + } + + if v := os.Getenv("MEMORY_READONLY_PERCENTAGE"); v != "" { + asUint, err := strconv.ParseUint(v, 10, 64) + if err != nil { + return ru, fmt.Errorf("parse MEMORY_READONLY_PERCENTAGE as uint: %w", err) + } + ru.MemUse.ReadOnlyPercentage = asUint + } else { + ru.MemUse.ReadOnlyPercentage = DefaultMemUseReadonlyPercentage + } + + return ru, nil +} + +func parseClusterConfig() (cluster.Config, error) { + cfg := cluster.Config{} + + // by default memberlist assigns hostname to os.Hostname() incase hostname is empty + // ref: https://github.com/hashicorp/memberlist/blob/3f82dc10a89f82efe300228752f7077d0d9f87e4/config.go#L303 + // it's handled at parseClusterConfig step to be consistent from the config start point and conveyed to all + // underlying functions see parseRAFTConfig(..) 
for example + cfg.Hostname = os.Getenv("CLUSTER_HOSTNAME") + if cfg.Hostname == "" { + cfg.Hostname, _ = os.Hostname() + } + cfg.Join = os.Getenv("CLUSTER_JOIN") + + advertiseAddr, advertiseAddrSet := os.LookupEnv("CLUSTER_ADVERTISE_ADDR") + advertisePort, advertisePortSet := os.LookupEnv("CLUSTER_ADVERTISE_PORT") + + cfg.Localhost = entcfg.Enabled(os.Getenv("CLUSTER_IN_LOCALHOST")) + gossipBind, gossipBindSet := os.LookupEnv("CLUSTER_GOSSIP_BIND_PORT") + dataBind, dataBindSet := os.LookupEnv("CLUSTER_DATA_BIND_PORT") + + if advertiseAddrSet { + cfg.AdvertiseAddr = advertiseAddr + } + + if advertisePortSet { + asInt, err := strconv.Atoi(advertisePort) + if err != nil { + return cfg, fmt.Errorf("parse CLUSTER_ADVERTISE_PORT as int: %w", err) + } + cfg.AdvertisePort = asInt + } + + if gossipBindSet { + asInt, err := strconv.Atoi(gossipBind) + if err != nil { + return cfg, fmt.Errorf("parse CLUSTER_GOSSIP_BIND_PORT as int: %w", err) + } + cfg.GossipBindPort = asInt + } else { + cfg.GossipBindPort = DefaultGossipBindPort + } + + if dataBindSet { + asInt, err := strconv.Atoi(dataBind) + if err != nil { + return cfg, fmt.Errorf("parse CLUSTER_DATA_BIND_PORT as int: %w", err) + } + cfg.DataBindPort = asInt + } else { + // it is convention in this server that the data bind point is + // equal to the data bind port + 1 + cfg.DataBindPort = cfg.GossipBindPort + 1 + } + + cfg.IgnoreStartupSchemaSync = entcfg.Enabled( + os.Getenv("CLUSTER_IGNORE_SCHEMA_SYNC")) + cfg.SkipSchemaSyncRepair = entcfg.Enabled( + os.Getenv("CLUSTER_SKIP_SCHEMA_REPAIR")) + + basicAuthUsername := os.Getenv("CLUSTER_BASIC_AUTH_USERNAME") + basicAuthPassword := os.Getenv("CLUSTER_BASIC_AUTH_PASSWORD") + + cfg.AuthConfig = cluster.AuthConfig{ + BasicAuth: cluster.BasicAuth{ + Username: basicAuthUsername, + Password: basicAuthPassword, + }, + } + + cfg.FastFailureDetection = entcfg.Enabled(os.Getenv("FAST_FAILURE_DETECTION")) + + // MAINTENANCE_NODES is experimental and subject to removal/change. 
It is an optional, comma
+	// separated list of hostnames that are in maintenance mode. In maintenance mode, the node will
+	// return an error for all data requests, but will still participate in the raft cluster and
+	// schema operations. This can be helpful if a node is too overwhelmed by startup tasks to handle
+	// data requests and you need to start up the node to give it time to "catch up". Note that in
+	// general one should not use the MaintenanceNodes field directly, but since we don't have
+	// access to the State here and the cluster has not yet initialized, we have to set it here.
+
+	// avoid the case where strings.Split creates a slice with only the empty string as I think
+	// that will be confusing for future code. eg ([]string{""}) instead of an empty slice ([]string{}).
+	// https://go.dev/play/p/3BDp1vhbkYV shows len(1) when m = "".
+	cfg.MaintenanceNodes = []string{}
+	if m := os.Getenv("MAINTENANCE_NODES"); m != "" {
+		for _, node := range strings.Split(m, ",") {
+			if node != "" {
+				cfg.MaintenanceNodes = append(cfg.MaintenanceNodes, node)
+			}
+		}
+	}
+
+	return cfg, nil
+}
+
+func enabledForHost(envName string, localHostname string) bool {
+	if v := os.Getenv(envName); v != "" {
+		if entcfg.Enabled(v) {
+			return true
+		}
+		return slices.Contains(strings.Split(v, ","), localHostname)
+	}
+	return false
+}
+
+/*
+parses variable of format "colName1:propNames1:tenantNames1;colName2:propNames2:tenantNames2"
+propNames = prop1,prop2,...
+tenantNames = tenant1,tenant2,...
+ +examples: + - collection: + "ColName1" + "ColName1;ColName2" + - collection + properties: + "ColName1:propName1" + "ColName1:propName1,propName2;ColName2:propName3" + - collection + properties + tenants/shards: + "ColName1:propName1:tenantName1,tenantName2" + "ColName1:propName1:tenantName1,tenantName2;ColName2:propName2,propName3:tenantName3" + - collection + tenants/shards: + "ColName1::tenantName1" + "ColName1::tenantName1,tenantName2;ColName2::tenantName3" +*/ +type collectionPropsTenantsParser struct { + regexpCollection *regexp.Regexp + regexpProp *regexp.Regexp + regexpTenant *regexp.Regexp +} + +func newCollectionPropsTenantsParser() *collectionPropsTenantsParser { + return &collectionPropsTenantsParser{ + regexpCollection: regexp.MustCompile(`^` + schema.ClassNameRegexCore + `$`), + regexpProp: regexp.MustCompile(`^` + schema.PropertyNameRegex + `$`), + regexpTenant: regexp.MustCompile(`^` + schema.ShardNameRegexCore + `$`), + } +} + +func (p *collectionPropsTenantsParser) parse(v string) ([]CollectionPropsTenants, error) { + if v = strings.TrimSpace(v); v == "" { + return []CollectionPropsTenants{}, nil + } + + split := strings.Split(v, ";") + count := len(split) + cpts := make([]CollectionPropsTenants, 0, count) + uniqMapIdx := make(map[string]int, count) + + ec := errorcompounder.New() + for _, single := range split { + if single = strings.TrimSpace(single); single != "" { + if cpt, err := p.parseSingle(single); err != nil { + ec.Add(fmt.Errorf("parse '%s': %w", single, err)) + } else { + if prevIdx, ok := uniqMapIdx[cpt.Collection]; ok { + cpts[prevIdx] = p.mergeCpt(cpts[prevIdx], cpt) + } else { + uniqMapIdx[cpt.Collection] = len(cpts) + cpts = append(cpts, cpt) + } + } + } + } + + return cpts, ec.ToError() +} + +func (p *collectionPropsTenantsParser) parseSingle(single string) (CollectionPropsTenants, error) { + split := strings.Split(single, ":") + empty := CollectionPropsTenants{} + + switch count := len(split); count { + case 1: + collection, 
err := p.parseCollection(split[0]) + if err != nil { + return empty, err + } + return CollectionPropsTenants{Collection: collection}, nil + + case 2: + collection, err := p.parseCollection(split[0]) + if err != nil { + return empty, err + } + props, err := p.parseProps(split[1]) + if err != nil { + return empty, err + } + return CollectionPropsTenants{Collection: collection, Props: props}, nil + + case 3: + collection, err := p.parseCollection(split[0]) + if err != nil { + return empty, err + } + props, err := p.parseProps(split[1]) + if err != nil { + return empty, err + } + tenants, err := p.parseTenants(split[2]) + if err != nil { + return empty, err + } + return CollectionPropsTenants{Collection: collection, Props: props, Tenants: tenants}, nil + + default: + return empty, fmt.Errorf("too many parts in '%s'. Expected 1-3, got %d", single, count) + } +} + +func (p *collectionPropsTenantsParser) parseCollection(collection string) (string, error) { + collection = strings.TrimSpace(collection) + if collection == "" { + return "", fmt.Errorf("missing collection name") + } + if !p.regexpCollection.MatchString(collection) { + return "", fmt.Errorf("invalid collection name '%s'. Does not match regexp", collection) + } + return collection, nil +} + +func (p *collectionPropsTenantsParser) parseProps(propsStr string) ([]string, error) { + return p.parseElems(propsStr, p.regexpProp, "invalid property name '%s'. Does not match regexp") +} + +func (p *collectionPropsTenantsParser) parseTenants(tenantsStr string) ([]string, error) { + return p.parseElems(tenantsStr, p.regexpTenant, "invalid tenant/shard name '%s'. 
Does not match regexp") +} + +func (p *collectionPropsTenantsParser) parseElems(str string, reg *regexp.Regexp, errMsg string) ([]string, error) { + split := strings.Split(str, ",") + count := len(split) + elems := make([]string, 0, count) + uniqMap := make(map[string]struct{}, count) + + ec := errorcompounder.New() + for _, elem := range split { + if elem = strings.TrimSpace(elem); elem != "" { + if reg.MatchString(elem) { + if _, ok := uniqMap[elem]; !ok { + elems = append(elems, elem) + uniqMap[elem] = struct{}{} + } + } else { + ec.Add(fmt.Errorf(errMsg, elem)) + } + } + } + + if len(elems) == 0 { + return nil, ec.ToError() + } + return elems, ec.ToError() +} + +func (p *collectionPropsTenantsParser) mergeCpt(cptDst, cptSrc CollectionPropsTenants) CollectionPropsTenants { + if cptDst.Collection != cptSrc.Collection { + return cptDst + } + cptDst.Props = p.mergeUniqueElems(cptDst.Props, cptSrc.Props) + cptDst.Tenants = p.mergeUniqueElems(cptDst.Tenants, cptSrc.Tenants) + return cptDst +} + +func (p *collectionPropsTenantsParser) mergeUniqueElems(uniqueA, uniqueB []string) []string { + lA, lB := len(uniqueA), len(uniqueB) + if lB == 0 { + return uniqueA + } + if lA == 0 { + return uniqueB + } + + uniqMapA := make(map[string]struct{}, lA) + for _, a := range uniqueA { + uniqMapA[a] = struct{}{} + } + for _, b := range uniqueB { + if _, ok := uniqMapA[b]; !ok { + uniqueA = append(uniqueA, b) + } + } + return uniqueA +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/config/environment_test.go b/platform/dbops/binaries/weaviate-src/usecases/config/environment_test.go new file mode 100644 index 0000000000000000000000000000000000000000..2f0e05b8e5483697d92bfd8d405b22bd4e4f4ae2 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/config/environment_test.go @@ -0,0 +1,1293 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ 
|_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package config + +import ( + "fmt" + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/weaviate/weaviate/usecases/cluster" + "github.com/weaviate/weaviate/usecases/config/runtime" +) + +const DefaultGoroutineFactor = 1.5 + +func TestEnvironmentImportGoroutineFactor(t *testing.T) { + factors := []struct { + name string + goroutineFactor []string + expected float64 + expectedErr bool + }{ + {"Valid factor", []string{"1"}, 1, false}, + {"Low factor", []string{"0.5"}, 0.5, false}, + {"not given", []string{}, DefaultGoroutineFactor, false}, + {"High factor", []string{"5"}, 5, false}, + {"invalid factor", []string{"-1"}, -1, true}, + {"not parsable", []string{"I'm not a number"}, -1, true}, + } + for _, tt := range factors { + t.Run(tt.name, func(t *testing.T) { + if len(tt.goroutineFactor) == 1 { + t.Setenv("MAX_IMPORT_GOROUTINES_FACTOR", tt.goroutineFactor[0]) + } + conf := Config{} + err := FromEnv(&conf) + + if tt.expectedErr { + require.NotNil(t, err) + } else { + require.Equal(t, tt.expected, conf.MaxImportGoroutinesFactor) + } + }) + } +} + +func TestEnvironmentSetFlushAfter_AllNames(t *testing.T) { + factors := []struct { + name string + flushAfter []string + expected int + expectedErr bool + }{ + {"Valid", []string{"1"}, 1, false}, + {"not given", []string{}, DefaultPersistenceMemtablesFlushDirtyAfter, false}, + {"invalid factor", []string{"-1"}, -1, true}, + {"zero factor", []string{"0"}, -1, true}, + {"not parsable", []string{"I'm not a number"}, -1, true}, + } + envNames := []struct { + name string + envName string + }{ + {name: "fallback idle (1st)", envName: "PERSISTENCE_FLUSH_IDLE_MEMTABLES_AFTER"}, + {name: "fallback idle (2nd)", envName: "PERSISTENCE_MEMTABLES_FLUSH_IDLE_AFTER_SECONDS"}, + {name: "dirty", envName: "PERSISTENCE_MEMTABLES_FLUSH_DIRTY_AFTER_SECONDS"}, + 
} + + for _, n := range envNames { + t.Run(n.name, func(t *testing.T) { + for _, tt := range factors { + t.Run(tt.name, func(t *testing.T) { + if len(tt.flushAfter) == 1 { + t.Setenv(n.envName, tt.flushAfter[0]) + } + conf := Config{} + err := FromEnv(&conf) + + if tt.expectedErr { + require.NotNil(t, err) + } else { + require.Equal(t, tt.expected, conf.Persistence.MemtablesFlushDirtyAfter) + } + }) + } + }) + } +} + +func TestEnvironmentFlushConflictingValues(t *testing.T) { + // if all 3 variable names are used, the newest variable name + // should be taken into consideration + os.Clearenv() + t.Setenv("PERSISTENCE_FLUSH_IDLE_MEMTABLES_AFTER", "16") + t.Setenv("PERSISTENCE_MEMTABLES_FLUSH_IDLE_AFTER_SECONDS", "17") + t.Setenv("PERSISTENCE_MEMTABLES_FLUSH_DIRTY_AFTER_SECONDS", "18") + conf := Config{} + err := FromEnv(&conf) + require.Nil(t, err) + + assert.Equal(t, 18, conf.Persistence.MemtablesFlushDirtyAfter) +} + +func TestEnvironmentPersistence_dataPath(t *testing.T) { + factors := []struct { + name string + value []string + config Config + expected string + }{ + { + name: "given", + value: []string{"/var/lib/weaviate"}, + config: Config{}, + expected: "/var/lib/weaviate", + }, + { + name: "given with config set", + value: []string{"/var/lib/weaviate"}, + config: Config{ + Persistence: Persistence{ + DataPath: "/var/data/weaviate", + }, + }, + expected: "/var/lib/weaviate", + }, + { + name: "not given", + value: []string{}, + config: Config{}, + expected: DefaultPersistenceDataPath, + }, + { + name: "not given with config set", + value: []string{}, + config: Config{ + Persistence: Persistence{ + DataPath: "/var/data/weaviate", + }, + }, + expected: "/var/data/weaviate", + }, + } + for _, tt := range factors { + t.Run(tt.name, func(t *testing.T) { + if len(tt.value) == 1 { + t.Setenv("PERSISTENCE_DATA_PATH", tt.value[0]) + } + conf := tt.config + err := FromEnv(&conf) + require.Nil(t, err) + require.Equal(t, tt.expected, conf.Persistence.DataPath) + }) + } +} 
+ +func TestEnvironmentMemtable_MaxSize(t *testing.T) { + factors := []struct { + name string + value []string + expected int + expectedErr bool + }{ + {"Valid", []string{"100"}, 100, false}, + {"not given", []string{}, DefaultPersistenceMemtablesMaxSize, false}, + {"invalid factor", []string{"-1"}, -1, true}, + {"zero factor", []string{"0"}, -1, true}, + {"not parsable", []string{"I'm not a number"}, -1, true}, + } + for _, tt := range factors { + t.Run(tt.name, func(t *testing.T) { + if len(tt.value) == 1 { + t.Setenv("PERSISTENCE_MEMTABLES_MAX_SIZE_MB", tt.value[0]) + } + conf := Config{} + err := FromEnv(&conf) + + if tt.expectedErr { + require.NotNil(t, err) + } else { + require.Equal(t, tt.expected, conf.Persistence.MemtablesMaxSizeMB) + } + }) + } +} + +func TestEnvironmentMemtable_MinDuration(t *testing.T) { + factors := []struct { + name string + value []string + expected int + expectedErr bool + }{ + {"Valid", []string{"100"}, 100, false}, + {"not given", []string{}, DefaultPersistenceMemtablesMinDuration, false}, + {"invalid factor", []string{"-1"}, -1, true}, + {"zero factor", []string{"0"}, -1, true}, + {"not parsable", []string{"I'm not a number"}, -1, true}, + } + for _, tt := range factors { + t.Run(tt.name, func(t *testing.T) { + if len(tt.value) == 1 { + t.Setenv("PERSISTENCE_MEMTABLES_MIN_ACTIVE_DURATION_SECONDS", tt.value[0]) + } + conf := Config{} + err := FromEnv(&conf) + + if tt.expectedErr { + require.NotNil(t, err) + } else { + require.Equal(t, tt.expected, conf.Persistence.MemtablesMinActiveDurationSeconds) + } + }) + } +} + +func TestEnvironmentMemtable_MaxDuration(t *testing.T) { + factors := []struct { + name string + value []string + expected int + expectedErr bool + }{ + {"Valid", []string{"100"}, 100, false}, + {"not given", []string{}, DefaultPersistenceMemtablesMaxDuration, false}, + {"invalid factor", []string{"-1"}, -1, true}, + {"zero factor", []string{"0"}, -1, true}, + {"not parsable", []string{"I'm not a number"}, -1, true}, 
+ } + for _, tt := range factors { + t.Run(tt.name, func(t *testing.T) { + if len(tt.value) == 1 { + t.Setenv("PERSISTENCE_MEMTABLES_MAX_ACTIVE_DURATION_SECONDS", tt.value[0]) + } + conf := Config{} + err := FromEnv(&conf) + + if tt.expectedErr { + require.NotNil(t, err) + } else { + require.Equal(t, tt.expected, conf.Persistence.MemtablesMaxActiveDurationSeconds) + } + }) + } +} + +func TestEnvironmentParseClusterConfig(t *testing.T) { + hostname, _ := os.Hostname() + tests := []struct { + name string + envVars map[string]string + expectedResult cluster.Config + expectedErr error + }{ + { + name: "valid cluster config - ports and advertiseaddr provided", + envVars: map[string]string{ + "CLUSTER_GOSSIP_BIND_PORT": "7100", + "CLUSTER_DATA_BIND_PORT": "7101", + "CLUSTER_ADVERTISE_ADDR": "193.0.0.1", + "CLUSTER_ADVERTISE_PORT": "9999", + }, + expectedResult: cluster.Config{ + Hostname: hostname, + GossipBindPort: 7100, + DataBindPort: 7101, + AdvertiseAddr: "193.0.0.1", + AdvertisePort: 9999, + MaintenanceNodes: make([]string, 0), + }, + }, + { + name: "valid cluster config - no ports and advertiseaddr provided", + expectedResult: cluster.Config{ + Hostname: hostname, + GossipBindPort: DefaultGossipBindPort, + DataBindPort: DefaultGossipBindPort + 1, + AdvertiseAddr: "", + MaintenanceNodes: make([]string, 0), + }, + }, + { + name: "valid cluster config - only gossip bind port provided", + envVars: map[string]string{ + "CLUSTER_GOSSIP_BIND_PORT": "7777", + }, + expectedResult: cluster.Config{ + Hostname: hostname, + GossipBindPort: 7777, + DataBindPort: 7778, + MaintenanceNodes: make([]string, 0), + }, + }, + { + name: "valid cluster config - both ports provided", + envVars: map[string]string{ + "CLUSTER_GOSSIP_BIND_PORT": "7100", + "CLUSTER_DATA_BIND_PORT": "7111", + }, + expectedResult: cluster.Config{ + Hostname: hostname, + GossipBindPort: 7100, + DataBindPort: 7111, + MaintenanceNodes: make([]string, 0), + }, + }, + { + name: "schema sync disabled", + envVars: 
map[string]string{ + "CLUSTER_IGNORE_SCHEMA_SYNC": "true", + }, + expectedResult: cluster.Config{ + Hostname: hostname, + GossipBindPort: 7946, + DataBindPort: 7947, + IgnoreStartupSchemaSync: true, + MaintenanceNodes: make([]string, 0), + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + for k, v := range test.envVars { + t.Setenv(k, v) + } + cfg, err := parseClusterConfig() + if test.expectedErr != nil { + assert.EqualError(t, err, test.expectedErr.Error(), + "expected err: %v, got: %v", test.expectedErr, err) + } else { + assert.Nil(t, err, "expected nil, got: %v", err) + assert.EqualValues(t, test.expectedResult, cfg) + } + }) + } +} + +func TestEnvironmentSetDefaultVectorDistanceMetric(t *testing.T) { + t.Run("DefaultVectorDistanceMetricIsEmpty", func(t *testing.T) { + os.Clearenv() + conf := Config{} + FromEnv(&conf) + require.Equal(t, "", conf.DefaultVectorDistanceMetric) + }) + + t.Run("NonEmptyDefaultVectorDistanceMetric", func(t *testing.T) { + os.Clearenv() + t.Setenv("DEFAULT_VECTOR_DISTANCE_METRIC", "l2-squared") + conf := Config{} + FromEnv(&conf) + require.Equal(t, "l2-squared", conf.DefaultVectorDistanceMetric) + }) +} + +func TestEnvironmentMaxConcurrentGetRequests(t *testing.T) { + factors := []struct { + name string + value []string + expected int + expectedErr bool + }{ + {"Valid", []string{"100"}, 100, false}, + {"not given", []string{}, DefaultMaxConcurrentGetRequests, false}, + {"unlimited", []string{"-1"}, -1, false}, + {"not parsable", []string{"I'm not a number"}, -1, true}, + } + for _, tt := range factors { + t.Run(tt.name, func(t *testing.T) { + if len(tt.value) == 1 { + t.Setenv("MAXIMUM_CONCURRENT_GET_REQUESTS", tt.value[0]) + } + conf := Config{} + err := FromEnv(&conf) + + if tt.expectedErr { + require.NotNil(t, err) + } else { + require.Equal(t, tt.expected, conf.MaximumConcurrentGetRequests) + } + }) + } +} + +func TestEnvironmentCORS_Origin(t *testing.T) { + factors := []struct { + name string 
+ value []string + expected string + expectedErr bool + }{ + {"Valid", []string{"http://foo.com"}, "http://foo.com", false}, + {"not given", []string{}, DefaultCORSAllowOrigin, false}, + } + for _, tt := range factors { + t.Run(tt.name, func(t *testing.T) { + os.Clearenv() + if len(tt.value) == 1 { + os.Setenv("CORS_ALLOW_ORIGIN", tt.value[0]) + } + conf := Config{} + err := FromEnv(&conf) + + if tt.expectedErr { + require.NotNil(t, err) + } else { + require.Equal(t, tt.expected, conf.CORS.AllowOrigin) + } + }) + } +} + +func TestEnvironmentGRPCPort(t *testing.T) { + factors := []struct { + name string + value []string + expected int + expectedErr bool + }{ + {"Valid", []string{"50052"}, 50052, false}, + {"not given", []string{}, DefaultGRPCPort, false}, + {"invalid factor", []string{"-1"}, -1, true}, + {"zero factor", []string{"0"}, -1, true}, + {"not parsable", []string{"I'm not a number"}, -1, true}, + } + for _, tt := range factors { + t.Run(tt.name, func(t *testing.T) { + if len(tt.value) == 1 { + t.Setenv("GRPC_PORT", tt.value[0]) + } + conf := Config{} + err := FromEnv(&conf) + + if tt.expectedErr { + require.NotNil(t, err) + } else { + require.Equal(t, tt.expected, conf.GRPC.Port) + } + }) + } +} + +func TestEnvironmentCORS_Methods(t *testing.T) { + factors := []struct { + name string + value []string + expected string + expectedErr bool + }{ + {"Valid", []string{"POST"}, "POST", false}, + {"not given", []string{}, DefaultCORSAllowMethods, false}, + } + for _, tt := range factors { + t.Run(tt.name, func(t *testing.T) { + os.Clearenv() + if len(tt.value) == 1 { + os.Setenv("CORS_ALLOW_METHODS", tt.value[0]) + } + conf := Config{} + err := FromEnv(&conf) + + if tt.expectedErr { + require.NotNil(t, err) + } else { + require.Equal(t, tt.expected, conf.CORS.AllowMethods) + } + }) + } +} + +func TestEnvironmentDisableGraphQL(t *testing.T) { + factors := []struct { + name string + value []string + expected bool + expectedErr bool + }{ + {"Valid: true", 
[]string{"true"}, true, false}, + {"Valid: false", []string{"false"}, false, false}, + {"Valid: 1", []string{"1"}, true, false}, + {"Valid: 0", []string{"0"}, false, false}, + {"Valid: on", []string{"on"}, true, false}, + {"Valid: off", []string{"off"}, false, false}, + {"not given", []string{}, false, false}, + } + for _, tt := range factors { + t.Run(tt.name, func(t *testing.T) { + if len(tt.value) == 1 { + t.Setenv("DISABLE_GRAPHQL", tt.value[0]) + } + conf := Config{} + err := FromEnv(&conf) + + if tt.expectedErr { + require.NotNil(t, err) + } else { + require.Equal(t, tt.expected, conf.DisableGraphQL) + } + }) + } +} + +func TestEnvironmentCORS_Headers(t *testing.T) { + factors := []struct { + name string + value []string + expected string + expectedErr bool + }{ + {"Valid", []string{"Authorization"}, "Authorization", false}, + {"not given", []string{}, DefaultCORSAllowHeaders, false}, + } + for _, tt := range factors { + t.Run(tt.name, func(t *testing.T) { + os.Clearenv() + if len(tt.value) == 1 { + os.Setenv("CORS_ALLOW_HEADERS", tt.value[0]) + } + conf := Config{} + err := FromEnv(&conf) + + if tt.expectedErr { + require.NotNil(t, err) + } else { + require.Equal(t, tt.expected, conf.CORS.AllowHeaders) + } + }) + } +} + +func TestEnvironmentPrometheusGroupClasses_OldName(t *testing.T) { + factors := []struct { + name string + value []string + expected bool + expectedErr bool + }{ + {"Valid: true", []string{"true"}, true, false}, + {"Valid: false", []string{"false"}, false, false}, + {"Valid: 1", []string{"1"}, true, false}, + {"Valid: 0", []string{"0"}, false, false}, + {"Valid: on", []string{"on"}, true, false}, + {"Valid: off", []string{"off"}, false, false}, + {"not given", []string{}, false, false}, + } + for _, tt := range factors { + t.Run(tt.name, func(t *testing.T) { + t.Setenv("PROMETHEUS_MONITORING_ENABLED", "true") + if len(tt.value) == 1 { + t.Setenv("PROMETHEUS_MONITORING_GROUP_CLASSES", tt.value[0]) + } + conf := Config{} + err := 
FromEnv(&conf) + + if tt.expectedErr { + require.NotNil(t, err) + } else { + require.Equal(t, tt.expected, conf.Monitoring.Group) + } + }) + } +} + +func TestEnvironmentPrometheusGroupClasses_NewName(t *testing.T) { + factors := []struct { + name string + value []string + expected bool + expectedErr bool + }{ + {"Valid: true", []string{"true"}, true, false}, + {"Valid: false", []string{"false"}, false, false}, + {"Valid: 1", []string{"1"}, true, false}, + {"Valid: 0", []string{"0"}, false, false}, + {"Valid: on", []string{"on"}, true, false}, + {"Valid: off", []string{"off"}, false, false}, + {"not given", []string{}, false, false}, + } + for _, tt := range factors { + t.Run(tt.name, func(t *testing.T) { + t.Setenv("PROMETHEUS_MONITORING_ENABLED", "true") + if len(tt.value) == 1 { + t.Setenv("PROMETHEUS_MONITORING_GROUP", tt.value[0]) + } + conf := Config{} + err := FromEnv(&conf) + + if tt.expectedErr { + require.NotNil(t, err) + } else { + require.Equal(t, tt.expected, conf.Monitoring.Group) + } + }) + } +} + +func TestEnvironmentMinimumReplicationFactor(t *testing.T) { + factors := []struct { + name string + value []string + expected int + expectedErr bool + }{ + {"Valid", []string{"3"}, 3, false}, + {"not given", []string{}, DefaultMinimumReplicationFactor, false}, + {"invalid factor", []string{"-1"}, -1, true}, + {"zero factor", []string{"0"}, -1, true}, + {"not parsable", []string{"I'm not a number"}, -1, true}, + } + for _, tt := range factors { + t.Run(tt.name, func(t *testing.T) { + if len(tt.value) == 1 { + t.Setenv("REPLICATION_MINIMUM_FACTOR", tt.value[0]) + } + conf := Config{} + err := FromEnv(&conf) + + if tt.expectedErr { + require.NotNil(t, err) + } else { + require.Equal(t, tt.expected, conf.Replication.MinimumFactor) + } + }) + } +} + +func TestEnvironmentQueryDefaults_Limit(t *testing.T) { + factors := []struct { + name string + value []string + config Config + expected int64 + }{ + { + name: "Valid", + value: []string{"3"}, + config: Config{}, 
+ expected: 3, + }, + { + name: "Valid with config already set", + value: []string{"3"}, + config: Config{ + QueryDefaults: QueryDefaults{ + Limit: 20, + }, + }, + expected: 3, + }, + { + name: "not given with config set", + value: []string{}, + config: Config{ + QueryDefaults: QueryDefaults{ + Limit: 20, + }, + }, + expected: 20, + }, + { + name: "not given with config set", + value: []string{}, + config: Config{}, + expected: DefaultQueryDefaultsLimit, + }, + } + for _, tt := range factors { + t.Run(tt.name, func(t *testing.T) { + if len(tt.value) == 1 { + t.Setenv("QUERY_DEFAULTS_LIMIT", tt.value[0]) + } + conf := tt.config + err := FromEnv(&conf) + + require.Nil(t, err) + require.Equal(t, tt.expected, conf.QueryDefaults.Limit) + }) + } +} + +func TestEnvironmentAuthentication(t *testing.T) { + factors := []struct { + name string + auth_env_var []string + expected Authentication + }{ + { + name: "Valid API Key", + auth_env_var: []string{"AUTHENTICATION_APIKEY_ENABLED"}, + expected: Authentication{ + APIKey: StaticAPIKey{ + Enabled: true, + }, + }, + }, + { + name: "Valid Anonymous Access", + auth_env_var: []string{"AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED"}, + expected: Authentication{ + AnonymousAccess: AnonymousAccess{ + Enabled: true, + }, + }, + }, + { + name: "Valid OIDC Auth", + auth_env_var: []string{"AUTHENTICATION_OIDC_ENABLED"}, + expected: Authentication{ + OIDC: OIDC{ + Enabled: true, + Issuer: runtime.NewDynamicValue(""), + ClientID: runtime.NewDynamicValue(""), + SkipClientIDCheck: runtime.NewDynamicValue(false), + UsernameClaim: runtime.NewDynamicValue(""), + GroupsClaim: runtime.NewDynamicValue(""), + Scopes: runtime.NewDynamicValue([]string(nil)), + Certificate: runtime.NewDynamicValue(""), + JWKSUrl: runtime.NewDynamicValue(""), + }, + }, + }, + { + name: "Enabled db user", + auth_env_var: []string{"AUTHENTICATION_DB_USERS_ENABLED"}, + expected: Authentication{ + DBUsers: DbUsers{Enabled: true}, + }, + }, + { + name: "not given", + auth_env_var: 
[]string{},
+			expected: Authentication{
+				AnonymousAccess: AnonymousAccess{
+					Enabled: true,
+				},
+			},
+		},
+	}
+	for _, tt := range factors {
+		t.Run(tt.name, func(t *testing.T) {
+			if len(tt.auth_env_var) == 1 {
+				t.Setenv(tt.auth_env_var[0], "true")
+			}
+			conf := Config{}
+			err := FromEnv(&conf)
+			require.Nil(t, err)
+			require.Equal(t, tt.expected, conf.Authentication)
+		})
+	}
+}
+
+func TestEnvironmentHNSWMaxLogSize(t *testing.T) {
+	factors := []struct {
+		name        string
+		value       []string
+		expected    int64
+		expectedErr bool
+	}{
+		{"Valid no unit", []string{"3"}, 3, false},
+		{"Valid SI unit", []string{"3KB"}, 3000, false},
+		{"Valid IEC unit", []string{"3KiB"}, 3 * 1024, false},
+		{"not given", []string{}, DefaultPersistenceHNSWMaxLogSize, false},
+		{"invalid factor", []string{"-1"}, -1, true},
+		{"not parsable", []string{"I'm not a number"}, -1, true},
+	}
+	for _, tt := range factors {
+		t.Run(tt.name, func(t *testing.T) {
+			if len(tt.value) == 1 {
+				t.Setenv("PERSISTENCE_HNSW_MAX_LOG_SIZE", tt.value[0])
+			}
+			conf := Config{}
+			err := FromEnv(&conf)
+
+			if tt.expectedErr {
+				require.NotNil(t, err)
+			} else {
+				require.Equal(t, tt.expected, conf.Persistence.HNSWMaxLogSize)
+			}
+		})
+	}
+}
+
+func TestEnvironmentHNSWWaitForPrefill(t *testing.T) {
+	factors := []struct {
+		name        string
+		value       []string
+		expected    bool
+		expectedErr bool
+	}{
+		{"Valid: true", []string{"true"}, true, false},
+		{"Valid: false", []string{"false"}, false, false},
+		{"Valid: 1", []string{"1"}, true, false},
+		{"Valid: 0", []string{"0"}, false, false},
+		{"Valid: on", []string{"on"}, true, false},
+		{"Valid: off", []string{"off"}, false, false},
+		{"not given", []string{}, false, false},
+	}
+	for _, tt := range factors {
+		t.Run(tt.name, func(t *testing.T) {
+			if len(tt.value) == 1 {
+				t.Setenv("HNSW_STARTUP_WAIT_FOR_VECTOR_CACHE", tt.value[0])
+			}
+			conf := Config{}
+			err := FromEnv(&conf)
+
+			if tt.expectedErr {
+				require.NotNil(t, err)
+			} else {
+				require.Equal(t, tt.expected,
conf.HNSWStartupWaitForVectorCache)
+			}
+		})
+	}
+}
+
+func TestEnvironmentHNSWVisitedListPoolMaxSize(t *testing.T) {
+	factors := []struct {
+		name        string
+		value       []string
+		expected    int
+		expectedErr bool
+	}{
+		{"Valid", []string{"3"}, 3, false},
+		{"not given", []string{}, DefaultHNSWVisitedListPoolSize, false},
+		{"valid negative", []string{"-1"}, -1, false},
+		{"not parsable", []string{"I'm not a number"}, -1, true},
+	}
+	for _, tt := range factors {
+		t.Run(tt.name, func(t *testing.T) {
+			if len(tt.value) == 1 {
+				t.Setenv("HNSW_VISITED_LIST_POOL_MAX_SIZE", tt.value[0])
+			}
+			conf := Config{}
+			err := FromEnv(&conf)
+
+			if tt.expectedErr {
+				require.NotNil(t, err)
+			} else {
+				require.Equal(t, tt.expected, conf.HNSWVisitedListPoolMaxSize)
+			}
+		})
+	}
+}
+
+func TestEnvironmentHNSWFlatSearchConcurrency(t *testing.T) {
+	factors := []struct {
+		name        string
+		value       []string
+		expected    int
+		expectedErr bool
+	}{
+		{"Valid", []string{"3"}, 3, false},
+		{"not given", []string{}, DefaultHNSWFlatSearchConcurrency, false},
+		{"invalid negative", []string{"-1"}, -1, true},
+		{"not parsable", []string{"I'm not a number"}, -1, true},
+	}
+	for _, tt := range factors {
+		t.Run(tt.name, func(t *testing.T) {
+			if len(tt.value) == 1 {
+				t.Setenv("HNSW_FLAT_SEARCH_CONCURRENCY", tt.value[0])
+			}
+			conf := Config{}
+			err := FromEnv(&conf)
+
+			if tt.expectedErr {
+				require.NotNil(t, err)
+			} else {
+				require.Equal(t, tt.expected, conf.HNSWFlatSearchConcurrency)
+			}
+		})
+	}
+}
+
+func TestEnvironmentHNSWAcornFilterRatio(t *testing.T) {
+	factors := []struct {
+		name        string
+		value       []string
+		expected    float64
+		expectedErr bool
+	}{
+		{"Valid", []string{"0.5"}, 0.5, false},
+		{"not given", []string{}, 0.4, false},
+		{"min", []string{"0.0"}, 0.0, false},
+		{"max", []string{"1.0"}, 1.0, false},
+		{"negative", []string{"-1.2"}, -1.0, true},
+		{"too large", []string{"1.2"}, -1.0, true},
+	}
+	for _, tt := range factors {
+		t.Run(tt.name, func(t *testing.T) {
+			if len(tt.value) == 1
{ + t.Setenv("HNSW_ACORN_FILTER_RATIO", tt.value[0]) + } + conf := Config{} + err := FromEnv(&conf) + + if tt.expectedErr { + require.NotNil(t, err) + } else { + require.Equal(t, tt.expected, conf.HNSWAcornFilterRatio) + } + }) + } +} + +func TestEnabledForHost(t *testing.T) { + localHostname := "weaviate-1" + envName := "HOSTBASED_SETTING" + + enabledVals := []string{"enabled", "1", "true", "on", "weaviate-1", "weaviate-0,weaviate-1,weaviate-2"} + for _, val := range enabledVals { + t.Run(fmt.Sprintf("enabled %q", val), func(t *testing.T) { + t.Setenv(envName, val) + assert.True(t, enabledForHost(envName, localHostname)) + }) + } + + disabledVals := []string{"disabled", "0", "false", "off", "weaviate-0", "weaviate-0,weaviate-2,weaviate-3", ""} + for _, val := range disabledVals { + t.Run(fmt.Sprintf("disabled %q", val), func(t *testing.T) { + t.Setenv(envName, val) + assert.False(t, enabledForHost(envName, localHostname)) + }) + } +} + +func TestParseCollectionPropsTenants(t *testing.T) { + type testCase struct { + env string + expected []CollectionPropsTenants + expectedErrMsg string + } + + p := newCollectionPropsTenantsParser() + + testCases := []testCase{ + { + env: "", + expected: []CollectionPropsTenants{}, + }, + + // collections + { + env: "Collection1", + expected: []CollectionPropsTenants{ + {Collection: "Collection1"}, + }, + }, + { + env: "Collection1; Collection2; ;", + expected: []CollectionPropsTenants{ + {Collection: "Collection1"}, + {Collection: "Collection2"}, + }, + }, + { + env: "Collection1:; Collection2::; ;", + expected: []CollectionPropsTenants{ + {Collection: "Collection1"}, + {Collection: "Collection2"}, + }, + }, + + // collections + props + { + env: "Collection1:prop1,prop2", + expected: []CollectionPropsTenants{ + { + Collection: "Collection1", + Props: []string{"prop1", "prop2"}, + }, + }, + }, + { + env: "Collection1:prop1, prop2;Collection2:prop3: ;", + expected: []CollectionPropsTenants{ + { + Collection: "Collection1", + Props: 
[]string{"prop1", "prop2"}, + }, + { + Collection: "Collection2", + Props: []string{"prop3"}, + }, + }, + }, + + // collections + tenants + { + env: "Collection1::tenant1,tenant2", + expected: []CollectionPropsTenants{ + { + Collection: "Collection1", + Tenants: []string{"tenant1", "tenant2"}, + }, + }, + }, + { + env: "Collection1::tenant1, tenant2;Collection2::tenant3", + expected: []CollectionPropsTenants{ + { + Collection: "Collection1", + Tenants: []string{"tenant1", "tenant2"}, + }, + { + Collection: "Collection2", + Tenants: []string{"tenant3"}, + }, + }, + }, + + // collections + props + tenants + { + env: "Collection1:prop1:tenant1,tenant2", + expected: []CollectionPropsTenants{ + { + Collection: "Collection1", + Props: []string{"prop1"}, + Tenants: []string{"tenant1", "tenant2"}, + }, + }, + }, + { + env: "Collection1:prop1 :tenant1, tenant2;Collection2:prop2,prop3 :tenant3 ; ", + expected: []CollectionPropsTenants{ + { + Collection: "Collection1", + Props: []string{"prop1"}, + Tenants: []string{"tenant1", "tenant2"}, + }, + { + Collection: "Collection2", + Props: []string{"prop2", "prop3"}, + Tenants: []string{"tenant3"}, + }, + }, + }, + + // unique / merged + { + env: "Collection1:prop1,prop2:tenant1,tenant2;Collection2:propX;Collection1:prop2,prop3;Collection3::tenantY;Collection1:prop4:tenant2,tenant3", + expected: []CollectionPropsTenants{ + { + Collection: "Collection1", + Props: []string{"prop1", "prop2", "prop3", "prop4"}, + Tenants: []string{"tenant1", "tenant2", "tenant3"}, + }, + { + Collection: "Collection2", + Props: []string{"propX"}, + }, + { + Collection: "Collection3", + Tenants: []string{"tenantY"}, + }, + }, + }, + + // errors + { + env: "lowerCaseCollectionName", + expectedErrMsg: "invalid collection name", + }, + { + env: "InvalidChars#", + expectedErrMsg: "invalid collection name", + }, + { + env: "Collection1:InvalidChars#", + expectedErrMsg: "invalid property name", + }, + { + env: "Collection1::InvalidChars#", + expectedErrMsg: 
"invalid tenant/shard name",
		},
		{
			env:            ":prop",
			expectedErrMsg: "missing collection name",
		},
		{
			env:            "::tenant",
			expectedErrMsg: "missing collection name",
		},
		{
			env:            ":prop:tenant",
			expectedErrMsg: "missing collection name",
		},
		{
			env:            "Collection1:::",
			expectedErrMsg: "too many parts",
		},
		{
			env:            "Collection1:prop:tenant:",
			expectedErrMsg: "too many parts",
		},
		{
			env:            "Collection1:prop:tenant:something",
			expectedErrMsg: "too many parts",
		},
	}

	for _, tc := range testCases {
		t.Run(tc.env, func(t *testing.T) {
			cpts, err := p.parse(tc.env)

			if tc.expectedErrMsg != "" {
				assert.ErrorContains(t, err, tc.expectedErrMsg)
			} else {
				assert.NoError(t, err)
			}

			assert.ElementsMatch(t, tc.expected, cpts)
		})
	}
}

// TestEnvironmentPersistenceMinMMapSize verifies that
// PERSISTENCE_MIN_MMAP_SIZE is parsed as a byte size (with optional SI/IEC
// unit) and falls back to the default when unset.
func TestEnvironmentPersistenceMinMMapSize(t *testing.T) {
	factors := []struct {
		name        string
		value       []string // empty slice means "env var not set"
		expected    int64
		expectedErr bool
	}{
		{"Valid no unit", []string{"3"}, 3, false},
		// NOTE(review): "KB" is the SI unit (powers of 1000) and "KiB" the
		// IEC unit (powers of 1024) — the two labels were previously swapped.
		{"Valid SI unit", []string{"3KB"}, 3000, false},
		{"Valid IEC unit", []string{"3KiB"}, 3 * 1024, false},
		{"not given", []string{}, DefaultPersistenceMinMMapSize, false},
		{"invalid factor", []string{"-1"}, -1, true},
		{"not parsable", []string{"I'm not a number"}, -1, true},
	}
	for _, tt := range factors {
		t.Run(tt.name, func(t *testing.T) {
			if len(tt.value) == 1 {
				t.Setenv("PERSISTENCE_MIN_MMAP_SIZE", tt.value[0])
			}
			conf := Config{}
			err := FromEnv(&conf)

			if tt.expectedErr {
				require.NotNil(t, err)
			} else {
				require.Equal(t, tt.expected, conf.Persistence.MinMMapSize)
			}
		})
	}
}

// TestEnvironmentPersistenceMaxReuseWalSize mirrors the MinMMapSize test for
// the PERSISTENCE_MAX_REUSE_WAL_SIZE variable.
func TestEnvironmentPersistenceMaxReuseWalSize(t *testing.T) {
	factors := []struct {
		name        string
		value       []string // empty slice means "env var not set"
		expected    int64
		expectedErr bool
	}{
		{"Valid no unit", []string{"3"}, 3, false},
		// NOTE(review): "KB" is SI (1000), "KiB" is IEC (1024); the two
		// labels were previously swapped.
		{"Valid SI unit", []string{"3KB"}, 3000, false},
		{"Valid IEC unit", []string{"3KiB"}, 3 * 1024, false},
		{"not given", []string{}, DefaultPersistenceMaxReuseWalSize, false},
		{"invalid factor", []string{"-1"}, -1, true},
		{"not parsable", []string{"I'm not a number"}, -1, true},
	}
	for _, tt := range factors {
		t.Run(tt.name, func(t *testing.T) {
			if len(tt.value) == 1 {
				t.Setenv("PERSISTENCE_MAX_REUSE_WAL_SIZE", tt.value[0])
			}
			conf := Config{}
			err := FromEnv(&conf)

			if tt.expectedErr {
				require.NotNil(t, err)
			} else {
				require.Equal(t, tt.expected, conf.Persistence.MaxReuseWalSize)
			}
		})
	}
}

// TestParsePositiveFloat covers parsePositiveFloat: valid values, the
// default fallback, and rejection of zero/negative/unparsable input.
func TestParsePositiveFloat(t *testing.T) {
	tests := []struct {
		name         string
		envName      string
		envValue     string
		defaultValue float64
		expected     float64
		expectError  bool
	}{
		{
			name:         "valid positive float",
			envName:      "TEST_POSITIVE_FLOAT",
			envValue:     "1.5",
			defaultValue: 2.0,
			expected:     1.5,
			expectError:  false,
		},
		{
			name:         "valid integer as float",
			envName:      "TEST_POSITIVE_FLOAT",
			envValue:     "2",
			defaultValue: 1.0,
			expected:     2.0,
			expectError:  false,
		},
		{
			name:         "use default when env not set",
			envName:      "TEST_POSITIVE_FLOAT",
			envValue:     "",
			defaultValue: 3.0,
			expected:     3.0,
			expectError:  false,
		},
		{
			name:         "zero value should error",
			envName:      "TEST_POSITIVE_FLOAT",
			envValue:     "0",
			defaultValue: 1.0,
			expected:     0,
			expectError:  true,
		},
		{
			name:         "negative value should error",
			envName:      "TEST_POSITIVE_FLOAT",
			envValue:     "-1.5",
			defaultValue: 1.0,
			expected:     0,
			expectError:  true,
		},
		{
			name:         "invalid float should error",
			envName:      "TEST_POSITIVE_FLOAT",
			envValue:     "not-a-float",
			defaultValue: 1.0,
			expected:     0,
			expectError:  true,
		},
		{
			name:         "very small positive float",
			envName:      "TEST_POSITIVE_FLOAT",
			envValue:     "0.0000001",
			defaultValue: 1.0,
			expected:     0.0000001,
			expectError:  false,
		},
		{
			name:         "very large positive float",
			envName:      "TEST_POSITIVE_FLOAT",
			envValue:     "999999.999999",
			defaultValue: 1.0,
			expected:     999999.999999,
			expectError:  false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T)
{ + // Set up environment + if tt.envValue != "" { + t.Setenv(tt.envName, tt.envValue) + } else { + os.Unsetenv(tt.envName) + } + + // Create a variable to store the result + var result float64 + + // Call the function + err := parsePositiveFloat(tt.envName, func(val float64) { + result = val + }, tt.defaultValue) + + // Check error + if tt.expectError { + assert.Error(t, err) + if tt.envValue != "" { + assert.Contains(t, err.Error(), tt.envName) + } + } else { + assert.NoError(t, err) + assert.Equal(t, tt.expected, result) + } + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/config/helpers_for_test.go b/platform/dbops/binaries/weaviate-src/usecases/config/helpers_for_test.go new file mode 100644 index 0000000000000000000000000000000000000000..056137731ead960f8c0ee9071d80a11a0d0c6c19 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/config/helpers_for_test.go @@ -0,0 +1,28 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package config + +import "github.com/pkg/errors" + +type fakeModuleProvider struct { + valid []string +} + +func (f *fakeModuleProvider) ValidateVectorizer(moduleName string) error { + for _, valid := range f.valid { + if moduleName == valid { + return nil + } + } + + return errors.Errorf("invalid vectorizer %q", moduleName) +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/config/parse_resource_strings.go b/platform/dbops/binaries/weaviate-src/usecases/config/parse_resource_strings.go new file mode 100644 index 0000000000000000000000000000000000000000..adc561929fe4f12bf6c88537788a6a9ce53318fb --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/config/parse_resource_strings.go @@ -0,0 +1,64 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package config + +import ( + "fmt" + "math" + "strconv" + "strings" +) + +// parseResourceString takes a string like "1024", "1KiB", "43TiB" and converts it to an integer number of bytes. 
+func parseResourceString(resource string) (int64, error) { + resource = strings.TrimSpace(resource) + + if strings.EqualFold(resource, "unlimited") || strings.EqualFold(resource, "nolimit") { + return math.MaxInt64, nil + } + + // Find where the digits end + lastDigit := len(resource) + for i, r := range resource { + if r < '0' || r > '9' { + lastDigit = i + break + } + } + + // Split the numeric part and the unit + number, unit := resource[:lastDigit], resource[lastDigit:] + unit = strings.TrimSpace(unit) // Clean up any surrounding whitespace + value, err := strconv.ParseInt(number, 10, 64) + if err != nil { + return 0, err + } + + unitMultipliers := map[string]int64{ + "": 1, // No unit means bytes + "B": 1, + "KiB": 1024, + "MiB": 1024 * 1024, + "GiB": 1024 * 1024 * 1024, + "TiB": 1024 * 1024 * 1024 * 1024, + "KB": 1000, + "MB": 1000 * 1000, + "GB": 1000 * 1000 * 1000, + "TB": 1000 * 1000 * 1000 * 1000, + } + multiplier, exists := unitMultipliers[unit] + if !exists { + return 0, fmt.Errorf("invalid or unsupported unit") + } + + return value * multiplier, nil +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/config/parse_resource_strings_test.go b/platform/dbops/binaries/weaviate-src/usecases/config/parse_resource_strings_test.go new file mode 100644 index 0000000000000000000000000000000000000000..64742a6b994c97098e3c137329d9176bfcb7cbff --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/config/parse_resource_strings_test.go @@ -0,0 +1,55 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package config + +import ( + "math" + "testing" +) + +func TestParseResourceString(t *testing.T) { + tests := []struct { + name string + input string + expected int64 + err bool + }{ + {"ValidBytes", "1024", 1024, false}, + {"ValidKiB", "1KiB", 1024, false}, + {"ValidMiB", "500MiB", 500 * 1024 * 1024, false}, + {"ValidTiB", "43TiB", 43 * 1024 * 1024 * 1024 * 1024, false}, + {"ValidKB", "1KB", 1000, false}, + {"ValidMB", "500MB", 500 * 1e6, false}, + {"ValidTB", "43TB", 43 * 1e12, false}, + {"InvalidUnit", "100GiL", 0, true}, + {"InvalidNumber", "tenKiB", 0, true}, + {"InvalidFormat", "1024 KiB", 1024 * 1024, false}, + {"EmptyString", "", 0, true}, + {"NoUnit", "12345", 12345, false}, + {"Unlimited lower case", "unlimited", math.MaxInt64, false}, + {"Unlimited unlimited upper case", "UNLIMITED", math.MaxInt64, false}, + {"Nolimit lower case", "nolimit", math.MaxInt64, false}, + {"Nolimit upper case", "NOLIMIT", math.MaxInt64, false}, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + result, err := parseResourceString(tc.input) + if (err != nil) != tc.err { + t.Errorf("parseResourceString(%s) expected error: %v, got: %v", tc.input, tc.err, err != nil) + } + if result != tc.expected { + t.Errorf("parseResourceString(%s) expected %d, got %d", tc.input, tc.expected, result) + } + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/config/runtimeconfig.go b/platform/dbops/binaries/weaviate-src/usecases/config/runtimeconfig.go new file mode 100644 index 0000000000000000000000000000000000000000..abbb174cd924009ba063fd9a3e3edf2446e4c8bd --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/config/runtimeconfig.go @@ -0,0 +1,263 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package config + +import ( + "bytes" + "fmt" + "io" + "reflect" + "strings" + "time" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "gopkg.in/yaml.v3" + + "github.com/weaviate/weaviate/usecases/config/runtime" +) + +// WeaviateRuntimeConfig is the collection all the supported configs that is +// managed dynamically and can be overridden during runtime. +type WeaviateRuntimeConfig struct { + MaximumAllowedCollectionsCount *runtime.DynamicValue[int] `json:"maximum_allowed_collections_count" yaml:"maximum_allowed_collections_count"` + AutoschemaEnabled *runtime.DynamicValue[bool] `json:"autoschema_enabled" yaml:"autoschema_enabled"` + AsyncReplicationDisabled *runtime.DynamicValue[bool] `json:"async_replication_disabled" yaml:"async_replication_disabled"` + RevectorizeCheckDisabled *runtime.DynamicValue[bool] `json:"revectorize_check_disabled" yaml:"revectorize_check_disabled"` + ReplicaMovementMinimumAsyncWait *runtime.DynamicValue[time.Duration] `json:"replica_movement_minimum_async_wait" yaml:"replica_movement_minimum_async_wait"` + TenantActivityReadLogLevel *runtime.DynamicValue[string] `json:"tenant_activity_read_log_level" yaml:"tenant_activity_read_log_level"` + TenantActivityWriteLogLevel *runtime.DynamicValue[string] `json:"tenant_activity_write_log_level" yaml:"tenant_activity_write_log_level"` + QuerySlowLogEnabled *runtime.DynamicValue[bool] `json:"query_slow_log_enabled" yaml:"query_slow_log_enabled"` + QuerySlowLogThreshold *runtime.DynamicValue[time.Duration] `json:"query_slow_log_threshold" yaml:"query_slow_log_threshold"` + InvertedSorterDisabled *runtime.DynamicValue[bool] `json:"inverted_sorter_disabled" yaml:"inverted_sorter_disabled"` + UsageGCSBucket *runtime.DynamicValue[string] `json:"usage_gcs_bucket" yaml:"usage_gcs_bucket"` + UsageGCSPrefix *runtime.DynamicValue[string] `json:"usage_gcs_prefix" yaml:"usage_gcs_prefix"` + UsageS3Bucket *runtime.DynamicValue[string] 
`json:"usage_s3_bucket" yaml:"usage_s3_bucket"` + UsageS3Prefix *runtime.DynamicValue[string] `json:"usage_s3_prefix" yaml:"usage_s3_prefix"` + UsageScrapeInterval *runtime.DynamicValue[time.Duration] `json:"usage_scrape_interval" yaml:"usage_scrape_interval"` + UsageShardJitterInterval *runtime.DynamicValue[time.Duration] `json:"usage_shard_jitter_interval" yaml:"usage_shard_jitter_interval"` + UsagePolicyVersion *runtime.DynamicValue[string] `json:"usage_policy_version" yaml:"usage_policy_version"` + UsageVerifyPermissions *runtime.DynamicValue[bool] `json:"usage_verify_permissions" yaml:"usage_verify_permissions"` + + // Experimental configs. Will be removed in the future. + OIDCIssuer *runtime.DynamicValue[string] `json:"exp_oidc_issuer" yaml:"exp_oidc_issuer"` + OIDCClientID *runtime.DynamicValue[string] `json:"exp_oidc_client_id" yaml:"exp_oidc_client_id"` + OIDCSkipClientIDCheck *runtime.DynamicValue[bool] `yaml:"exp_oidc_skip_client_id_check" json:"exp_oidc_skip_client_id_check"` + OIDCUsernameClaim *runtime.DynamicValue[string] `yaml:"exp_oidc_username_claim" json:"exp_oidc_username_claim"` + OIDCGroupsClaim *runtime.DynamicValue[string] `yaml:"exp_oidc_groups_claim" json:"exp_oidc_groups_claim"` + OIDCScopes *runtime.DynamicValue[[]string] `yaml:"exp_oidc_scopes" json:"exp_oidc_scopes"` + OIDCCertificate *runtime.DynamicValue[string] `yaml:"exp_oidc_certificate" json:"exp_oidc_certificate"` + DefaultQuantization *runtime.DynamicValue[string] `yaml:"default_quantization" json:"default_quantization"` +} + +// ParseRuntimeConfig decode WeaviateRuntimeConfig from raw bytes of YAML. +func ParseRuntimeConfig(buf []byte) (*WeaviateRuntimeConfig, error) { + var conf WeaviateRuntimeConfig + + dec := yaml.NewDecoder(bytes.NewReader(buf)) + + // To catch fields different than ones in the struct (say typo) + dec.KnownFields(true) + + // Am empty runtime yaml file is still a valid file. So treating io.EOF as + // non-error case returns default values of config. 
+ if err := dec.Decode(&conf); err != nil && !errors.Is(err, io.EOF) { + return nil, err + } + return &conf, nil +} + +// UpdateConfig does in-place update of `source` config based on values available in +// `parsed` config. +func UpdateRuntimeConfig(log logrus.FieldLogger, source, parsed *WeaviateRuntimeConfig, hooks map[string]func() error) error { + if source == nil || parsed == nil { + return fmt.Errorf("source and parsed cannot be nil") + } + + updateRuntimeConfig(log, reflect.ValueOf(*source), reflect.ValueOf(*parsed), hooks) + return nil +} + +/* +Alright. `updateRuntimeConfig` needs some explanation. + +We could have avoided using `reflection` all together, if we have written something like this. + + func updateRuntimeConfig(source, parsed *WeaviateRuntimeConfig) error { + if parsed.MaximumAllowedCollectionsCount != nil { + source.MaximumAllowedCollectionsCount.SetValue(parsed.MaximumAllowedCollectionsCount.Get()) + } else { + source.MaximumAllowedCollectionsCount.Reset() + } + + if parsed.AsyncReplicationDisabled != nil { + source.AsyncReplicationDisabled.SetValue(parsed.AsyncReplicationDisabled.Get()) + } else { + source.AsyncReplicationDisabled.Reset() + } + + if parsed.AutoschemaEnabled != nil { + source.AutoschemaEnabled.SetValue(parsed.AutoschemaEnabled.Get()) + } else { + source.AutoschemaEnabled.Reset() + } + + return nil + } + +But this approach has two serious drawbacks + 1. Everytime new config is supported, this function gets verbose as we have update for every struct fields in WeaviateRuntimeConfig + 2. The much bigger one is, what if consumer added a struct field, but failed to **update** this function?. This was a serious concern for me, more work for + consumers. + +With this reflection method, we avoided that extra step from the consumer. This reflection approach is "logically" same as above implementation. +See "runtimeconfig_test.go" for more examples. 
+*/ + +func updateRuntimeConfig(log logrus.FieldLogger, source, parsed reflect.Value, hooks map[string]func() error) { + // Basically we do following + // + // 1. Loop through all the `source` fields + // 2. Check if any of those fields exists in `parsed` (non-nil) + // 3. If parsed config doesn't contain the field from `source`, We reset source's field. + // so that it's default value takes preference. + // 4. If parsed config does contain the field from `source`, We update the value via `SetValue`. + + logRecords := make([]updateLogRecord, 0) + + for i := range source.NumField() { + sf := source.Field(i) + pf := parsed.Field(i) + + r := updateLogRecord{ + field: source.Type().Field(i).Name, + } + + si := sf.Interface() + var pi any + if !pf.IsNil() { + pi = pf.Interface() + } + + switch sv := si.(type) { + case *runtime.DynamicValue[int]: + r.oldV = sv.Get() + if pf.IsNil() { + // Means the config is removed + sv.Reset() + } else { + p := pi.(*runtime.DynamicValue[int]) + sv.SetValue(p.Get()) + } + r.newV = sv.Get() + case *runtime.DynamicValue[float64]: + r.oldV = sv.Get() + if pf.IsNil() { + // Means the config is removed + sv.Reset() + } else { + p := pi.(*runtime.DynamicValue[float64]) + sv.SetValue(p.Get()) + } + r.newV = sv.Get() + case *runtime.DynamicValue[bool]: + r.oldV = sv.Get() + if pf.IsNil() { + // Means the config is removed + sv.Reset() + } else { + p := pi.(*runtime.DynamicValue[bool]) + sv.SetValue(p.Get()) + } + r.newV = sv.Get() + case *runtime.DynamicValue[time.Duration]: + r.oldV = sv.Get() + if pf.IsNil() { + // Means the config is removed + sv.Reset() + } else { + p := pi.(*runtime.DynamicValue[time.Duration]) + sv.SetValue(p.Get()) + } + r.newV = sv.Get() + case *runtime.DynamicValue[string]: + r.oldV = sv.Get() + if pf.IsNil() { + // Means the config is removed + sv.Reset() + } else { + p := pi.(*runtime.DynamicValue[string]) + sv.SetValue(p.Get()) + } + r.newV = sv.Get() + case *runtime.DynamicValue[[]string]: + r.oldV = sv.Get() + if 
pf.IsNil() { + // Means the config is removed + sv.Reset() + } else { + p := pi.(*runtime.DynamicValue[[]string]) + sv.SetValue(p.Get()) + } + r.newV = sv.Get() + default: + panic(fmt.Sprintf("not recognized type: %#v, %#v", pi, si)) + } + + if !reflect.DeepEqual(r.newV, r.oldV) { + logRecords = append(logRecords, r) + } + + } + + // log the changes made as INFO for auditing. + for _, v := range logRecords { + log.WithFields(logrus.Fields{ + "action": "runtime_overrides_changed", + "field": v.field, + "old_value": v.oldV, + "new_value": v.newV, + }).Infof("runtime overrides: config '%v' changed from '%v' to '%v'", v.field, v.oldV, v.newV) + } + + for match, f := range hooks { + if matchUpdatedFields(match, logRecords) { + err := f() + if err != nil { + log.WithFields(logrus.Fields{ + "action": "runtime_overrides_hooks", + "match": match, + }).Errorf("error calling runtime hooks for match %s, %v", match, err) + continue + } + log.WithFields(logrus.Fields{ + "action": "runtime_overrides_hooks", + "match": match, + }).Infof("runtime overrides: hook ran for matching '%v' pattern", match) + } + } +} + +// updateLogRecord is used to record changes during updating runtime config. 
+type updateLogRecord struct { + field string + oldV, newV any +} + +func matchUpdatedFields(match string, records []updateLogRecord) bool { + for _, v := range records { + if strings.Contains(v.field, match) { + return true + } + } + return false +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/config/runtimeconfig_test.go b/platform/dbops/binaries/weaviate-src/usecases/config/runtimeconfig_test.go new file mode 100644 index 0000000000000000000000000000000000000000..2d68daeb37f03374967579fd3534b06dcba45afb --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/config/runtimeconfig_test.go @@ -0,0 +1,345 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package config + +import ( + "bytes" + "io" + "regexp" + "testing" + "time" + + "github.com/go-jose/go-jose/v4/json" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "gopkg.in/yaml.v3" + + "github.com/weaviate/weaviate/usecases/config/runtime" +) + +func TestParseRuntimeConfig(t *testing.T) { + // parser should fail if any unknown fields exist in the file + t.Run("parser should fail if any unknown fields exist in the file", func(t *testing.T) { + // rationale: Catch and fail early if any typo on the config file. + + buf := []byte(`autoschema_enabled: true`) + cfg, err := ParseRuntimeConfig(buf) + require.NoError(t, err) + assert.Equal(t, true, cfg.AutoschemaEnabled.Get()) + + buf = []byte(`autoschema_enbaled: false`) // note: typo. 
+ cfg, err = ParseRuntimeConfig(buf) + require.ErrorContains(t, err, "autoschema_enbaled") // should contain misspelled field + assert.Nil(t, cfg) + }) + + t.Run("YAML tag should be lower_snake_case", func(t *testing.T) { + var r WeaviateRuntimeConfig + + jd, err := json.Marshal(r) + require.NoError(t, err) + + var vv map[string]any + require.NoError(t, json.Unmarshal(jd, &vv)) + + for k := range vv { + // check if all the keys lower_snake_case. + assertConfigKey(t, k) + } + }) + + t.Run("JSON tag should be lower_snake_case in the runtime config", func(t *testing.T) { + var r WeaviateRuntimeConfig + + yd, err := yaml.Marshal(r) + require.NoError(t, err) + + var vv map[string]any + require.NoError(t, yaml.Unmarshal(yd, &vv)) + + for k := range vv { + // check if all the keys lower_snake_case. + assertConfigKey(t, k) + } + }) +} + +func TestUpdateRuntimeConfig(t *testing.T) { + log := logrus.New() + log.SetOutput(io.Discard) + + t.Run("updating should reflect changes in registered configs", func(t *testing.T) { + var ( + colCount runtime.DynamicValue[int] + autoSchema runtime.DynamicValue[bool] + asyncRep runtime.DynamicValue[bool] + readLogLevel runtime.DynamicValue[string] + writeLogLevel runtime.DynamicValue[string] + revectorizeCheckDisabled runtime.DynamicValue[bool] + minFinWait runtime.DynamicValue[time.Duration] + ) + + reg := &WeaviateRuntimeConfig{ + MaximumAllowedCollectionsCount: &colCount, + AutoschemaEnabled: &autoSchema, + AsyncReplicationDisabled: &asyncRep, + TenantActivityReadLogLevel: &readLogLevel, + TenantActivityWriteLogLevel: &writeLogLevel, + RevectorizeCheckDisabled: &revectorizeCheckDisabled, + ReplicaMovementMinimumAsyncWait: &minFinWait, + } + + // parsed from yaml configs for example + buf := []byte(`autoschema_enabled: true +maximum_allowed_collections_count: 13 +replica_movement_minimum_async_wait: 10s`) + parsed, err := ParseRuntimeConfig(buf) + require.NoError(t, err) + + // before update (zero values) + assert.Equal(t, false, 
autoSchema.Get()) + assert.Equal(t, 0, colCount.Get()) + assert.Equal(t, 0*time.Second, minFinWait.Get()) + + require.NoError(t, UpdateRuntimeConfig(log, reg, parsed, nil)) + + // after update (reflect from parsed values) + assert.Equal(t, true, autoSchema.Get()) + assert.Equal(t, 13, colCount.Get()) + assert.Equal(t, 10*time.Second, minFinWait.Get()) + }) + + t.Run("Add and remove workflow", func(t *testing.T) { + // 1. We start with empty overrides and see it doesn't change the .Get() value of source configs. + // 2. We add some overrides. Check .Get() value + // 3. Remove the overrides. check .Get() value goes back to default + + source := &WeaviateRuntimeConfig{ + MaximumAllowedCollectionsCount: runtime.NewDynamicValue(10), + AutoschemaEnabled: runtime.NewDynamicValue(true), + AsyncReplicationDisabled: runtime.NewDynamicValue(true), + TenantActivityReadLogLevel: runtime.NewDynamicValue("INFO"), + TenantActivityWriteLogLevel: runtime.NewDynamicValue("INFO"), + RevectorizeCheckDisabled: runtime.NewDynamicValue(true), + } + + assert.Equal(t, 10, source.MaximumAllowedCollectionsCount.Get()) + assert.Equal(t, true, source.AutoschemaEnabled.Get()) + assert.Equal(t, true, source.AsyncReplicationDisabled.Get()) + assert.Equal(t, "INFO", source.TenantActivityReadLogLevel.Get()) + assert.Equal(t, "INFO", source.TenantActivityWriteLogLevel.Get()) + assert.Equal(t, true, source.RevectorizeCheckDisabled.Get()) + + // Empty Parsing + buf := []byte("") + parsed, err := ParseRuntimeConfig(buf) + require.NoError(t, err) + + assert.Nil(t, parsed.AsyncReplicationDisabled) + assert.Nil(t, parsed.MaximumAllowedCollectionsCount) + assert.Nil(t, parsed.AutoschemaEnabled) + assert.Nil(t, parsed.TenantActivityReadLogLevel) + assert.Nil(t, parsed.TenantActivityWriteLogLevel) + assert.Nil(t, parsed.RevectorizeCheckDisabled) + + require.NoError(t, UpdateRuntimeConfig(log, source, parsed, nil)) + assert.Equal(t, 10, source.MaximumAllowedCollectionsCount.Get()) + assert.Equal(t, true, 
source.AutoschemaEnabled.Get()) + assert.Equal(t, true, source.AsyncReplicationDisabled.Get()) + assert.Equal(t, "INFO", source.TenantActivityReadLogLevel.Get()) + assert.Equal(t, "INFO", source.TenantActivityWriteLogLevel.Get()) + assert.Equal(t, true, source.RevectorizeCheckDisabled.Get()) + + // Non-empty parsing + buf = []byte(`autoschema_enabled: false +maximum_allowed_collections_count: 13`) // leaving out `asyncRep` config + parsed, err = ParseRuntimeConfig(buf) + require.NoError(t, err) + + require.NoError(t, UpdateRuntimeConfig(log, source, parsed, nil)) + assert.Equal(t, 13, source.MaximumAllowedCollectionsCount.Get()) // changed + assert.Equal(t, false, source.AutoschemaEnabled.Get()) // changed + assert.Equal(t, true, source.AsyncReplicationDisabled.Get()) + assert.Equal(t, "INFO", source.TenantActivityReadLogLevel.Get()) + assert.Equal(t, "INFO", source.TenantActivityWriteLogLevel.Get()) + assert.Equal(t, true, source.RevectorizeCheckDisabled.Get()) + + // Empty parsing again. 
Should go back to default values + buf = []byte("") + parsed, err = ParseRuntimeConfig(buf) + require.NoError(t, err) + + require.NoError(t, UpdateRuntimeConfig(log, source, parsed, nil)) + assert.Equal(t, 10, source.MaximumAllowedCollectionsCount.Get()) + assert.Equal(t, true, source.AutoschemaEnabled.Get()) + assert.Equal(t, true, source.AsyncReplicationDisabled.Get()) + assert.Equal(t, "INFO", source.TenantActivityReadLogLevel.Get()) + assert.Equal(t, "INFO", source.TenantActivityWriteLogLevel.Get()) + assert.Equal(t, true, source.RevectorizeCheckDisabled.Get()) + }) + + t.Run("Reset() of non-exist config values in parsed yaml shouldn't panic", func(t *testing.T) { + var ( + colCount runtime.DynamicValue[int] + autoSchema runtime.DynamicValue[bool] + // leaving out `asyncRep` config + ) + + reg := &WeaviateRuntimeConfig{ + MaximumAllowedCollectionsCount: &colCount, + AutoschemaEnabled: &autoSchema, + // leaving out `asyncRep` config + } + + // parsed from yaml configs for example + buf := []byte(`autoschema_enabled: true +maximum_allowed_collections_count: 13`) // leaving out `asyncRep` config + parsed, err := ParseRuntimeConfig(buf) + require.NoError(t, err) + + // before update (zero values) + assert.Equal(t, false, autoSchema.Get()) + assert.Equal(t, 0, colCount.Get()) + + require.NotPanics(t, func() { UpdateRuntimeConfig(log, reg, parsed, nil) }) + + // after update (reflect from parsed values) + assert.Equal(t, true, autoSchema.Get()) + assert.Equal(t, 13, colCount.Get()) + }) + + t.Run("updating config should split out corresponding log lines", func(t *testing.T) { + log := logrus.New() + logs := bytes.Buffer{} + log.SetOutput(&logs) + + var ( + colCount = runtime.NewDynamicValue(7) + autoSchema runtime.DynamicValue[bool] + ) + + reg := &WeaviateRuntimeConfig{ + MaximumAllowedCollectionsCount: colCount, + AutoschemaEnabled: &autoSchema, + } + + // parsed from yaml configs for example + buf := []byte(`autoschema_enabled: true 
+maximum_allowed_collections_count: 13`) // leaving out `asyncRep` config + parsed, err := ParseRuntimeConfig(buf) + require.NoError(t, err) + + // before update (zero values) + assert.Equal(t, false, autoSchema.Get()) + assert.Equal(t, 7, colCount.Get()) + + require.NoError(t, UpdateRuntimeConfig(log, reg, parsed, nil)) + assert.Contains(t, logs.String(), `level=info msg="runtime overrides: config 'MaximumAllowedCollectionsCount' changed from '7' to '13'" action=runtime_overrides_changed field=MaximumAllowedCollectionsCount new_value=13 old_value=7`) + assert.Contains(t, logs.String(), `level=info msg="runtime overrides: config 'AutoschemaEnabled' changed from 'false' to 'true'" action=runtime_overrides_changed field=AutoschemaEnabled new_value=true old_value=false`) + logs.Reset() + + // change configs + buf = []byte(`autoschema_enabled: false +maximum_allowed_collections_count: 10`) + parsed, err = ParseRuntimeConfig(buf) + require.NoError(t, err) + + require.NoError(t, UpdateRuntimeConfig(log, reg, parsed, nil)) + assert.Contains(t, logs.String(), `level=info msg="runtime overrides: config 'MaximumAllowedCollectionsCount' changed from '13' to '10'" action=runtime_overrides_changed field=MaximumAllowedCollectionsCount new_value=10 old_value=13`) + assert.Contains(t, logs.String(), `level=info msg="runtime overrides: config 'AutoschemaEnabled' changed from 'true' to 'false'" action=runtime_overrides_changed field=AutoschemaEnabled new_value=false old_value=true`) + logs.Reset() + + // remove configs (`maximum_allowed_collections_count`) + buf = []byte(`autoschema_enabled: false`) + parsed, err = ParseRuntimeConfig(buf) + require.NoError(t, err) + + require.NoError(t, UpdateRuntimeConfig(log, reg, parsed, nil)) + assert.Contains(t, logs.String(), `level=info msg="runtime overrides: config 'MaximumAllowedCollectionsCount' changed from '10' to '7'" action=runtime_overrides_changed field=MaximumAllowedCollectionsCount new_value=7 old_value=10`) + }) + + 
t.Run("updating priorities", func(t *testing.T) { + // invariants: + // 1. If field doesn't exist, should return default value + // 2. If field exist, but removed next time, should return default value not the old value. + + var ( + colCount runtime.DynamicValue[int] + autoSchema runtime.DynamicValue[bool] + asyncRep runtime.DynamicValue[bool] + readLogLevel runtime.DynamicValue[string] + writeLogLevel runtime.DynamicValue[string] + revectorizeCheckDisabled runtime.DynamicValue[bool] + minFinWait runtime.DynamicValue[time.Duration] + ) + + reg := &WeaviateRuntimeConfig{ + MaximumAllowedCollectionsCount: &colCount, + AutoschemaEnabled: &autoSchema, + AsyncReplicationDisabled: &asyncRep, + TenantActivityReadLogLevel: &readLogLevel, + TenantActivityWriteLogLevel: &writeLogLevel, + RevectorizeCheckDisabled: &revectorizeCheckDisabled, + ReplicaMovementMinimumAsyncWait: &minFinWait, + } + + // parsed from yaml configs for example + buf := []byte(`autoschema_enabled: true +maximum_allowed_collections_count: 13 +replica_movement_minimum_async_wait: 10s`) + parsed, err := ParseRuntimeConfig(buf) + require.NoError(t, err) + + // before update (zero values) + assert.Equal(t, false, autoSchema.Get()) + assert.Equal(t, 0, colCount.Get()) + assert.Equal(t, false, asyncRep.Get()) // this field doesn't exist in original config file. + assert.Equal(t, 0*time.Second, minFinWait.Get()) + + require.NoError(t, UpdateRuntimeConfig(log, reg, parsed, nil)) + + // after update (reflect from parsed values) + assert.Equal(t, true, autoSchema.Get()) + assert.Equal(t, 13, colCount.Get()) + assert.Equal(t, false, asyncRep.Get()) // this field doesn't exist in original config file, should return default value. + assert.Equal(t, 10*time.Second, minFinWait.Get()) + + // removing `maximum_allowed_collection_count` from config + buf = []byte(`autoschema_enabled: false`) + parsed, err = ParseRuntimeConfig(buf) + require.NoError(t, err) + + // before update. 
Should have old values + assert.Equal(t, true, autoSchema.Get()) + assert.Equal(t, 13, colCount.Get()) + assert.Equal(t, false, asyncRep.Get()) // this field doesn't exist in original config file, should return default value. + + require.NoError(t, UpdateRuntimeConfig(log, reg, parsed, nil)) + + // after update. + assert.Equal(t, false, autoSchema.Get()) + assert.Equal(t, 0, colCount.Get()) // this should still return `default` value. not old value + assert.Equal(t, false, asyncRep.Get()) // this field doesn't exist in original config file, should return default value. + }) +} + +// helpers +// assertConfigKey asserts if the `yaml` key is standard `lower_snake_case` (e.g: not `UPPER_CASE`) +func assertConfigKey(t *testing.T, key string) { + t.Helper() + + re := regexp.MustCompile(`^[a-z0-9]+(_[a-z0-9]+)*$`) + if !re.MatchString(key) { + t.Fatalf("given key %v is not lower snake case. The json/yaml tag for runtime config should be all lower snake case (e.g my_key, not MY_KEY)", key) + } +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/connstate/manager.go b/platform/dbops/binaries/weaviate-src/usecases/connstate/manager.go new file mode 100644 index 0000000000000000000000000000000000000000..a190266f4020cb0254ca48ab3add46651ec2b3ae --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/connstate/manager.go @@ -0,0 +1,89 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package connstate + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/sirupsen/logrus" +) + +// Manager can save and load a connector's internal state into a remote storage +type Manager struct { + repo Repo + state json.RawMessage + logger logrus.FieldLogger +} + +// Repo describes the dependencies of the connector state manager to an +// external storage +type Repo interface { + Save(ctx context.Context, state json.RawMessage) error + Load(ctx context.Context) (json.RawMessage, error) +} + +// NewManager for Connector State +func NewManager(repo Repo, logger logrus.FieldLogger) (*Manager, error) { + m := &Manager{repo: repo, logger: logger} + if err := m.loadOrInitialize(context.Background()); err != nil { + return nil, fmt.Errorf("could not load or initialize: %w", err) + } + + return m, nil +} + +// GetInitialState is only supposed to be used during initialization of the +// connector. +func (m *Manager) GetInitialState() json.RawMessage { + return m.state +} + +// SetState form outside (i.e. from the connector) +func (m *Manager) SetState(ctx context.Context, state json.RawMessage) error { + m.state = state + return m.save(ctx) +} + +// func (l *etcdSchemaManager) SetStateConnector(stateConnector connector_state.Connector) { +// l.connectorStateSetter = stateConnector +// } + +func (m *Manager) loadOrInitialize(ctx context.Context) error { + state, err := m.repo.Load(ctx) + if err != nil { + return fmt.Errorf("could not load connector state: %w", err) + } + + if state == nil { + m.state = json.RawMessage([]byte("{}")) + return m.save(ctx) + } + + m.state = state + return nil +} + +func (m *Manager) save(ctx context.Context) error { + m.logger. + WithField("action", "connector_state_update"). + WithField("configuration_store", "etcd"). 
+ Debug("saving updated connector state to configuration store") + + err := m.repo.Save(ctx, m.state) + if err != nil { + return fmt.Errorf("could not save connector state: %w", err) + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/distributedtask/handler.go b/platform/dbops/binaries/weaviate-src/usecases/distributedtask/handler.go new file mode 100644 index 0000000000000000000000000000000000000000..d0b649e9650afc9025d0a42906f100dea22284bd --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/distributedtask/handler.go @@ -0,0 +1,81 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package distributedtask + +import ( + "context" + "encoding/json" + "fmt" + "sort" + + "github.com/go-openapi/strfmt" + "github.com/weaviate/weaviate/cluster/distributedtask" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/auth/authorization" +) + +type Handler struct { + authorizer authorization.Authorizer + tasksLister distributedtask.TasksLister +} + +func NewHandler(authorizer authorization.Authorizer, taskLister distributedtask.TasksLister) *Handler { + return &Handler{ + authorizer: authorizer, + tasksLister: taskLister, + } +} + +func (h *Handler) ListTasks(ctx context.Context, principal *models.Principal) (models.DistributedTasks, error) { + if err := h.authorizer.Authorize(ctx, principal, authorization.READ, authorization.Cluster()); err != nil { + return nil, err + } + + tasksByNamespace, err := h.tasksLister.ListDistributedTasks(ctx) + if err != nil { + return nil, fmt.Errorf("list distributed tasks: %w", err) + } + + resp := models.DistributedTasks{} + for namespace, tasks := range tasksByNamespace { + resp[namespace] = make([]models.DistributedTask, 0, 
len(tasks)) + for _, task := range tasks { + var finishedNodes []string + for node := range task.FinishedNodes { + finishedNodes = append(finishedNodes, node) + } + // sort so it would be more deterministic and easier to test + sort.Strings(finishedNodes) + + // Try to unmarshal the raw payload into a generic JSON object. + // If we introduce sensitive information to the payload, we can + // add another method to Provider to unmarshal the payload and strip all the sensitive data. + var payload map[string]interface{} + if err = json.Unmarshal(task.Payload, &payload); err != nil { + return nil, fmt.Errorf("unmarshal payload: %w", err) + } + + resp[namespace] = append(resp[namespace], models.DistributedTask{ + ID: task.ID, + Version: int64(task.Version), + Status: task.Status.String(), + Error: task.Error, + StartedAt: strfmt.DateTime(task.StartedAt), + FinishedAt: strfmt.DateTime(task.FinishedAt), + FinishedNodes: finishedNodes, + Payload: payload, + }) + } + } + + return resp, nil +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/distributedtask/handler_test.go b/platform/dbops/binaries/weaviate-src/usecases/distributedtask/handler_test.go new file mode 100644 index 0000000000000000000000000000000000000000..7fc7cbdd0c011a8ba226498e4ec01dbc1e1d9429 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/distributedtask/handler_test.go @@ -0,0 +1,85 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package distributedtask + +import ( + "context" + "testing" + "time" + + "github.com/go-openapi/strfmt" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/cluster/distributedtask" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/auth/authorization" +) + +func TestHandler_ListTasks(t *testing.T) { + var ( + authorizer = authorization.NewMockAuthorizer(t) + now = time.Now() + + namespace = "testNamespace" + lister = taskListerStub{ + items: map[string][]*distributedtask.Task{ + namespace: { + { + Namespace: namespace, + TaskDescriptor: distributedtask.TaskDescriptor{ + ID: "test-task-1", + Version: 10, + }, + Payload: []byte(`{"hello": "world"}`), + Status: distributedtask.TaskStatusFailed, + StartedAt: now.Add(-time.Hour), + FinishedAt: now, + Error: "server is on fire", + FinishedNodes: map[string]bool{ + "node1": true, + "node2": true, + }, + }, + }, + }, + } + h = NewHandler(authorizer, lister) + ) + + authorizer.EXPECT().Authorize(mock.Anything, mock.Anything, authorization.READ, authorization.Cluster()).Return(nil) + + tasks, err := h.ListTasks(context.Background(), &models.Principal{}) + require.NoError(t, err) + + require.Equal(t, models.DistributedTasks{ + "testNamespace": []models.DistributedTask{ + { + ID: "test-task-1", + Version: 10, + Status: "FAILED", + Error: "server is on fire", + StartedAt: strfmt.DateTime(now.Add(-time.Hour)), + FinishedAt: strfmt.DateTime(now), + FinishedNodes: []string{"node1", "node2"}, + Payload: map[string]interface{}{"hello": "world"}, + }, + }, + }, tasks) +} + +type taskListerStub struct { + items map[string][]*distributedtask.Task +} + +func (t taskListerStub) ListDistributedTasks(ctx context.Context) (map[string][]*distributedtask.Task, error) { + return t.items, nil +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/fakes/fake_cluster_state.go 
b/platform/dbops/binaries/weaviate-src/usecases/fakes/fake_cluster_state.go new file mode 100644 index 0000000000000000000000000000000000000000..edec33ecbbf5d8dec815121609e1164fa92c5c53 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/fakes/fake_cluster_state.go @@ -0,0 +1,75 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package fakes + +import ( + command "github.com/weaviate/weaviate/cluster/proto/api" + "github.com/weaviate/weaviate/usecases/cluster" + "github.com/weaviate/weaviate/usecases/cluster/mocks" +) + +type FakeClusterState struct { + cluster.NodeSelector + syncIgnored bool + skipRepair bool +} + +func NewFakeClusterState(hosts ...string) *FakeClusterState { + if len(hosts) == 0 { + hosts = []string{"node-1"} + } + + return &FakeClusterState{ + NodeSelector: mocks.NewMockNodeSelector(hosts...), + } +} + +func (f *FakeClusterState) SchemaSyncIgnored() bool { + return f.syncIgnored +} + +func (f *FakeClusterState) SkipSchemaRepair() bool { + return f.skipRepair +} + +func (f *FakeClusterState) Hostnames() []string { + return f.StorageCandidates() +} + +func (f *FakeClusterState) AllNames() []string { + return f.StorageCandidates() +} + +func (f *FakeClusterState) LocalName() string { + return "node1" +} + +func (f *FakeClusterState) NodeCount() int { + return 1 +} + +func (f *FakeClusterState) ClusterHealthScore() int { + return 0 +} + +func (f *FakeClusterState) ResolveParentNodes(string, string, +) (map[string]string, error) { + return nil, nil +} + +func (f *FakeClusterState) NodeHostname(host string) (string, bool) { + return f.NodeSelector.NodeHostname(host) +} + +func (f *FakeClusterState) Execute(cmd *command.ApplyRequest) error { + return nil +} diff --git 
// FakeRPCAddressResolver resolves every raft address to one fixed answer.
// Useful in tests where the actual mapping is irrelevant.
type FakeRPCAddressResolver struct {
	addr string
	err  error
}

// NewFakeRPCAddressResolver returns a resolver that always yields the given
// address and error.
func NewFakeRPCAddressResolver(addr string, err error) *FakeRPCAddressResolver {
	return &FakeRPCAddressResolver{
		addr: addr,
		err:  err,
	}
}

// Address ignores raftAddress and returns the preconfigured result.
func (m *FakeRPCAddressResolver) Address(raftAddress string) (string, error) {
	return m.addr, m.err
}
// MockParser is a testify-based test double for the schema class parser.
type MockParser struct {
	mock.Mock
}

// NewMockParser returns a fresh MockParser with no expectations registered.
func NewMockParser() *MockParser {
	return &MockParser{}
}

// ParseClass records the call and returns the error configured on the mock.
func (m *MockParser) ParseClass(class *models.Class) error {
	args := m.Called(class)
	return args.Error(0)
}

// ParseClassUpdate records the call and echoes the proposed update back as
// the parsed result.
// NOTE(review): only `class` is forwarded to m.Called, so expectations cannot
// match on `update`, and the returned class is always `update` itself rather
// than args.Get(0) — confirm this asymmetry is intentional for the tests that
// rely on this mock.
func (m *MockParser) ParseClassUpdate(class, update *models.Class) (*models.Class, error) {
	args := m.Called(class)
	return update, args.Error(1)
}
// MockSchemaExecutor is a testify-based test double for the schema executor.
// Each method records its call via m.Called and returns the configured
// values; the exact argument lists passed to m.Called are the matching
// contract for test expectations, so they must not be changed.
type MockSchemaExecutor struct {
	mock.Mock
}

// NewMockSchemaExecutor returns a fresh MockSchemaExecutor with no
// expectations registered.
func NewMockSchemaExecutor() *MockSchemaExecutor {
	return &MockSchemaExecutor{}
}

func (m *MockSchemaExecutor) AddClass(req cmd.AddClassRequest) error {
	args := m.Called(req)
	return args.Error(0)
}

func (m *MockSchemaExecutor) RestoreClassDir(class string) error {
	args := m.Called(class)
	return args.Error(0)
}

func (m *MockSchemaExecutor) UpdateClass(req cmd.UpdateClassRequest) error {
	args := m.Called(req)
	return args.Error(0)
}

func (m *MockSchemaExecutor) AddReplicaToShard(class string, shard string, targetNode string) error {
	args := m.Called(class, shard, targetNode)
	return args.Error(0)
}

func (m *MockSchemaExecutor) DeleteReplicaFromShard(class string, shard string, targetNode string) error {
	args := m.Called(class, shard, targetNode)
	return args.Error(0)
}

func (m *MockSchemaExecutor) LoadShard(class string, shard string) {
	m.Called(class, shard)
}

func (m *MockSchemaExecutor) DropShard(class string, shard string) {
	m.Called(class, shard)
}

func (m *MockSchemaExecutor) ShutdownShard(class string, shard string) {
	m.Called(class, shard)
}

func (m *MockSchemaExecutor) UpdateIndex(req cmd.UpdateClassRequest) error {
	args := m.Called(req)
	return args.Error(0)
}

// ReloadLocalDB is deliberately not recorded via m.Called and always
// succeeds.
// NOTE(review): confirm tests never need to assert on ReloadLocalDB calls.
func (m *MockSchemaExecutor) ReloadLocalDB(ctx context.Context, all []cmd.UpdateClassRequest) error {
	return nil
}

// DeleteClass records only the class name.
// NOTE(review): hasFrozen is dropped, so expectations cannot match on it —
// confirm this is intentional.
func (m *MockSchemaExecutor) DeleteClass(name string, hasFrozen bool) error {
	args := m.Called(name)
	return args.Error(0)
}

func (m *MockSchemaExecutor) AddProperty(class string, req cmd.AddPropertyRequest) error {
	args := m.Called(class, req)
	return args.Error(0)
}

func (m *MockSchemaExecutor) AddTenants(class string, req *cmd.AddTenantsRequest) error {
	args := m.Called(class, req)
	return args.Error(0)
}

func (m *MockSchemaExecutor) UpdateTenants(class string, req *cmd.UpdateTenantsRequest) error {
	args := m.Called(class, req)
	return args.Error(0)
}

func (m *MockSchemaExecutor) UpdateTenantsProcess(class string, req *cmd.TenantProcessRequest) error {
	args := m.Called(class, req)
	return args.Error(0)
}

func (m *MockSchemaExecutor) DeleteTenants(class string, tenants []*models.Tenant) error {
	args := m.Called(class, tenants)
	return args.Error(0)
}

func (m *MockSchemaExecutor) UpdateShardStatus(req *cmd.UpdateShardStatusRequest) error {
	args := m.Called(req)
	return args.Error(0)
}

// GetShardsStatus records the call but always returns an empty status list.
// NOTE(review): the recorded return value at index 0 is ignored — confirm
// callers only care about the error.
func (m *MockSchemaExecutor) GetShardsStatus(class, tenant string) (models.ShardStatusList, error) {
	args := m.Called(class, tenant)
	return models.ShardStatusList{}, args.Error(1)
}

func (m *MockSchemaExecutor) Open(ctx context.Context) error {
	args := m.Called(ctx)
	return args.Error(0)
}

func (m *MockSchemaExecutor) Close(ctx context.Context) error {
	args := m.Called(ctx)
	return args.Error(0)
}

func (m *MockSchemaExecutor) TriggerSchemaUpdateCallbacks() {
	m.Called()
}
+// +// CONTACT: hello@weaviate.io +// + +package file + +import ( + "os" + "syscall" + + "github.com/weaviate/weaviate/usecases/integrity" +) + +type FileMetadata struct { + Name string `json:"name"` + Size int64 `json:"size"` + CRC32 uint32 `json:"crc32"` +} + +func GetFileMetadata(filePath string) (FileMetadata, error) { + fi, err := os.Stat(filePath) + if err != nil { + return FileMetadata{}, err + } + + if fi.IsDir() { + return FileMetadata{}, syscall.EISDIR + } + + size, crc32, err := integrity.CRC32(filePath) + if err != nil { + return FileMetadata{}, err + } + + return FileMetadata{ + Name: fi.Name(), + Size: size, + CRC32: crc32, + }, nil +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/file/file_metadata_test.go b/platform/dbops/binaries/weaviate-src/usecases/file/file_metadata_test.go new file mode 100644 index 0000000000000000000000000000000000000000..d9c1b7e6a13f30e7f180e90ab4eeef82eab555a9 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/file/file_metadata_test.go @@ -0,0 +1,43 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package file + +import ( + "hash/crc32" + "os" + "path/filepath" + "syscall" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestGetFileMetadata(t *testing.T) { + tmpDir := t.TempDir() + + _, err := GetFileMetadata(tmpDir) + require.ErrorIs(t, err, syscall.EISDIR) + + _, err = GetFileMetadata("non-existing-file") + require.ErrorIs(t, err, os.ErrNotExist) + + tmpFilePath := filepath.Join(tmpDir, "testfile1.txt") + tmpFileContent := []byte("hello, world") + + err = os.WriteFile(tmpFilePath, tmpFileContent, 0o644) + require.NoError(t, err) + + md, err := GetFileMetadata(tmpFilePath) + require.NoError(t, err) + require.EqualValues(t, len(tmpFileContent), md.Size) + require.Equal(t, crc32.ChecksumIEEE(tmpFileContent), md.CRC32) +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/floatcomp/delta.go b/platform/dbops/binaries/weaviate-src/usecases/floatcomp/delta.go new file mode 100644 index 0000000000000000000000000000000000000000..495616a13450ae0204e77ba5d23d931b67ae4105 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/floatcomp/delta.go @@ -0,0 +1,19 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package floatcomp + +import "math" + +func InDelta(f1, f2 float64, delta float64) bool { + diff := math.Abs(f1 - f2) + return diff <= delta +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/integrity/checksum_reader.go b/platform/dbops/binaries/weaviate-src/usecases/integrity/checksum_reader.go new file mode 100644 index 0000000000000000000000000000000000000000..fe51229cc437407b170a895f47eeaefc49a675a3 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/integrity/checksum_reader.go @@ -0,0 +1,60 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package integrity + +import ( + "hash" + "hash/crc32" + "io" +) + +type ChecksumReader interface { + io.Reader + N() int + Hash() []byte + Reset() +} + +var _ ChecksumReader = (*CRC32Reader)(nil) + +type CRC32Reader struct { + r io.Reader + n int + hash hash.Hash32 +} + +func NewCRC32Reader(r io.Reader) *CRC32Reader { + return &CRC32Reader{ + r: r, + hash: crc32.NewIEEE(), + } +} + +func (rc *CRC32Reader) Read(p []byte) (n int, err error) { + n, err = rc.r.Read(p) + rc.n += n + rc.hash.Write(p[:n]) + return n, err +} + +func (rc *CRC32Reader) N() int { + return rc.n +} + +func (rc *CRC32Reader) Hash() []byte { + return rc.hash.Sum(nil) +} + +func (rc *CRC32Reader) Reset() { + rc.n = 0 + rc.hash.Reset() +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/integrity/checksum_writer.go b/platform/dbops/binaries/weaviate-src/usecases/integrity/checksum_writer.go new file mode 100644 index 0000000000000000000000000000000000000000..10afe843d478de91bfc6b1916e1a806e01b94861 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/integrity/checksum_writer.go @@ -0,0 +1,72 @@ +// _ _ +// __ _____ __ ___ ___ __ 
_| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package integrity + +import ( + "hash" + "hash/crc32" + "io" +) + +type ChecksumWriter interface { + io.Writer + N() int + Hash() []byte + HashWrite([]byte) (int, error) + Reset() +} + +var _ ChecksumWriter = (*CRC32Writer)(nil) + +type CRC32Writer struct { + w io.Writer + n int + hash hash.Hash32 +} + +func NewCRC32Writer(w io.Writer) *CRC32Writer { + return &CRC32Writer{ + w: w, + hash: crc32.NewIEEE(), + } +} + +func NewCRC32WriterWithSeed(w io.Writer, seed uint32) *CRC32Writer { + return &CRC32Writer{ + w: w, + hash: NewCRC32Resumable(seed), + } +} + +func (wc *CRC32Writer) Write(p []byte) (n int, err error) { + n, err = wc.w.Write(p) + wc.n += n + wc.hash.Write(p[:n]) + return n, err +} + +func (wc *CRC32Writer) HashWrite(p []byte) (int, error) { + return wc.hash.Write(p) +} + +func (wc *CRC32Writer) N() int { + return wc.n +} + +func (wc *CRC32Writer) Hash() []byte { + return wc.hash.Sum(nil) +} + +func (wc *CRC32Writer) Reset() { + wc.n = 0 + wc.hash.Reset() +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/integrity/file_crc32.go b/platform/dbops/binaries/weaviate-src/usecases/integrity/file_crc32.go new file mode 100644 index 0000000000000000000000000000000000000000..7818710d33bb622b9db4624339cfd0b6ead8c6e3 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/integrity/file_crc32.go @@ -0,0 +1,35 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package integrity + +import ( + "hash/crc32" + "io" + "os" +) + +func CRC32(path string) (n int64, checksum uint32, err error) { + file, err := os.Open(path) + if err != nil { + return 0, 0, err + } + defer file.Close() + + h := crc32.NewIEEE() + + n, err = io.Copy(h, file) + if err != nil { + return 0, 0, err + } + + return n, h.Sum32(), nil +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/integrity/resumable_crc32.go b/platform/dbops/binaries/weaviate-src/usecases/integrity/resumable_crc32.go new file mode 100644 index 0000000000000000000000000000000000000000..6d4f5d9972f7ce876756fbd4d665efe15d924323 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/integrity/resumable_crc32.go @@ -0,0 +1,66 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package integrity + +import ( + "hash/crc32" +) + +// Fixed table for crc32.IEEE +var ieeeTable = crc32.MakeTable(crc32.IEEE) + +// CRC32Resumable implements hash.Hash32 and supports reseeding. +// Always uses crc32.IEEE polynomial. +type CRC32Resumable struct { + crc uint32 +} + +// NewCRC32Resumable creates a new CRC32Resumable starting at a given CRC seed. +func NewCRC32Resumable(seed uint32) *CRC32Resumable { + return &CRC32Resumable{crc: seed} +} + +// Write updates the CRC with data. +func (c *CRC32Resumable) Write(p []byte) (n int, err error) { + c.crc = crc32.Update(c.crc, ieeeTable, p) + return len(p), nil +} + +// Sum returns the checksum appended to b in big-endian order. +func (c *CRC32Resumable) Sum(b []byte) []byte { + s := c.Sum32() + return append(b, + byte(s>>24), + byte(s>>16), + byte(s>>8), + byte(s), + ) +} + +// Reset resets the CRC to zero. 
+func (c *CRC32Resumable) Reset() { + c.crc = 0 +} + +// Size returns the number of bytes Sum will return (4). +func (c *CRC32Resumable) Size() int { return 4 } + +// BlockSize returns the block size of the hash (1). +func (c *CRC32Resumable) BlockSize() int { return 1 } + +// Sum32 returns the current CRC value. +func (c *CRC32Resumable) Sum32() uint32 { return c.crc } + +// Reseed manually sets a new CRC seed value. +func (c *CRC32Resumable) Reseed(seed uint32) { + c.crc = seed +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/logrusext/sampling.go b/platform/dbops/binaries/weaviate-src/usecases/logrusext/sampling.go new file mode 100644 index 0000000000000000000000000000000000000000..d79ad9d0ba0527ba5593fcdbe2ce01fbca79b361 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/logrusext/sampling.go @@ -0,0 +1,57 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package logrusext + +import ( + "sync" + "time" + + "github.com/sirupsen/logrus" +) + +// Sampler is a very simple log sampler which prints up to N log messages +// every tick duration. 
+type Sampler struct { + l logrus.FieldLogger + + mu sync.Mutex + counter int + limit int + tick time.Duration + lastReset time.Time +} + +func NewSampler(l logrus.FieldLogger, n int, tick time.Duration) *Sampler { + return &Sampler{ + l: l, + limit: n, + tick: tick, + } +} + +func (s *Sampler) WithSampling(fn func(l logrus.FieldLogger)) { + now := time.Now() + + s.mu.Lock() + counter := s.counter + if now.Sub(s.lastReset) > s.tick { + counter = 0 + s.lastReset = now + } + counter++ + s.counter = counter + s.mu.Unlock() + + if counter <= s.limit { + fn(s.l) + } +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/logrusext/sampling_test.go b/platform/dbops/binaries/weaviate-src/usecases/logrusext/sampling_test.go new file mode 100644 index 0000000000000000000000000000000000000000..c31069f5345e3a6ce50d317fa366b28fe5cd0ab5 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/logrusext/sampling_test.go @@ -0,0 +1,44 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package logrusext + +import ( + "testing" + "time" + + "github.com/sirupsen/logrus" + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/require" +) + +func TestSampler_Simple(t *testing.T) { + logger, hook := test.NewNullLogger() + sampler := NewSampler(logger, 3, 100*time.Millisecond) + + for range 10 { + sampler.WithSampling(func(l logrus.FieldLogger) { + l.Infof("hello") + }) + } + + require.Len(t, hook.AllEntries(), 3) + hook.Reset() + + time.Sleep(200 * time.Millisecond) + + for range 5 { + sampler.WithSampling(func(l logrus.FieldLogger) { + l.Infof("hello") + }) + } + require.Len(t, hook.AllEntries(), 3) +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/memwatch/monitor.go b/platform/dbops/binaries/weaviate-src/usecases/memwatch/monitor.go new file mode 100644 index 0000000000000000000000000000000000000000..f1fead769ad0bf2892e45a505f7c1def20dab399 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/memwatch/monitor.go @@ -0,0 +1,344 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
// Binary size units.
const (
	B   = 1
	KiB = 1 << (10 * iota) // 2^10
	MiB = 1 << (10 * iota) // 2^20
	GiB = 1 << (10 * iota) // 2^30
	TiB = 1 << (10 * iota) // 2^40
)

const (
	// MappingDelayInS is the extra slack (in seconds) added to reservation
	// times because the mapping count itself is only refreshed periodically.
	MappingDelayInS = 2
	// mappingsEntries sizes the per-second reservation ring buffer: one slot
	// per second of a minute, plus the delay slack.
	mappingsEntries = 60 + MappingDelayInS
)

var (
	ErrNotEnoughMemory   = fmt.Errorf("not enough memory")
	ErrNotEnoughMappings = fmt.Errorf("not enough memory mappings")
)

// Monitor allows making statements about the memory ratio used by the application
type Monitor struct {
	metricsReader     metricsReader
	limitSetter       limitSetter
	maxRatio          float64
	maxMemoryMappings int64

	// state
	mu                     sync.Mutex
	limit                  int64
	usedMemory             int64
	usedMappings           int64
	reservedMappings       int64
	// reservedMappingsBuffer holds reservations keyed by the wall-clock
	// second at which they expire; see CheckMappingAndReserve.
	reservedMappingsBuffer []int64
	lastReservationsClear  time.Time
}

// Refresh retrieves the current memory stats from the runtime and stores them
// in the local cache. Reading the mapping count is comparatively expensive
// (it scans /proc on Linux), so it is only done when updateMappings is set.
func (m *Monitor) Refresh(updateMappings bool) {
	m.obtainCurrentUsage()
	m.updateLimit()
	if updateMappings {
		m.obtainCurrentMappings()
	}
}

// we have no intentions of ever modifying the limit, but SetMemoryLimit with a
// negative value is the only way to read the limit from the runtime
type limitSetter func(size int64) int64

// NewMonitor creates a [Monitor] with the given metrics reader and target
// ratio
//
// Typically this would be called with LiveHeapReader and
// debug.SetMemoryLimit
func NewMonitor(metricsReader metricsReader, limitSetter limitSetter,
	maxRatio float64,
) *Monitor {
	m := &Monitor{
		metricsReader:          metricsReader,
		limitSetter:            limitSetter,
		maxRatio:               maxRatio,
		maxMemoryMappings:      getMaxMemoryMappings(),
		reservedMappingsBuffer: make([]int64, mappingsEntries), // one entry per second + buffer to handle delays
		lastReservationsClear:  time.Now(),
	}
	// populate the cached usage/limit/mapping state immediately so the
	// monitor is usable right after construction
	m.Refresh(true)
	return m
}

// CheckAlloc returns ErrNotEnoughMemory when allocating sizeInBytes on top of
// the cached used memory would push usage past maxRatio of the limit. It does
// not reserve anything.
func (m *Monitor) CheckAlloc(sizeInBytes int64) error {
	m.mu.Lock()
	defer m.mu.Unlock()

	if float64(m.usedMemory+sizeInBytes)/float64(m.limit) > m.maxRatio {
		return ErrNotEnoughMemory
	}

	return nil
}

// CheckMappingAndReserve checks whether numberMappings additional memory
// mappings fit under the OS limit and, if reservationTimeInS > 0, reserves
// them for that many seconds (plus MappingDelayInS slack) by bucketing them
// into the per-second ring buffer at their expiry second.
func (m *Monitor) CheckMappingAndReserve(numberMappings int64, reservationTimeInS int) error {
	m.mu.Lock()
	defer m.mu.Unlock()

	// mappings are only updated every Xs, so we need to extend the reservation time
	if reservationTimeInS > 0 {
		reservationTimeInS += MappingDelayInS
	}
	if reservationTimeInS > len(m.reservedMappingsBuffer) {
		reservationTimeInS = len(m.reservedMappingsBuffer)
	}

	// expire old mappings
	now := time.Now()
	m.reservedMappings -= clearReservedMappings(m.lastReservationsClear, now, m.reservedMappingsBuffer)

	if m.usedMappings+numberMappings+m.reservedMappings > m.maxMemoryMappings {
		// NOTE(review): on this early return lastReservationsClear is not
		// advanced even though expired buckets were already zeroed above;
		// the subtraction cannot double-count because the buckets are reset,
		// but confirm the stale timestamp is acceptable.
		return ErrNotEnoughMappings
	}
	if reservationTimeInS > 0 {
		m.reservedMappings += numberMappings
		// bucket the reservation at its expiry second; the modulo may land on
		// indices 60/61, which the partial-clear paths below reach only via
		// the wrap-around or full-clear branches
		m.reservedMappingsBuffer[(now.Second()+reservationTimeInS)%(mappingsEntries)] += numberMappings
	}

	m.lastReservationsClear = now

	return nil
}

// clearReservedMappings zeroes every reservation bucket that expired between
// lastClear and now and returns the total amount cleared. Buckets are keyed
// by wall-clock second; when more than a full buffer span has elapsed, the
// entire buffer is cleared.
func clearReservedMappings(lastClear time.Time, now time.Time, reservedMappingsBuffer []int64) int64 {
	clearedMappings := int64(0)
	if now.Sub(lastClear) >= mappingsEntries*time.Second {
		// more than a full window passed: everything has expired
		for i := 0; i < len(reservedMappingsBuffer); i++ {
			clearedMappings += reservedMappingsBuffer[i]
			reservedMappingsBuffer[i] = 0
		}
	} else if now.Second() == lastClear.Second() {
		// do nothing
	} else if now.Second() > lastClear.Second() {
		// the value of the last refresh was already cleared
		for i := lastClear.Second() + 1; i <= now.Second(); i++ {
			clearedMappings += reservedMappingsBuffer[i]
			reservedMappingsBuffer[i] = 0
		}
	} else {
		// wrap around, the value of the last refresh was already cleared
		// NOTE(review): this branch clears up to len(buffer) (incl. slots
		// 60/61) but stops before now.Second() — exclusive, unlike the
		// inclusive forward branch above; confirm the asymmetry is intended.
		for i := lastClear.Second() + 1; i < len(reservedMappingsBuffer); i++ {
			clearedMappings += reservedMappingsBuffer[i]
			reservedMappingsBuffer[i] = 0
		}
		for i := 0; i < now.Second(); i++ {
			clearedMappings += reservedMappingsBuffer[i]
			reservedMappingsBuffer[i] = 0
		}
	}
	return clearedMappings
}

// Ratio reports cached used memory divided by the cached limit.
func (m *Monitor) Ratio() float64 {
	m.mu.Lock()
	defer m.mu.Unlock()

	return float64(m.usedMemory) / float64(m.limit)
}

// obtainCurrentUsage obtains the most recent live heap from runtime/metrics
func (m *Monitor) obtainCurrentUsage() {
	m.setUsed(m.metricsReader())
}

// obtainCurrentMappings refreshes the cached mapping count and publishes it
// as a metric.
func (m *Monitor) obtainCurrentMappings() {
	used := getCurrentMappings()
	monitoring.GetMetrics().MmapProcMaps.Set(float64(used))
	m.mu.Lock()
	defer m.mu.Unlock()
	m.usedMappings = used
}

// getCurrentMappings returns the process's current memory-mapping count.
// Only implemented for Linux; other platforms report 0.
func getCurrentMappings() int64 {
	switch runtime.GOOS {
	case "linux":
		return currentMappingsLinux()
	default:
		return 0
	}
}

// Counts the number of mappings by counting the number of lines within the maps file
func currentMappingsLinux() int64 {
	filePath := fmt.Sprintf("/proc/%d/maps", os.Getpid())
	file, err := os.Open(filePath)
	if err != nil {
		// best effort: treat an unreadable maps file as zero mappings
		return 0
	}
	defer file.Close()

	var mappings int64
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		mappings++
	}

	if err := scanner.Err(); err != nil {
		return 0
	}

	return mappings
}

// getMaxMemoryMappings determines the OS limit on memory mappings: an
// explicit MAX_MEMORY_MAPPINGS env override wins, then the Linux
// max_map_count sysctl, otherwise effectively unlimited.
func getMaxMemoryMappings() int64 {
	maxMappings := int64(math.MaxInt64)

	// get user provided default
	if v := os.Getenv("MAX_MEMORY_MAPPINGS"); v != "" {
		asInt, err := strconv.Atoi(v)
		if err == nil {
			return int64(asInt)
		}
	}

	// different operating systems have different ways of finding the max
	switch runtime.GOOS {
	case "linux":
		return readMaxMemoryMappingsLinux(maxMappings)
	default:
		return maxMappings // macos does not seem to have a readable limit
	}
}
return defaultValue + } + defer file.Close() + + scanner := bufio.NewScanner(file) + // Read the value from the file + if scanner.Scan() { + asInt, err := strconv.Atoi(scanner.Text()) + if err != nil { + return defaultValue + } + return int64(float64(asInt) * 0.7) // leave room for other processes on the system + } + return defaultValue +} + +func LiveHeapReader() int64 { + const liveHeapBytesMetric = "/gc/heap/live:bytes" + sample := make([]metrics.Sample, 1) + sample[0].Name = liveHeapBytesMetric + metrics.Read(sample) + + if sample[0].Value.Kind() == metrics.KindBad { + panic(fmt.Sprintf("metric %q no longer supported", liveHeapBytesMetric)) + } + + return int64(sample[0].Value.Uint64()) +} + +// setUsed is a thread-safe way to set the current usage +func (m *Monitor) setUsed(used int64) { + m.mu.Lock() + defer m.mu.Unlock() + + m.usedMemory = used +} + +func (m *Monitor) updateLimit() { + m.mu.Lock() + defer m.mu.Unlock() + + // setting a negative limit is the only way to obtain the current limit + m.limit = m.limitSetter(-1) +} + +func NewDummyMonitor() *Monitor { + m := &Monitor{ + metricsReader: func() int64 { return 0 }, + limitSetter: func(size int64) int64 { return TiB }, + maxRatio: 1, + maxMemoryMappings: 10000000, + reservedMappingsBuffer: make([]int64, mappingsEntries), + lastReservationsClear: time.Now(), + } + m.Refresh(true) + return m +} + +type metricsReader func() int64 + +type AllocChecker interface { + CheckAlloc(sizeInBytes int64) error + CheckMappingAndReserve(numberMappings int64, reservationTimeInS int) error + Refresh(updateMappings bool) +} + +func EstimateObjectMemory(object *models.Object) int64 { + // Note: This is very much oversimplified. It assumes that we always need + // the footprint of the full vector and it assumes a fixed overhead of 30B + // per vector. In reality this depends on the HNSW settings - and possibly + // in the future we might have completely different index types. 
+ // + // However, in the meantime this should be a fairly reasonable estimate, as + // it's not meant to fail exactly on the last available byte, but rather + // prevent OOM crashes. Given the fuzziness and async style of the + // memtracking somewhat decent estimate should be good enough. + return int64(len(object.Vector)*4 + 30) +} + +func EstimateStorObjectMemory(object *storobj.Object) int64 { + // Note: The estimation is not super accurate. It assumes that the + // memory is mostly used by the vector of float32 + the fixed + // overhead. It assumes a fixed overhead of 46 Bytes per object + // (30 Bytes from the data field models.Object + 16 Bytes from + // remaining data fields of storobj.Object). + return int64(len(object.Vector)*4 + 46) +} + +func EstimateObjectDeleteMemory() int64 { + // When deleting an object we attach a tombstone to the object in the HNSW and a new segment in the Memtable and + // additional other temporary allocations. + // The total amount is hard to guess, so we go with a default of 100 bytes. + estimate := int64(100) + if v := os.Getenv("MEMORY_ESTIMATE_DELETE_BYTES"); v != "" { + asInt, err := strconv.Atoi(v) + if err != nil { + return estimate + } + + return int64(asInt) + } + return estimate +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/memwatch/monitor_test.go b/platform/dbops/binaries/weaviate-src/usecases/memwatch/monitor_test.go new file mode 100644 index 0000000000000000000000000000000000000000..1e0b94283f1b1c1393778b22d6a831aeeb077bbf --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/memwatch/monitor_test.go @@ -0,0 +1,306 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package memwatch + +import ( + "errors" + "math" + "os" + "runtime" + "runtime/debug" + "strconv" + "syscall" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestEstimation(t *testing.T) { + t.Run("set correctly", func(t *testing.T) { + t.Setenv("MEMORY_ESTIMATE_DELETE_BYTES", "120") + assert.Equal(t, int64(120), EstimateObjectDeleteMemory()) + }) + + t.Run("set wrong - use default", func(t *testing.T) { + t.Setenv("MEMORY_ESTIMATE_DELETE_BYTES", "abc") + assert.Equal(t, int64(100), EstimateObjectDeleteMemory()) + }) + + t.Run("unset - use default", func(t *testing.T) { + t.Setenv("MEMORY_ESTIMATE_DELETE_BYTES", "") + assert.Equal(t, int64(100), EstimateObjectDeleteMemory()) + }) +} + +func TestMonitor(t *testing.T) { + t.Run("with constant profiles (no changes)", func(t *testing.T) { + metrics := &fakeHeapReader{val: 30000} + limiter := &fakeLimitSetter{limit: 100000} + + m := NewMonitor(metrics.Read, limiter.SetMemoryLimit, 0.97) + m.Refresh(true) + + assert.Equal(t, 0.3, m.Ratio()) + }) + + t.Run("with less memory than the threshold", func(t *testing.T) { + metrics := &fakeHeapReader{val: 700 * MiB} + limiter := &fakeLimitSetter{limit: 1 * GiB} + + m := NewMonitor(metrics.Read, limiter.SetMemoryLimit, 0.97) + m.Refresh(true) + + err := m.CheckAlloc(100 * MiB) + assert.NoError(t, err, "with 700 allocated, an additional 100 would be about 80% which is not a problem") + + err = m.CheckAlloc(299 * MiB) + assert.Error(t, err, "with 700 allocated, an additional 299 would be about 97.5% which is not allowed") + + err = m.CheckAlloc(400 * MiB) + assert.Error(t, err, "with 700 allocated, an additional 400 would be about 110% which is not allowed") + }) + + t.Run("with memory already over the threshold", func(t *testing.T) { + metrics := &fakeHeapReader{val: 1025 * MiB} + limiter := &fakeLimitSetter{limit: 1 * GiB} + + m := NewMonitor(metrics.Read, 
limiter.SetMemoryLimit, 0.97) + m.Refresh(true) + + err := m.CheckAlloc(1 * B) + assert.Error(t, err, + "any check should fail, since we're already over the limit") + + err = m.CheckAlloc(10 * MiB) + assert.Error(t, err, "any check should fail, since we're already over the limit") + + err = m.CheckAlloc(1 * TiB) + assert.Error(t, err, "any check should fail, since we're already over the limit") + }) + + t.Run("with real dependencies", func(t *testing.T) { + m := NewMonitor(LiveHeapReader, debug.SetMemoryLimit, 0.97) + _ = m.Ratio() + }) +} + +func TestMappings(t *testing.T) { + // dont matter here + metrics := &fakeHeapReader{val: 30000} + limiter := &fakeLimitSetter{limit: 100000} + + t.Run("max memory mappings set correctly", func(t *testing.T) { + t.Setenv("MAX_MEMORY_MAPPINGS", "120") + assert.Equal(t, int64(120), getMaxMemoryMappings()) + }) + + t.Run("max memory mappings incorrectly", func(t *testing.T) { + t.Setenv("MAX_MEMORY_MAPPINGS", "abc") + switch runtime.GOOS { + case "linux": + // we can read the max value, but it does not exist on all systems + if _, err := os.Stat("/proc/sys/vm/max_map_count"); errors.Is(err, os.ErrNotExist) { + assert.Equal(t, getMaxMemoryMappings(), int64(math.MaxInt64)) + } else { + assert.Greater(t, getMaxMemoryMappings(), int64(0)) + assert.Less(t, getMaxMemoryMappings(), int64(math.MaxInt64)) + } + default: + // cant read on other OS so we use max int + assert.Equal(t, getMaxMemoryMappings(), int64(math.MaxInt64)) + } + }) + + t.Run("max memory mappings not set", func(t *testing.T) { + t.Setenv("MAX_MEMORY_MAPPINGS", "") + switch runtime.GOOS { + case "linux": + // we can read the max value, but it does not exist on all systems + if _, err := os.Stat("/proc/sys/vm/max_map_count"); errors.Is(err, os.ErrNotExist) { + assert.Equal(t, getMaxMemoryMappings(), int64(math.MaxInt64)) + } else { + assert.Greater(t, getMaxMemoryMappings(), int64(0)) + assert.Less(t, getMaxMemoryMappings(), int64(math.MaxInt64)) + } + default: + // cant 
read on other OS so we use max int + assert.Equal(t, getMaxMemoryMappings(), int64(math.MaxInt64)) + } + }) + + t.Run("current memory settings", func(t *testing.T) { + switch runtime.GOOS { + case "linux": + assert.Greater(t, getCurrentMappings(), int64(0)) + assert.Less(t, getCurrentMappings(), int64(math.MaxInt64)) + case "darwin": + assert.Equal(t, getCurrentMappings(), int64(0)) + } + }) + + t.Run("check mappings, by open many file mappings and close them only after the test is done", func(t *testing.T) { + if runtime.GOOS == "darwin" { + t.Skip("macOS does not have a limit on mappings") + } + currentMappings := getCurrentMappings() + addMappings := 15 + t.Setenv("MAX_MEMORY_MAPPINGS", strconv.FormatInt(currentMappings+int64(addMappings), 10)) + m := NewMonitor(metrics.Read, limiter.SetMemoryLimit, 0.97) + m.Refresh(true) + + mappingsLeft := getMaxMemoryMappings() - currentMappings + assert.InDelta(t, mappingsLeft, addMappings, 10) // other things can happen at the same time + path := t.TempDir() + + limitReached := false + + // use up available mappings + for i := 0; i < int(mappingsLeft)*2; i++ { + m.Refresh(true) + file, err := os.OpenFile(path+"example"+strconv.FormatInt(int64(i), 10)+".txt", os.O_CREATE|os.O_RDWR, 0o666) + require.Nil(t, err) + defer file.Close() // defer inside the loop because files should stay open until end of test to continue to use mappings + _, err = file.Write([]byte("Hello")) + require.Nil(t, err) + + fileInfo, err := file.Stat() + require.Nil(t, err) + + // there might be other processes that use mappings. 
Don't check any specific number just that we have + // reached the limit + if mappingsLeft := getMaxMemoryMappings() - getCurrentMappings(); mappingsLeft <= 0 { + limitReached = true + break + } else { + data, err := syscall.Mmap(int(file.Fd()), 0, int(fileInfo.Size()), syscall.PROT_READ, syscall.MAP_SHARED) + require.Nil(t, err) + + defer syscall.Munmap(data) + } + } + + // Try to reserve a large amount and have it fail (checker only runs on linux) + switch runtime.GOOS { + case "linux": + // ensure that we have hit the limit of available mappings + require.True(t, limitReached) + // any further mapping should fail + require.Error(t, m.CheckMappingAndReserve(int64(addMappings), 60)) + case "darwin": + // ensure that we don't hit the limit of available mappings + require.False(t, limitReached) + // any further mapping should not fail + require.Nil(t, m.CheckMappingAndReserve(int64(addMappings), 60)) + } + }) + + t.Run("check mappings for dummy, to check that it never blocks", func(t *testing.T) { + m := NewDummyMonitor() + m.Refresh(true) + + path := t.TempDir() + // use many mappings, dummy monitor should never block + for i := 0; i < 100; i++ { + m.Refresh(true) + file, err := os.OpenFile(path+"example"+strconv.FormatInt(int64(i), 10)+".txt", os.O_CREATE|os.O_RDWR, 0o666) + require.Nil(t, err) + defer file.Close() // defer inside the loop because files should stay open until end of test to continue to use mappings + _, err = file.Write([]byte("Hello")) + require.Nil(t, err) + + fileInfo, err := file.Stat() + require.Nil(t, err) + + require.Nil(t, m.CheckMappingAndReserve(1, 1)) + data, err := syscall.Mmap(int(file.Fd()), 0, int(fileInfo.Size()), syscall.PROT_READ, syscall.MAP_SHARED) + require.Nil(t, err) + + defer syscall.Munmap(data) + } + }) + + t.Run("check reservations", func(t *testing.T) { + currentMappings := getCurrentMappings() + addMappings := 15 + t.Setenv("MAX_MEMORY_MAPPINGS", strconv.FormatInt(currentMappings+int64(addMappings), 10)) + maxMappings 
:= getMaxMemoryMappings() + m := NewMonitor(metrics.Read, limiter.SetMemoryLimit, 0.97) + m.Refresh(true) + + // reserve up available mappings + for i := 0; i < int(addMappings)+5; i++ { + // there might be other processes that use mappings + if maxMappings-m.usedMappings-int64(i) <= 0 { + require.NotNil(t, m.CheckMappingAndReserve(1, 60)) + } else { + require.Nil(t, m.CheckMappingAndReserve(1, 60)) + } + } + + // any further mapping should fail + require.NotNil(t, m.CheckMappingAndReserve(1, 60)) + }) +} + +func TestMappingsReservationClearing(t *testing.T) { + baseTime, err := time.Parse(time.RFC3339, "2021-01-01T00:00:30Z") + require.Nil(t, err) + cases := []struct { + name string + baseLineShift int + nowShift int + reservations map[int]int64 + expectedClearing int64 + }{ + {name: "no reservations", reservations: map[int]int64{}, expectedClearing: 0}, + {name: "reservations present, no expiration", nowShift: 1, reservations: map[int]int64{1: 45, 2: 14}, expectedClearing: 0}, + {name: "reservations present, one expiration", nowShift: 1, reservations: map[int]int64{1: 45, 31: 14}, expectedClearing: 14}, + {name: "reservations present, clear all", nowShift: 62, reservations: map[int]int64{0: 1, 1: 1, 2: 1, 59: 1}, expectedClearing: 4}, + {name: "reservations present, clear nothing (same time)", reservations: map[int]int64{0: 1, 30: 1, 2: 1, 59: 1}, expectedClearing: 0}, + {name: "clear range", nowShift: 20, reservations: map[int]int64{0: 10, 29: 10, 30: 1, 31: 1, 50: 1, 51: 10}, expectedClearing: 2}, + {name: "clear over minute wraparound", nowShift: 45, reservations: map[int]int64{0: 1, 1: 1, 2: 1, 29: 10, 59: 1}, expectedClearing: 4}, + {name: "dont clear value of last refresh", nowShift: 2, reservations: map[int]int64{0: 1, 30: 1, 31: 1, 32: 1, 33: 1}, expectedClearing: 2}, + } + for _, tt := range cases { + t.Run(tt.name, func(t *testing.T) { + baseTimeLocal := baseTime.Add(time.Duration(tt.baseLineShift) * time.Second) + now := 
baseTime.Add(time.Duration(tt.nowShift) * time.Second) + reservationBuffer := make([]int64, 62) + for i, v := range tt.reservations { + reservationBuffer[i] = v + } + require.Equal(t, clearReservedMappings(baseTimeLocal, now, reservationBuffer), tt.expectedClearing) + }) + } +} + +type fakeHeapReader struct { + val int64 +} + +func (f fakeHeapReader) Read() int64 { + return f.val +} + +type fakeLimitSetter struct { + limit int64 +} + +func (f *fakeLimitSetter) SetMemoryLimit(newLimit int64) int64 { + if newLimit >= 0 { + panic("should have been read only") + } + + return f.limit +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/mmap/mmap.go b/platform/dbops/binaries/weaviate-src/usecases/mmap/mmap.go new file mode 100644 index 0000000000000000000000000000000000000000..b197faafae7f821b9084b0c9ec81ce42c3a4b052 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/mmap/mmap.go @@ -0,0 +1,34 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Package mmap is replicating the functionality of the mmap-go package, however, +// it optimizes usage on darwin and linux to use raw syscalls instead of the golang.org/x/sys/unix +// package which introduces an additional mutex. +package mmap + +const ( + // RDONLY maps the memory read-only. + // Attempts to write to the MMap object will result in undefined behavior. + RDONLY = 0 + // RDWR maps the memory as read-write. Writes to the MMap object will update the + // underlying file. + RDWR = 1 << iota + // COPY maps the memory as copy-on-write. Writes to the MMap object will affect + // memory, but the underlying file will remain unchanged. + COPY + // If EXEC is set, the mapped memory is marked as executable. 
+ EXEC +) + +const ( + // If the ANON flag is set, the mapped memory will not be backed by a file. + ANON = 1 << iota +) diff --git a/platform/dbops/binaries/weaviate-src/usecases/mmap/mmap_other.go b/platform/dbops/binaries/weaviate-src/usecases/mmap/mmap_other.go new file mode 100644 index 0000000000000000000000000000000000000000..ad8251632d8f1269c08fe6cc5efad64398aad7c4 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/mmap/mmap_other.go @@ -0,0 +1,26 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +//go:build !(darwin || linux) + +package mmap + +import ( + "os" + + "github.com/edsrzf/mmap-go" +) + +type MMap = mmap.MMap + +func MapRegion(f *os.File, length int, prot, flags int, offset int64) (MMap, error) { + return mmap.MapRegion(f, length, prot, flags, offset) +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/mmap/mmap_test.go b/platform/dbops/binaries/weaviate-src/usecases/mmap/mmap_test.go new file mode 100644 index 0000000000000000000000000000000000000000..4035bc9f46646c8e958b9df464811fe38e2e4527 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/mmap/mmap_test.go @@ -0,0 +1,55 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +//go:build darwin || linux + +package mmap + +import ( + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestMmapFile(t *testing.T) { + var ( + tmpDir = t.TempDir() + filePath = filepath.Join(tmpDir, "mmap.txt") + + data = generateData(10 * 1024 * 1024) // 10 MB + ) + + require.NoError(t, os.WriteFile(filePath, data, 0o600)) + + file, err := os.Open(filePath) + require.NoError(t, err) + defer file.Close() + + fileInfo, err := file.Stat() + require.NoError(t, err) + + mmapedData, err := MapRegion(file, int(fileInfo.Size()), RDONLY, 0, 0) + require.NoError(t, err) + + require.Equal(t, data, []byte(mmapedData)) + + require.NoError(t, mmapedData.Unmap()) +} + +func generateData(size int) []byte { + data := make([]byte, size) + for i := 0; i < size; i++ { + data[i] = byte(i % 256) + } + return data +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/mmap/mmap_unix.go b/platform/dbops/binaries/weaviate-src/usecases/mmap/mmap_unix.go new file mode 100644 index 0000000000000000000000000000000000000000..e1ed35f56dd79425c0dd14bbcc2b09af6a019d18 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/mmap/mmap_unix.go @@ -0,0 +1,96 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+//
+// CONTACT: hello@weaviate.io
+//
+
+//go:build darwin || linux
+
+package mmap
+
+import (
+	"fmt"
+	"os"
+	"syscall"
+	"unsafe"
+
+	"golang.org/x/sys/unix"
+)
+
+type MMap []byte
+
+func MapRegion(f *os.File, length int, prot, flags int, offset int64) (MMap, error) {
+	if offset%int64(os.Getpagesize()) != 0 {
+		return nil, fmt.Errorf("offset parameter must be a multiple of the system's page size")
+	}
+
+	var fd uintptr
+	if flags&ANON == 0 {
+		fd = uintptr(f.Fd())
+		if length < 0 {
+			fi, err := f.Stat()
+			if err != nil {
+				return nil, err
+			}
+			length = int(fi.Size())
+		}
+	} else {
+		if length <= 0 {
+			return nil, fmt.Errorf("anonymous mapping requires non-zero length")
+		}
+		fd = ^uintptr(0)
+	}
+
+	return mmap(length, uintptr(prot), uintptr(flags), fd, offset)
+}
+
+func mmap(len int, inprot, inflags, fd uintptr, off int64) ([]byte, error) {
+	flags := unix.MAP_SHARED
+	prot := unix.PROT_READ
+	switch {
+	case inprot&COPY != 0:
+		prot |= unix.PROT_WRITE
+		flags = unix.MAP_PRIVATE
+	case inprot&RDWR != 0:
+		prot |= unix.PROT_WRITE
+	}
+	if inprot&EXEC != 0 {
+		prot |= unix.PROT_EXEC
+	}
+	if inflags&ANON != 0 {
+		flags |= unix.MAP_ANON
+	}
+
+	b, err := unixMmap(int(fd), off, len, prot, flags)
+	if err != nil {
+		return nil, err
+	}
+	return b, nil
+}
+
+func unixMmap(fd int, offset int64, length int, prot int, flags int) ([]byte, error) {
+	addr, _, errno := syscall.Syscall6(syscall.SYS_MMAP, 0, uintptr(length), uintptr(prot), uintptr(flags), uintptr(fd), uintptr(offset))
+	if errno != 0 {
+		return nil, errno
+	}
+	return unsafe.Slice((*byte)(unsafe.Pointer(addr)), length), nil
+}
+
+func (m *MMap) Unmap() error {
+	data := *m
+	if len(data) == 0 {
+		return nil
+	}
+	addr := uintptr(unsafe.Pointer(&data[0]))
+	_, _, errno := syscall.Syscall(syscall.SYS_MUNMAP, addr, uintptr(len(data)), 0)
+	if errno != 0 {
+		return errno
+	}
+	*m = nil
+	return nil
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/client_results.go 
b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/client_results.go new file mode 100644 index 0000000000000000000000000000000000000000..77524c7e6c51c4723819b4af1727ab4b8a59a786 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/client_results.go @@ -0,0 +1,124 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package modulecomponents + +import ( + "time" + + "github.com/weaviate/weaviate/entities/dto" + "github.com/weaviate/weaviate/usecases/monitoring" +) + +type RateLimits struct { + LastOverwrite time.Time + AfterRequestFunction func(limits *RateLimits, tokensUsed int, deductRequest bool) + LimitRequests int + LimitTokens int + RemainingRequests int + RemainingTokens int + ReservedRequests int + ReservedTokens int + ResetRequests time.Time + ResetTokens time.Time + Label string + UpdateWithMissingValues bool +} + +func (rl *RateLimits) ResetAfterRequestFunction(tokensUsed int) { + if rl.AfterRequestFunction != nil { + rl.AfterRequestFunction(rl, tokensUsed, true) + } +} + +func (rl *RateLimits) CheckForReset() { + if rl.AfterRequestFunction != nil { + rl.AfterRequestFunction(rl, 0, false) + } +} + +func (rl *RateLimits) CanSendFullBatch(numRequests int, batchTokens int, addMetrics bool, metricsLabel string) bool { + freeRequests := rl.RemainingRequests - rl.ReservedRequests + freeTokens := rl.RemainingTokens - rl.ReservedTokens + + stats := monitoring.GetMetrics().T2VRepeatStats + + if addMetrics { + stats.WithLabelValues(metricsLabel, "free_requests").Set(float64(freeRequests)) + stats.WithLabelValues(metricsLabel, "free_tokens").Set(float64(freeTokens)) + stats.WithLabelValues(metricsLabel, "expected_requests").Set(float64(numRequests)) + stats.WithLabelValues(metricsLabel, 
"expected_tokens").Set(float64(batchTokens)) + } + + fitsCurrentBatch := freeRequests >= numRequests && freeTokens >= batchTokens + if !fitsCurrentBatch { + return false + } + + // also make sure that we do not "spend" all the rate limit at once + var percentageOfRequests, percentageOfTokens int + if rl.LimitRequests > 0 { + percentageOfRequests = numRequests * 100 / rl.LimitRequests + } + if rl.LimitTokens > 0 { + percentageOfTokens = batchTokens * 100 / rl.LimitTokens + } + + if addMetrics { + stats.WithLabelValues(metricsLabel, "percentage_of_requests").Set(float64(percentageOfRequests)) + stats.WithLabelValues(metricsLabel, "percentage_of_tokens").Set(float64(percentageOfTokens)) + } + + // the clients aim for 10s per batch, or 6 batches per minute in sequential-mode. 15% is somewhat below that to + // account for some variance in the rate limits + return percentageOfRequests <= 15 && percentageOfTokens <= 15 +} + +func (rl *RateLimits) UpdateWithRateLimit(other *RateLimits) { + if other.UpdateWithMissingValues { + return + } + rl.LimitRequests = other.LimitRequests + rl.LimitTokens = other.LimitTokens + rl.ResetRequests = other.ResetRequests + rl.ResetTokens = other.ResetTokens + rl.RemainingRequests = other.RemainingRequests + rl.RemainingTokens = other.RemainingTokens +} + +func (rl *RateLimits) IsInitialized() bool { + return rl.RemainingRequests == 0 && rl.RemainingTokens == 0 +} + +type VectorizationResult[T dto.Embedding] struct { + Text []string + Dimensions int + Vector []T + Errors []error +} + +type VectorizationCLIPResult[T dto.Embedding] struct { + TextVectors []T + ImageVectors []T +} + +type Usage struct { + CompletionTokens int `json:"completion_tokens,omitempty"` + PromptTokens int `json:"prompt_tokens,omitempty"` + TotalTokens int `json:"total_tokens,omitempty"` +} + +func GetTotalTokens(usage *Usage) int { + if usage == nil { + return -1 + } + return usage.TotalTokens +} diff --git 
a/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/generic_module.go b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/generic_module.go new file mode 100644 index 0000000000000000000000000000000000000000..eb1d48eb6d602deead9b2f41576dbd0ca4dc11c5 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/generic_module.go @@ -0,0 +1,96 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package modulecomponents + +import ( + "fmt" + + "github.com/sirupsen/logrus" + "github.com/tailor-inc/graphql" + "github.com/weaviate/weaviate/entities/modulecapabilities" + "github.com/weaviate/weaviate/usecases/modulecomponents/additional" + "github.com/weaviate/weaviate/usecases/modulecomponents/arguments/nearAudio" + "github.com/weaviate/weaviate/usecases/modulecomponents/arguments/nearDepth" + "github.com/weaviate/weaviate/usecases/modulecomponents/arguments/nearImage" + "github.com/weaviate/weaviate/usecases/modulecomponents/arguments/nearImu" + "github.com/weaviate/weaviate/usecases/modulecomponents/arguments/nearText" + "github.com/weaviate/weaviate/usecases/modulecomponents/arguments/nearThermal" + "github.com/weaviate/weaviate/usecases/modulecomponents/arguments/nearVideo" +) + +type ArgumentType int + +const ( + Get ArgumentType = iota + Aggregate + Explore +) + +const AdditionalPropertyGenerate = additional.PropertyGenerate + +func GetGenericArgument(name, className string, argumentType ArgumentType, + nearTextTransformer modulecapabilities.TextTransform, +) *graphql.ArgumentConfig { + switch name { + case nearText.Name: + return getGenericArgument(nearText.New(nearTextTransformer).Arguments()[name], className, argumentType) + case nearImage.Name: + return 
getGenericArgument(nearImage.New().Arguments()[name], className, argumentType) + case nearAudio.Name: + return getGenericArgument(nearAudio.New().Arguments()[name], className, argumentType) + case nearDepth.Name: + return getGenericArgument(nearDepth.New().Arguments()[name], className, argumentType) + case nearImu.Name: + return getGenericArgument(nearImu.New().Arguments()[name], className, argumentType) + case nearThermal.Name: + return getGenericArgument(nearThermal.New().Arguments()[name], className, argumentType) + case nearVideo.Name: + return getGenericArgument(nearVideo.New().Arguments()[name], className, argumentType) + default: + panic(fmt.Sprintf("Unknown generic argument: %s", name)) + } +} + +func getGenericArgument(arg modulecapabilities.GraphQLArgument, + className string, argumentType ArgumentType, +) *graphql.ArgumentConfig { + switch argumentType { + case Get: + return arg.GetArgumentsFunction(className) + case Aggregate: + return arg.AggregateArgumentsFunction(className) + case Explore: + return arg.ExploreArgumentsFunction() + default: + panic(fmt.Sprintf("Unknown argument type: %v", argumentType)) + } +} + +func GetGenericAdditionalProperty(name, className string) *modulecapabilities.AdditionalProperty { + switch name { + case additional.PropertyFeatureProjection: + fp := additional.NewText2VecProvider().AdditionalProperties()[additional.PropertyFeatureProjection] + return &fp + default: + return nil + } +} + +func GetGenericGenerateProperty( + className string, + additionalGenerativeParameters map[string]modulecapabilities.GenerativeProperty, + defaultProviderName string, + logger logrus.FieldLogger, +) *modulecapabilities.AdditionalProperty { + generate := additional.NewGenericGenerativeProvider(className, additionalGenerativeParameters, defaultProviderName, logger).AdditionalProperties()[additional.PropertyGenerate] + return &generate +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/header_values.go 
b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/header_values.go new file mode 100644 index 0000000000000000000000000000000000000000..bb3108778fc7c88132bf6b8ab476dad5209994b7 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/header_values.go @@ -0,0 +1,66 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package modulecomponents + +import ( + "context" + "fmt" + "strconv" + "strings" + + "google.golang.org/grpc/metadata" +) + +func GetValueFromGRPC(ctx context.Context, key string) []string { + md, ok := metadata.FromIncomingContext(ctx) + if ok { + // the grpc library will lowercase all md keys, so we need to make sure to check a lowercase key + apiKey, ok := md[strings.ToLower(key)] + if ok { + return apiKey + } + } + return nil +} + +func GetValueFromContext(ctx context.Context, key string) string { + if value := ctx.Value(key); value != nil { + if keyHeader, ok := value.([]string); ok && len(keyHeader) > 0 && len(keyHeader[0]) > 0 { + return keyHeader[0] + } + } + // try getting header from GRPC if not successful + if value := GetValueFromGRPC(ctx, key); len(value) > 0 && len(value[0]) > 0 { + return value[0] + } + + return "" +} + +func GetRateLimitFromContext(ctx context.Context, moduleName string, defaultRPM, defaultTPM int) (int, int) { + returnRPM := defaultRPM + returnTPM := defaultTPM + if rpmS := GetValueFromContext(ctx, fmt.Sprintf("X-%s-Ratelimit-RequestPM-Embedding", moduleName)); rpmS != "" { + s, err := strconv.Atoi(rpmS) + if err == nil { + returnRPM = s + } + } + if tpmS := GetValueFromContext(ctx, fmt.Sprintf("X-%s-Ratelimit-TokenPM-Embedding", moduleName)); tpmS != "" { + s, err := strconv.Atoi(tpmS) + if err == nil { + returnTPM = s + } + } + + return 
returnRPM, returnTPM +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/modules/compare.go b/platform/dbops/binaries/weaviate-src/usecases/modules/compare.go new file mode 100644 index 0000000000000000000000000000000000000000..5c389a7ae1377c478e180eb17c65fa90ac31d18a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/modules/compare.go @@ -0,0 +1,237 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package modules + +import ( + "context" + "fmt" + + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/dto" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/modulecapabilities" + "github.com/weaviate/weaviate/entities/moduletools" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/search" +) + +func reVectorize(ctx context.Context, + cfg moduletools.ClassConfig, + mod modulecapabilities.Vectorizer[[]float32], + object *models.Object, + class *models.Class, + sourceProperties []string, + targetVector string, + findObjectFn modulecapabilities.FindObjectFn, + reVectorizeDisabled bool, +) (bool, models.AdditionalProperties, []float32, error) { + if reVectorizeDisabled { + return true, nil, nil, nil + } + + shouldReVectorize, oldObject := reVectorizeEmbeddings(ctx, cfg, mod, object, class, sourceProperties, findObjectFn) + if shouldReVectorize { + return shouldReVectorize, nil, nil, nil + } + + if targetVector == "" { + return false, oldObject.AdditionalProperties, oldObject.Vector, nil + } else { + vector, err := getVector(oldObject.Vectors[targetVector]) + if err != nil { + return false, nil, nil, fmt.Errorf("get vector: %w", err) + } + return false, oldObject.AdditionalProperties, vector, 
nil + } +} + +func getVector(v models.Vector) ([]float32, error) { + switch vector := v.(type) { + case nil: + return nil, nil + case []float32: + return vector, nil + default: + return nil, fmt.Errorf("unrecognized vector type: %T", v) + } +} + +func reVectorizeMulti(ctx context.Context, + cfg moduletools.ClassConfig, + mod modulecapabilities.Vectorizer[[][]float32], + object *models.Object, + class *models.Class, + sourceProperties []string, + targetVector string, + findObjectFn modulecapabilities.FindObjectFn, + reVectorizeDisabled bool, +) (bool, models.AdditionalProperties, [][]float32, error) { + if reVectorizeDisabled { + return true, nil, nil, nil + } + + shouldReVectorize, oldObject := reVectorizeEmbeddings(ctx, cfg, mod, object, class, sourceProperties, findObjectFn) + if shouldReVectorize { + return shouldReVectorize, nil, nil, nil + } + + if targetVector == "" { + return false, oldObject.AdditionalProperties, nil, nil + } else { + multiVector, err := getMultiVector(oldObject.Vectors[targetVector]) + if err != nil { + return false, nil, nil, fmt.Errorf("get multi vector: %w", err) + } + return false, oldObject.AdditionalProperties, multiVector, nil + } +} + +func getMultiVector(v models.Vector) ([][]float32, error) { + switch vector := v.(type) { + case nil: + return nil, nil + case [][]float32: + return vector, nil + default: + return nil, fmt.Errorf("unrecognized multi vector type: %T", v) + } +} + +func reVectorizeEmbeddings[T dto.Embedding](ctx context.Context, + cfg moduletools.ClassConfig, + mod modulecapabilities.Vectorizer[T], + object *models.Object, + class *models.Class, + sourceProperties []string, + findObjectFn modulecapabilities.FindObjectFn, +) (bool, *search.Result) { + textProps, mediaProps, err := mod.VectorizableProperties(cfg) + if err != nil { + return true, nil + } + + type compareProps struct { + Name string + IsArray bool + } + propsToCompare := make([]compareProps, 0) + + var sourcePropsSet map[string]struct{} = nil + if 
len(sourceProperties) > 0 { + sourcePropsSet = make(map[string]struct{}, len(sourceProperties)) + for _, sourceProp := range sourceProperties { + sourcePropsSet[sourceProp] = struct{}{} + } + } + mediaPropsSet := make(map[string]struct{}, len(mediaProps)) + for _, mediaProp := range mediaProps { + mediaPropsSet[mediaProp] = struct{}{} + } + + for _, prop := range class.Properties { + if len(prop.DataType) > 1 { + continue // multi cref + } + + // for named vectors with explicit source properties, skip if not in the list + if sourcePropsSet != nil { + if _, ok := sourcePropsSet[prop.Name]; !ok { + continue + } + } + + if prop.ModuleConfig != nil { + if modConfig, ok := prop.ModuleConfig.(map[string]interface{})[class.Vectorizer]; ok { + if skip, ok2 := modConfig.(map[string]interface{})["skip"]; ok2 && skip == true { + continue + } + } + } + + if prop.DataType[0] == schema.DataTypeText.String() && textProps { + propsToCompare = append(propsToCompare, compareProps{Name: prop.Name, IsArray: false}) + continue + } + + if prop.DataType[0] == schema.DataTypeTextArray.String() && textProps { + propsToCompare = append(propsToCompare, compareProps{Name: prop.Name, IsArray: true}) + continue + } + + if _, ok := mediaPropsSet[prop.Name]; ok { + propsToCompare = append(propsToCompare, compareProps{Name: prop.Name, IsArray: schema.IsArrayDataType(prop.DataType)}) + continue + } + } + + // if no properties to compare, we can skip the comparison. 
Return vectors of old object if present + if len(propsToCompare) == 0 { + oldObject, err := findObjectFn(ctx, class.Class, object.ID, nil, additional.Properties{}, object.Tenant) + if err != nil || oldObject == nil { + return true, nil + } + return false, oldObject + } + + returnProps := make(search.SelectProperties, 0, len(propsToCompare)) + for _, prop := range propsToCompare { + returnProps = append(returnProps, search.SelectProperty{Name: prop.Name, IsPrimitive: true, IsObject: false}) + } + oldObject, err := findObjectFn(ctx, class.Class, object.ID, returnProps, additional.Properties{}, object.Tenant) + if err != nil || oldObject == nil { + return true, nil + } + oldProps := oldObject.Schema.(map[string]interface{}) + var newProps map[string]interface{} + if object.Properties == nil { + newProps = make(map[string]interface{}) + } else { + newProps = object.Properties.(map[string]interface{}) + } + for _, propStruct := range propsToCompare { + valNew, isPresentNew := newProps[propStruct.Name] + valOld, isPresentOld := oldProps[propStruct.Name] + + if isPresentNew != isPresentOld { + return true, nil + } + + if !isPresentNew { + continue + } + + if propStruct.IsArray { + // empty strings do not have type information saved with them - the new value can also come from disk if + // an update happens + if _, ok := valOld.([]interface{}); ok && len(valOld.([]interface{})) == 0 { + valOld = []string{} + } + if _, ok := valNew.([]interface{}); ok && len(valNew.([]interface{})) == 0 { + valNew = []string{} + } + + if len(valOld.([]string)) != len(valNew.([]string)) { + return true, nil + } + for i, val := range valOld.([]string) { + if val != valNew.([]string)[i] { + return true, nil + } + } + } else { + if valOld != valNew { + return true, nil + } + } + } + return false, oldObject +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/modules/compare_test.go b/platform/dbops/binaries/weaviate-src/usecases/modules/compare_test.go new file mode 100644 index 
0000000000000000000000000000000000000000..7d99d4612b322438c2e4b780c92decb444cbcbf0 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/modules/compare_test.go @@ -0,0 +1,204 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package modules + +import ( + "context" + "testing" + + "github.com/go-openapi/strfmt" + "github.com/google/uuid" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/search" +) + +var objsToReturn = make(map[string]interface{}) + +func findObject(ctx context.Context, class string, id strfmt.UUID, + props search.SelectProperties, adds additional.Properties, tenant string, +) (*search.Result, error) { + obj, ok := objsToReturn[id.String()] + if !ok { + return nil, nil + } + + return &search.Result{Schema: obj}, nil +} + +func TestCompareRevectorize(t *testing.T) { + class := &models.Class{ + Class: "MyClass", + Vectorizer: "my-module", + Properties: []*models.Property{ + {Name: "text", DataType: []string{schema.DataTypeText.String()}}, + {Name: "text_array", DataType: []string{schema.DataTypeTextArray.String()}}, + {Name: "text", DataType: []string{schema.DataTypeText.String()}}, + {Name: "image", DataType: []string{schema.DataTypeBlob.String()}}, + {Name: "number", DataType: []string{schema.DataTypeInt.String()}}, + {Name: "text_not_vectorized", DataType: []string{schema.DataTypeText.String()}, ModuleConfig: map[string]interface{}{"my-module": map[string]interface{}{"skip": true}}}, + }, + } + cfg := NewClassBasedModuleConfig(class, "my-module", "tenant", "", nil) + module := newDummyText2VecModule("my-module", 
[]string{"image", "video"}) + + cases := []struct { + name string + oldProps map[string]interface{} + newProps map[string]interface{} + different bool + disabled bool + }{ + {name: "same text prop", oldProps: map[string]interface{}{"text": "value1"}, newProps: map[string]interface{}{"text": "value1"}, different: false}, + {name: "different text prop", oldProps: map[string]interface{}{"text": "value1"}, newProps: map[string]interface{}{"text": "value2"}, different: true}, + {name: "different text - not vectorized", oldProps: map[string]interface{}{"text_not_vectorized": "value1"}, newProps: map[string]interface{}{"text_not_vectorized": "value2"}, different: false}, + {name: "same text array prop", oldProps: map[string]interface{}{"text_array": []string{"first sentence", "second long sentence"}}, newProps: map[string]interface{}{"text_array": []string{"first sentence", "second long sentence"}}, different: false}, + {name: "different text array prop", oldProps: map[string]interface{}{"text_array": []string{"first sentence", "second long sentence"}}, newProps: map[string]interface{}{"text_array": []string{"first sentence", "second different sentence"}}, different: true}, + {name: "different text array prop length", oldProps: map[string]interface{}{"text_array": []string{"first sentence", "second long sentence"}}, newProps: map[string]interface{}{"text_array": []string{"first sentence"}}, different: true}, + {name: "old object not present", oldProps: nil, newProps: map[string]interface{}{"text": "value1"}, different: true}, + {name: "changed prop does not matter", oldProps: map[string]interface{}{"number": 2}, newProps: map[string]interface{}{"number": 1}, different: false}, + {name: "media prop changed", oldProps: map[string]interface{}{"image": "abc"}, newProps: map[string]interface{}{"image": "def"}, different: true}, + {name: "many props changed", oldProps: map[string]interface{}{"image": "abc", "text": "abc", "text_array": []string{"abc"}}, newProps: 
map[string]interface{}{"image": "def", "text": "def", "text_array": []string{"def"}}, different: true}, + {name: "many props - only irrelevant changed", oldProps: map[string]interface{}{"image": "abc", "text": "abc", "text_array": []string{"abc"}, "number": 1}, newProps: map[string]interface{}{"image": "abc", "text": "abc", "text_array": []string{"abc"}, "number": 2}, different: false}, + {name: "new props are nil", oldProps: map[string]interface{}{"text": "value1"}, newProps: nil, different: true}, + {name: "same text prop, but feature globally disabled", oldProps: map[string]interface{}{"text": "value1"}, newProps: map[string]interface{}{"text": "value1"}, disabled: true, different: true}, + } + for _, tt := range cases { + t.Run(tt.name, func(t *testing.T) { + uid, _ := uuid.NewUUID() + uidfmt := strfmt.UUID(uid.String()) + objNew := &models.Object{Class: class.Class, Properties: tt.newProps, ID: uidfmt} + if tt.oldProps != nil { + objsToReturn[uid.String()] = tt.oldProps + } + different, _, _, err := reVectorize(context.Background(), cfg, module, objNew, class, nil, "", findObject, tt.disabled) + require.NoError(t, err) + require.Equal(t, different, tt.different) + }) + } +} + +func TestCompareRevectorizeNamedVectors(t *testing.T) { + class := &models.Class{ + Class: "MyClass", + Properties: []*models.Property{ + {Name: "text", DataType: []string{schema.DataTypeText.String()}}, + {Name: "text_array", DataType: []string{schema.DataTypeTextArray.String()}}, + }, + VectorConfig: map[string]models.VectorConfig{ + "text": { + Vectorizer: map[string]interface{}{ + "my-module": map[string]interface{}{ + "vectorizeClassName": false, + "properties": []string{"text"}, + }, + }, + VectorIndexType: "hnsw", + }, + "text_array": { + Vectorizer: map[string]interface{}{ + "my-module": map[string]interface{}{ + "vectorizeClassName": false, + "properties": []string{"text_array"}, + }, + }, + VectorIndexType: "hnsw", + }, + "all": { + Vectorizer: map[string]interface{}{ + 
"my-module": map[string]interface{}{ + "vectorizeClassName": false, + }, + }, + VectorIndexType: "hnsw", + }, + "all_explicit": { + Vectorizer: map[string]interface{}{ + "my-module": map[string]interface{}{ + "vectorizeClassName": false, + }, + }, + VectorIndexType: "hnsw", + }, + }, + } + cfg := NewClassBasedModuleConfig(class, "my-module", "tenant", "", nil) + module := newDummyText2VecModule("my-module", []string{"image", "video"}) + + cases := []struct { + name string + oldProps map[string]interface{} + newProps map[string]interface{} + targetVectors []string + different bool + }{ + {name: "same text prop, part of target vec", oldProps: map[string]interface{}{"text": "value1"}, newProps: map[string]interface{}{"text": "value1"}, targetVectors: []string{"text"}, different: false}, + {name: "different text prop, part of target vec", oldProps: map[string]interface{}{"text": "value1"}, newProps: map[string]interface{}{"text": "value2"}, targetVectors: []string{"text"}, different: true}, + {name: "different text prop, not part of target vec", oldProps: map[string]interface{}{"text": "value1"}, newProps: map[string]interface{}{"text": "value2"}, targetVectors: []string{"text_array"}, different: false}, + {name: "multiple props text prop, not part of target vec", oldProps: map[string]interface{}{"text": "value1", "image": "abc"}, newProps: map[string]interface{}{"text": "value2", "image": "def"}, targetVectors: []string{"text_array"}, different: false}, + {name: "multiple props text prop, one is part of text prop", oldProps: map[string]interface{}{"text": "value1", "image": "abc"}, newProps: map[string]interface{}{"text": "value2", "image": "def"}, targetVectors: []string{"text_array", "image"}, different: false}, + } + for _, tt := range cases { + t.Run(tt.name, func(t *testing.T) { + uid, _ := uuid.NewUUID() + uidfmt := strfmt.UUID(uid.String()) + objNew := &models.Object{Class: class.Class, Properties: tt.newProps, ID: uidfmt} + if tt.oldProps != nil { + 
objsToReturn[uid.String()] = tt.oldProps + } + disabled := false + different, _, _, err := reVectorize(context.Background(), cfg, module, objNew, class, tt.targetVectors, "", findObject, disabled) + require.NoError(t, err) + require.Equal(t, different, tt.different) + }) + } +} + +func TestCompareRevectorizeDisabled(t *testing.T) { + class := &models.Class{ + Class: "MyClass", + Properties: []*models.Property{ + {Name: "text", DataType: []string{schema.DataTypeText.String()}}, + }, + VectorConfig: map[string]models.VectorConfig{ + "text": { + Vectorizer: map[string]interface{}{ + "my-module": map[string]interface{}{ + "vectorizeClassName": false, + "properties": []string{"text"}, + }, + }, + VectorIndexType: "hnsw", + }, + }, + } + cfg := NewClassBasedModuleConfig(class, "my-module", "tenant", "", nil) + module := newDummyText2VecModule("my-module", []string{"image", "video"}) + + props := map[string]interface{}{ + "text": "value1", + } + uid, _ := uuid.NewUUID() + uidfmt := strfmt.UUID(uid.String()) + objNew := &models.Object{Class: class.Class, Properties: props, ID: uidfmt} + disabled := true + findObjectMock := func(ctx context.Context, class string, id strfmt.UUID, + props search.SelectProperties, adds additional.Properties, tenant string, + ) (*search.Result, error) { + panic("why did you call me?") + } + different, _, _, err := reVectorize(context.Background(), cfg, module, objNew, class, []string{"text"}, "", findObjectMock, disabled) + require.NoError(t, err) + require.Equal(t, different, true) +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/modules/fakes_for_test.go b/platform/dbops/binaries/weaviate-src/usecases/modules/fakes_for_test.go new file mode 100644 index 0000000000000000000000000000000000000000..dd11a9106982b051e8f4d42301e416f45e9e4a9d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/modules/fakes_for_test.go @@ -0,0 +1,192 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ 
_ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package modules + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/stretchr/testify/mock" + + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/modulecapabilities" + "github.com/weaviate/weaviate/entities/moduletools" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/search" +) + +func newDummyModule(name string, t modulecapabilities.ModuleType) modulecapabilities.Module { + switch t { + case modulecapabilities.Text2Vec: + return newDummyText2VecModule(name, nil) + case modulecapabilities.Text2Multivec: + return newDummyText2ColBERTModule(name, nil) + case modulecapabilities.Ref2Vec: + return newDummyRef2VecModule(name) + default: + return newDummyNonVectorizerModule(name) + } +} + +func newDummyText2VecModule(name string, mediaProperties []string) dummyText2VecModuleNoCapabilities { + return dummyText2VecModuleNoCapabilities{name: name, mediaProperties: mediaProperties} +} + +type dummyText2VecModuleNoCapabilities struct { + name string + mediaProperties []string +} + +func (m dummyText2VecModuleNoCapabilities) Name() string { + return m.name +} + +func (m dummyText2VecModuleNoCapabilities) Init(ctx context.Context, + params moduletools.ModuleInitParams, +) error { + return nil +} + +func (m dummyText2VecModuleNoCapabilities) Type() modulecapabilities.ModuleType { + return modulecapabilities.Text2Vec +} + +func (m dummyText2VecModuleNoCapabilities) VectorizeObject(ctx context.Context, + in *models.Object, cfg moduletools.ClassConfig, +) ([]float32, models.AdditionalProperties, error) { + return []float32{1, 2, 3}, nil, nil +} + +func (m dummyText2VecModuleNoCapabilities) VectorizableProperties(cfg 
moduletools.ClassConfig) (bool, []string, error) { + return true, m.mediaProperties, nil +} + +func (m dummyText2VecModuleNoCapabilities) VectorizeBatch(ctx context.Context, objs []*models.Object, skipObject []bool, cfg moduletools.ClassConfig) ([][]float32, []models.AdditionalProperties, map[int]error) { + errs := make(map[int]error, 0) + vecs := make([][]float32, len(objs)) + for i := range vecs { + vecs[i] = []float32{1, 2, 3} + } + return vecs, nil, errs +} + +func newDummyText2ColBERTModule(name string, mediaProperties []string) dummyText2ColBERTModuleNoCapabilities { + return dummyText2ColBERTModuleNoCapabilities{name: name, mediaProperties: mediaProperties} +} + +type dummyText2ColBERTModuleNoCapabilities struct { + name string + mediaProperties []string +} + +func (m dummyText2ColBERTModuleNoCapabilities) Name() string { + return m.name +} + +func (m dummyText2ColBERTModuleNoCapabilities) Init(ctx context.Context, + params moduletools.ModuleInitParams, +) error { + return nil +} + +func (m dummyText2ColBERTModuleNoCapabilities) Type() modulecapabilities.ModuleType { + return modulecapabilities.Text2Multivec +} + +func (m dummyText2ColBERTModuleNoCapabilities) VectorizeObject(ctx context.Context, + in *models.Object, cfg moduletools.ClassConfig, +) ([][]float32, models.AdditionalProperties, error) { + return [][]float32{{0.11, 0.22, 0.33}, {0.11, 0.22, 0.33}}, nil, nil +} + +func (m dummyText2ColBERTModuleNoCapabilities) VectorizableProperties(cfg moduletools.ClassConfig) (bool, []string, error) { + return true, m.mediaProperties, nil +} + +func (m dummyText2ColBERTModuleNoCapabilities) VectorizeBatch(ctx context.Context, objs []*models.Object, skipObject []bool, cfg moduletools.ClassConfig) ([][][]float32, []models.AdditionalProperties, map[int]error) { + errs := make(map[int]error, 0) + vecs := make([][][]float32, len(objs)) + for i := range vecs { + vecs[i] = [][]float32{{0.1, 0.2, 0.3}, {0.1, 0.2, 0.3}} + } + return vecs, nil, errs +} + +func 
newDummyRef2VecModule(name string) dummyRef2VecModuleNoCapabilities { + return dummyRef2VecModuleNoCapabilities{name: name} +} + +type dummyRef2VecModuleNoCapabilities struct { + name string +} + +func (m dummyRef2VecModuleNoCapabilities) Name() string { + return m.name +} + +func (m dummyRef2VecModuleNoCapabilities) Init(ctx context.Context, + params moduletools.ModuleInitParams, +) error { + return nil +} + +func (m dummyRef2VecModuleNoCapabilities) Type() modulecapabilities.ModuleType { + return modulecapabilities.Ref2Vec +} + +func (m dummyRef2VecModuleNoCapabilities) VectorizeObject(ctx context.Context, + in *models.Object, cfg moduletools.ClassConfig, + findRefVecsFn modulecapabilities.FindObjectFn, +) ([]float32, error) { + return []float32{1, 2, 3}, nil +} + +func newDummyNonVectorizerModule(name string) dummyNonVectorizerModule { + return dummyNonVectorizerModule{name: name} +} + +type dummyNonVectorizerModule struct { + name string +} + +func (m dummyNonVectorizerModule) Name() string { + return m.name +} + +func (m dummyNonVectorizerModule) Init(ctx context.Context, + params moduletools.ModuleInitParams, +) error { + return nil +} + +func (m dummyNonVectorizerModule) Type() modulecapabilities.ModuleType { + var non modulecapabilities.ModuleType = "NonVectorizer" + return non +} + +type fakeSchemaGetter struct{ schema schema.Schema } + +func (f *fakeSchemaGetter) ReadOnlyClass(name string) *models.Class { + return f.schema.GetClass(name) +} + +type fakeObjectsRepo struct { + mock.Mock +} + +func (r *fakeObjectsRepo) Object(ctx context.Context, class string, + id strfmt.UUID, props search.SelectProperties, + addl additional.Properties, tenant string, +) (*search.Result, error) { + return nil, nil +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/modules/module_config.go b/platform/dbops/binaries/weaviate-src/usecases/modules/module_config.go new file mode 100644 index 
0000000000000000000000000000000000000000..f1a7b6f6acda9619b705951c108b923411e4aa6a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/modules/module_config.go @@ -0,0 +1,128 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package modules + +import ( + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/usecases/config" +) + +type ClassBasedModuleConfig struct { + class *models.Class + moduleName string + tenant string + targetVector string + cfg *config.Config +} + +func NewClassBasedModuleConfig(class *models.Class, + moduleName, tenant, targetVector string, + cfg *config.Config, +) *ClassBasedModuleConfig { + return &ClassBasedModuleConfig{ + class: class, + moduleName: moduleName, + tenant: tenant, + targetVector: targetVector, + cfg: cfg, + } +} + +func NewCrossClassModuleConfig() *ClassBasedModuleConfig { + // explicitly setting tenant to "" in order to flag that a cross class search + // is being done without a tenant context + return &ClassBasedModuleConfig{tenant: ""} +} + +func (cbmc *ClassBasedModuleConfig) Class() map[string]interface{} { + return cbmc.ClassByModuleName(cbmc.moduleName) +} + +func (cbmc *ClassBasedModuleConfig) Tenant() string { + return cbmc.tenant +} + +func (cbmc *ClassBasedModuleConfig) TargetVector() string { + return cbmc.targetVector +} + +func (cbmc *ClassBasedModuleConfig) PropertiesDataTypes() map[string]schema.DataType { + primitiveProps := map[string]schema.DataType{} + for _, schemaProp := range cbmc.class.Properties { + dt, err := schema.GetValueDataTypeFromString(schemaProp.DataType[0]) + if err != nil { + continue + } + primitiveProps[schemaProp.Name] = *dt + } + return primitiveProps +} + 
+func (cbmc *ClassBasedModuleConfig) ClassByModuleName(moduleName string) map[string]interface{} { + defaultConf := map[string]interface{}{} + asMap, ok := cbmc.getModuleConfig().(map[string]interface{}) + if !ok { + return defaultConf + } + + moduleCfg, ok := asMap[moduleName] + if !ok { + return defaultConf + } + + asMap, ok = moduleCfg.(map[string]interface{}) + if !ok { + return defaultConf + } + + return asMap +} + +func (cbmc *ClassBasedModuleConfig) getModuleConfig() interface{} { + if cbmc.targetVector != "" { + if vectorConfig, ok := cbmc.class.VectorConfig[cbmc.targetVector]; ok { + return vectorConfig.Vectorizer + } + return nil + } + return cbmc.class.ModuleConfig +} + +func (cbmc *ClassBasedModuleConfig) Property(propName string) map[string]interface{} { + defaultConf := map[string]interface{}{} + prop, err := schema.GetPropertyByName(cbmc.class, propName) + if err != nil { + return defaultConf + } + + asMap, ok := prop.ModuleConfig.(map[string]interface{}) + if !ok { + return defaultConf + } + + moduleCfg, ok := asMap[cbmc.moduleName] + if !ok { + return defaultConf + } + + asMap, ok = moduleCfg.(map[string]interface{}) + if !ok { + return defaultConf + } + + return asMap +} + +func (cbmc *ClassBasedModuleConfig) Config() *config.Config { + return cbmc.cfg +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/modules/module_config_init_and_validate.go b/platform/dbops/binaries/weaviate-src/usecases/modules/module_config_init_and_validate.go new file mode 100644 index 0000000000000000000000000000000000000000..9cb095038521f3569d9d15fc68a2a6235f3aef78 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/modules/module_config_init_and_validate.go @@ -0,0 +1,298 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package modules + +import ( + "context" + "fmt" + + "github.com/pkg/errors" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/modelsext" + "github.com/weaviate/weaviate/entities/modulecapabilities" + "github.com/weaviate/weaviate/entities/schema" +) + +// SetClassDefaults sets the module-specific defaults for the class itself, but +// also for each prop +func (p *Provider) SetClassDefaults(class *models.Class) { + if modelsext.ClassHasLegacyVectorIndex(class) || len(class.VectorConfig) == 0 { + p.setClassDefaults(class, class.Vectorizer, "", func(vectorizerConfig map[string]interface{}) { + if class.ModuleConfig == nil { + class.ModuleConfig = map[string]interface{}{} + } + class.ModuleConfig.(map[string]interface{})[class.Vectorizer] = vectorizerConfig + }) + } + + for targetVector, vectorConfig := range class.VectorConfig { + if moduleConfig, ok := vectorConfig.Vectorizer.(map[string]interface{}); ok && len(moduleConfig) == 1 { + for vectorizer := range moduleConfig { + p.setClassDefaults(class, vectorizer, targetVector, func(vectorizerConfig map[string]interface{}) { + moduleConfig[vectorizer] = vectorizerConfig + }) + } + } + } +} + +func (p *Provider) setClassDefaults(class *models.Class, vectorizer string, + targetVector string, storeFn func(vectorizerConfig map[string]interface{}), +) { + if vectorizer == "none" { + // the class does not use a vectorizer, nothing to do for us + return + } + + mod := p.GetByName(vectorizer) + cc, ok := mod.(modulecapabilities.ClassConfigurator) + if !ok { + // the module exists, but is not a class configurator, nothing to do for us + return + } + + cfg := NewClassBasedModuleConfig(class, vectorizer, "", targetVector, &p.cfg) + + p.setPerClassConfigDefaults(cfg, cc, storeFn) + for _, prop := range class.Properties { + p.setSinglePropertyConfigDefaults(prop, vectorizer, cc) + } +} + +func (p *Provider) setPerClassConfigDefaults(cfg 
*ClassBasedModuleConfig, + cc modulecapabilities.ClassConfigurator, + storeFn func(vectorizerConfig map[string]interface{}), +) { + modDefaults := cc.ClassConfigDefaults() + userSpecified := cfg.Class() + mergedConfig := map[string]interface{}{} + + for key, value := range modDefaults { + mergedConfig[key] = value + } + for key, value := range userSpecified { + mergedConfig[key] = value + } + + if len(mergedConfig) > 0 { + storeFn(mergedConfig) + } +} + +// SetSinglePropertyDefaults can be used when a property is added later, e.g. +// as part of merging in a ref prop after a class has already been created +func (p *Provider) SetSinglePropertyDefaults(class *models.Class, + props ...*models.Property, +) { + for _, prop := range props { + if modelsext.ClassHasLegacyVectorIndex(class) || len(class.VectorConfig) == 0 { + p.setSinglePropertyDefaults(prop, class.Vectorizer) + } + + for _, vectorConfig := range class.VectorConfig { + if moduleConfig, ok := vectorConfig.Vectorizer.(map[string]interface{}); ok && len(moduleConfig) == 1 { + for vectorizer := range moduleConfig { + p.setSinglePropertyDefaults(prop, vectorizer) + } + } + } + } +} + +func (p *Provider) setSinglePropertyDefaults(prop *models.Property, vectorizer string) { + if vectorizer == "none" { + // the class does not use a vectorizer, nothing to do for us + return + } + + mod := p.GetByName(vectorizer) + cc, ok := mod.(modulecapabilities.ClassConfigurator) + if !ok { + // the module exists, but is not a class configurator, nothing to do for us + return + } + + p.setSinglePropertyConfigDefaults(prop, vectorizer, cc) +} + +func (p *Provider) setSinglePropertyConfigDefaults(prop *models.Property, + vectorizer string, cc modulecapabilities.ClassConfigurator, +) { + dt, _ := schema.GetValueDataTypeFromString(prop.DataType[0]) + modDefaults := cc.PropertyConfigDefaults(dt) + userSpecified := map[string]interface{}{} + mergedConfig := map[string]interface{}{} + + if prop.ModuleConfig != nil { + if 
vectorizerConfig, ok := prop.ModuleConfig.(map[string]interface{})[vectorizer]; ok { + if mcvm, ok := vectorizerConfig.(map[string]interface{}); ok { + userSpecified = mcvm + } + } + } + + for key, value := range modDefaults { + mergedConfig[key] = value + } + for key, value := range userSpecified { + mergedConfig[key] = value + } + + if len(mergedConfig) > 0 { + if prop.ModuleConfig == nil { + prop.ModuleConfig = map[string]interface{}{} + } + prop.ModuleConfig.(map[string]interface{})[vectorizer] = mergedConfig + } +} + +func (p *Provider) ValidateClass(ctx context.Context, class *models.Class) error { + switch len(class.VectorConfig) { + case 0: + // legacy configuration + if class.Vectorizer == "none" { + // the class does not use a vectorizer, nothing to do for us + return nil + } + if err := p.validateClassesModuleConfig(ctx, class, class.ModuleConfig); err != nil { + return err + } + return nil + default: + // named vectors configuration + for targetVector, vectorConfig := range class.VectorConfig { + if len(targetVector) > schema.TargetVectorNameMaxLength { + return errors.Errorf("class.VectorConfig target vector name %q is not valid. 
"+ + "Target vector name should not be longer than %d characters.", + targetVector, schema.TargetVectorNameMaxLength) + } + if !p.targetVectorNameValidator.MatchString(targetVector) { + return errors.Errorf("class.VectorConfig target vector name %q is not valid, "+ + "in Weaviate target vector names are restricted to valid GraphQL names, "+ + "which must be “/%s/”.", targetVector, schema.TargetVectorNameRegex) + } + vectorizer, ok := vectorConfig.Vectorizer.(map[string]interface{}) + if !ok { + return errors.Errorf("class.VectorConfig.Vectorizer must be an object, got %T", vectorConfig.Vectorizer) + } + if len(vectorizer) != 1 { + return errors.Errorf("class.VectorConfig.Vectorizer must consist only 1 configuration, got: %v", len(vectorizer)) + } + for modName := range vectorizer { + if modName == "none" { + // the class does not use a vectorizer, nothing to do for us + continue + } + if mod := p.GetByName(modName); mod == nil { + return errors.Errorf("class.VectorConfig.Vectorizer module with name %s doesn't exist", modName) + } + if err := p.validateClassModuleConfig(ctx, class, modName, targetVector); err != nil { + return err + } + } + } + // check module config configuration in case that there are other none vectorizer modules defined + if err := p.validateClassesModuleConfigNoneVectorizers(ctx, class, "", class.ModuleConfig); err != nil { + return err + } + return nil + } +} + +func (p *Provider) validateClassesModuleConfigNoneVectorizers(ctx context.Context, + class *models.Class, targetVector string, moduleConfig interface{}, +) error { + modConfig, ok := moduleConfig.(map[string]interface{}) + if !ok { + return nil + } + for modName := range modConfig { + mod := p.GetByName(modName) + if mod == nil { + return errors.Errorf("module with name %s doesn't exist", modName) + } + if !p.isVectorizerModule(mod.Type()) { + if err := p.validateClassModuleConfig(ctx, class, modName, ""); err != nil { + return err + } + } + } + return nil +} + +func (p *Provider) 
validateClassesModuleConfig(ctx context.Context, + class *models.Class, moduleConfig interface{}, +) error { + modConfig, ok := moduleConfig.(map[string]interface{}) + if !ok { + return nil + } + configuredVectorizers := make([]string, 0, len(modConfig)) + for modName := range modConfig { + if err := p.validateClassModuleConfig(ctx, class, modName, ""); err != nil { + return err + } + if err := p.ValidateVectorizer(modName); err == nil { + configuredVectorizers = append(configuredVectorizers, modName) + } + } + if len(configuredVectorizers) > 1 { + return fmt.Errorf("multiple vectorizers configured in class's moduleConfig: %v. class.vectorizer is set to %q", + configuredVectorizers, class.Vectorizer) + } + if len(configuredVectorizers) == 1 && p.IsMultiVector(configuredVectorizers[0]) { + return fmt.Errorf("multi vector vectorizer: %q is only allowed to be defined using named vector configuration", configuredVectorizers[0]) + } + if p.IsMultiVector(class.Vectorizer) { + return fmt.Errorf("multi vector vectorizer: %q is only allowed to be defined using named vector configuration", class.Vectorizer) + } + return nil +} + +func (p *Provider) validateClassModuleConfig(ctx context.Context, + class *models.Class, moduleName, targetVector string, +) error { + mod := p.GetByName(moduleName) + cc, ok := mod.(modulecapabilities.ClassConfigurator) + if !ok { + // the module exists, but is not a class configurator, nothing to do for us + return nil + } + + cfg := NewClassBasedModuleConfig(class, moduleName, "", targetVector, &p.cfg) + err := cc.ValidateClass(ctx, class, cfg) + if err != nil { + return errors.Wrapf(err, "module '%s'", moduleName) + } + + p.validateVectorConfig(class, moduleName, targetVector) + + return nil +} + +func (p *Provider) validateVectorConfig(class *models.Class, moduleName string, targetVector string) { + mod := p.GetByName(moduleName) + + if class.VectorConfig == nil || !p.implementsVectorizer(mod) { + return + } + + // named vector props need to be 
a string array + props, ok := class.VectorConfig[targetVector].Vectorizer.(map[string]interface{})[moduleName].(map[string]interface{})["properties"] + if ok { + propsTyped := make([]string, len(props.([]interface{}))) + for i, v := range props.([]interface{}) { + propsTyped[i] = v.(string) // was validated by the module + } + class.VectorConfig[targetVector].Vectorizer.(map[string]interface{})[moduleName].(map[string]interface{})["properties"] = propsTyped + } +}